partners[patch]: Handle setting background callbacks to false in all relevant int tests (#6006)

* partners[patch]: Handle setting background callbacks to false in all relevant int tests

* update tests which use callbackManager
bracesproul authored Jul 8, 2024
1 parent e6616a5 commit 68dca7c
Showing 8 changed files with 305 additions and 167 deletions.
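Each changed test applies the same guard: capture the current value of LANGCHAIN_CALLBACKS_BACKGROUND once at module scope, set it to "false" before the streaming/callback assertions run, and restore the saved value in a finally block. A minimal sketch of the pattern, detached from any particular integration test (the test name and dummy token source are placeholders, not part of this commit):

/* eslint-disable no-process-env */
import { test, expect } from "@jest/globals";

// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;

test("hypothetical streaming test", async () => {
  // Background callbacks can still be flushing after the LLM call returns,
  // so assertions on callback side effects could otherwise race the callbackManager.
  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
  try {
    const tokens: string[] = [];
    // Placeholder for a model call that streams tokens into the array via
    // a handleLLMNewToken callback, as the real tests in this commit do.
    tokens.push("dummy token");
    expect(tokens.length).toBeGreaterThan(0);
  } finally {
    // Reset the environment variable to whatever it was before the test
    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
  }
});

Scoping the override with try/finally inside each test, rather than a suite-wide beforeEach/afterEach, keeps the change limited to the tests that actually register callbacks and guarantees the variable is restored even when an assertion throws.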
52 changes: 33 additions & 19 deletions libs/langchain-aws/src/tests/chat_models.int.test.ts
@@ -1,10 +1,14 @@
+/* eslint-disable no-process-env */
+
 import { test, expect } from "@jest/globals";
 import { AIMessageChunk, HumanMessage } from "@langchain/core/messages";
 import { tool } from "@langchain/core/tools";
 import { z } from "zod";
 import { ChatBedrockConverse } from "../chat_models.js";
 
+// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
+const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
+
 const baseConstructorArgs: Partial<
   ConstructorParameters<typeof ChatBedrockConverse>[0]
 > = {
@@ -44,28 +48,38 @@ test("Test ChatBedrockConverse stream method", async () => {
 });
 
 test("Test ChatBedrockConverse in streaming mode", async () => {
-  let nrNewTokens = 0;
-  let streamedCompletion = "";
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
 
-  const model = new ChatBedrockConverse({
-    ...baseConstructorArgs,
-    streaming: true,
-    maxTokens: 10,
-    callbacks: [
-      {
-        async handleLLMNewToken(token: string) {
-          nrNewTokens += 1;
-          streamedCompletion += token;
-        },
-      },
-    ],
-  });
-  const message = new HumanMessage("Hello!");
-  const result = await model.invoke([message]);
-  console.log(result);
+  try {
+    let nrNewTokens = 0;
+    let streamedCompletion = "";
+
+    const model = new ChatBedrockConverse({
+      ...baseConstructorArgs,
+      streaming: true,
+      maxTokens: 10,
+      callbacks: [
+        {
+          async handleLLMNewToken(token: string) {
+            nrNewTokens += 1;
+            streamedCompletion += token;
+          },
+        },
+      ],
+    });
+    const message = new HumanMessage("Hello!");
+    const result = await model.invoke([message]);
+    console.log(result);
 
-  expect(nrNewTokens > 0).toBe(true);
-  expect(result.content).toBe(streamedCompletion);
+    expect(nrNewTokens > 0).toBe(true);
+    expect(result.content).toBe(streamedCompletion);
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
 }, 10000);
 
 test("Test ChatBedrockConverse with stop", async () => {
45 changes: 30 additions & 15 deletions libs/langchain-cloudflare/src/tests/chat_models.int.test.ts
@@ -1,3 +1,5 @@
+/* eslint-disable no-process-env */
+
 import { describe, test } from "@jest/globals";
 import { ChatMessage, HumanMessage } from "@langchain/core/messages";
 import {
@@ -10,6 +12,9 @@ import {
 import { getEnvironmentVariable } from "@langchain/core/utils/env";
 import { ChatCloudflareWorkersAI } from "../chat_models.js";
 
+// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
+const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
+
 describe("ChatCloudflareWorkersAI", () => {
   test("call", async () => {
     const chat = new ChatCloudflareWorkersAI();
@@ -26,22 +31,32 @@ describe("ChatCloudflareWorkersAI", () => {
   });
 
   test("generate with streaming true", async () => {
-    const chat = new ChatCloudflareWorkersAI({
-      streaming: true,
-    });
-    const message = new HumanMessage("What is 2 + 2?");
-    const tokens: string[] = [];
-    const res = await chat.generate([[message]], {
-      callbacks: [
-        {
-          handleLLMNewToken: (token) => {
-            tokens.push(token);
-          },
-        },
-      ],
-    });
-    expect(tokens.length).toBeGreaterThan(1);
-    expect(tokens.join("")).toEqual(res.generations[0][0].text);
+    // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+    // after the test/llm call has already finished & returned. Set that environment variable to false
+    // to prevent that from happening.
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
+
+    try {
+      const chat = new ChatCloudflareWorkersAI({
+        streaming: true,
+      });
+      const message = new HumanMessage("What is 2 + 2?");
+      const tokens: string[] = [];
+      const res = await chat.generate([[message]], {
+        callbacks: [
+          {
+            handleLLMNewToken: (token) => {
+              tokens.push(token);
+            },
+          },
+        ],
+      });
+      expect(tokens.length).toBeGreaterThan(1);
+      expect(tokens.join("")).toEqual(res.generations[0][0].text);
+    } finally {
+      // Reset the environment variable
+      process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+    }
   });
 
   test("stream", async () => {
45 changes: 30 additions & 15 deletions libs/langchain-cloudflare/src/tests/llms.int.test.ts
@@ -1,30 +1,45 @@
+/* eslint-disable no-process-env */
+
 import { test } from "@jest/globals";
 import { getEnvironmentVariable } from "@langchain/core/utils/env";
 import { CloudflareWorkersAI } from "../llms.js";
 
+// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
+const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
+
 test("Test CloudflareWorkersAI", async () => {
   const model = new CloudflareWorkersAI({});
   const res = await model.invoke("1 + 1 =");
   console.log(res);
 }, 50000);
 
 test("generate with streaming true", async () => {
-  const model = new CloudflareWorkersAI({
-    streaming: true,
-  });
-  const tokens: string[] = [];
-  const res = await model.invoke("What is 2 + 2?", {
-    callbacks: [
-      {
-        handleLLMNewToken: (token) => {
-          console.log(token);
-          tokens.push(token);
-        },
-      },
-    ],
-  });
-  expect(tokens.length).toBeGreaterThan(1);
-  expect(tokens.join("")).toEqual(res);
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
+
+  try {
+    const model = new CloudflareWorkersAI({
+      streaming: true,
+    });
+    const tokens: string[] = [];
+    const res = await model.invoke("What is 2 + 2?", {
+      callbacks: [
+        {
+          handleLLMNewToken: (token) => {
+            console.log(token);
+            tokens.push(token);
+          },
+        },
+      ],
+    });
+    expect(tokens.length).toBeGreaterThan(1);
+    expect(tokens.join("")).toEqual(res);
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
 });
 
 test("Test CloudflareWorkersAI streaming", async () => {
52 changes: 33 additions & 19 deletions libs/langchain-cohere/src/tests/llms.int.test.ts
@@ -1,7 +1,11 @@
-/* eslint-disable no-promise-executor-return */
+/* eslint-disable no-promise-executor-return, no-process-env */
 
 import { test } from "@jest/globals";
 import { Cohere } from "../llms.js";
 
+// Save the original value of the 'LANGCHAIN_CALLBACKS_BACKGROUND' environment variable
+const originalBackground = process.env.LANGCHAIN_CALLBACKS_BACKGROUND;
+
 test("test invoke", async () => {
   const cohere = new Cohere({});
   const result = await cohere.invoke(
@@ -11,25 +15,35 @@ test("test invoke", async () => {
 });
 
 test("test invoke with callback", async () => {
-  const cohere = new Cohere({
-    model: "command-light",
-  });
-  const tokens: string[] = [];
-  const result = await cohere.invoke(
-    "What is a good name for a company that makes colorful socks?",
-    {
-      callbacks: [
-        {
-          handleLLMNewToken(token) {
-            tokens.push(token);
-          },
-        },
-      ],
-    }
-  );
-  // Not streaming, so we should only get one token
-  expect(tokens.length).toBe(1);
-  expect(result).toEqual(tokens.join(""));
+  // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute
+  // after the test/llm call has already finished & returned. Set that environment variable to false
+  // to prevent that from happening.
+  process.env.LANGCHAIN_CALLBACKS_BACKGROUND = "false";
+
+  try {
+    const cohere = new Cohere({
+      model: "command-light",
+    });
+    const tokens: string[] = [];
+    const result = await cohere.invoke(
+      "What is a good name for a company that makes colorful socks?",
+      {
+        callbacks: [
+          {
+            handleLLMNewToken(token) {
+              tokens.push(token);
+            },
+          },
+        ],
+      }
+    );
+    // Not streaming, so we should only get one token
+    expect(tokens.length).toBe(1);
+    expect(result).toEqual(tokens.join(""));
+  } finally {
+    // Reset the environment variable
+    process.env.LANGCHAIN_CALLBACKS_BACKGROUND = originalBackground;
+  }
 });
 
 test("should abort the request", async () => {