diff --git a/langchain-core/src/language_models/llms.ts b/langchain-core/src/language_models/llms.ts
index 61b8ae664935..5f93d6c5ce49 100644
--- a/langchain-core/src/language_models/llms.ts
+++ b/langchain-core/src/language_models/llms.ts
@@ -156,7 +156,7 @@ export abstract class BaseLLM<
       });
       try {
         for await (const chunk of this._streamResponseChunks(
-          input.toString(),
+          prompt.toString(),
           callOptions,
           runManagers?.[0]
         )) {
diff --git a/langchain-core/src/language_models/tests/llms.test.ts b/langchain-core/src/language_models/tests/llms.test.ts
index d8bb5248fdc3..66463cc87a15 100644
--- a/langchain-core/src/language_models/tests/llms.test.ts
+++ b/langchain-core/src/language_models/tests/llms.test.ts
@@ -1,7 +1,8 @@
 /* eslint-disable no-promise-executor-return */
 import { test } from "@jest/globals";
 
-import { FakeLLM } from "../../utils/testing/index.js";
+import { FakeLLM, FakeStreamingLLM } from "../../utils/testing/index.js";
+import { HumanMessagePromptTemplate } from "../../prompts/chat.js";
 
 test("Test FakeLLM uses callbacks", async () => {
   const model = new FakeLLM({});
@@ -40,3 +41,16 @@ test("Test FakeLLM uses callbacks with a cache", async () => {
   expect(response).toEqual(response2);
   expect(response2).toEqual(acc);
 });
+
+test("Test FakeStreamingLLM works when streaming through a prompt", async () => {
+  const prompt = HumanMessagePromptTemplate.fromTemplate("hello there {name}");
+  const model = new FakeStreamingLLM({});
+  const chain = prompt.pipe(model);
+  const stream = await chain.stream({ name: "test" });
+  const chunks = [];
+  for await (const chunk of stream) {
+    chunks.push(chunk);
+  }
+  expect(chunks.length).toBeGreaterThan(1);
+  expect(chunks.join("")).toEqual("Human: hello there test");
+});