core: Add signal/timeout options to RunnableConfig #6305

Merged · 20 commits · Aug 2, 2024
Changes shown below are from 3 commits.
langchain-core/src/language_models/base.ts (0 additions, 12 deletions)

```diff
@@ -207,18 +207,6 @@ export interface BaseLanguageModelCallOptions extends RunnableConfig {
    * If not provided, the default stop tokens for the model will be used.
    */
   stop?: string[];
-
-  /**
-   * Timeout for this call in milliseconds.
-   */
-  timeout?: number;
-
-  /**
-   * Abort signal for this call.
-   * If provided, the call will be aborted when the signal is aborted.
-   * @see https://developer.mozilla.org/en-US/docs/Web/API/AbortSignal
-   */
-  signal?: AbortSignal;
 }
 
 export interface FunctionDefinition {
```
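With `timeout` and `signal` deleted from `BaseLanguageModelCallOptions`, they are inherited from `RunnableConfig` instead, so callers pass them through the config argument of `invoke()`. A minimal usage sketch, using langchain-core's `FakeListChatModel` test helper (assuming it is exported from `@langchain/core/utils/testing` as in current versions):

```typescript
import { FakeListChatModel } from "@langchain/core/utils/testing";

// `signal` and `timeout` now live on the RunnableConfig passed as the
// second argument to invoke(), rather than on model-specific call options.
const model = new FakeListChatModel({ responses: ["hello"] });

const controller = new AbortController();
setTimeout(() => controller.abort(), 5_000); // manual 5s cutoff

const result = await model.invoke("Say hello", {
  signal: controller.signal, // abort the call when this signal fires
  timeout: 10_000,           // or rely on the config-level timeout
});
```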
langchain-core/src/language_models/chat_models.ts (9 additions, 9 deletions)

```diff
@@ -145,7 +145,7 @@ export abstract class BaseChatModel<
 > extends BaseLanguageModel<OutputMessageType, CallOptions> {
   declare ParsedCallOptions: Omit<
     CallOptions,
-    keyof RunnableConfig & "timeout"
+    Exclude<keyof RunnableConfig, "signal">
   >;
 
   // Only ever instantiated in main LangChain
@@ -159,14 +159,13 @@
     ...llmOutputs: LLMResult["llmOutput"][]
   ): LLMResult["llmOutput"];
 
-  protected _separateRunnableConfigFromCallOptions(
+  protected _separateRunnableConfigFromCallOptionsCompat(
     options?: Partial<CallOptions>
   ): [RunnableConfig, this["ParsedCallOptions"]] {
+    // For backwards compat, keep `signal` in both runnableConfig and callOptions
     const [runnableConfig, callOptions] =
       super._separateRunnableConfigFromCallOptions(options);
-    if (callOptions?.timeout && !callOptions.signal) {
-      callOptions.signal = AbortSignal.timeout(callOptions.timeout);
-    }
+    (callOptions as this["ParsedCallOptions"]).signal = runnableConfig.signal;
     return [runnableConfig, callOptions as this["ParsedCallOptions"]];
   }
 
@@ -232,7 +231,7 @@
     const prompt = BaseChatModel._convertInputToPromptValue(input);
     const messages = prompt.toChatMessages();
     const [runnableConfig, callOptions] =
-      this._separateRunnableConfigFromCallOptions(options);
+      this._separateRunnableConfigFromCallOptionsCompat(options);
 
     const inheritableMetadata = {
       ...runnableConfig.metadata,
@@ -578,16 +577,17 @@
     );
 
     const [runnableConfig, callOptions] =
-      this._separateRunnableConfigFromCallOptions(parsedOptions);
+      this._separateRunnableConfigFromCallOptionsCompat(parsedOptions);
     runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
 
     if (!this.cache) {
       return this._generateUncached(baseMessages, callOptions, runnableConfig);
     }
 
     const { cache } = this;
-    const llmStringKey =
-      this._getSerializedCacheKeyParametersForCall(callOptions);
+    const llmStringKey = this._getSerializedCacheKeyParametersForCall(
+      callOptions as CallOptions
+    );
 
     const { generations, missingPromptIndices } = await this._generateCached({
       messages: baseMessages,
```
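The `ParsedCallOptions` change is the subtle part. The new `Exclude<keyof RunnableConfig, "signal">` strips every `RunnableConfig` key from `CallOptions` *except* `signal`, so the options object handed to `_generate()` still carries the abort signal even though signal now originates in the config; this also explains the `callOptions as CallOptions` cast at the cache-key call site, since the parsed type is now narrower. A toy reconstruction (simplified types, not the real LangChain definitions) of what the type expression does:

```typescript
// Simplified stand-ins for the real RunnableConfig / CallOptions types.
interface ToyRunnableConfig {
  callbacks?: unknown;
  timeout?: number;     // moved onto RunnableConfig by this PR
  signal?: AbortSignal; // moved onto RunnableConfig by this PR
}

interface ToyCallOptions extends ToyRunnableConfig {
  stop?: string[];
}

// Strip every RunnableConfig key *except* `signal`, so the parsed
// options handed to _generate() still carry the abort signal.
type ToyParsedCallOptions = Omit<
  ToyCallOptions,
  Exclude<keyof ToyRunnableConfig, "signal">
>;
// => { stop?: string[]; signal?: AbortSignal }
```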
langchain-core/src/language_models/llms.ts (9 additions, 9 deletions)

```diff
@@ -63,7 +63,7 @@ export abstract class BaseLLM<
 > extends BaseLanguageModel<string, CallOptions> {
   declare ParsedCallOptions: Omit<
     CallOptions,
-    keyof RunnableConfig & "timeout"
+    Exclude<keyof RunnableConfig, "signal">
   >;
 
   // Only ever instantiated in main LangChain
@@ -103,14 +103,13 @@
     throw new Error("Not implemented.");
   }
 
-  protected _separateRunnableConfigFromCallOptions(
+  protected _separateRunnableConfigFromCallOptionsCompat(
     options?: Partial<CallOptions>
   ): [RunnableConfig, this["ParsedCallOptions"]] {
+    // For backwards compat, keep `signal` in both runnableConfig and callOptions
    const [runnableConfig, callOptions] =
       super._separateRunnableConfigFromCallOptions(options);
-    if (callOptions?.timeout && !callOptions.signal) {
-      callOptions.signal = AbortSignal.timeout(callOptions.timeout);
-    }
+    (callOptions as this["ParsedCallOptions"]).signal = runnableConfig.signal;
     return [runnableConfig, callOptions as this["ParsedCallOptions"]];
   }
 
@@ -126,7 +125,7 @@
     } else {
       const prompt = BaseLLM._convertInputToPromptValue(input);
       const [runnableConfig, callOptions] =
-        this._separateRunnableConfigFromCallOptions(options);
+        this._separateRunnableConfigFromCallOptionsCompat(options);
       const callbackManager_ = await CallbackManager.configure(
         runnableConfig.callbacks,
         this.callbacks,
@@ -461,16 +460,17 @@
     }
 
     const [runnableConfig, callOptions] =
-      this._separateRunnableConfigFromCallOptions(parsedOptions);
+      this._separateRunnableConfigFromCallOptionsCompat(parsedOptions);
     runnableConfig.callbacks = runnableConfig.callbacks ?? callbacks;
 
     if (!this.cache) {
       return this._generateUncached(prompts, callOptions, runnableConfig);
     }
 
     const { cache } = this;
-    const llmStringKey =
-      this._getSerializedCacheKeyParametersForCall(callOptions);
+    const llmStringKey = this._getSerializedCacheKeyParametersForCall(
+      callOptions as CallOptions
+    );
     const { generations, missingPromptIndices } = await this._generateCached({
       prompts,
       cache,
```
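Note the behavioral shift in the renamed compat helper, mirrored in both `BaseChatModel` and `BaseLLM`: the deleted branch synthesized an abort signal from a call-level `timeout` via `AbortSignal.timeout()`, whereas the new code handles `timeout` upstream at the `RunnableConfig` level and only copies `runnableConfig.signal` into the call options. A sketch of the deleted branch's semantics, assuming a runtime where `AbortSignal.timeout` exists (Node 17.3+ and modern browsers):

```typescript
// What the removed branch did: derive an abort signal from a timeout
// when the caller supplied `timeout` but no explicit `signal`.
function deriveSignal(opts: {
  timeout?: number;
  signal?: AbortSignal;
}): AbortSignal | undefined {
  if (opts.timeout && !opts.signal) {
    // AbortSignal.timeout() returns a signal that aborts with a
    // TimeoutError DOMException after the given number of milliseconds.
    return AbortSignal.timeout(opts.timeout);
  }
  return opts.signal;
}

// e.g. deriveSignal({ timeout: 5_000 }) -> a signal that fires after 5s
```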