
Commit

Merge branch 'main' into brace/typedoc-monorepo
bracesproul authored Aug 12, 2024
2 parents eea6cdd + 50ecb6f commit 409ab37
Showing 27 changed files with 770 additions and 175 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/standard-tests.yml
@@ -30,6 +30,10 @@ jobs:
GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }}
MISTRAL_API_KEY: ${{ secrets.MISTRAL_API_KEY }}
DISABLE_CONSOLE_LOGS: "true"
# @langchain/aws credentials
BEDROCK_AWS_ACCESS_KEY_ID: ${{ secrets.BEDROCK_AWS_ACCESS_KEY_ID }}
BEDROCK_AWS_SECRET_ACCESS_KEY: ${{ secrets.BEDROCK_AWS_SECRET_ACCESS_KEY }}
BEDROCK_AWS_REGION: "us-east-1"

# The `@langchain/openai` package contains standard tests for ChatOpenAI and AzureChatOpenAI
# We want to run these separately, so we need to pass the exact path for each test, which means
2 changes: 1 addition & 1 deletion docs/core_docs/docs/integrations/chat/friendli.mdx
@@ -19,7 +19,7 @@ npm install @langchain/community
Sign in to [Friendli Suite](https://suite.friendli.ai/) to create a Personal Access Token, and set it as the `FRIENDLI_TOKEN` environment variable.
You can set your team ID as the `FRIENDLI_TEAM` environment variable.

You can initialize a Friendli chat model by selecting the model you want to use. The default model is `llama-2-13b-chat`. You can check the available models at [docs.friendli.ai](https://docs.friendli.ai/guides/serverless_endpoints/pricing#text-generation-models).
You can initialize a Friendli chat model by selecting the model you want to use. The default model is `meta-llama-3-8b-instruct`. You can check the available models at [docs.friendli.ai](https://docs.friendli.ai/guides/serverless_endpoints/pricing#text-generation-models).

## Usage

2 changes: 1 addition & 1 deletion examples/src/models/chat/friendli.ts
@@ -1,7 +1,7 @@
import { ChatFriendli } from "@langchain/community/chat_models/friendli";

const model = new ChatFriendli({
model: "llama-2-13b-chat", // Default value
model: "meta-llama-3-8b-instruct", // Default value
friendliToken: process.env.FRIENDLI_TOKEN,
friendliTeam: process.env.FRIENDLI_TEAM,
maxTokens: 800,
2 changes: 1 addition & 1 deletion langchain-core/package.json
@@ -1,6 +1,6 @@
{
"name": "@langchain/core",
"version": "0.2.22",
"version": "0.2.23",
"description": "Core LangChain.js abstractions and schemas",
"type": "module",
"engines": {
84 changes: 80 additions & 4 deletions langchain-core/src/runnables/base.ts
@@ -196,13 +196,18 @@ export abstract class Runnable<
* @param fields.fallbacks Other runnables to call if the runnable errors.
* @returns A new RunnableWithFallbacks.
*/
withFallbacks(fields: {
fallbacks: Runnable<RunInput, RunOutput>[];
}): RunnableWithFallbacks<RunInput, RunOutput> {
withFallbacks(
fields:
| {
fallbacks: Runnable<RunInput, RunOutput>[];
}
| Runnable<RunInput, RunOutput>[]
): RunnableWithFallbacks<RunInput, RunOutput> {
const fallbacks = Array.isArray(fields) ? fields : fields.fallbacks;
// eslint-disable-next-line @typescript-eslint/no-use-before-define
return new RunnableWithFallbacks<RunInput, RunOutput>({
runnable: this,
fallbacks: fields.fallbacks,
fallbacks,
});
}

@@ -2493,6 +2498,22 @@ export class RunnableParallel<RunInput> extends RunnableMap<RunInput> {}

/**
* A Runnable that can fall back to other Runnables if it fails.
* External APIs (e.g., APIs for a language model) may at times experience
* degraded performance or even downtime.
*
* In these cases, it can be useful to have a fallback Runnable that can be
* used in place of the original Runnable (e.g., fallback to another LLM provider).
*
* Fallbacks can be defined at the level of a single Runnable, or at the level
* of a chain of Runnables. Fallbacks are tried in order until one succeeds or
* all fail.
*
* While you can instantiate a `RunnableWithFallbacks` directly, it is usually
* more convenient to use the `withFallbacks` method on an existing Runnable.
*
* When streaming, fallbacks will only be called on failures during the initial
* stream creation. Errors that occur after a stream starts will not fall back
* to the next Runnable.
*/
export class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable<
RunInput,
@@ -2565,6 +2586,61 @@ export class RunnableWithFallbacks<RunInput, RunOutput> extends Runnable<
throw firstError;
}

async *_streamIterator(
input: RunInput,
options?: Partial<RunnableConfig> | undefined
): AsyncGenerator<RunOutput> {
const config = ensureConfig(options);
const callbackManager_ = await getCallbackManagerForConfig(options);
const { runId, ...otherConfigFields } = config;
const runManager = await callbackManager_?.handleChainStart(
this.toJSON(),
_coerceToDict(input, "input"),
runId,
undefined,
undefined,
undefined,
otherConfigFields?.runName
);
let firstError;
let stream;
for (const runnable of this.runnables()) {
config?.signal?.throwIfAborted();
const childConfig = patchConfig(otherConfigFields, {
callbacks: runManager?.getChild(),
});
try {
stream = await runnable.stream(input, childConfig);
break;
} catch (e) {
if (firstError === undefined) {
firstError = e;
}
}
}
if (stream === undefined) {
const error =
firstError ?? new Error("No error stored at end of fallback.");
await runManager?.handleChainError(error);
throw error;
}
let output;
try {
for await (const chunk of stream) {
yield chunk;
try {
output = output === undefined ? chunk : concat(output, chunk);
} catch (e) {
output = undefined;
}
}
} catch (e) {
await runManager?.handleChainError(e);
throw e;
}
await runManager?.handleChainEnd(_coerceToDict(output, "output"));
}

async batch(
inputs: RunInput[],
options?: Partial<RunnableConfig> | Partial<RunnableConfig>[],
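Taken together, the `base.ts` changes above let `withFallbacks` accept the fallback runnables directly as an array (the `{ fallbacks: [...] }` form still works), and make `stream()` participate in fallback handling when creating the initial stream fails. A minimal sketch of the new call shape, using the fake streaming model from `@langchain/core/utils/testing` (entrypoint and option names taken from the test below; this is illustrative, not part of the commit):

```typescript
import { FakeStreamingLLM } from "@langchain/core/utils/testing";

// A model that always throws, backed by one that succeeds.
const primary = new FakeStreamingLLM({ thrownErrorString: "Provider is down" });
const backup = new FakeStreamingLLM({});

// New overload: pass the fallbacks as a plain array instead of { fallbacks: [...] }.
const modelWithFallbacks = primary.withFallbacks([backup]);

// invoke() tries `primary`, catches its error, and returns the backup's output.
const result = await modelWithFallbacks.invoke("What up");

// stream() now also falls back, but only if creating the initial stream fails;
// errors thrown mid-stream are re-raised rather than retried on the fallback.
const stream = await modelWithFallbacks.stream("What up");
for await (const chunk of stream) {
  console.log(chunk);
}
```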
langchain-core/src/runnables/tests/runnable_with_fallbacks.test.ts
@@ -1,7 +1,7 @@
/* eslint-disable no-promise-executor-return */
/* eslint-disable @typescript-eslint/no-explicit-any */
import { test, expect } from "@jest/globals";
import { FakeLLM } from "../../utils/testing/index.js";
import { FakeLLM, FakeStreamingLLM } from "../../utils/testing/index.js";

test("RunnableWithFallbacks", async () => {
const llm = new FakeLLM({
@@ -36,3 +36,22 @@ test("RunnableWithFallbacks batch", async () => {
]);
expect(result2).toEqual(["What up 1", "What up 2", "What up 3"]);
});

test("RunnableWithFallbacks stream", async () => {
const llm = new FakeStreamingLLM({
thrownErrorString: "Bad error!",
});
await expect(async () => {
await llm.stream("What up");
}).rejects.toThrow();
const llmWithFallbacks = llm.withFallbacks({
fallbacks: [new FakeStreamingLLM({})],
});
const stream = await llmWithFallbacks.stream("What up");
const chunks = [];
for await (const chunk of stream) {
chunks.push(chunk);
}
expect(chunks.length).toBeGreaterThan(1);
expect(chunks.join("")).toEqual("What up");
});
48 changes: 48 additions & 0 deletions langchain-core/src/runnables/tests/signal.test.ts
@@ -14,6 +14,9 @@ import {
FakeChatMessageHistory,
FakeListChatModel,
} from "../../utils/testing/index.js";
import { StringOutputParser } from "../../output_parsers/string.js";
import { Document } from "../../documents/document.js";
import { ChatPromptTemplate } from "../../prompts/chat.js";

const chatModel = new FakeListChatModel({ responses: ["hey"], sleep: 500 });

@@ -152,3 +155,48 @@ describe.each(Object.keys(TEST_CASES))("Test runnable %s", (name) => {
}).rejects.toThrowError();
});
});

test("Should not raise node warning", async () => {
const formatDocumentsAsString = (documents: Document[]) => {
return documents.map((doc) => doc.pageContent).join("\n\n");
};
const retriever = RunnableLambda.from(() => {
return [
new Document({ pageContent: "test1" }),
new Document({ pageContent: "test2" }),
new Document({ pageContent: "test4" }),
new Document({ pageContent: "test5" }),
];
});
const ragChainWithSources = RunnableMap.from({
// Return raw documents here for now since we want to return them at
// the end - we'll format in the next step of the chain
context: retriever,
question: new RunnablePassthrough(),
}).assign({
answer: RunnableSequence.from([
(input) => {
return {
// Now we format the documents as strings for the prompt
context: formatDocumentsAsString(input.context as Document[]),
question: input.question,
};
},
ChatPromptTemplate.fromTemplate("Hello"),
new FakeListChatModel({ responses: ["test"] }),
new StringOutputParser(),
]),
});

const stream = await ragChainWithSources.stream(
{
question: "What is the capital of France?",
},
{
signal: new AbortController().signal,
}
);
for await (const _ of stream) {
// console.log(_);
}
});
8 changes: 5 additions & 3 deletions langchain-core/src/utils/signal.ts
@@ -5,6 +5,7 @@ export async function raceWithSignal<T>(
if (signal === undefined) {
return promise;
}
let listener: () => void;
return Promise.race([
promise.catch<T>((err) => {
if (!signal?.aborted) {
@@ -14,13 +15,14 @@
}
}),
new Promise<never>((_, reject) => {
signal.addEventListener("abort", () => {
listener = () => {
reject(new Error("Aborted"));
});
};
signal.addEventListener("abort", listener);
// Must be here inside the promise to avoid a race condition
if (signal.aborted) {
reject(new Error("Aborted"));
}
}),
]);
]).finally(() => signal.removeEventListener("abort", listener));
}
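The `signal.ts` change above keeps a reference to the abort listener and removes it once the race settles, so repeated calls against a long-lived `AbortSignal` no longer accumulate listeners (the Node listener warning that the new `signal.test.ts` case guards against). A standalone sketch of the same pattern follows; it is not the library's exported helper, just the technique under the same assumptions:

```typescript
// Race a promise against an AbortSignal, always detaching the listener afterwards.
async function raceWithAbort<T>(promise: Promise<T>, signal?: AbortSignal): Promise<T> {
  if (signal === undefined) {
    return promise;
  }
  let listener: () => void;
  return Promise.race([
    promise,
    new Promise<never>((_, reject) => {
      listener = () => reject(new Error("Aborted"));
      signal.addEventListener("abort", listener);
      // Cover signals that were already aborted before the listener attached.
      if (signal.aborted) {
        reject(new Error("Aborted"));
      }
    }),
  ]).finally(() => signal.removeEventListener("abort", listener));
}

// Usage: a hundred calls with the same signal leave no lingering "abort" listeners.
const controller = new AbortController();
for (let i = 0; i < 100; i += 1) {
  await raceWithAbort(Promise.resolve(i), controller.signal);
}
```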
2 changes: 1 addition & 1 deletion langchain/package.json
@@ -1,6 +1,6 @@
{
"name": "langchain",
"version": "0.2.14",
"version": "0.2.15",
"description": "Typescript bindings for langchain",
"type": "module",
"engines": {
48 changes: 48 additions & 0 deletions langchain/src/agents/tests/create_tool_calling_agent.int.test.ts
@@ -138,3 +138,51 @@ test("createToolCallingAgent stream events works for multiple turns", async () =
}
}
});

test("createToolCallingAgent accepts fallbacks", async () => {
const prompt = ChatPromptTemplate.fromMessages([
["system", "You are a helpful assistant"],
["placeholder", "{chat_history}"],
["human", "{input}"],
["placeholder", "{agent_scratchpad}"],
]);
const llm = new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
})
.bindTools(tools)
.withFallbacks({
fallbacks: [
new ChatOpenAI({
modelName: "gpt-4o",
temperature: 0,
}).bindTools(tools),
],
});
const agent = await createToolCallingAgent({
llm,
tools,
prompt,
});
const agentExecutor = new AgentExecutor({
agent,
tools,
});
const input = "what is the current weather in SF?";
const eventStream = agentExecutor.streamEvents(
{
input,
},
{
version: "v2",
}
);

for await (const event of eventStream) {
const eventType = event.event;
// console.log("Event type: ", eventType);
if (eventType === "on_chat_model_stream") {
// console.log("Content: ", event.data);
}
}
});
33 changes: 24 additions & 9 deletions langchain/src/agents/tool_calling/index.ts
@@ -1,15 +1,26 @@
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { StructuredToolInterface } from "@langchain/core/tools";
import { RunnablePassthrough } from "@langchain/core/runnables";
import { ToolDefinition } from "@langchain/core/language_models/base";
import {
LanguageModelLike,
ToolDefinition,
} from "@langchain/core/language_models/base";
import { BaseChatModel } from "@langchain/core/language_models/chat_models";
import { AgentRunnableSequence } from "../agent.js";
import {
ToolCallingAgentOutputParser,
ToolsAgentStep,
} from "./output_parser.js";
import { formatToToolMessages } from "../format_scratchpad/tool_calling.js";

function _isBaseChatModel(x: LanguageModelLike): x is BaseChatModel {
const model = x as BaseChatModel;
return (
typeof model._modelType === "function" &&
model._modelType() === "base_chat_model"
);
}

/**
* Params used by the createToolCallingAgent function.
*/
@@ -19,7 +30,7 @@ export type CreateToolCallingAgentParams = {
* so must either be an OpenAI model that supports that or a wrapper of
* a different model that adds in equivalent support.
*/
llm: BaseChatModel;
llm: LanguageModelLike;
/** Tools this agent has access to. */
tools: StructuredToolInterface[] | ToolDefinition[];
/** The prompt to use, must have an input key of `agent_scratchpad`. */
@@ -95,14 +106,18 @@ export function createToolCallingAgent({
].join("\n")
);
}
if (llm.bindTools === undefined) {
throw new Error(
`This agent requires that the "bind_tools()" method be implemented on the input model.`
);
let modelWithTools;
if (_isBaseChatModel(llm)) {
if (llm.bindTools === undefined) {
throw new Error(
`This agent requires that the "bind_tools()" method be implemented on the input model.`
);
}
modelWithTools = llm.bindTools(tools);
} else {
modelWithTools = llm;
}

const modelWithTools = llm.bindTools(tools);

const agent = AgentRunnableSequence.fromRunnables(
[
RunnablePassthrough.assign({
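Widening `llm` from `BaseChatModel` to `LanguageModelLike` means `createToolCallingAgent` now accepts either a bare chat model (tools are bound internally via `bindTools`) or a runnable that already has tools bound, such as the fallback-wrapped model in the integration test earlier in this diff. A rough sketch of the two accepted shapes; the weather tool, `tool` helper, and model choice are illustrative assumptions rather than part of the commit:

```typescript
import { ChatPromptTemplate } from "@langchain/core/prompts";
import { tool } from "@langchain/core/tools";
import { z } from "zod";
import { ChatOpenAI } from "@langchain/openai";
import { createToolCallingAgent } from "langchain/agents";

// Illustrative tool; any StructuredToolInterface[] works here.
const weatherTool = tool(async ({ location }) => `It is sunny in ${location}.`, {
  name: "get_weather",
  description: "Returns the current weather for a location.",
  schema: z.object({ location: z.string() }),
});
const tools = [weatherTool];

const prompt = ChatPromptTemplate.fromMessages([
  ["system", "You are a helpful assistant"],
  ["placeholder", "{chat_history}"],
  ["human", "{input}"],
  ["placeholder", "{agent_scratchpad}"],
]);

// 1. Bare chat model: createToolCallingAgent calls bindTools(tools) internally.
const bareModel = new ChatOpenAI({ modelName: "gpt-4o", temperature: 0 });
const agentFromModel = await createToolCallingAgent({ llm: bareModel, tools, prompt });

// 2. Pre-bound, fallback-wrapped model: passed through as-is, which is what the
//    LanguageModelLike widening enables.
const wrappedModel = bareModel
  .bindTools(tools)
  .withFallbacks([new ChatOpenAI({ modelName: "gpt-4o", temperature: 0 }).bindTools(tools)]);
const agentFromWrapped = await createToolCallingAgent({ llm: wrappedModel, tools, prompt });
```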
2 changes: 1 addition & 1 deletion langchain/src/chains/retrieval_qa.ts
@@ -42,7 +42,7 @@ export interface RetrievalQAChainInput extends Omit<ChainInputs, "memory"> {
* documents,
* embeddings
* );
* const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input}`);
* const prompt = ChatPromptTemplate.fromTemplate(`Answer the user's question: {input} based on the following context {context}`);
*
* const combineDocsChain = await createStuffDocumentsChain({
* llm,
