Dataset columns:
id — string, 6 characters
text — string, 20 to 17.2k characters
title — string, 1 distinct value
147787
test("Test Azure ChatOpenAI token usage reporting for streaming function calls", async () => { // Running LangChain callbacks in the background will sometimes cause the callbackManager to execute // after the test/llm call has already finished & returned. Set that environment variable to false // to prevent that ...
147810
export class PineconeStore extends VectorStore { declare FilterType: PineconeMetadata; textKey: string; namespace?: string; pineconeIndex: PineconeIndex; filter?: PineconeMetadata; caller: AsyncCaller; _vectorstoreType(): string { return "pinecone"; } constructor(embeddings: EmbeddingsInter...
147860
/* eslint-disable no-process-env */ import { test } from "@jest/globals"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { AIMessage, AIMessageChunk, HumanMessage, SystemMessage, ToolMessage, } from "@langchain/core/messages"; impor...
147912
import { AsyncCaller, AsyncCallerCallOptions, AsyncCallerParams, } from "@langchain/core/utils/async_caller"; import { getEnvironmentVariable } from "@langchain/core/utils/env"; import { MediaBlob, BlobStore, BlobStoreOptions, MediaBlobData, } from "./utils/media_core.js"; import { GoogleConnectionParam...
147913
export interface GoogleCloudStorageDownloadConnectionParams<AuthOptions> extends GoogleCloudStorageConnectionParams, GoogleConnectionParams<AuthOptions> { method: GoogleAbstractedClientOpsMethod; alt: "media" | undefined; } export class GoogleCloudStorageDownloadConnection< ResponseType extends GoogleRespo...
147970
import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; import { CallbackManagerForLLMRun } from "@langchain/core/callbacks/manager"; import { GenerationChunk } from "@langchain/core/outputs"; import type { StringWithAutocomplete } from "@langchain/core/utils/types"; import { LLM, type...
147984
import { test, expect } from "@jest/globals"; import * as fs from "node:fs/promises"; import { fileURLToPath } from "node:url"; import * as path from "node:path"; import { AIMessage, HumanMessage } from "@langchain/core/messages"; import { PromptTemplate } from "@langchain/core/prompts"; import { BytesOutputParser, ...
148029
> [!IMPORTANT] > This package is now deprecated in favor of the new Azure integration in the OpenAI SDK. Please use the package [`@langchain/openai`](https://www.npmjs.com/package/@langchain/openai) instead. > You can find the migration guide [here](https://js.langchain.com/docs/integrations/llms/azure#migration-from-a...
148038
import { Embeddings } from "@langchain/core/embeddings"; import { type OpenAIClientOptions as AzureOpenAIClientOptions, OpenAIClient as AzureOpenAIClient, AzureKeyCredential, OpenAIKeyCredential, } from "@azure/openai"; import { KeyCredential, TokenCredential, isTokenCredential, } from "@azure/core-auth";...
148039
import type { OpenAIClientOptions, AzureExtensionsOptions, ChatRequestMessage, } from "@azure/openai"; import type { BaseLanguageModelCallOptions } from "@langchain/core/language_models/base"; import type { TiktokenModel } from "js-tiktoken/lite"; import type { EmbeddingsParams } from "@langchain/core/embeddings"...
148104
import { SelfQueryRetriever } from "langchain/retrievers/self_query"; import { OpenAI } from "@langchain/openai"; import { VectaraStore } from "@langchain/community/vectorstores/vectara"; import { FakeEmbeddings } from "@langchain/core/utils/testing"; import { Document } from "@langchain/core/documents"; import { Vecta...
148115
import { EnsembleRetriever } from "langchain/retrievers/ensemble"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings } from "@langchain/openai"; import { BaseRetriever, BaseRetrieverInput } from "@langchain/core/retrievers"; import { Document } from "@langchain/core/documents"...
148117
import * as uuid from "uuid"; import { MultiVectorRetriever } from "langchain/retrievers/multi_vector"; import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { OpenAIEmbeddings } from "@langchain/openai"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { InMemor...
148123
import { BaseRetriever, type BaseRetrieverInput, } from "@langchain/core/retrievers"; import type { CallbackManagerForRetrieverRun } from "@langchain/core/callbacks/manager"; import { Document } from "@langchain/core/documents"; /** * interface BaseRetrieverInput { * callbacks?: Callbacks; * tags?: string[]...
148125
import * as uuid from "uuid"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { MultiVectorRetriever } from "langchain/retrievers/multi_vector"; import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; impo...
148153
import type { ChatPromptTemplate } from "@langchain/core/prompts"; import { pull } from "langchain/hub"; import { AgentExecutor, createToolCallingAgent } from "langchain/agents"; import { SessionsPythonREPLTool } from "@langchain/azure-dynamic-sessions"; import { AzureChatOpenAI } from "@langchain/openai"; const tools...
148154
import { Redis } from "ioredis"; import { OpenAIEmbeddings } from "@langchain/openai"; import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { Red...
148158
import { OpenAIEmbeddings } from "@langchain/openai"; import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed"; import { InMemoryStore } from "@langchain/core/stores"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { FaissStore } from "@langchain/community/vectorstore...
148172
"use node"; import { TextLoader } from "langchain/document_loaders/fs/text"; import { CacheBackedEmbeddings } from "langchain/embeddings/cache_backed"; import { OpenAIEmbeddings } from "@langchain/openai"; import { ConvexKVStore } from "@langchain/community/storage/convex"; import { RecursiveCharacterTextSplitter } fr...
148180
import { ChatOpenAI } from "@langchain/openai"; import { BufferMemory, CombinedMemory, ConversationSummaryMemory, } from "langchain/memory"; import { ConversationChain } from "langchain/chains"; import { PromptTemplate } from "@langchain/core/prompts"; // buffer memory const bufferMemory = new BufferMemory({ m...
148182
import { OpenAI } from "@langchain/openai"; import { ConversationSummaryMemory } from "langchain/memory"; import { LLMChain } from "langchain/chains"; import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { const memory = new ConversationSummaryMemory({ memoryKey: "chat_history...
148185
import { OpenAI } from "@langchain/openai"; import { ConversationTokenBufferMemory } from "langchain/memory"; const model = new OpenAI({}); const memory = new ConversationTokenBufferMemory({ llm: model, maxTokenLimit: 10, }); await memory.saveContext({ input: "hi" }, { output: "whats up" }); await memory.saveCont...
148195
import { BufferMemory } from "langchain/memory"; import { UpstashRedisChatMessageHistory } from "@langchain/community/stores/message/upstash_redis"; import { ChatOpenAI } from "@langchain/openai"; import { ConversationChain } from "langchain/chains"; const memory = new BufferMemory({ chatHistory: new UpstashRedisCha...
148201
import { OpenAI, ChatOpenAI } from "@langchain/openai"; import { ConversationSummaryBufferMemory } from "langchain/memory"; import { ConversationChain } from "langchain/chains"; import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessagePromptTemplate, } from "@langchain/core/pro...
148202
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { VectorStoreRetrieverMemory } from "langchain/memory"; import { LLMChain } from "langchain/chains"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { PromptTemplate } from "@langchain/core/prompts"; const vectorStore = new ...
148203
import { RunnableWithMessageHistory } from "@langchain/core/runnables"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { ChatOpenAI } from "@langchain/openai"; import { AstraDBChatMessageHistory } from "...
148204
import { ChatOpenAI } from "@langchain/openai"; import { ConversationSummaryMemory } from "langchain/memory"; import { LLMChain } from "langchain/chains"; import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { const memory = new ConversationSummaryMemory({ memoryKey: "chat_his...
148206
/* eslint-disable import/first */ /* eslint-disable import/no-duplicates */ import { BufferMemory } from "langchain/memory"; import { HumanMessage, AIMessage } from "@langchain/core/messages"; const memory = new BufferMemory(); await memory.chatHistory.addMessage(new HumanMessage("Hi!")); await memory.chatHistory.add...
148226
import { AgentExecutor, createReactAgent } from "langchain/agents"; import { pull } from "langchain/hub"; import type { PromptTemplate } from "@langchain/core/prompts"; import { OpenAI } from "@langchain/openai"; import { SerpAPI } from "@langchain/community/tools/serpapi"; export const run = async () => { // Defi...
148227
import { AgentExecutor, ChatAgent } from "langchain/agents"; import { ConversationChain, LLMChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { BufferMemory } from "langchain/memory"; import { ChatPromptTemplate, HumanMessagePromptTemplate, MessagesPlaceholder, SystemMessag...
148228
import { LLMChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; export const run = async () => { const chat = new ChatOpenAI({ temperature: 0 }); const chatPrompt = ChatPromptTemplate.fromMessages([ [ "system", ...
148229
import { ConversationChain } from "langchain/chains"; import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { BufferMemory } from "langchain/memory"; const chat = new ChatOpenAI({ temperature: 0 }); const chatPrompt = ChatPromptT...
148230
import { OpenAI } from "@langchain/openai"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; const llm = new OpenAI({ temperature: 0, // These tags will be attached to all calls made with this LLM. tags: ["example", "callbacks", "constructor"], // This handler will be used for all call...
148231
import { OpenAI } from "@langchain/openai"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; const llm = new OpenAI({ temperature: 0, }); const response = await llm.invoke("1 + 1 =", { // These tags will be attached only to this call to the LLM. tags: ["example", "callbacks", "request"]...
148243
import { LLMChain } from "langchain/chains"; import { OpenAI } from "@langchain/openai"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { const handler = new ConsoleCallbackHandler(); const llm = new...
148263
import { LLMChain } from "langchain/chains"; import { AgentExecutor, ZeroShotAgent } from "langchain/agents"; import { ChatOpenAI } from "@langchain/openai"; import { Calculator } from "@langchain/community/tools/calculator"; import { Serialized } from "@langchain/core/load/serializable"; import { BaseCallbackHandler }...
148267
import { OpenAI } from "@langchain/openai"; import { SqlDatabase } from "langchain/sql_db"; import { createSqlAgent, SqlToolkit } from "langchain/agents/toolkits/sql"; import { DataSource } from "typeorm"; /** This example uses Chinook database, which is a sample database available for SQL Server, Oracle, MySQL, etc. ...
148271
import { ChatOpenAI } from "@langchain/openai"; import { AgentExecutor } from "langchain/agents"; import { Calculator } from "@langchain/community/tools/calculator"; import { pull } from "langchain/hub"; import { BufferMemory } from "langchain/memory"; import { formatLogToString } from "langchain/agents/format_scratchp...
148273
import { ChatOpenAI } from "@langchain/openai"; import { initializeAgentExecutorWithOptions } from "langchain/agents"; import { Calculator } from "@langchain/community/tools/calculator"; import { BufferMemory } from "langchain/memory"; import { MessagesPlaceholder } from "@langchain/core/prompts"; export const run = a...
148283
import { OpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import * as fs from "fs"; import { VectorStoreToolkit, createVectorStoreAgent, VectorStoreInfo, } from "lang...
148294
import { ChatOpenAI } from "@langchain/openai"; import type { BasePromptTemplate } from "@langchain/core/prompts"; import { Calculator } from "@langchain/community/tools/calculator"; import { pull } from "langchain/hub"; import { AgentExecutor, createReactAgent } from "langchain/agents"; // Define the tools the agent...
148303
import { OpenAI } from "@langchain/openai"; import { TavilySearchResults } from "@langchain/community/tools/tavily_search"; import type { PromptTemplate } from "@langchain/core/prompts"; import { pull } from "langchain/hub"; import { AgentExecutor, createReactAgent } from "langchain/agents"; // Define the tools the a...
148304
import { ChatOpenAI } from "@langchain/openai"; import { initializeAgentExecutorWithOptions } from "langchain/agents"; import { Calculator } from "@langchain/community/tools/calculator"; import { SerpAPI } from "@langchain/community/tools/serpapi"; export const run = async () => { process.env.LANGCHAIN_TRACING = "tr...
148312
import { OpenAI, ChatOpenAI } from "@langchain/openai"; import process from "process"; import { HumanMessage } from "@langchain/core/messages"; process.env.LANGCHAIN_TRACING_V2 = "true"; const model = new OpenAI({}); const prompts = [ "Say hello to Bob.", "Say hello to Alice.", "Say hello to John.", "Say hel...
148319
import { SageMakerEndpoint, SageMakerLLMContentHandler, } from "@langchain/community/llms/sagemaker_endpoint"; interface ResponseJsonInterface { generation: { content: string; }; } // Custom for whatever model you'll be using class LLama213BHandler implements SageMakerLLMContentHandler { contentType = "...
148322
import { AzureOpenAI } from "@langchain/openai"; const model = new AzureOpenAI({ azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME azureOpenAIApiDeplo...
148342
import { OpenAI } from "@langchain/openai"; // To enable streaming, we pass in `streaming: true` to the LLM constructor. // Additionally, we pass in a handler for the `handleLLMNewToken` event. const model = new OpenAI({ maxTokens: 25, streaming: true, }); const response = await model.invoke("Tell me a joke.", { ...
148350
import { AzureOpenAIEmbeddings } from "@langchain/openai"; const model = new AzureOpenAIEmbeddings({ azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANCE_NAME ...
148356
import { AzureOpenAIEmbeddings } from "@langchain/openai"; const model = new AzureOpenAIEmbeddings({ azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY azureOpenAIApiEmbeddingsDeploymentName: "<your_embedding_deployment_name>", // In Node.js defaults to process.env.AZURE_OP...
148361
import { DefaultAzureCredential, getBearerTokenProvider, } from "@azure/identity"; import { AzureChatOpenAI } from "@langchain/openai"; const credentials = new DefaultAzureCredential(); const azureADTokenProvider = getBearerTokenProvider( credentials, "https://cognitiveservices.azure.com/.default" ); const mo...
148365
import { ChatMistralAI } from "@langchain/mistralai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; const model = new ChatMistralAI({ apiKey: process.env.MISTRAL_API_KEY, model: "mistral-small", }); const prompt = ChatPromptTemplate.fromMessages([ ["system", "You are a helpful assistant"], ["hu...
148373
import { ChatOpenAI } from "@langchain/openai"; import { HumanMessage } from "@langchain/core/messages"; const chat = new ChatOpenAI({ maxTokens: 25, streaming: true, }); const response = await chat.invoke([new HumanMessage("Tell me a joke.")], { callbacks: [ { handleLLMNewToken(token: string) { ...
148384
import { ChatOpenAI } from "@langchain/openai"; const chatModel = new ChatOpenAI({ model: "gpt-3.5-turbo-0125", }); const res = await chatModel.invoke("Tell me a joke."); console.log(res.usage_metadata); /* { input_tokens: 12, output_tokens: 17, total_tokens: 29 } */
148392
import { ChatOpenAI } from "@langchain/openai"; // See https://cookbook.openai.com/examples/using_logprobs for details const model = new ChatOpenAI({ logprobs: true, // topLogprobs: 5, }); const responseMessage = await model.invoke("Hi there!"); console.log(JSON.stringify(responseMessage, null, 2)); /* { ...
148401
import { LLMChain } from "langchain/chains"; import { ChatMinimax } from "@langchain/community/chat_models/minimax"; import { ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; // We can also construct an LLMChain from a ChatPromptTemplate and a chat mo...
148407
import { ChatMistralAI } from "@langchain/mistralai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; const model = new ChatMistralAI({ apiKey: process.env.MISTRAL_API_KEY, model: "mistral-small", }); const prompt = ChatPromptTempla...
148415
import { AzureChatOpenAI } from "@langchain/openai"; const model = new AzureChatOpenAI({ temperature: 0.9, azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY azureOpenAIApiDeploymentName: "<your_deployment_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_DEP...
148419
import { ChatOpenAI } from "@langchain/openai"; import { HumanMessage } from "@langchain/core/messages"; const model = new ChatOpenAI({ temperature: 0.9, apiKey: "YOUR-API-KEY", // In Node.js defaults to process.env.OPENAI_API_KEY }); // You can also pass tools or functions to the model, learn more here // https:...
148429
import { ChatOpenAI } from "@langchain/openai"; const model = new ChatOpenAI({ temperature: 0.9, configuration: { baseURL: "https://your_custom_url.com", }, }); const message = await model.invoke("Hi there!"); console.log(message); /* AIMessage { content: 'Hello! How can I assist you today?', ad...
148442
import { type LLMResult } from "@langchain/core/outputs"; import { ChatOpenAI } from "@langchain/openai"; import { HumanMessage } from "@langchain/core/messages"; import { Serialized } from "@langchain/core/load/serializable"; // We can pass in a list of CallbackHandlers to the LLM constructor to get callbacks for var...
148464
import { AzureChatOpenAI } from "@langchain/openai"; const model = new AzureChatOpenAI({ temperature: 0.9, azureOpenAIApiKey: "<your_key>", // In Node.js defaults to process.env.AZURE_OPENAI_API_KEY azureOpenAIApiInstanceName: "<your_instance_name>", // In Node.js defaults to process.env.AZURE_OPENAI_API_INSTANC...
148475
import { ChatCohere } from "@langchain/cohere"; import { ChatPromptTemplate } from "@langchain/core/prompts"; const model = new ChatCohere({ apiKey: process.env.COHERE_API_KEY, // Default }); const prompt = ChatPromptTemplate.fromMessages([ ["ai", "You are a helpful assistant"], ["human", "{input}"], ]); const c...
148492
/* eslint-disable import/first */ import { ChatOpenAI } from "@langchain/openai"; const chatModel = new ChatOpenAI({}); console.log(await chatModel.invoke("what is LangSmith?")); /* AIMessage { content: 'Langsmith can help with testing by generating test cases, automating the testing process, and analyzing tes...
148493
/* eslint-disable import/first */ /* eslint-disable import/no-duplicates */ import { CheerioWebBaseLoader } from "@langchain/community/document_loaders/web/cheerio"; import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; const chatModel = new ChatOpenAI({}); const embeddings = new OpenAIEmbeddings({}); co...
148505
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { HttpResponseOutputParser } from "langchain/output_parsers"; const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect. {input}`; const prompt = C...
148506
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { HttpResponseOutputParser } from "langchain/output_parsers"; const TEMPLATE = `You are a pirate named Patchy. All responses must be extremely verbose and in pirate dialect. {input}`; const prompt = C...
148514
import { loadEvaluator } from "langchain/evaluation"; const evaluator = await loadEvaluator("criteria", { criteria: "conciseness" }); const res = await evaluator.evaluateStrings({ input: "What's 2+2?", prediction: "What's 2+2? That's an elementary question. The answer you're looking for is that two and two is...
148522
import { ChatOpenAI } from "@langchain/openai"; // Use a model with a shorter context window const shorterLlm = new ChatOpenAI({ model: "gpt-3.5-turbo", maxRetries: 0, }); const longerLlm = new ChatOpenAI({ model: "gpt-3.5-turbo-16k", }); const modelWithFallback = shorterLlm.withFallbacks([longerLlm]); const ...
148523
import { z } from "zod"; import { OpenAI, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; const prompt = PromptTemplate.fromTemplate( `Return a JSON object containing the following value wrapped ...
148526
import { FaissStore } from "@langchain/community/vectorstores/faiss"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { TextLoader } from "langchain/document_loaders/fs/text"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { createRetrieverTool, createConv...
148529
import { ChatOpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; const model = new ChatOpenAI({ model: "badmodel", }); const promptTemplate = PromptTemplate.fromTemplate( "Tell me a joke about {topic}" ); const chain = promptTemplate.pipe(model); const result = await chai...
148541
import { ChatPromptTemplate } from "@langchain/core/prompts"; import { StringOutputParser } from "@langchain/core/output_parsers"; import { RunnableSequence } from "@langchain/core/runnables"; import { ChatAnthropic } from "@langchain/anthropic"; const promptTemplate = ChatPromptTemplate.fromTemplate(`Given the user...
148545
import { ChatOpenAI } from "@langchain/openai"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { RunnableConfig, RunnableWithMessageHistory, } from "@langchain/core/runnables"; import { ChatMessageHistory } from "@langchain/community/stores/message/in_memory"; // Ins...
148546
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { formatDocumentsAsString } from "langchain/util/document"; import { PromptTemplate } from "@langchain/core/prompts"; import { RunnableSequence, RunnablePassthrough, } from "...
148555
import { ZepClient } from "@getzep/zep-cloud"; import { ChatPromptTemplate, MessagesPlaceholder, } from "@langchain/core/prompts"; import { ConsoleCallbackHandler } from "@langchain/core/tracers/console"; import { ChatOpenAI } from "@langchain/openai"; import { RunnableWithMessageHistory } from "@langchain/core/run...
148558
import { ChatOpenAI } from "@langchain/openai"; const model = new ChatOpenAI({}); const promptAsString = "Human: Tell me a short joke about ice cream"; const response = await model.invoke(promptAsString); console.log(response); /** AIMessage { content: 'Sure, here you go: Why did the ice cream go to school? Because...
148559
import { ChatPromptTemplate } from "@langchain/core/prompts"; const prompt = ChatPromptTemplate.fromMessages([ ["human", "Tell me a short joke about {topic}"], ]); const promptValue = await prompt.invoke({ topic: "ice cream" }); console.log(promptValue); /** ChatPromptValue { messages: [ HumanMessage { c...
148560
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { Document } from "@langchain/core/documents"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { RunnableLambda, RunnableMap, RunnablePassthrough, } fr...
148564
import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { LLMChain } from "langchain/chains"; import { OutputFixingParser } from "langchain/output_parsers"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; const o...
148567
/* eslint-disable @typescript-eslint/no-non-null-assertion */ // Requires a vectorstore that supports maximal marginal relevance search import { Pinecone } from "@pinecone-database/pinecone"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PineconeStore } from "@langchain/pinecone"; import {...
148568
import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; export const run = async () => { const template = "What is a good name for a company that makes {product}?"; const promptA = new PromptTemplate({ template, inputVariables:...
148570
import { OpenAI } from "@langchain/openai"; import { RunnableSequence } from "@langchain/core/runnables"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; const parser = StructuredOutputParser.fromNamesAndDescriptions({ answer: "answer...
148571
import { ChatOpenAI } from "@langchain/openai"; import { HttpResponseOutputParser } from "langchain/output_parsers"; import { JsonOutputFunctionsParser } from "@langchain/core/output_parsers/openai_functions"; const handler = async () => { const parser = new HttpResponseOutputParser({ contentType: "text/event-st...
148572
import { PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { // The `partial` method returns a new `PromptTemplate` object that can be used to format the prompt with only some of the input variables. const promptA = new PromptTemplate({ template: "{foo}{bar}", inputVariables: ...
148584
import { ChatPromptTemplate, HumanMessagePromptTemplate, PromptTemplate, SystemMessagePromptTemplate, } from "@langchain/core/prompts"; export const run = async () => { // A `PromptTemplate` consists of a template string and a list of input variables. const template = "What is a good name for a company tha...
148585
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; // With a `StructuredOutputParser` we can define a schema for the output. const parser = StructuredOutputParser.fromNamesAndDescriptions({ ans...
148586
import { z } from "zod"; import { OpenAI } from "@langchain/openai"; import { RunnableSequence } from "@langchain/core/runnables"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; // We can use zod to define a schema for the output using...
148587
import { FewShotPromptTemplate, PromptTemplate } from "@langchain/core/prompts"; export const run = async () => { // First, create a list of few-shot examples. const examples = [ { word: "happy", antonym: "sad" }, { word: "tall", antonym: "short" }, ]; // Next, we specify the template to format the ex...
148592
// Ephemeral, in-memory vector store for demo purposes import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { PromptTemplate, FewShotPromptTemplate } from "@langchain/core/prompts"; import { SemanticSimilarityExampleSelector } from "...
148595
import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { CustomListOutputParser } from "@langchain/core/output_parsers"; // With a `CustomListOutputParser`, we can parse a list with a specific length and separator. const parser = new CustomListOutputParser({ length...
148598
import { z } from "zod"; import { ChatOpenAI } from "@langchain/openai"; import { OutputFixingParser } from "langchain/output_parsers"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; export const run = async () => { const parser = StructuredOutputParser.fromZodSchema( z.object({ a...
148599
import { z } from "zod"; import { OpenAI } from "@langchain/openai"; import { PromptTemplate } from "@langchain/core/prompts"; import { StructuredOutputParser } from "@langchain/core/output_parsers"; // We can use zod to define a schema for the output using the `fromZodSchema` method of `StructuredOutputParser`. const...
148604
import { ChatPromptTemplate } from "@langchain/core/prompts"; const systemTemplate = "You are a helpful assistant that translates {input_language} to {output_language}."; const humanTemplate = "{text}"; const chatPrompt = ChatPromptTemplate.fromMessages([ ["system", systemTemplate], ["human", humanTemplate], ])...
148621
import { TextLoader } from "langchain/document_loaders/fs/text"; const loader = new TextLoader("src/document_loaders/example_data/example.txt"); const docs = await loader.load();
148624
import { ApifyDatasetLoader } from "@langchain/community/document_loaders/web/apify_dataset"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { ChatPromptTemplate } from "@...
148638
import { ApifyDatasetLoader } from "@langchain/community/document_loaders/web/apify_dataset"; import { HNSWLib } from "@langchain/community/vectorstores/hnswlib"; import { OpenAIEmbeddings, ChatOpenAI } from "@langchain/openai"; import { Document } from "@langchain/core/documents"; import { ChatPromptTemplate } from "@...
148646
import { DirectoryLoader } from "langchain/document_loaders/fs/directory"; import { PDFLoader } from "@langchain/community/document_loaders/fs/pdf"; import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; /* Load all PDFs within the specified directory */ const directoryLoader = new DirectoryLoader(...
148648
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { TokenTextSplitter } from "@langchain/textsplitters"; import { SearchApiLoader } from "@langchain/community/document_loaders/web/searchapi"; import { ChatPromptTemplate } from "@l...
148650
import { ChatOpenAI, OpenAIEmbeddings } from "@langchain/openai"; import { MemoryVectorStore } from "langchain/vectorstores/memory"; import { SerpAPILoader } from "@langchain/community/document_loaders/web/serpapi"; import { ChatPromptTemplate } from "@langchain/core/prompts"; import { createStuffDocumentsChain } from ...
148675
import { RecursiveCharacterTextSplitter } from "@langchain/textsplitters"; import { OpenAIEmbeddings } from "@langchain/openai"; import { Chroma } from "@langchain/community/vectorstores/chroma"; import { getDocs } from "./docs.js"; const docs = await getDocs(); const textSplitter = new RecursiveCharacterTextSplitter(...