| code | apis | extract_api |
|---|---|---|
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((1483, 1510), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1500, 1510), False, 'import logging\n'), ((3955, 3973), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3971, 3973), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
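
The cache docstring above only describes the feature; a minimal sketch of actually enabling it, assuming the legacy `langchain` package layout where the module-level `llm_cache` hook (visible in this row's extracted calls) is consulted before hitting the provider:

```python
# Minimal sketch: enable the optional LLM cache described above. Assumes the
# legacy `langchain` layout with a module-level `llm_cache` hook; the model
# name is illustrative.
import langchain
from langchain.cache import InMemoryCache
from langchain.llms import OpenAI

langchain.llm_cache = InMemoryCache()

llm = OpenAI(model_name="text-davinci-003")
llm("Tell me a joke")  # first call goes to the provider
llm("Tell me a joke")  # identical prompt is served from the cache
```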
import os
import json
import base64
import requests
import langchain
from flask import jsonify
from langchain.chat_models import ChatOpenAI
from typing import Sequence
from threading import Thread
from queue import Queue, Empty
from langchain.callbacks.base import BaseCallbackHandler
from typing import Any, Callable
fr... | [
"langchain.pydantic_v1.Field",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.chains.openai_functions.create_structured_output_runnable",
"langchain.chains.LLMChain",
"langchain.vectorstores.pinecone.Pinecone.from_existing_index",
"langchain.prompts.... | [((786, 799), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (797, 799), False, 'from dotenv import load_dotenv\n'), ((820, 849), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'], {}), "('PINECONE_API_KEY')\n", (829, 849), False, 'import os\n'), ((865, 890), 'os.getenv', 'os.getenv', (['"""PINECONE_ENV"""'... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import inspect
import itertools
import logging
import uuid
import warnings
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Coroutine,
Dic... | [
"langchain.schema.messages.messages_from_dict",
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.evaluation.schema.EvaluatorType",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHan... | [((1500, 1527), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1517, 1527), False, 'import logging\n'), ((2799, 2816), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (2807, 2816), False, 'from urllib.parse import urlparse, urlunparse\n'), ((27519, 27539), 'asyncio... |
from langchain.llms import HuggingFacePipeline, CTransformers
import langchain
from ingest import load_db
from langchain.cache import InMemoryCache
from langchain.schema import prompt
from langchain.chains import RetrievalQA
from langchain.callbacks import StdOutCallbackHandler
from langchain import PromptTemplate
impo... | [
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.llms.CTransformers",
"langchain.callbacks.StdOutCallbackHandler",
"langchain.cache.InMemoryCache",
"langchain.PromptTemplate"
] | [((403, 418), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (416, 418), False, 'from langchain.cache import InMemoryCache\n'), ((709, 732), 'langchain.callbacks.StdOutCallbackHandler', 'StdOutCallbackHandler', ([], {}), '()\n', (730, 732), False, 'from langchain.callbacks import StdOutCallbackHand... |
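
A sketch of how this row's pieces typically fit together, assuming the snippet's own `ingest` module provides `load_db()` returning a vector store, and a hypothetical GGML model id for `CTransformers`:

```python
# Hedged sketch of the cached local-LLM RetrievalQA setup this row implies.
import langchain
from ingest import load_db  # the snippet's own module; assumed to return a vector store
from langchain.cache import InMemoryCache
from langchain.callbacks import StdOutCallbackHandler
from langchain.chains import RetrievalQA
from langchain.llms import CTransformers

langchain.llm_cache = InMemoryCache()  # avoid re-paying for repeated questions

llm = CTransformers(
    model="TheBloke/Llama-2-7B-Chat-GGML",  # hypothetical local model id
    model_type="llama",
)
qa = RetrievalQA.from_chain_type(
    llm=llm,
    chain_type="stuff",
    retriever=load_db().as_retriever(),
    callbacks=[StdOutCallbackHandler()],
)
print(qa.run("What does the document say about caching?"))
```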
#!/usr/bin/env python
# coding: utf-8
# # Building hotel room search with self-querying retrieval
#
# In this example we'll walk through how to build and iterate on a hotel room search service that leverages an LLM to generate structured filter queries that can then be passed to a vector store.
#
# For an introducti... | [
"langchain_openai.ChatOpenAI",
"langchain.chains.query_constructor.base.get_query_constructor_prompt",
"langchain_openai.OpenAIEmbeddings",
"langchain.retrievers.SelfQueryRetriever",
"langchain_community.vectorstores.ElasticsearchStore"
] | [((2434, 2459), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-4"""'}), "(model='gpt-4')\n", (2444, 2459), False, 'from langchain_openai import ChatOpenAI\n'), ((2780, 2795), 'json.loads', 'json.loads', (['res'], {}), '(res)\n', (2790, 2795), False, 'import json\n'), ((3742, 3800), 'langchain.chain... |
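
A compact self-query sketch in the spirit of this walkthrough, assuming an Elasticsearch instance at `localhost:9200`, hypothetical hotel metadata fields, and the `lark` package installed for the query parser:

```python
from langchain.chains.query_constructor.base import AttributeInfo
from langchain.retrievers import SelfQueryRetriever
from langchain_community.vectorstores import ElasticsearchStore
from langchain_openai import ChatOpenAI, OpenAIEmbeddings

# Hypothetical metadata schema the LLM can write structured filters against.
metadata_field_info = [
    AttributeInfo(name="price", description="Nightly rate in USD", type="float"),
    AttributeInfo(name="city", description="City the hotel is in", type="string"),
]
vectorstore = ElasticsearchStore(
    index_name="hotels", embedding=OpenAIEmbeddings(), es_url="http://localhost:9200"
)
retriever = SelfQueryRetriever.from_llm(
    ChatOpenAI(model="gpt-4"),
    vectorstore,
    "Detailed description of a hotel room",
    metadata_field_info,
)
docs = retriever.invoke("a room under $200 in Paris")
```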
from llama_index.prompts import PromptTemplate
from llama_index import VectorStoreIndex, SimpleDirectoryReader
from llama_index.vector_stores import WeaviateVectorStore
import weaviate
from llama_index.node_parser import (
SentenceWindowNodeParser,
)
from llama_index import (
GPTVectorStoreIndex,
ServiceCon... | [
"langchain_community.llms.HuggingFaceTextGenInference",
"langchain.embeddings.huggingface.HuggingFaceBgeEmbeddings"
] | [((1010, 1022), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1019, 1022), False, 'import json\n'), ((3664, 3704), 'weaviate.Client', 'weaviate.Client', (['"""http://localhost:8029"""'], {}), "('http://localhost:8029')\n", (3679, 3704), False, 'import weaviate\n'), ((3736, 3838), 'llama_index.vector_stores.WeaviateV... |
# Import langchain modules
from langchain.memory import Memory
from langchain.tools import VectorStore
# Import other modules
import os
import requests
# Define code memory class
class CodeMemory(Memory):
def __init__(self):
# Initialize the memory with an empty dictionary
super().__... | [
"langchain.tools.VectorStore"
] | [((435, 488), 'langchain.tools.VectorStore', 'VectorStore', ([], {'model': '"""codebert-base"""', 'index_name': '"""code"""'}), "(model='codebert-base', index_name='code')\n", (446, 488), False, 'from langchain.tools import VectorStore\n'), ((3648, 3668), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', ... |
import streamlit as st
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationalRetrievalChain
from langchain.prompts.prompt import PromptTemplate
from langchain.callbacks import get_openai_callback
from langchain.chains import FlareChain
from langchain.prompts import MessagesPlaceholder
... | [
"langchain.prompts.prompt.PromptTemplate",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.chat_models.ChatOpenAI",
"langchain.callbacks.get_openai_callback",
"langchain.chains.FlareChain.from_llm"
] | [((736, 822), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'template': 'self.qa_template', 'input_variables': "['context', 'question']"}), "(template=self.qa_template, input_variables=['context',\n 'question'])\n", (750, 822), False, 'from langchain.prompts.prompt import PromptTemplate\n'), ((1... |
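
A self-contained sketch wiring these imports together; the in-memory FAISS store and its single document are stand-ins for whatever retriever the app actually builds:

```python
from langchain.callbacks import get_openai_callback
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

# Stand-in corpus; a real app would load and split documents first.
vectorstore = FAISS.from_texts(["LangChain is a framework for LLM apps."], OpenAIEmbeddings())
chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(temperature=0),
    retriever=vectorstore.as_retriever(),
)
with get_openai_callback() as cb:  # track token usage and cost for the call
    result = chain({"question": "What is LangChain?", "chat_history": []})
    print(result["answer"], cb.total_tokens)
```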
# Import Langchain dependencies
from langchain.document_loaders import PyPDFLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
# Bring in streamlit... | [
"langchain.document_loaders.PyPDFLoader",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((539, 717), 'watsonxlangchain.LangChainInterface', 'LangChainInterface', ([], {'credentials': 'creds', 'model': '"""meta-llama/llama-2-70b-chat"""', 'params': "{'decoding_method': 'sample', 'max_new_tokens': 200, 'temperature': 0.5}", 'project_id': '""""""'}), "(credentials=creds, model='meta-llama/llama-2-70b-chat',... |
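
The load → split → embed → index flow these imports suggest, as a hedged sketch; `paper.pdf` is a hypothetical path and the creator's default Chroma vector store is assumed to be installed:

```python
from langchain.document_loaders import PyPDFLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.indexes import VectorstoreIndexCreator
from langchain.text_splitter import RecursiveCharacterTextSplitter

index = VectorstoreIndexCreator(
    embedding=HuggingFaceEmbeddings(),
    text_splitter=RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50),
).from_loaders([PyPDFLoader("paper.pdf")])  # hypothetical PDF
docs = index.vectorstore.similarity_search("What is the main finding?")
```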
import streamlit as st
import langchain
from langchain_community.document_loaders import RecursiveUrlLoader, TextLoader, JSONLoader
from langchain_community.document_transformers import Html2TextTransformer
from langchain.docstore.document import Document
from langchain_community.embeddings.openai import OpenAIEmbeddi... | [
"langchain.agents.openai_functions_agent.agent_token_buffer_memory.AgentTokenBufferMemory",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.agents.AgentExecutor",
"langchain_community.embeddings.openai.OpenAIEmbeddings",
"langchain_community.vectorstores.Chroma.from_documents",
"langchain.docs... | [((1732, 1759), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1741, 1759), False, 'import os, openai, requests, json, zeep, datetime, pandas as pd\n'), ((1857, 1875), 'langchain_community.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1873, 1875), False,... |
# Using flask to make an api
# import necessary libraries and functions
from flask import Flask, jsonify, request, render_template
from pydantic import BaseModel
from ast import literal_eval
import os
import openai
openai.api_key = os.getenv("OPENAI_API_KEY")
import langchain
from langchain.vectorstores import FAI... | [
"langchain.vectorstores.FAISS.load_local",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((235, 262), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (244, 262), False, 'import os\n'), ((412, 427), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (417, 427), False, 'from flask import Flask, jsonify, request, render_template\n'), ((1396, 1414), 'langchain.embedd... |
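
A sketch of the `FAISS.load_local` call this row's api list points at, assuming `faiss_index` is a directory produced earlier by `save_local()` and that the legacy two-argument signature applies:

```python
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import FAISS

db = FAISS.load_local("faiss_index", OpenAIEmbeddings())  # hypothetical index dir
docs = db.similarity_search("query text", k=4)
```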
# -*- coding: utf-8 -*-
import random
import streamlit as st
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
#from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FA... | [
"langchain.LLMChain",
"langchain.embeddings.HuggingFaceEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents"
] | [((1047, 1159), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""text-davinci-003"""', 'temperature': '(0.2)', 'max_tokens': '(512)', 'openai_api_key': "st.secrets['api_key']"}), "(model_name='text-davinci-003', temperature=0.2, max_tokens=512,\n openai_api_key=st.secrets['api_key'])\n", (1053, 1159), Fals... |
import langchain
from langchain.agents import load_tools, initialize_agent, AgentType
from langchain.chat_models import ChatOpenAI
langchain.verbose = True
langchain.debug = True
def get_chat():
return ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
if __name__ == "__main__":
chat = get_chat()
to... | [
"langchain.agents.initialize_agent",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((209, 262), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (219, 262), False, 'from langchain.chat_models import ChatOpenAI\n'), ((326, 350), 'langchain.agents.load_tools', 'load_tools', (["['termina... |
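
Completing the agent setup this snippet starts, as a sketch; the `"terminal"` tool name is only a guess at the truncated `load_tools(['termina...` call in the extracted column:

```python
from langchain.agents import AgentType, initialize_agent, load_tools
from langchain.chat_models import ChatOpenAI

chat = ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
tools = load_tools(["terminal"])  # guessed from the truncated extract
agent = initialize_agent(
    tools, chat, agent=AgentType.CHAT_ZERO_SHOT_REACT_DESCRIPTION, verbose=True
)
agent.run("List the files in the current directory.")
```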
from llama_index import (
GPTVectorStoreIndex,
ServiceContext,
)
from llama_index.postprocessor import SentenceTransformerRerank
from llama_index.embeddings import LangchainEmbedding
from langchain.embeddings.huggingface import (
HuggingFaceBgeEmbeddings,
)
from llama_index.vector_stores import WeaviateVect... | [
"langchain_community.llms.HuggingFaceTextGenInference",
"langchain.embeddings.huggingface.HuggingFaceBgeEmbeddings"
] | [((1058, 1076), 'huggingface_hub.commands.user.login', 'login', ([], {'token': 'token'}), '(token=token)\n', (1063, 1076), False, 'from huggingface_hub.commands.user import login\n'), ((1116, 1160), 'weaviate.Client', 'weaviate.Client', (['"""http://192.168.88.10:8080"""'], {}), "('http://192.168.88.10:8080')\n", (1131... |
import os
import time
import pickle as pkl
# import re
# import yaml
import toml
import logging
from datetime import date
# import aiohttp
import pandas as pd
from pytrends.request import TrendReq
import serpapi
from serpapi import GoogleSearch
import asyncio
import streamlit as st
import streamlit.components.v1 as co... | [
"langchain.docstore.document.Document",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((872, 885), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (883, 885), False, 'from dotenv import load_dotenv\n'), ((1028, 1055), 'os.path.exists', 'os.path.exists', (['config_path'], {}), '(config_path)\n', (1042, 1055), False, 'import os\n'), ((1341, 1393), 'logging.info', 'logging.info', (['f"""session sta... |
import requests
import re
import langchain
import openai
# Set up OpenAI GPT API credentials
openai.api_key = 'MY_OPENAI_API_KEY'
# Function to fetch a GitHub user's repositories
def fetch_user_repos(username):
url = f'https://api.github.com/users/{username}/repos'
response = requests.get(url)
... | [
"langchain.extract_metrics_from_github_repo"
] | [((300, 317), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (312, 317), False, 'import requests\n'), ((811, 835), 'requests.get', 'requests.get', (['readme_url'], {}), '(readme_url)\n', (823, 835), False, 'import requests\n'), ((1100, 1206), 'openai.Completion.create', 'openai.Completion.create', ([], {'eng... |
# import supporting packages and modules
from abc import ABC
import yaml
import os
import tempfile
import requests
from urllib.parse import urlparse
from typing import List
import json
import re
# import langchain modules
from langchain.docstore.document import Document
from langchain.document_loaders.base import Base... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.document_loaders.OnlinePDFLoader",
"langchain.document_loaders.WebBaseLoader",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.PromptTemplate"
] | [((1437, 1462), 'os.path.isfile', 'os.path.isfile', (['file_path'], {}), '(file_path)\n', (1451, 1462), False, 'import os\n'), ((2579, 2592), 'urllib.parse.urlparse', 'urlparse', (['url'], {}), '(url)\n', (2587, 2592), False, 'from urllib.parse import urlparse\n'), ((12738, 12824), 'langchain.PromptTemplate', 'PromptTe... |
from langchain import OpenAI, SQLDatabase
from langchain_experimental.sql import SQLDatabaseChain
from langchain.memory import ConversationBufferMemory
from langchain.agents import (AgentType,
AgentExecutor,
create_react_agent,
c... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain_experimental.sql.SQLDatabaseChain",
"langchain.agents.initialize.initialize_agent",
"langchain.tools.Tool",
"langchain.document_loaders.pdf.PyPDFLoader",
"langchain.memory.ConversationBufferMemory",
"langchain_community.document_loaders.text.Te... | [((1372, 1405), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (1395, 1405), False, 'import warnings\n'), ((1458, 1572), 'langchain.SQLDatabase.from_uri', 'SQLDatabase.from_uri', (['f"""postgresql+psycopg2://postgres:{constants.DBPASS}@localhost:5433/{constants.DB}"""'], {... |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from la... | [
"langchain.callbacks.get_callback_manager"
] | [((646, 703), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (651, 703), False, 'from pydantic import BaseModel, Field, validator\n'), ((738, 775), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verb... |
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CU... | [
"langchain.agents.load_tools._LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys"
] | [((709, 811), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (722, ... |
#!/usr/bin/env python3
from restapi_helper import LangChainHelper
from langchain.schema import HumanMessage
print('==Simple message predict==')
with LangChainHelper() as lch:
text = 'Hey there!'
messages = [HumanMessage(content=text)]
print(lch.predict_messages(messages))
print('==As English t... | [
"langchain.schema.HumanMessage"
] | [((157, 174), 'restapi_helper.LangChainHelper', 'LangChainHelper', ([], {}), '()\n', (172, 174), False, 'from restapi_helper import LangChainHelper\n'), ((355, 372), 'restapi_helper.LangChainHelper', 'LangChainHelper', ([], {}), '()\n', (370, 372), False, 'from restapi_helper import LangChainHelper\n'), ((572, 589), 'r... |
from typing import Optional
from langchain.chains.openai_functions import create_structured_output_runnable
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
import logging
import langchain
from langchain_community.vectorstores import FAISS
from langchain_co... | [
"langchain_core.prompts.ChatPromptTemplate.from_template",
"langchain_openai.ChatOpenAI",
"langchain_core.runnables.RunnableParallel",
"langchain_core.output_parsers.StrOutputParser"
] | [((565, 592), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (582, 592), False, 'import logging\n'), ((624, 664), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (643, 664), False, 'import logging\n'), ((683, 706), 'logging.Stream... |
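
A minimal structured-output sketch matching this row's imports; the `Person` schema and the prompt are hypothetical:

```python
from langchain.chains.openai_functions import create_structured_output_runnable
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

class Person(BaseModel):
    """Hypothetical schema the LLM must fill in."""
    name: str = Field(..., description="The person's name")
    age: int = Field(..., description="The person's age")

prompt = ChatPromptTemplate.from_template("Extract the person from: {input}")
runnable = create_structured_output_runnable(Person, ChatOpenAI(temperature=0), prompt)
print(runnable.invoke({"input": "Ann is 31 years old."}))
```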
import streamlit as st
# Import transformer classes for generation
from transformers import AutoTokenizer, AutoModelForCausalLM, TextStreamer, GPT2Tokenizer, GPT2LMHeadModel, GPT2Model
# Import torch for datatype attributes
import torch
# Import the prompt wrapper...but for llama index
from llama_index.prompts.prompts... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((1422, 1473), 'streamlit.title', 'st.title', (['"""LLM Deployment Prototype for Production"""'], {}), "('LLM Deployment Prototype for Production')\n", (1430, 1473), True, 'import streamlit as st\n'), ((1474, 1718), 'streamlit.caption', 'st.caption', (['"""Special thanks to my mentor, Medkham Chanthavong, for all the ... |
from langchain_openai import ChatOpenAI
from langchain.chains import LLMChain
from langchain.prompts import MessagesPlaceholder, HumanMessagePromptTemplate, ChatPromptTemplate
from langchain.memory import ConversationBufferMemory, FileChatMessageHistory
from dotenv import load_dotenv
import sqlite3
import sqlparse
impo... | [
"langchain_openai.ChatOpenAI",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.prompts.MessagesPlaceholder",
"langchain.memory.FileChatMessageHistory",
"langchain.chains.LLMChain"
] | [((476, 547), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {'category': 'LangChainDeprecationWarning'}), "('ignore', category=LangChainDeprecationWarning)\n", (499, 547), False, 'import warnings\n'), ((550, 563), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (561, 563), False, 'from... |
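
A sketch of the chat chain with file-backed memory these imports imply; `messages.json` is a hypothetical history path:

```python
from langchain.chains import LLMChain
from langchain.memory import ConversationBufferMemory, FileChatMessageHistory
from langchain.prompts import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
    MessagesPlaceholder,
)
from langchain_openai import ChatOpenAI

memory = ConversationBufferMemory(
    chat_memory=FileChatMessageHistory("messages.json"),  # persists turns to disk
    memory_key="history",
    return_messages=True,
)
prompt = ChatPromptTemplate.from_messages([
    MessagesPlaceholder(variable_name="history"),
    HumanMessagePromptTemplate.from_template("{content}"),
])
chain = LLMChain(llm=ChatOpenAI(), prompt=prompt, memory=memory)
print(chain.run(content="Hello!"))
```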
import langchain_visualizer # isort:skip # noqa: F401
import asyncio
import vcr_langchain as vcr
from fvalues import FValue
from langchain import PromptTemplate
from langchain.llms import OpenAI
# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.i... | [
"langchain_visualizer.visualize",
"langchain.llms.OpenAI",
"langchain.PromptTemplate"
] | [((434, 524), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['adjective']", 'template': '"""Tell me a {adjective} joke."""'}), "(input_variables=['adjective'], template=\n 'Tell me a {adjective} joke.')\n", (448, 524), False, 'from langchain import PromptTemplate\n'), ((837, 855), 'vcr_lang... |
from typing import Optional, List
from langchain.chains.openai_functions import create_structured_output_runnable
from langchain_community.chat_models import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
import logging
import langchain
from dr... | [
"langchain_core.prompts.ChatPromptTemplate.from_messages",
"langchain.chains.openai_functions.create_structured_output_runnable",
"langchain_community.chat_models.ChatOpenAI"
] | [((453, 480), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (470, 480), False, 'import logging\n'), ((512, 552), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.DEBUG'}), '(level=logging.DEBUG)\n', (531, 552), False, 'import logging\n'), ((571, 594), 'logging.Stream... |
import logging
from langchain.chat_models import ChatOpenAI
from dreamsboard.dreams.builder_cosplay_code.base import StructuredDreamsStoryboard
from dreamsboard.dreams.dreams_personality_chain.base import StoryBoardDreamsGenerationChain
import langchain
from dreamsboard.engine.generate.code_generate import QueryProg... | [
"langchain.chat_models.ChatOpenAI"
] | [((545, 572), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (562, 572), False, 'import logging\n'), ((623, 646), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (644, 646), False, 'import logging\n'), ((806, 859), 'dreamsboard.engine.storage.storage_context.StorageCon... |
import langchain_openai
# Disable because of version conflict
# import langchain_anthropic
import pytest
from interlab.queries.count_tokens import count_tokens
TEXT = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit. Sed ultrices lacus sed leo ornare, "
"sed iaculis mi pharetra. Pellentesque non ni... | [
"langchain_openai.ChatOpenAI",
"langchain_openai.OpenAI"
] | [((385, 411), 'interlab.queries.count_tokens.count_tokens.cache_clear', 'count_tokens.cache_clear', ([], {}), '()\n', (409, 411), False, 'from interlab.queries.count_tokens import count_tokens\n'), ((460, 486), 'interlab.queries.count_tokens.count_tokens', 'count_tokens', (['TEXT', '"""gpt2"""'], {}), "(TEXT, 'gpt2')\n... |
from model.chain_spec import ChainSpec, LLMChainSpec, SequentialChainSpec, CaseChainSpec, APIChainSpec, ReformatChainSpec, TransformChainSpec, VectorSearchChainSpec
from model.chain_revision import ChainRevision
from model.lang_chain_context import LangChainContext
from langchain.llms.fake import FakeListLLM
from model... | [
"langchain.llms.fake.FakeListLLM"
] | [((471, 620), 'model.chain_spec.LLMChainSpec', 'LLMChainSpec', ([], {'chain_id': '(1)', 'input_keys': "['input1', 'input2']", 'output_key': '"""output1"""', 'prompt': '"""prompt"""', 'llm_key': '"""llm_key"""', 'chain_type': '"""llm_chain_spec"""'}), "(chain_id=1, input_keys=['input1', 'input2'], output_key=\n 'outp... |
import threading
import time
import unittest
import unittest.mock
from typing import Any, Dict
from uuid import UUID
import pytest
from langsmith import Client
from langchain_core.outputs import LLMResult
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.schemas import Run
def... | [
"langchain_core.tracers.langchain.LangChainTracer",
"langchain_core.outputs.LLMResult"
] | [((640, 676), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {'spec': 'Client'}), '(spec=Client)\n', (663, 676), False, 'import unittest\n'), ((730, 760), 'langchain_core.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (745, 760), False, 'from langchai... |
import openai
import os
import dotenv
from llama_index.agent.openai import OpenAIAgent
from llama_index.llms.azure_openai import AzureOpenAI
from llama_index.core.tools.tool_spec.load_and_search.base import LoadAndSearchToolSpec
from llama_index.tools.google import GoogleSearchToolSpec
from llama_index.tools.weather im... | [
"langchain.embeddings.OpenAIEmbeddings"
] | [((577, 597), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (595, 597), False, 'import dotenv\n'), ((615, 647), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_API_KEY"""'], {}), "('GOOGLE_API_KEY')\n", (629, 647), False, 'import os\n'), ((664, 695), 'os.environ.get', 'os.environ.get', (['"""GOOGLE_CSE... |
import os
import re
import sys
import langchain
import langchain.prompts # noqa: F401
import mock
import pytest
from ddtrace.internal.utils.version import parse_version
from tests.contrib.langchain.utils import get_request_vcr
from tests.utils import override_global_config
SHOULD_USE_LANGCHAIN_COMMUNITY = parse_ve... | [
"langchain_pinecone.PineconeVectorStore",
"langchain.prompts.chat.AIMessagePromptTemplate.from_template",
"langchain.chains.TransformChain",
"langchain.chains.SequentialChain",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.schema.HumanMessage",
"langchain.schema.SystemMe... | [((434, 597), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not SHOULD_USE_LANGCHAIN_COMMUNITY or sys.version_info < (3, 10))'], {'reason': '"""This module only tests langchain_community and Python 3.10+"""'}), "(not SHOULD_USE_LANGCHAIN_COMMUNITY or sys.version_info <\n (3, 10), reason=\n 'This module only tes... |
import os
from typing import Union
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import AzureChatOpenAI
from langchain.agents import AgentType, initialize_agent, tool
import langchain
from langchain.prompts.chat import MessagesPlaceholder, SystemMessagePromptTemplate
import json
imp... | [
"langchain.agents.initialize_agent",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.memory.ConversationBufferMemory",
"langchain.tools.tool",
"langchain.chat_models.AzureChatOpenAI",
"langchain.prompts.chat.MessagesPlaceholder"
] | [((3143, 3211), 'langchain.tools.tool', 'tool', (['"""parts_order"""'], {'return_direct': '(True)', 'args_schema': 'PartsOrderInput'}), "('parts_order', return_direct=True, args_schema=PartsOrderInput)\n", (3147, 3211), False, 'from langchain.tools import tool\n'), ((6557, 6630), 'langchain.memory.ConversationBufferMem... |
"""Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_k... | [
"langchain_core.load.load.loads",
"langchain_core.load.dump.dumps",
"langchainhub.Client"
] | [((679, 711), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (685, 711), False, 'from langchainhub import Client\n'), ((1912, 1925), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1917, 1925), False, 'from langchain_core.load.dump imp... |
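
A usage sketch for the hub client this module wraps, assuming the `langchainhub` package is installed and an API key is configured for pushes; the pulled prompt handle is a well-known public one, the pushed one hypothetical:

```python
from langchain import hub

prompt = hub.pull("hwchase17/react")        # deserialized via loads(), as in this module
# hub.push("my-handle/my-prompt", prompt)  # serialized via dumps(); handle is hypothetical
```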
"""Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_k... | [
"langchain_core.load.load.loads",
"langchain_core.load.dump.dumps",
"langchainhub.Client"
] | [((679, 711), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (685, 711), False, 'from langchainhub import Client\n'), ((1912, 1925), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1917, 1925), False, 'from langchain_core.load.dump imp... |
"""Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_k... | [
"langchain_core.load.load.loads",
"langchain_core.load.dump.dumps",
"langchainhub.Client"
] | [((679, 711), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (685, 711), False, 'from langchainhub import Client\n'), ((1912, 1925), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1917, 1925), False, 'from langchain_core.load.dump imp... |
"""Interface with the LangChain Hub."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Optional
from langchain_core.load.dump import dumps
from langchain_core.load.load import loads
if TYPE_CHECKING:
from langchainhub import Client
def _get_client(api_url: Optional[str] = None, api_k... | [
"langchain_core.load.load.loads",
"langchain_core.load.dump.dumps",
"langchainhub.Client"
] | [((679, 711), 'langchainhub.Client', 'Client', (['api_url'], {'api_key': 'api_key'}), '(api_url, api_key=api_key)\n', (685, 711), False, 'from langchainhub import Client\n'), ((1912, 1925), 'langchain_core.load.dump.dumps', 'dumps', (['object'], {}), '(object)\n', (1917, 1925), False, 'from langchain_core.load.dump imp... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.load.load.loads",
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps"
] | [((1586, 1613), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1603, 1613), False, 'import logging\n'), ((5793, 5811), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (5809, 5811), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.load.load.loads",
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps"
] | [((1586, 1613), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1603, 1613), False, 'import logging\n'), ((5793, 5811), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (5809, 5811), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.callbacks.base import BaseCallbackManager
from langchain.callbacks.... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatResult",
"langchain.load.dump.dumps",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.schema.RunInfo",
"langchain.schema.messages.... | [((923, 960), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (928, 960), False, 'from pydantic import Field, root_validator\n'), ((1034, 1067), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (103... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
Type... | [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainT... | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars i... |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
from concurrent.futures import Future, ThreadPoolExecutor
from typing import Any, Dict, List, Optional, Sequence, Set, Union
from uuid import UUID
import langsmith
from langsmith import schemas as langsmith_sche... | [
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((553, 580), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (570, 580), False, 'import logging\n'), ((2572, 2588), 'uuid.UUID', 'UUID', (['example_id'], {}), '(example_id)\n', (2576, 2588), False, 'from uuid import UUID\n'), ((2678, 2707), 'langchain.callbacks.tracers.langchain.get_clien... |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
import threading
import weakref
from concurrent.futures import Future, ThreadPoolExecutor, wait
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from uuid import UUID
import langsmith
f... | [
"langchain.callbacks.tracers.langchain._get_executor",
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((672, 699), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (689, 699), False, 'import logging\n'), ((755, 772), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (770, 772), False, 'import weakref\n'), ((3430, 3447), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (3445, 3... |
"""A tracer that runs evaluators over completed runs."""
from __future__ import annotations
import logging
import threading
import weakref
from concurrent.futures import Future, ThreadPoolExecutor, wait
from typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast
from uuid import UUID
import langsmith
f... | [
"langchain.callbacks.tracers.langchain._get_executor",
"langchain.callbacks.tracers.langchain.get_client",
"langchain.callbacks.manager.tracing_v2_enabled"
] | [((672, 699), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (689, 699), False, 'import logging\n'), ((755, 772), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (770, 772), False, 'import weakref\n'), ((3430, 3447), 'weakref.WeakSet', 'weakref.WeakSet', ([], {}), '()\n', (3445, 3... |
import os
import re
from uuid import UUID
from typing import Any, Dict, List, Optional, Union
import asyncio
import langchain
import streamlit as st
from langchain.schema import LLMResult
from langchain.chat_models import ChatOpenAI
from langchain.agents import Tool
from langchain.agents import AgentType
from langcha... | [
"langchain.agents.initialize_agent",
"langchain.memory.ConversationBufferMemory",
"langchain.llms.OpenAI",
"langchain.chat_models.ChatOpenAI",
"langchain.agents.Tool"
] | [((815, 826), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (824, 826), False, 'import os\n'), ((6031, 6120), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)', 'openai_api_key': 'openai_api_key'}), "(model_name='gpt-3.5-turbo', temperature=0, openai_api_key... |
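
A sketch combining this row's agent, memory, and tool APIs; the echo tool is a deliberately trivial stand-in:

```python
from langchain.agents import AgentType, Tool, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory

tools = [Tool(name="echo", func=lambda q: q, description="Echoes the input back.")]
agent = initialize_agent(
    tools,
    ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
    agent=AgentType.CONVERSATIONAL_REACT_DESCRIPTION,
    memory=ConversationBufferMemory(memory_key="chat_history"),  # key the agent's prompt expects
)
print(agent.run("Introduce yourself."))
```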
from abc import ABC, abstractmethod
from typing import List, Optional
from pydantic import BaseModel, Extra, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from langchain.callbacks.base import BaseCallbackManager
from langchain.schema import (
AIMessage,
BaseLanguageMod... | [
"langchain.schema.ChatResult",
"langchain.schema.ChatGeneration",
"langchain.schema.HumanMessage",
"langchain.schema.AIMessage",
"langchain.schema.LLMResult",
"langchain.callbacks.get_callback_manager"
] | [((568, 605), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (573, 605), False, 'from pydantic import BaseModel, Extra, Field, validator\n'), ((696, 739), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager'}), '(default_factory=get_ca... |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.load.dump.dumpd",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup... | [((2353, 2390), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2358, 2390), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2464, 2497), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.load.dump.dumpd",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.RunInfo",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup... | [((2353, 2390), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2358, 2390), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2464, 2497), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
from langchain.agents import AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.tools import Tool, StructuredTool
from langchain.prompts import StringPromptTemplate
from langchain.chat_models import ChatOpenAI
from langchain.chains import LLMChain
from langchain.llms import VertexAI
from typing imp... | [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.schema.AgentAction",
"langchain.llms.VertexAI",
"langchain.schema.AgentFinish",
"langchain.callbacks.FileCallbackHandler",
"langchain.chains.LLMChain"
] | [((1046, 1098), 'os.makedirs', 'os.makedirs', (['f"""./results/{timestamp}"""'], {'exist_ok': '(True)'}), "(f'./results/{timestamp}', exist_ok=True)\n", (1057, 1098), False, 'import os\n'), ((1332, 1364), 'logging.getLogger', 'logging.getLogger', (['"""info_logger"""'], {}), "('info_logger')\n", (1349, 1364), False, 'i... |
"""Base interface that all chains should implement."""
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import Field, root_validator, validator
import langchain
from lang... | [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((702, 729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (719, 729), False, 'import logging\n'), ((2435, 2468), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2440, 2468), False, 'from pydantic import Field, root_validator... |
"""Base interface that all chains should implement."""
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import Field, root_validator, validator
import langchain
from lang... | [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((702, 729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (719, 729), False, 'import logging\n'), ((2435, 2468), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2440, 2468), False, 'from pydantic import Field, root_validator... |
"""Base interface that all chains should implement."""
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import Field, root_validator, validator
import langchain
from lang... | [
"langchain.schema.RunInfo",
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.callbacks.manager.CallbackManager.configure"
] | [((702, 729), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (719, 729), False, 'import logging\n'), ((2435, 2468), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (2440, 2468), False, 'from pydantic import Field, root_validator... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import functools
import inspect
import logging
import uuid
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
List,
Optional,
Sequence,
Tuple,
Union,
... | [
"langchain.schema.messages.messages_from_dict",
"langchain._api.warn_deprecated",
"langchain.schema.runnable.config.get_executor_for_config",
"langchain.evaluation.schema.EvaluatorType",
"langchain.smith.evaluation.name_generation.random_name",
"langchain.smith.evaluation.StringRunEvaluatorChain.from_run_... | [((1724, 1751), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1741, 1751), False, 'import logging\n'), ((33983, 34008), 'langchain.callbacks.tracers.evaluation.wait_for_all_evaluators', 'wait_for_all_evaluators', ([], {}), '()\n', (34006, 34008), False, 'from langchain.callbacks.tracers... |
"""Schemas for the langchainplus API."""
from __future__ import annotations
import logging
import os
from concurrent.futures import Future, ThreadPoolExecutor, wait
from datetime import datetime
from typing import Dict, List, Optional, Union, cast
from uuid import UUID, uuid4
from pydantic import Field, PrivateAttr, ... | [
"langchainplus_sdk.utils.get_runtime_environment"
] | [((533, 560), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (550, 560), False, 'import logging\n'), ((683, 716), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (701, 716), False, 'from concurrent.futures import Future, Th... |
"""Schemas for the langchainplus API."""
from __future__ import annotations
import logging
import os
from concurrent.futures import Future, ThreadPoolExecutor, wait
from datetime import datetime
from typing import Dict, List, Optional, Union, cast
from uuid import UUID, uuid4
from pydantic import Field, PrivateAttr, ... | [
"langchainplus_sdk.utils.get_runtime_environment"
] | [((533, 560), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (550, 560), False, 'import logging\n'), ((683, 716), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (701, 716), False, 'from concurrent.futures import Future, Th... |
"""Schemas for the langchainplus API."""
from __future__ import annotations
import logging
import os
from concurrent.futures import Future, ThreadPoolExecutor, wait
from datetime import datetime
from typing import Dict, List, Optional, Union, cast
from uuid import UUID, uuid4
from pydantic import Field, PrivateAttr, ... | [
"langchainplus_sdk.utils.get_runtime_environment"
] | [((533, 560), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (550, 560), False, 'import logging\n'), ((683, 716), 'concurrent.futures.ThreadPoolExecutor', 'ThreadPoolExecutor', ([], {'max_workers': '(1)'}), '(max_workers=1)\n', (701, 716), False, 'from concurrent.futures import Future, Th... |
import os
import dotenv
dotenv.load_dotenv()
### Load the credentials
api_key = os.getenv("API_KEY", None)
ibm_cloud_url = os.getenv("IBM_CLOUD_URL", None)
project_id = os.getenv("PROJECT_ID", None)
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN", None)
min_new_tokens=1
max_new_tokens=300
temperature... | [
"langchain.embeddings.HuggingFaceHubEmbeddings"
] | [((24, 44), 'dotenv.load_dotenv', 'dotenv.load_dotenv', ([], {}), '()\n', (42, 44), False, 'import dotenv\n'), ((81, 107), 'os.getenv', 'os.getenv', (['"""API_KEY"""', 'None'], {}), "('API_KEY', None)\n", (90, 107), False, 'import os\n'), ((124, 156), 'os.getenv', 'os.getenv', (['"""IBM_CLOUD_URL"""', 'None'], {}), "('... |
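
A sketch of the `HuggingFaceHubEmbeddings` usage this row's api list names, assuming `HUGGINGFACEHUB_API_TOKEN` is set as loaded above; the repo id is illustrative:

```python
from langchain.embeddings import HuggingFaceHubEmbeddings

embeddings = HuggingFaceHubEmbeddings(repo_id="sentence-transformers/all-MiniLM-L6-v2")
vector = embeddings.embed_query("hello world")  # list of floats
```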
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMRes... | [((2302, 2339), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2307, 2339), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2413, 2446), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMRes... | [((2302, 2339), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2307, 2339), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2413, 2446), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMRes... | [((2302, 2339), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2307, 2339), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2413, 2446), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
"""Base interface for large language models to expose."""
import inspect
import json
import warnings
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
import yaml
from pydantic import Extra, Field, root_validator, validator
impor... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.Generation",
"langchain.schema.get_buffer_string",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.schema.AIMessage",
"langchain.llm_cache.lookup",
"langchain.llm_cache.update",
"langchain.schema.LLMRes... | [((2302, 2339), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (2307, 2339), False, 'from pydantic import Extra, Field, root_validator, validator\n'), ((2413, 2446), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, ... |
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Callable, Dict, List, Optional, Sequence
import langchain
from langchain.callbacks.base import BaseCallbackHandler
from langchain.callbacks.utils import (
BaseMetadataCallbackHandler,
flatten_dict,
import_pandas,
... | [
"langchain.callbacks.utils.import_spacy",
"langchain.callbacks.utils.import_pandas",
"langchain.callbacks.utils.import_textstat",
"langchain.callbacks.utils.flatten_dict"
] | [((1047, 1114), 'comet_ml.Experiment', 'comet_ml.Experiment', ([], {'workspace': 'workspace', 'project_name': 'project_name'}), '(workspace=workspace, project_name=project_name)\n', (1066, 1114), False, 'import comet_ml\n'), ((1249, 1266), 'langchain.callbacks.utils.import_textstat', 'import_textstat', ([], {}), '()\n'... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import itertools
import logging
import uuid
from enum import Enum
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
Optional,
Seque... | [
"langchain.schema.messages.messages_from_dict",
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.chat_models.openai.ChatOpenAI",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandl... | [((1370, 1397), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1387, 1397), False, 'import logging\n'), ((1708, 1725), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (1716, 1725), False, 'from urllib.parse import urlparse, urlunparse\n'), ((24648, 24668), 'asyncio... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((1483, 1510), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1500, 1510), False, 'import logging\n'), ((3955, 3973), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3971, 3973), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((1483, 1510), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1500, 1510), False, 'import logging\n'), ((3955, 3973), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3971, 3973), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((1483, 1510), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1500, 1510), False, 'import logging\n'), ((3955, 3973), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3971, 3973), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
"""
.. warning::
Beta Feature!
**Cache** provides an optional caching layer for LLMs.
Cache is useful for two reasons:
- It can save you money by reducing the number of API calls you make to the LLM
provider if you're often requesting the same completion multiple times.
- It can speed up your application by redu... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((1483, 1510), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (1500, 1510), False, 'import logging\n'), ((3955, 3973), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3971, 3973), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), (... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import inspect
import itertools
import logging
import uuid
import warnings
from enum import Enum
from typing import (
TYPE_CHECKING,
Any,
Callable,
Coroutine,
Dic... | [
"langchain.schema.messages.messages_from_dict",
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.evaluation.schema.EvaluatorType",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHan... | [((1500, 1527), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1517, 1527), False, 'import logging\n'), ((2799, 2816), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (2807, 2816), False, 'from urllib.parse import urlparse, urlunparse\n'), ((27519, 27539), 'asyncio... |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from la... | [
"langchain.callbacks.get_callback_manager"
] | [((646, 703), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (651, 703), False, 'from pydantic import BaseModel, Field, validator\n'), ((738, 775), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verb... |
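This row is the abstract `Chain` base class from the pydantic-v1 era. A minimal custom chain against that interface might look like the following sketch; the class and its keys are made up:

```python
# Made-up minimal chain implementing the legacy Chain interface:
# declare input/output keys and provide _call.
from typing import Dict, List

from langchain.chains.base import Chain


class EchoChain(Chain):
    @property
    def input_keys(self) -> List[str]:
        return ["text"]

    @property
    def output_keys(self) -> List[str]:
        return ["echo"]

    def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
        return {"echo": inputs["text"]}


print(EchoChain()({"text": "hi"})["echo"])  # -> "hi"
```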
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from la... | [
"langchain.callbacks.get_callback_manager"
] | [((646, 703), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (651, 703), False, 'from pydantic import BaseModel, Field, validator\n'), ((738, 775), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verb... |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from la... | [
"langchain.callbacks.get_callback_manager"
] | [((646, 703), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (651, 703), False, 'from pydantic import BaseModel, Field, validator\n'), ((738, 775), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verb... |
"""Base interface that all chains should implement."""
import json
from abc import ABC, abstractmethod
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import yaml
from pydantic import BaseModel, Field, validator
import langchain
from langchain.callbacks import get_callback_manager
from la... | [
"langchain.callbacks.get_callback_manager"
] | [((646, 703), 'pydantic.Field', 'Field', ([], {'default_factory': 'get_callback_manager', 'exclude': '(True)'}), '(default_factory=get_callback_manager, exclude=True)\n', (651, 703), False, 'from pydantic import BaseModel, Field, validator\n'), ((738, 775), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verb... |
from typing import Dict, List, Optional
from langchain.agents.load_tools import (
_EXTRA_LLM_TOOLS,
_EXTRA_OPTIONAL_TOOLS,
_LLM_TOOLS,
)
from langflow.custom import customs
from langflow.interface.base import LangChainTypeCreator
from langflow.interface.tools.constants import (
ALL_TOOLS_NAMES,
CU... | [
"langchain.agents.load_tools._LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_LLM_TOOLS.keys",
"langchain.agents.load_tools._EXTRA_OPTIONAL_TOOLS.keys"
] | [((709, 811), 'langflow.template.field.base.TemplateField', 'TemplateField', ([], {'field_type': '"""str"""', 'required': '(True)', 'is_list': '(False)', 'show': '(True)', 'placeholder': '""""""', 'value': '""""""'}), "(field_type='str', required=True, is_list=False, show=True,\n placeholder='', value='')\n", (722, ... |
import langchain_visualizer # isort:skip # noqa: F401
import asyncio
import vcr_langchain as vcr
from fvalues import FValue
from langchain import PromptTemplate
from langchain.llms import OpenAI
# ========================== Start of langchain example code ==========================
# https://langchain.readthedocs.i... | [
"langchain_visualizer.visualize",
"langchain.llms.OpenAI",
"langchain.PromptTemplate"
] | [((434, 524), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['adjective']", 'template': '"""Tell me a {adjective} joke."""'}), "(input_variables=['adjective'], template=\n 'Tell me a {adjective} joke.')\n", (448, 524), False, 'from langchain import PromptTemplate\n'), ((837, 855), 'vcr_lang... |
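The template in this row can be exercised without an LLM; formatting it directly shows what the chain would send:

```python
# Formatting the row's prompt template directly, no LLM call needed.
from langchain import PromptTemplate

prompt = PromptTemplate(
    input_variables=["adjective"],
    template="Tell me a {adjective} joke.",
)
print(prompt.format(adjective="funny"))  # -> "Tell me a funny joke."
```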
import threading
import time
import unittest
import unittest.mock
from typing import Any, Dict
from uuid import UUID
import pytest
from langsmith import Client
from langchain_core.outputs import LLMResult
from langchain_core.tracers.langchain import LangChainTracer
from langchain_core.tracers.schemas import Run
def... | [
"langchain_core.tracers.langchain.LangChainTracer",
"langchain_core.outputs.LLMResult"
] | [((640, 676), 'unittest.mock.MagicMock', 'unittest.mock.MagicMock', ([], {'spec': 'Client'}), '(spec=Client)\n', (663, 676), False, 'import unittest\n'), ((730, 760), 'langchain_core.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (745, 760), False, 'from langchai... |
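The extraction tuples show the test's core fixture: a `LangChainTracer` wrapped around a mocked LangSmith `Client`. Stripped down to just that setup:

```python
# The test fixture from the row in isolation: the mocked client lets
# the tracer run without touching the LangSmith API.
import unittest.mock

from langchain_core.tracers.langchain import LangChainTracer
from langsmith import Client

client = unittest.mock.MagicMock(spec=Client)
tracer = LangChainTracer(client=client)
assert tracer.client is client
```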
# coding=utf-8
import json
import hashlib
from datetime import datetime
import os
import time
import openai
import flet as ft
import re
import shutil
from flet import (
ElevatedButton,
FilePicker,
FilePickerResultEvent,
Page,
Row,
Text,
icons,
)
from prompt_engineering imp... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.docstore.document.Document",
"langchain.prompts.PromptTemplate"
] | [((6345, 6470), 'openai.Completion.create', 'openai.Completion.create', ([], {'model': '"""text-ada-001"""', 'prompt': 'f"""你要总结这一文本的关键词,并以python列表的形式返回数个关键词字符串:{content}。"""', 'temperature': '(0)'}), "(model='text-ada-001', prompt=\n f'你要总结这一文本的关键词,并以python列表的形式返回数个关键词字符串:{content}。', temperature=0)\n", (6369, 6470... |
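Under the flet UI, this app runs a standard split-then-summarize pipeline. A non-GUI sketch of that pipeline; the chunk sizes and model choice here are illustrative, not taken from the row:

```python
# Illustrative split-then-summarize pipeline (no flet UI); chunk
# sizes and the OpenAI model are assumptions, not the row's values.
from langchain.chains.summarize import load_summarize_chain
from langchain.docstore.document import Document
from langchain.llms import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter

text = "..."  # long input text goes here
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
docs = [Document(page_content=c) for c in splitter.split_text(text)]

chain = load_summarize_chain(OpenAI(temperature=0), chain_type="map_reduce")
print(chain.run(docs))
```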
####################################################################################
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.or... | [
"langchain.embeddings.VertexAIEmbeddings"
] | [((1403, 1423), 'langchain.embeddings.VertexAIEmbeddings', 'VertexAIEmbeddings', ([], {}), '()\n', (1421, 1423), False, 'from langchain.embeddings import VertexAIEmbeddings\n')] |
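Per the extraction, the only langchain call in this row is the bare `VertexAIEmbeddings()` constructor. Using it is a one-liner, assuming Google Cloud credentials are already configured:

```python
# Assumes GCP credentials and project are configured in the environment.
from langchain.embeddings import VertexAIEmbeddings

embeddings = VertexAIEmbeddings()
vector = embeddings.embed_query("What is a matching engine?")
print(len(vector))  # embedding dimensionality
```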
import os
import langchain.text_splitter
from langchain import PromptTemplate, LLMChain
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp
try:
from extensions.telegram_bot.source.generators.ab... | [
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.llms.LlamaCpp",
"langchain.LLMChain",
"langchain.PromptTemplate"
] | [((905, 1003), 'langchain.llms.LlamaCpp', 'LlamaCpp', ([], {'model_path': 'model_path', 'n_ctx': 'n_ctx', 'callback_manager': 'callback_manager', 'verbose': '(True)'}), '(model_path=model_path, n_ctx=n_ctx, callback_manager=\n callback_manager, verbose=True)\n', (913, 1003), False, 'from langchain.llms import LlamaC... |
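The extraction shows `LlamaCpp` built with a streaming-stdout callback manager and wrapped in an `LLMChain`. The same wiring in isolation; the model path and context size are placeholders:

```python
# The LlamaCpp + streaming wiring from the row; "model.gguf" and the
# n_ctx value are placeholders, the kwargs mirror the extraction.
from langchain import LLMChain, PromptTemplate
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.llms import LlamaCpp

callback_manager = CallbackManager([StreamingStdOutCallbackHandler()])
llm = LlamaCpp(
    model_path="model.gguf",  # placeholder
    n_ctx=2048,
    callback_manager=callback_manager,
    verbose=True,
)
chain = LLMChain(llm=llm, prompt=PromptTemplate.from_template("{question}"))
print(chain.run(question="Hello?"))
```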
import os
import openai
from dotenv import load_dotenv
from langchain.chat_models import AzureChatOpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.callbacks.base import BaseCallbackHandler
from langchain.vectorstores import FAISS
from langchain.chain... | [
"langchain.document_loaders.UnstructuredWordDocumentLoader",
"langchain.document_loaders.UnstructuredFileLoader",
"langchain.vectorstores.FAISS.load_local",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.UnstructuredPowerPointLoader",
"langchain.vectorstores.FAISS.sa... | [((4507, 4520), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (4518, 4520), False, 'from dotenv import load_dotenv\n'), ((5613, 5676), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {'deployment': 'embedding_deployment', 'chunk_size': '(1)'}), '(deployment=embedding_deployment, chunk_size=1)... |
import os
import re
import langchain
import paperqa
import paperscraper
from langchain.base_language import BaseLanguageModel
from langchain.tools import BaseTool
from pypdf.errors import PdfReadError
def paper_scraper(search: str, pdir: str = "query") -> dict:
try:
return paperscraper.search_papers(sear... | [
"langchain.prompts.PromptTemplate",
"langchain.chains.llm.LLMChain"
] | [((419, 753), 'langchain.prompts.PromptTemplate', 'langchain.prompts.PromptTemplate', ([], {'input_variables': "['question']", 'template': '"""\n I would like to find scholarly papers to answer\n this question: {question}. Your response must be at\n most 10 words long.\n \'A search query tha... |
"""
Class for Langchain chain, this chain makes a request to OpenAI to provide information
in a given location and time period.
"""
import os
import logging
from pathlib import Path
import langchain
PROMPT_STRING = """
You just gave historical information for {location} around the time period of {time_period} and \n... | [
"langchain.OpenAI",
"langchain.PromptTemplate"
] | [((1201, 1272), 'langchain.PromptTemplate', 'langchain.PromptTemplate', ([], {'input_variables': 'input', 'template': 'PROMPT_STRING'}), '(input_variables=input, template=PROMPT_STRING)\n', (1225, 1272), False, 'import langchain\n'), ((1512, 1587), 'langchain.OpenAI', 'langchain.OpenAI', ([], {'openai_api_key': 'self.o... |
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from langchain.chains import LLMChain
from langchain.prompts.few_shot import FewShotPromptTemplate
def get_prompt(is_zh: bool = False, sydney: bool = False) -> 'FewShotPromptTemplate':
from langchain.prompts.few_shot import FewShotPromptTemplate
fro... | [
"langchain.chains.LLMChain",
"langchain.prompts.prompt.PromptTemplate",
"langchain.llms.OpenAIChat",
"langchain.prompts.few_shot.FewShotPromptTemplate"
] | [((400, 498), 'langchain.prompts.prompt.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['question', 'answer']", 'template': '"""Q: {question}\n{answer}"""'}), '(input_variables=[\'question\', \'answer\'], template=\n """Q: {question}\n{answer}""")\n', (414, 498), False, 'from langchain.prompts.prompt i... |
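`get_prompt` assembles a `FewShotPromptTemplate` from Q/A example pairs. A self-contained version of that construction; the example pair is made up:

```python
# Self-contained FewShotPromptTemplate mirroring the row's pattern;
# the Q/A example is made up.
from langchain.prompts.few_shot import FewShotPromptTemplate
from langchain.prompts.prompt import PromptTemplate

example_prompt = PromptTemplate(
    input_variables=["question", "answer"],
    template="Q: {question}\n{answer}",
)
examples = [{"question": "What is 2 + 2?", "answer": "A: 4"}]

few_shot = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    suffix="Q: {question}",
    input_variables=["question"],
)
print(few_shot.format(question="What is 3 + 3?"))
```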
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores.faiss import FAISS
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from PyP... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.vectorstores.faiss.FAISS.from_texts",
"langchain.embeddings.OpenAIEmbeddings"
] | [((1580, 1607), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (1589, 1607), False, 'import os\n'), ((1282, 1379), 'langchain.text_splitter.CharacterTextSplitter', 'CharacterTextSplitter', ([], {'separator': '"""\n"""', 'chunk_size': '(800)', 'chunk_overlap': '(200)', 'length_function... |
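These imports are the classic chat-over-PDF stack. A skeletal assembly of the chain; the texts and the question are placeholders:

```python
# Skeletal chat-over-documents assembly from the row's imports;
# texts and the question are placeholders.
from langchain.chains import ConversationalRetrievalChain
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from langchain.memory import ConversationBufferMemory
from langchain.vectorstores.faiss import FAISS

store = FAISS.from_texts(["chunk one", "chunk two"], OpenAIEmbeddings())
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
chain = ConversationalRetrievalChain.from_llm(
    llm=ChatOpenAI(temperature=0),
    retriever=store.as_retriever(),
    memory=memory,
)
print(chain({"question": "What is in chunk one?"})["answer"])
```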
import zipfile
from langchain.chat_models import ChatOpenAI
from langchain.schema import (
HumanMessage,
SystemMessage
)
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache(
database_path=".langchain.db"
) # caches queries that are the same.
def generate_code(ques... | [
"langchain.schema.HumanMessage",
"langchain.schema.SystemMessage",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.SQLiteCache"
] | [((211, 253), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {'database_path': '""".langchain.db"""'}), "(database_path='.langchain.db')\n", (222, 253), False, 'from langchain.cache import SQLiteCache\n'), ((621, 688), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': 'model_typ... |
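With `langchain.llm_cache` pointed at the SQLite database, a repeated identical request should be served from disk rather than the API. A quick way to observe the effect:

```python
# Timing two identical calls: the second should return almost
# instantly from the .langchain.db cache set up in the row.
import time

import langchain
from langchain.cache import SQLiteCache
from langchain.chat_models import ChatOpenAI

langchain.llm_cache = SQLiteCache(database_path=".langchain.db")
llm = ChatOpenAI(temperature=0)

for attempt in range(2):
    start = time.time()
    llm.predict("Say hello.")
    print(f"call {attempt}: {time.time() - start:.2f}s")
```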
import langchain
from langchain.chat_models import ChatOpenAI
from langchain_core.tools import Tool
langchain.verbose = True
langchain.debug = True
def get_chat():
return ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0)
def my_super_func(params):
return 42
if __name__ == "__main__":
tools = [
... | [
"langchain_core.tools.Tool.from_function",
"langchain.chat_models.ChatOpenAI"
] | [((178, 231), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': '"""gpt-3.5-turbo"""', 'temperature': '(0)'}), "(model_name='gpt-3.5-turbo', temperature=0)\n", (188, 231), False, 'from langchain.chat_models import ChatOpenAI\n'), ((326, 427), 'langchain_core.tools.Tool.from_function', 'Tool.from_fun... |
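`Tool.from_function` wraps the plain callable so an agent can invoke it; the natural next step is handing the tool list to an agent. A hedged continuation, where the agent type and tool description are illustrative rather than taken from the row:

```python
# Hedged continuation of the row: the agent type and description
# are illustrative assumptions.
from langchain.agents import AgentType, initialize_agent
from langchain.chat_models import ChatOpenAI
from langchain_core.tools import Tool


def my_super_func(params):
    return 42


tools = [
    Tool.from_function(
        func=my_super_func,
        name="the_answer",
        description="Returns the answer to everything.",
    )
]
agent = initialize_agent(
    tools,
    ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0),
    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
)
print(agent.run("What is the answer to everything?"))
```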
import os
import tkinter as tk
from tkinter import Label, Entry, Button, Text, Scrollbar
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chat_models import ChatOpenAI
class ProjectEvaluatorApp:
def __init__(self, root):
self.root = root
self.root.... | [
"langchain.chat_models.ChatOpenAI"
] | [((3005, 3012), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (3010, 3012), True, 'import tkinter as tk\n'), ((380, 446), 'tkinter.Label', 'Label', (['root'], {'text': '"""Rate your coding ability on a scale of 1 to 5:"""'}), "(root, text='Rate your coding ability on a scale of 1 to 5:')\n", (385, 446), False, 'from tkinter... |
from __future__ import annotations
from collections import OrderedDict
from typing import Any, Dict, List, Optional, Tuple
import langchain
import numpy as np
import orjson
import pandas as pd
from langchain.cache import InMemoryCache
from peewee import ModelSelect, fn
from .constants import *
from .orm import Knowl... | [
"langchain.cache.InMemoryCache"
] | [((506, 521), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (519, 521), False, 'from langchain.cache import InMemoryCache\n'), ((11527, 11540), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (11538, 11540), False, 'from collections import OrderedDict\n'), ((4157, 4207), 'orjson.dumps'... |
from typing import Any, Dict, List, Optional
from langchain import PromptTemplate ,LLMChain
import langchain
from langchain.chat_models import ChatOpenAI ,AzureChatOpenAI
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
import sys
import re
import argparse
import os
from langchain.prompt... | [
"langchain.LLMChain",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.prompts.chat.ChatPromptTemplate",
"langchain.schema.SystemMessage",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template"
] | [((4074, 4127), 'langchain.prompts.chat.ChatPromptTemplate', 'ChatPromptTemplate', ([], {'messages': 'prompt_messages_1stTrans'}), '(messages=prompt_messages_1stTrans)\n', (4092, 4127), False, 'from langchain.prompts.chat import ChatPromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate, HumanMessagePromp... |
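The extraction shows a `ChatPromptTemplate` assembled from a fixed `SystemMessage` plus a `HumanMessagePromptTemplate`, the standard two-part translation prompt. The same assembly in miniature; the system instruction text is a stand-in:

```python
# Miniature chat-prompt assembly; the system instruction is a
# stand-in for the row's real prompt.
from langchain.prompts.chat import (
    ChatPromptTemplate,
    HumanMessagePromptTemplate,
)
from langchain.schema import SystemMessage

prompt = ChatPromptTemplate(
    messages=[
        SystemMessage(content="You are a careful technical translator."),
        HumanMessagePromptTemplate.from_template("Translate to English: {text}"),
    ]
)
print(prompt.format_prompt(text="Hola, mundo").to_messages())
```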