| code (string, 141–79.4k chars) | apis (list, 1–23 items) | extract_api (string, 126–73.2k chars) |
|---|---|---|
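Each row pairs a source file's code with the list of langchain APIs it uses and, in `extract_api`, a serialized list of call records. Below is a minimal sketch of how a single `extract_api` cell could be inspected; it assumes the cell is a Python-literal list of records shaped like the samples in the rows that follow (character offsets, dotted API path, callable name, parsed arguments, call text, argument offsets, a flag, and the originating import line). The field meanings are inferred from those samples, not from any documentation.

```python
# Minimal sketch, assuming `extract_api` cells are Python-literal lists of
# call records like the samples shown in the rows below. Field semantics
# are inferred from those samples and are not authoritative.
import ast

cell = ("[((661, 688), 'logging.getLogger', 'logging.getLogger', "
        "(['__name__'], {}), '(__name__)\\n', (678, 688), False, "
        "'import logging\\n')]")

for record in ast.literal_eval(cell):
    call_span, api_path, name, args, call_text, arg_span, flag, import_line = record
    print(f"{api_path} called at chars {call_span}; import: {import_line.strip()}")
```

The same pattern applies to every row; only the number of records and the cell lengths vary within the ranges given in the header.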
"""Load markdown, html, text from files, clean up, split, ingest into Pinecone."""
import pinecone
import tiktoken
from langchain.document_loaders import ReadTheDocsLoader
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import NLTKTextSplitter
from langchain.vectorstores.pinecone import P... | [
"langchain.document_loaders.ReadTheDocsLoader",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.vectorstores.pinecone.Pinecone.from_documents",
"langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder"
] | [((402, 445), 'langchain.document_loaders.ReadTheDocsLoader', 'ReadTheDocsLoader', (['"""hasura.io/docs/latest/"""'], {}), "('hasura.io/docs/latest/')\n", (419, 445), False, 'from langchain.document_loaders import ReadTheDocsLoader\n'), ((500, 573), 'langchain.text_splitter.NLTKTextSplitter.from_tiktoken_encoder', 'NLT... |
"""Module for loading index."""
import logging
from typing import TYPE_CHECKING, Any, Optional
from llama_index import ServiceContext, StorageContext, load_index_from_storage
from llama_index.indices.base import BaseIndex
from ols.app.models.config import ReferenceContent
# This is to avoid importing HuggingFaceBge... | [
"langchain_community.embeddings.HuggingFaceBgeEmbeddings"
] | [((661, 688), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (678, 688), False, 'import logging\n'), ((2376, 2445), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'embed_model': 'self._embed_model', 'llm': 'None'}), '(embed_model=self._embed_model, llm=N... |
"""The function tools tht are actually implemented"""
import json
import subprocess
from langchain.agents.load_tools import load_tools
from langchain.tools import BaseTool
from langchain.utilities.bash import BashProcess
from toolemu.tools.tool_interface import (
ArgException,
ArgParameter,
ArgReturn,
... | [
"langchain.agents.load_tools.load_tools"
] | [((2863, 2930), 'json.dumps', 'json.dumps', (["{'output': tool_output[0], 'exit_code': tool_output[1]}"], {}), "({'output': tool_output[0], 'exit_code': tool_output[1]})\n", (2873, 2930), False, 'import json\n'), ((4269, 4296), 'langchain.agents.load_tools.load_tools', 'load_tools', (["['python_repl']"], {}), "(['pytho... |
from typing import List
import re
from langchain.docstore.document import Document
from langchain_community.document_loaders import WebBaseLoader
from .DocumentLoadingBase import DocumentLoadingBase
from ..common.SourceDocument import SourceDocument
class WebDocumentLoading(DocumentLoadingBase):
def __init__(self... | [
"langchain_community.document_loaders.WebBaseLoader"
] | [((564, 611), 're.sub', 're.sub', (['"""\n{3,}"""', '"""\n\n"""', 'document.page_content'], {}), "('\\n{3,}', '\\n\\n', document.page_content)\n", (570, 611), False, 'import re\n'), ((710, 787), 're.compile', 're.compile', (['"""[\\\\x00-\\\\x1f\\\\x7f\\\\u0080-\\\\u00a0\\\\u2000-\\\\u3000\\\\ufff0-\\\\uffff]"""'], {})... |
from typing import List, Optional, Any, Dict
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
from pydantic import Extra, root_validator
from sam.gpt.quora import PoeClient, PoeResponse
# token = "KaEMfvDPEXoS115jzAFRRg%3D%3D"
# prompt = "write a java function that prints the nt... | [
"langchain.utils.get_from_dict_or_env"
] | [((573, 589), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (587, 589), False, 'from pydantic import Extra, root_validator\n'), ((663, 714), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""token"""', '"""POE_COOKIE"""'], {}), "(values, 'token', 'POE_COOKIE')\n", (683, 71... |
from __future__ import annotations
from typing import List, Optional
from pydantic import ValidationError
from langchain.chains.llm import LLMChain
from langchain.chat_models.base import BaseChatModel
from langchain.experimental.autonomous_agents.autogpt.output_parser import (
AutoGPTOutputParser,
BaseAutoGP... | [
"langchain.experimental.autonomous_agents.autogpt.output_parser.AutoGPTOutputParser",
"langchain.tools.human.tool.HumanInputRun",
"langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt",
"langchain.schema.HumanMessage",
"langchain.chains.llm.LLMChain",
"langchain.schema.AIMessage",
"lang... | [((1753, 1918), 'langchain.experimental.autonomous_agents.autogpt.prompt.AutoGPTPrompt', 'AutoGPTPrompt', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'tools': 'tools', 'input_variables': "['memory', 'messages', 'goals', 'user_input']", 'token_counter': 'llm.get_num_tokens'}), "(ai_name=ai_name, ai_role=ai_role, t... |
"""Main entrypoint for the app."""
import asyncio
import os
from operator import itemgetter
from typing import List, Optional, Sequence, Tuple, Union
from uuid import UUID
from fastapi import Depends, FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from langchain.callbacks.manager import CallbackMa... | [
"langchain.document_transformers.Html2TextTransformer",
"langchain.utilities.GoogleSearchAPIWrapper",
"langchain.schema.runnable.ConfigurableField",
"langchain.schema.messages.AIMessage",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.schema.output_parser.StrOutputParser",
"langcha... | [((4923, 4932), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (4930, 4932), False, 'from fastapi import Depends, FastAPI, Request\n'), ((12927, 12987), 'os.path.isfile', 'os.path.isfile', (["os.environ['GOOGLE_APPLICATION_CREDENTIALS']"], {}), "(os.environ['GOOGLE_APPLICATION_CREDENTIALS'])\n", (12941, 12987), False,... |
from langchain.chat_models import ChatOpenAI
from langchain_experimental.plan_and_execute import PlanAndExecute, load_agent_executor, load_chat_planner
from langchain.llms import OpenAI
from langchain import SerpAPIWrapper
from langchain.agents.tools import Tool
from langchain import LLMMathChain
search = SerpAPIWrapp... | [
"langchain.llms.OpenAI",
"langchain.chat_models.ChatOpenAI",
"langchain.LLMMathChain.from_llm",
"langchain.SerpAPIWrapper",
"langchain_experimental.plan_and_execute.load_chat_planner",
"langchain_experimental.plan_and_execute.PlanAndExecute",
"langchain.agents.tools.Tool",
"langchain_experimental.plan... | [((308, 324), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (322, 324), False, 'from langchain import SerpAPIWrapper\n'), ((331, 352), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (337, 352), False, 'from langchain.llms import OpenAI\n'), ((370, 414), 'langchai... |
"""Map-reduce chain.
Splits up a document, sends the smaller parts to the LLM with one prompt,
then combines the results with another one.
"""
from __future__ import annotations
from typing import Any, Dict, List, Mapping, Optional
from langchain.callbacks.manager import CallbackManagerForChainRun, Callbacks
from la... | [
"langchain.chains.ReduceDocumentsChain",
"langchain.chains.combine_documents.map_reduce.MapReduceDocumentsChain",
"langchain.chains.combine_documents.stuff.StuffDocumentsChain",
"langchain.docstore.document.Document",
"langchain.chains.llm.LLMChain",
"langchain.callbacks.manager.CallbackManagerForChainRun... | [((1734, 1787), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt', 'callbacks': 'callbacks'}), '(llm=llm, prompt=prompt, callbacks=callbacks)\n', (1742, 1787), False, 'from langchain.chains.llm import LLMChain\n'), ((1810, 1930), 'langchain.chains.combine_documents.stuff.StuffDocuments... |
from typing import Any, Dict, List, Optional, Sequence
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.pydantic_v1 import Extra, root_validator
from langchain.utils import get_from_dict_or_env
cla... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env",
"langchain.pydantic_v1.root_validator"
] | [((6229, 6245), 'langchain.pydantic_v1.root_validator', 'root_validator', ([], {}), '()\n', (6243, 6245), False, 'from langchain.pydantic_v1 import Extra, root_validator\n'), ((6411, 6485), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""aleph_alpha_api_key"""', '"""ALEPH_ALPHA_API_KEY""... |
import pandas as pd
import streamlit as st
from operator import itemgetter
from langchain.chains.openai_tools import create_extraction_chain_pydantic
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI
llm = ChatOpenAI(model="gpt-3.5-turbo-1106", temperature=0)
from typing i... | [
"langchain.chains.openai_tools.create_extraction_chain_pydantic",
"langchain_openai.ChatOpenAI",
"langchain_core.pydantic_v1.Field"
] | [((253, 306), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'model': '"""gpt-3.5-turbo-1106"""', 'temperature': '(0)'}), "(model='gpt-3.5-turbo-1106', temperature=0)\n", (263, 306), False, 'from langchain_openai import ChatOpenAI\n'), ((437, 483), 'pandas.read_csv', 'pd.read_csv', (['"""database_table_descriptions... |
import os
import re
import argparse
import json
import boto3
from bs4 import BeautifulSoup
from langchain.document_loaders import PDFMinerPDFasHTMLLoader
from langchain.docstore.document import Document
from langchain.text_splitter import RecursiveCharacterTextSplitter,CharacterTextSplitter
import statistics
smr_clien... | [
"langchain.document_loaders.PDFMinerPDFasHTMLLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((324, 357), 'boto3.client', 'boto3.client', (['"""sagemaker-runtime"""'], {}), "('sagemaker-runtime')\n", (336, 357), False, 'import boto3\n'), ((8397, 8430), 'langchain.document_loaders.PDFMinerPDFasHTMLLoader', 'PDFMinerPDFasHTMLLoader', (['pdf_path'], {}), '(pdf_path)\n', (8420, 8430), False, 'from langchain.docum... |
from langchain.vectorstores import Chroma
from langchain.embeddings import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
from langchain.chains import VectorDBQA
from langchain.document_loaders import TextLoader
from typing import List
from langchai... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.TextLoader",
"langchain.llms.OpenAI",
"langchain.vectorstores.Chroma.from_documents",
"langchain.embeddings.OpenAIEmbeddings"
] | [((515, 541), 'langchain.document_loaders.TextLoader', 'TextLoader', (['self.file_path'], {}), '(self.file_path)\n', (525, 541), False, 'from langchain.document_loaders import TextLoader\n'), ((886, 950), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {'chunk_size': '(1... |
import dataclasses
import json
import numpy as np
import os
import requests
import sys
from typing import List
from langchain.text_splitter import CharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from l... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.chat_models.ChatOpenAI",
"langchain.schema.Document",
"langchain.vectorstores.Chroma.from_documents",
"langchain.prompts.PromptTemplate",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1563, 1648), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'template': 'prompt_template', 'input_variables': "['context', 'question']"}), "(template=prompt_template, input_variables=['context',\n 'question'])\n", (1577, 1648), False, 'from langchain.prompts import PromptTemplate\n'), ((1786, 1811), ... |
import logging
from pathlib import Path
from typing import List, Optional, Tuple
from dotenv import load_dotenv
load_dotenv()
from queue import Empty, Queue
from threading import Thread
import gradio as gr
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models imp... | [
"langchain.schema.AIMessage",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((114, 127), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (125, 127), False, 'from dotenv import load_dotenv\n'), ((604, 698), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""[%(asctime)s %(levelname)s]: %(message)s"""', 'level': 'logging.INFO'}), "(format='[%(asctime)s %(levelname)s]: %(me... |
"""
View stage example selector.
| Copyright 2017-2023, Voxel51, Inc.
| `voxel51.com <https://voxel51.com/>`_
|
"""
import os
import pickle
from langchain.prompts import FewShotPromptTemplate, PromptTemplate
import numpy as np
import pandas as pd
from scipy.spatial.distance import cosine
# pylint: disable=relative-b... | [
"langchain.prompts.FewShotPromptTemplate",
"langchain.prompts.PromptTemplate"
] | [((489, 523), 'os.path.join', 'os.path.join', (['ROOT_DIR', '"""examples"""'], {}), "(ROOT_DIR, 'examples')\n", (501, 523), False, 'import os\n'), ((551, 605), 'os.path.join', 'os.path.join', (['EXAMPLES_DIR', '"""viewstage_embeddings.pkl"""'], {}), "(EXAMPLES_DIR, 'viewstage_embeddings.pkl')\n", (563, 605), False, 'im... |
import logging
from langchain.chains import RetrievalQA
from neogpt.prompts.prompt import get_prompt
def local_retriever(db, llm, persona="default"):
"""
Fn: local_retriever
Description: The function sets up the local retrieval-based question-answering system.
Args:
db (object): The database... | [
"langchain.chains.RetrievalQA.from_chain_type"
] | [((466, 493), 'neogpt.prompts.prompt.get_prompt', 'get_prompt', ([], {'persona': 'persona'}), '(persona=persona)\n', (476, 493), False, 'from neogpt.prompts.prompt import get_prompt\n'), ((590, 768), 'langchain.chains.RetrievalQA.from_chain_type', 'RetrievalQA.from_chain_type', ([], {'llm': 'llm', 'retriever': 'local_r... |
from typing import Any, Dict
from injector import inject, singleton
from langchain_core.output_parsers.json import JsonOutputParser
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
from langchain_core.runnables import RunnableSerializable
from bao.components.llms import LLMs
from bao.setting... | [
"langchain_core.prompts.MessagesPlaceholder",
"langchain_core.output_parsers.json.JsonOutputParser"
] | [((1029, 1047), 'langchain_core.output_parsers.json.JsonOutputParser', 'JsonOutputParser', ([], {}), '()\n', (1045, 1047), False, 'from langchain_core.output_parsers.json import JsonOutputParser\n'), ((876, 925), 'langchain_core.prompts.MessagesPlaceholder', 'MessagesPlaceholder', ([], {'variable_name': '"""chat_histor... |
from langchain import PromptTemplate
PROMPT = """
You need to act as an excellent key-information extraction assistant, extracting the key content (at most 5 keywords) from the human's conversation to help other assistants answer questions more precisely.
Note: you do not need to give any explanation; just output the keywords strictly in the format shown in the example.
Example:
Human: I run a garment factory; can your bin-packing algorithm be applied to improve our loading rate?
AI: garment factory, bin-packing algorithm, loading rate
Now begin:
Human: {query}
AI:
"""
def information_extraction_raw_prompt():
return PromptTemplate(template=PROMPT, input_v... | [
"langchain.PromptTemplate"
] | [((281, 339), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT', 'input_variables': "['query']"}), "(template=PROMPT, input_variables=['query'])\n", (295, 339), False, 'from langchain import PromptTemplate\n'), ((397, 455), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'PROMPT',... |
import base64
import email
from enum import Enum
from typing import Any, Dict, List, Optional, Type
from langchain.callbacks.manager import CallbackManagerForToolRun
from langchain.pydantic_v1 import BaseModel, Field
from langchain.tools.gmail.base import GmailBaseTool
from langchain.tools.gmail.utils import clean_ema... | [
"langchain.pydantic_v1.Field",
"langchain.tools.gmail.utils.clean_email_body"
] | [((606, 1054), 'langchain.pydantic_v1.Field', 'Field', (['...'], {'description': '"""The Gmail query. Example filters include from:sender, to:recipient, subject:subject, -filtered_term, in:folder, is:important|read|starred, after:year/mo/date, before:year/mo/date, label:label_name "exact phrase". Search newer/older tha... |
from langchain import PromptTemplate
from codedog.templates import grimoire_en
TRANSLATE_PROMPT = PromptTemplate(
template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=["language", "description", "content"]
)
| [
"langchain.PromptTemplate"
] | [((100, 217), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'template': 'grimoire_en.TRANSLATE_PR_REVIEW', 'input_variables': "['language', 'description', 'content']"}), "(template=grimoire_en.TRANSLATE_PR_REVIEW, input_variables=[\n 'language', 'description', 'content'])\n", (114, 217), False, 'from langchain... |
# Importing necessary library
import streamlit as st
# Setting up the page configuration
st.set_page_config(
page_title="QuickDigest AI",
page_icon=":brain:",
layout="wide",
initial_sidebar_state="expanded"
)
# Defining the function to display the home page
def home():
import streamlit as st
... | [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.memory.ConversationBufferMemory",
"langchain.agents.create_pandas_dataframe_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.tools.DuckDuckGoSearchRun",
"langchain.agents.ConversationalChatAgent.from_llm_and_tools",
"langchain.memor... | [((91, 213), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""QuickDigest AI"""', 'page_icon': '""":brain:"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='QuickDigest AI', page_icon=':brain:', layout\n ='wide', initial_sidebar_state='expanded')\n", (1... |
from time import monotonic
from rich.console import Console
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.llms import OpenAI
class Experiment:
"""
A class representing an experiment.
Attributes:
params (dict): A dictionary containing experiment parameters.
... | [
"langchain.llms.OpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((1970, 1979), 'rich.console.Console', 'Console', ([], {}), '()\n', (1977, 1979), False, 'from rich.console import Console\n'), ((2643, 2654), 'time.monotonic', 'monotonic', ([], {}), '()\n', (2652, 2654), False, 'from time import monotonic\n'), ((2680, 2769), 'langchain.text_splitter.RecursiveCharacterTextSplitter', ... |
import os
os.environ["CUDA_VISIBLE_DEVICES"] = "2"
import re
import torch
import gradio as gr
from clc.langchain_application import LangChainApplication, torch_gc
from transformers import StoppingCriteriaList, StoppingCriteriaList
from clc.callbacks import Iteratorize, Stream
from clc.matching import key_words_match_in... | [
"langchain.schema.Document"
] | [((1155, 1183), 'clc.langchain_application.LangChainApplication', 'LangChainApplication', (['config'], {}), '(config)\n', (1175, 1183), False, 'from clc.langchain_application import LangChainApplication, torch_gc\n'), ((3620, 3630), 'clc.langchain_application.torch_gc', 'torch_gc', ([], {}), '()\n', (3628, 3630), False... |
"""Callback Handler that writes to a file."""
from typing import Any, Dict, Optional, TextIO, cast
from langchain_core.agents import AgentAction, AgentFinish
from langchain_core.callbacks import BaseCallbackHandler
from langchain_core.utils.input import print_text
class FileCallbackHandler(BaseCallbackHandler):
... | [
"langchain_core.utils.input.print_text"
] | [((989, 1084), 'langchain_core.utils.input.print_text', 'print_text', (['f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m"""'], {'end': '"""\n"""', 'file': 'self.file'}), '(f"""\n\n\x1b[1m> Entering new {class_name} chain...\x1b[0m""", end=\'\\n\',\n file=self.file)\n', (999, 1084), False, 'from langchain_... |
import base64
import json
from langchain_community.chat_models import ChatOpenAI
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate, SystemMessagePromptTemplate
from langchain_core.pydantic_v1 import Field
from langserve import CustomUserType
from .prompts ... | [
"langchain_core.pydantic_v1.Field",
"langchain_core.prompts.SystemMessagePromptTemplate.from_template",
"langchain_core.output_parsers.StrOutputParser",
"langchain_community.chat_models.ChatOpenAI"
] | [((454, 494), 'langchain_community.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model': '"""gpt-4"""'}), "(temperature=0, model='gpt-4')\n", (464, 494), False, 'from langchain_community.chat_models import ChatOpenAI\n'), ((1047, 1099), 'langchain_core.pydantic_v1.Field', 'Field', (['...'], {'extr... |
"""This script is used to initialize the Qdrant db backend with Azure OpenAI."""
import os
from typing import Any, List, Optional, Tuple
import openai
from dotenv import load_dotenv
from langchain.docstore.document import Document
from langchain.text_splitter import NLTKTextSplitter
from langchain_community.document_l... | [
"langchain.text_splitter.NLTKTextSplitter",
"langchain_community.document_loaders.DirectoryLoader",
"langchain_community.embeddings.AzureOpenAIEmbeddings",
"langchain_community.embeddings.OpenAIEmbeddings"
] | [((692, 705), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (703, 705), False, 'from dotenv import load_dotenv\n'), ((709, 746), 'ultra_simple_config.load_config', 'load_config', ([], {'location': '"""config/db.yml"""'}), "(location='config/db.yml')\n", (720, 746), False, 'from ultra_simple_config import load_... |
import json
from typing import Any, Callable, List
from langchain_core.tracers.base import BaseTracer
from langchain_core.tracers.schemas import Run
from langchain_core.utils.input import get_bolded_text, get_colored_text
def try_json_stringify(obj: Any, fallback: str) -> str:
"""
Try to stringify an object ... | [
"langchain_core.utils.input.get_colored_text",
"langchain_core.utils.input.get_bolded_text"
] | [((588, 633), 'json.dumps', 'json.dumps', (['obj'], {'indent': '(2)', 'ensure_ascii': '(False)'}), '(obj, indent=2, ensure_ascii=False)\n', (598, 633), False, 'import json\n'), ((2591, 2659), 'langchain_core.utils.input.get_bolded_text', 'get_bolded_text', (['f"""[{crumbs}] Entering {run_type} run with input:\n"""'], {... |
import sys
from langchain.chains.summarize import load_summarize_chain
from langchain import OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
text_splitter = RecursiveCharacterTextSplitter()
# get transcript file key from args
file_key = sys.argv[1]
# get transcript text
text = open(file_k... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain.docstore.document.Document",
"langchain.OpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((186, 218), 'langchain.text_splitter.RecursiveCharacterTextSplitter', 'RecursiveCharacterTextSplitter', ([], {}), '()\n', (216, 218), False, 'from langchain.text_splitter import RecursiveCharacterTextSplitter\n'), ((344, 365), 'langchain.OpenAI', 'OpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (350, 365)... |
from base64 import b64decode
import os
import textwrap
from math import ceil
from dotenv import load_dotenv
load_dotenv() # take environment variables from .env.
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from langchain.prompts import PromptTemplate... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain_openai.llms.OpenAI",
"langchain.docstore.document.Document",
"langchain_community.llms.HuggingFaceHub"
] | [((109, 122), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (120, 122), False, 'from dotenv import load_dotenv\n'), ((930, 1033), 'fastapi.FastAPI', 'FastAPI', ([], {'docs_url': '"""/api/llm/docs"""', 'redoc_url': '"""/api/llm/redoc"""', 'openapi_url': '"""/api/llm/openapi.json"""'}), "(docs_url='/api/llm/docs... |
from typing import Any, Dict, List, Union
from langchain.memory.chat_memory import BaseChatMemory
from langchain.schema.messages import BaseMessage, get_buffer_string
class ConversationBufferWindowMemory(BaseChatMemory):
"""Buffer for storing conversation memory inside a limited size window."""
human_prefix... | [
"langchain.schema.messages.get_buffer_string"
] | [((899, 989), 'langchain.schema.messages.get_buffer_string', 'get_buffer_string', (['messages'], {'human_prefix': 'self.human_prefix', 'ai_prefix': 'self.ai_prefix'}), '(messages, human_prefix=self.human_prefix, ai_prefix=self.\n ai_prefix)\n', (916, 989), False, 'from langchain.schema.messages import BaseMessage, g... |
from typing import Any, Dict, Optional, Type # type: ignore
import langchain
from langchain import LLMChain, PromptTemplate
from langchain.experimental.autonomous_agents import AutoGPT
from sam.core.utils import logger
class AutoGptAgent:
agent: AutoGPT
def __init__(
self, ai_name: str, ai_role: s... | [
"langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools"
] | [((434, 535), 'langchain.experimental.autonomous_agents.AutoGPT.from_llm_and_tools', 'AutoGPT.from_llm_and_tools', ([], {'ai_name': 'ai_name', 'ai_role': 'ai_role', 'llm': 'llm', 'memory': 'memory', 'tools': 'tools'}), '(ai_name=ai_name, ai_role=ai_role, llm=llm,\n memory=memory, tools=tools)\n', (460, 535), False, ... |
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langchain.prompts import StringPromptTemplate
from langchain import OpenAI, SerpAPIWrapper, LLMChain
from typing import List, Union
from langchain.schema import AgentAction, AgentFinish
import re
from langchain.utilities impo... | [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.agents.LLMSingleActionAgent",
"langchain_tools.cwtool.CloudWatchInsightQuery",
"langchain.LLMChain",
"langchain.tools.human.tool.HumanInputRun",
"langchain.utilities.BashProcess",
"langchain.SerpAPIWrapper",
"langchain.agents.Tool",
"... | [((2630, 2646), 'langchain.SerpAPIWrapper', 'SerpAPIWrapper', ([], {}), '()\n', (2644, 2646), False, 'from langchain import OpenAI, SerpAPIWrapper, LLMChain\n'), ((2658, 2671), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (2669, 2671), False, 'from langchain.utilities import BashProcess\n'), ((26... |
# coding: utf-8
import os
import gradio as gr
import re
import uuid
from PIL import Image, ImageDraw, ImageOps, ImageFont
import numpy as np
import argparse
import inspect
from langchain.agents.initialize import initialize_agent
from langchain.agents.tools import Tool
from langchain.chains.conversation.memory import Co... | [
"langchain.agents.initialize.initialize_agent",
"langchain.agents.tools.Tool",
"langchain.chains.conversation.memory.ConversationBufferMemory"
] | [((1924, 1959), 'os.makedirs', 'os.makedirs', (['"""image"""'], {'exist_ok': '(True)'}), "('image', exist_ok=True)\n", (1935, 1959), False, 'import os\n'), ((7453, 7478), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7476, 7478), False, 'import argparse\n'), ((3840, 3879), 'gpt4tools.llm.Llam... |
"""Zero-shot agent with toolkit."""
import re
from langchain.agents.agent import Agent
from langchain.agents.mrkl.base import ZeroShotAgent
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.base import BaseCallbackManager
from langchain.chains import LLMChain
from langchain.prompts import ... | [
"langchain.schema.AgentAction",
"langchain.schema.AgentFinish",
"langchain.schema.SystemMessage",
"langchain.prompts.chat.HumanMessagePromptTemplate.from_template",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.prompts.chat.ChatPromptTemplate.from_messages"
] | [((2095, 2157), 'procoder.functional.add_refnames', 'add_refnames', (['AGENT_DUMMY_VARS', 'inputs'], {'include_brackets': '(False)'}), '(AGENT_DUMMY_VARS, inputs, include_brackets=False)\n', (2107, 2157), False, 'from procoder.functional import add_refnames, collect_refnames, format_multiple_prompts, format_prompt\n'),... |
#import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
import warnings
warnings.filterwarnings("ignore")
from langchain.agents.agent_toolkits import create_python_agent
from langchain.agents import load_tools, initialize_agent
from langchain.agents import AgentT... | [
"langchain.agents.initialize_agent",
"langchain.tools.python.tool.PythonREPLTool",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((128, 161), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (151, 161), False, 'import warnings\n'), ((489, 514), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (499, 514), False, 'from langchain.chat_models import Cha... |
from typing import List, Optional, Type
from langchain.memory import (
ChatMessageHistory,
ConversationBufferMemory,
ConversationSummaryMemory,
RedisChatMessageHistory,
RedisEntityStore,
VectorStoreRetrieverMemory,
)
class Memory:
@staticmethod
def messageHistory(path: str):
h... | [
"langchain.memory.ConversationSummaryMemory",
"langchain.memory.ConversationBufferMemory",
"langchain.memory.ChatMessageHistory"
] | [((329, 349), 'langchain.memory.ChatMessageHistory', 'ChatMessageHistory', ([], {}), '()\n', (347, 349), False, 'from langchain.memory import ChatMessageHistory, ConversationBufferMemory, ConversationSummaryMemory, RedisChatMessageHistory, RedisEntityStore, VectorStoreRetrieverMemory\n'), ((442, 468), 'langchain.memory... |
"""Callback Handler that prints to std out."""
from __future__ import annotations
from typing import TYPE_CHECKING, Any, Dict, Optional
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.utils import print_text
if TYPE_CHECKING:
from langchain_core.agents import AgentAction, Agent... | [
"langchain_core.utils.print_text"
] | [((1261, 1310), 'langchain_core.utils.print_text', 'print_text', (['action.log'], {'color': '(color or self.color)'}), '(action.log, color=color or self.color)\n', (1271, 1310), False, 'from langchain_core.utils import print_text\n'), ((1727, 1772), 'langchain_core.utils.print_text', 'print_text', (['output'], {'color'... |
import os
from langchain import ElasticVectorSearch
from langchain.docstore.document import Document
from langchain.vectorstores import VectorStore
from langchain.embeddings.base import Embeddings
db_persistent_path = f"""{os.environ["db_persistent_path"]}/elasticsearch"""
INDEX_NAME = "esindex"
def upload(documents... | [
"langchain.ElasticVectorSearch",
"langchain.ElasticVectorSearch.from_documents"
] | [((382, 510), 'langchain.ElasticVectorSearch.from_documents', 'ElasticVectorSearch.from_documents', (['documents', 'embeddings'], {'elasticsearch_url': '"""http://localhost:9200"""', 'index_name': 'INDEX_NAME'}), "(documents, embeddings, elasticsearch_url\n ='http://localhost:9200', index_name=INDEX_NAME)\n", (416, ... |
import json
from pydantic import BaseModel, Field
from pydantic import BaseModel, Field
from langchain.llms.base import BaseLLM
from typing import List, Any
from langchain import LLMChain
from llm.generate_task_plan.prompt import get_template
from llm.list_output_parser import LLMListOutputParser
class Task(BaseModel... | [
"langchain.LLMChain"
] | [((359, 392), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task ID"""'}), "(..., description='Task ID')\n", (364, 392), False, 'from pydantic import BaseModel, Field\n'), ((416, 458), 'pydantic.Field', 'Field', (['...'], {'description': '"""Task description"""'}), "(..., description='Task description')\n", ... |
"""
The purpose of this file is to provide a wrapper around Pinecone from langchain
"""
from langchain.schema.document import Document
from langchain_community.embeddings import HuggingFaceInstructEmbeddings
from pinecone import Pinecone
from neogpt.settings.config import (
DEVICE_TYPE,
EMBEDDING_DIME... | [
"langchain_community.embeddings.HuggingFaceInstructEmbeddings"
] | [((689, 819), 'langchain_community.embeddings.HuggingFaceInstructEmbeddings', 'HuggingFaceInstructEmbeddings', ([], {'model_name': 'EMBEDDING_MODEL', 'model_kwargs': "{'device': DEVICE_TYPE}", 'cache_folder': 'MODEL_DIRECTORY'}), "(model_name=EMBEDDING_MODEL, model_kwargs={\n 'device': DEVICE_TYPE}, cache_folder=MOD... |
# Ingest Documents into a Zep Collection
import os
from dotenv import find_dotenv, load_dotenv
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import WebBaseLoader
from zep_python import ZepClient
from zep_python.langchain.vectorstore import ZepVectorStore
... | [
"langchain_community.document_loaders.WebBaseLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((449, 478), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_URL"""'], {}), "('ZEP_API_URL')\n", (463, 478), False, 'import os\n'), ((549, 578), 'os.environ.get', 'os.environ.get', (['"""ZEP_API_KEY"""'], {}), "('ZEP_API_KEY')\n", (563, 578), False, 'import os\n'), ((784, 821), 'os.environ.get', 'os.environ.get', ([... |
#model_settings.py
import streamlit as st
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_index import LangchainEmbedding, LLMPredictor, PromptHelper, OpenAIEmbedding, ServiceContext
from llama_index.logger import LlamaLogger
from langchain.chat_models import ChatOpenAI
from langchain imp... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((705, 751), 'streamlit.selectbox', 'st.selectbox', (['"""Sentence transformer:"""', 'options'], {}), "('Sentence transformer:', options)\n", (717, 751), True, 'import streamlit as st\n'), ((1220, 1279), 'llama_index.PromptHelper', 'PromptHelper', (['max_input_size', 'num_output', 'max_chunk_overlap'], {}), '(max_inpu... |
import os
from dotenv import load_dotenv
import streamlit as st
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains import ConversationalRetrievalC... | [
"langchain.llms.OpenAI",
"langchain.vectorstores.LanceDB",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((924, 983), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""GlobeBotter"""', 'page_icon': '"""🎬"""'}), "(page_title='GlobeBotter', page_icon='🎬')\n", (942, 983), True, 'import streamlit as st\n'), ((984, 1055), 'streamlit.header', 'st.header', (['"""🎬 Welcome to MovieHarbor, your favouri... |
from langchain_community.document_loaders import PyPDFLoader
from langchain_community.document_loaders.csv_loader import CSVLoader
from langchain_community.document_loaders import HNLoader
from langchain.text_splitter import CharacterTextSplitter
from langchain.text_splitter import RecursiveCharacterTextSplitter
... | [
"langchain_community.document_loaders.PyPDFLoader",
"langchain.text_splitter.CharacterTextSplitter",
"langchain_openai.llms.OpenAI",
"langchain_community.document_loaders.csv_loader.CSVLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.UnstructuredHTML... | [((741, 785), 'langchain_community.document_loaders.PyPDFLoader', 'PyPDFLoader', (['"""attention is all you need.pdf"""'], {}), "('attention is all you need.pdf')\n", (752, 785), False, 'from langchain_community.document_loaders import PyPDFLoader\n'), ((838, 878), 'langchain_community.document_loaders.csv_loader.CSVLo... |
# define chain components
from langchain.memory import ConversationBufferMemory
from langchain.chat_models import ChatOpenAI
from langchain.chains import ConversationChain
from langchain.prompts.prompt import PromptTemplate
from database import save_message_to_db, connect_2_db
import os
from pymongo import Mong... | [
"langchain.chains.ConversationChain",
"langchain.prompts.prompt.PromptTemplate",
"langchain.memory.ConversationBufferMemory"
] | [((460, 473), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (471, 473), False, 'from dotenv import load_dotenv\n'), ((664, 690), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {}), '()\n', (688, 690), False, 'from langchain.memory import ConversationBufferMemory\n'), ((719, 733),... |
import streamlit as st
from pathlib import Path
from streamlit_chat import message
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
import os
os.environ["OPENAI_API_KEY"] = st.secrets["open_... | [
"langchain.llms.OpenAI",
"langchain.indexes.VectorstoreIndexCreator"
] | [((334, 377), 'streamlit.title', 'st.title', (['"""CSV Question and answer ChatBot"""'], {}), "('CSV Question and answer ChatBot')\n", (342, 377), True, 'import streamlit as st\n'), ((400, 451), 'streamlit.file_uploader', 'st.file_uploader', ([], {'label': '"""Upload your CSV File here"""'}), "(label='Upload your CSV F... |
from typing import Any, Dict, List, Literal, Optional, Union
from exa_py import Exa # type: ignore
from exa_py.api import HighlightsContentsOptions, TextContentsOptions # type: ignore
from langchain_core.callbacks import CallbackManagerForRetrieverRun
from langchain_core.documents import Document
from langchain_core... | [
"langchain_exa._utilities.initialize_client",
"langchain_core.pydantic_v1.Field",
"langchain_core.pydantic_v1.root_validator"
] | [((2332, 2351), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2337, 2351), False, 'from langchain_core.pydantic_v1 import Field, SecretStr, root_validator\n'), ((2381, 2400), 'langchain_core.pydantic_v1.Field', 'Field', ([], {'default': 'None'}), '(default=None)\n', (2386,... |
# Copyright 2023 Lei Zhang
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, so... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.agents.initialize_agent",
"langchain_plantuml.diagram.activity_diagram_callback",
"langchain.document_loaders.TextLoader",
"langchain.tools.Tool",
"langchain.chat_models.ChatOpenAI",
"langchain_plantuml.diagram.sequence_diagram_callback",
"la... | [((1171, 1184), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (1182, 1184), False, 'from dotenv import load_dotenv\n'), ((3316, 3371), 'langchain_plantuml.diagram.activity_diagram_callback', 'diagram.activity_diagram_callback', ([], {'note_max_length': '(2000)'}), '(note_max_length=2000)\n', (3349, 3371), Fals... |
from __future__ import annotations
from typing import Any, TypeVar
from langchain_core.exceptions import OutputParserException
from langchain_core.language_models import BaseLanguageModel
from langchain_core.output_parsers import BaseOutputParser
from langchain_core.prompts import BasePromptTemplate
from langchain.o... | [
"langchain_core.exceptions.OutputParserException",
"langchain.chains.llm.LLMChain"
] | [((371, 383), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (378, 383), False, 'from typing import Any, TypeVar\n'), ((1545, 1577), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'llm', 'prompt': 'prompt'}), '(llm=llm, prompt=prompt)\n', (1553, 1577), False, 'from langchain.chains.llm import LLM... |
import logging
import os
import nextcord # add this
import openai
from langchain import OpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from nextcord.ext import commands
from pytube import YouTube
logging.basicConfig(
level=log... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain.text_splitter.RecursiveCharacterTextSplitter"
] | [((286, 393), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(name)s - %(levelname)s - %(message)s"""'}), "(level=logging.INFO, format=\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n", (305, 393), False, 'import logging\n'), ((404, 431), 'loggin... |
from __future__ import annotations
import uuid
from typing import TYPE_CHECKING, Any, Dict, Iterable, List, Optional, Tuple, Type
from langchain.docstore.document import Document
from langchain.embeddings.base import Embeddings
from langchain.utils import get_from_env
from langchain.vectorstores.base import VectorSto... | [
"langchain.utils.get_from_env",
"langchain.docstore.document.Document"
] | [((965, 1009), 'meilisearch.Client', 'meilisearch.Client', ([], {'url': 'url', 'api_key': 'api_key'}), '(url=url, api_key=api_key)\n', (983, 1009), False, 'import meilisearch\n'), ((776, 814), 'langchain.utils.get_from_env', 'get_from_env', (['"""url"""', '"""MEILI_HTTP_ADDR"""'], {}), "('url', 'MEILI_HTTP_ADDR')\n", (... |
# Author: Yiannis Charalambous
from langchain.base_language import BaseLanguageModel
from langchain.schema import AIMessage, BaseMessage, HumanMessage
from esbmc_ai.config import ChatPromptSettings
from .base_chat_interface import BaseChatInterface, ChatResponse
from .ai_models import AIModel
class OptimizeCode(Bas... | [
"langchain.schema.AIMessage",
"langchain.schema.HumanMessage"
] | [((838, 964), 'langchain.schema.HumanMessage', 'HumanMessage', ([], {'content': 'f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""'}), '(content=\n f"""Reply OK if you understand the following is the source code to optimize:\n\n{source_code}"""\n )\n', (850, 964), Fa... |
import os
from typing import Any, Optional
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from pydantic import Extra
import registry
import streaming
from .base import BaseTool, BASE_TOOL_DESCRIPTION_TEMPLATE
current_dir = os.path.dirname(__file__)
project_root = os.path.join(curr... | [
"langchain.prompts.PromptTemplate"
] | [((262, 287), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (277, 287), False, 'import os\n'), ((303, 335), 'os.path.join', 'os.path.join', (['current_dir', '"""../"""'], {}), "(current_dir, '../')\n", (315, 335), False, 'import os\n'), ((355, 399), 'os.path.join', 'os.path.join', (['project... |
from langchain.utilities import WikipediaAPIWrapper
def wikipedia_function(topic):
"""
Runs a query on the Wikipedia API.
Args:
topic (str): The topic to query.
Returns:
dict: The result of the query.
Examples:
>>> wikipedia_function('Python')
{'title': 'Python', 'summary': ... | [
"langchain.utilities.WikipediaAPIWrapper"
] | [((383, 404), 'langchain.utilities.WikipediaAPIWrapper', 'WikipediaAPIWrapper', ([], {}), '()\n', (402, 404), False, 'from langchain.utilities import WikipediaAPIWrapper\n')] |
import streamlit as st
import datetime
import os
import psycopg2
from dotenv import load_dotenv
from langchain.prompts import PromptTemplate
from langchain.docstore.document import Document
def log(message):
current_time = datetime.datetime.now()
milliseconds = current_time.microsecond // 1000
timestamp ... | [
"langchain.docstore.document.Document",
"langchain.prompts.PromptTemplate"
] | [((2668, 2806), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['input_question', 'table_info', 'columns_info', 'top_k', 'no_answer_text']", 'template': '_postgres_prompt'}), "(input_variables=['input_question', 'table_info',\n 'columns_info', 'top_k', 'no_answer_text'], template=_po... |
import os
import pandas as pd
from langchain.chains import LLMChain
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
import mlflow
assert (
"OPENAI_API_KEY" in os.environ
), "Please set the OPENAI_API_KEY environment variable to run this example."
def build_and_evalute_model_with_... | [
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate",
"langchain.llms.OpenAI"
] | [((1832, 1932), 'mlflow.load_table', 'mlflow.load_table', (['"""eval_results_table.json"""'], {'extra_columns': "['run_id', 'params.prompt_template']"}), "('eval_results_table.json', extra_columns=['run_id',\n 'params.prompt_template'])\n", (1849, 1932), False, 'import mlflow\n'), ((349, 367), 'mlflow.start_run', 'm... |
import hashlib
try:
from langchain_community.document_loaders import UnstructuredXMLLoader
except ImportError:
raise ImportError(
'XML file requires extra dependencies. Install with `pip install --upgrade "embedchain[dataloaders]"`'
) from None
from embedchain.helpers.json_serializable import regis... | [
"langchain_community.document_loaders.UnstructuredXMLLoader"
] | [((588, 618), 'langchain_community.document_loaders.UnstructuredXMLLoader', 'UnstructuredXMLLoader', (['xml_url'], {}), '(xml_url)\n', (609, 618), False, 'from langchain_community.document_loaders import UnstructuredXMLLoader\n'), ((705, 726), 'embedchain.utils.misc.clean_string', 'clean_string', (['content'], {}), '(c... |
import os
import voyager.utils as U
from langchain.chat_models import ChatOpenAI
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.schema import HumanMessage, SystemMessage
from langchain.vectorstores import Chroma
from voyager.prompts import load_prompt
from voyager.control_primitives import lo... | [
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((583, 678), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'model_name': 'model_name', 'temperature': 'temperature', 'request_timeout': 'request_timout'}), '(model_name=model_name, temperature=temperature, request_timeout=\n request_timout)\n', (593, 678), False, 'from langchain.chat_models import ChatOpe... |
from langflow import CustomComponent
from langchain.agents import AgentExecutor, create_json_agent
from langflow.field_typing import (
BaseLanguageModel,
)
from langchain_community.agent_toolkits.json.toolkit import JsonToolkit
class JsonAgentComponent(CustomComponent):
display_name = "JsonAgent"
descript... | [
"langchain.agents.create_json_agent"
] | [((657, 700), 'langchain.agents.create_json_agent', 'create_json_agent', ([], {'llm': 'llm', 'toolkit': 'toolkit'}), '(llm=llm, toolkit=toolkit)\n', (674, 700), False, 'from langchain.agents import AgentExecutor, create_json_agent\n')] |
from typing import Annotated, List, Optional
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException, Query, Request
from fastapi.responses import StreamingResponse
from langchain.embeddings.ollama import OllamaEmbeddings
from langchain.embeddings.openai import OpenAIEmbeddings
from logger import g... | [
"langchain.embeddings.ollama.OllamaEmbeddings",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((1158, 1178), 'logger.get_logger', 'get_logger', (['__name__'], {}), '(__name__)\n', (1168, 1178), False, 'from logger import get_logger\n'), ((1194, 1205), 'fastapi.APIRouter', 'APIRouter', ([], {}), '()\n', (1203, 1205), False, 'from fastapi import APIRouter, Depends, HTTPException, Query, Request\n'), ((1230, 1251... |
import json
import os
import pickle
from taskweaver.plugin import Plugin, register_plugin
@register_plugin
class DocumentRetriever(Plugin):
vectorstore = None
def _init(self):
try:
import tiktoken
from langchain_community.embeddings import HuggingFaceEmbeddings
fr... | [
"langchain_community.embeddings.HuggingFaceEmbeddings"
] | [((499, 551), 'langchain_community.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '"""all-MiniLM-L6-v2"""'}), "(model_name='all-MiniLM-L6-v2')\n", (520, 551), False, 'from langchain_community.embeddings import HuggingFaceEmbeddings\n'), ((960, 1004), 'tiktoken.encoding_for_model', 'tikt... |
from langchain.utilities import BashProcess
from langchain.agents import load_tools
def get_built_in_tools(tools: list[str]):
bash = BashProcess()
load_tools(["human"])
return [bash]
| [
"langchain.utilities.BashProcess",
"langchain.agents.load_tools"
] | [((139, 152), 'langchain.utilities.BashProcess', 'BashProcess', ([], {}), '()\n', (150, 152), False, 'from langchain.utilities import BashProcess\n'), ((158, 179), 'langchain.agents.load_tools', 'load_tools', (["['human']"], {}), "(['human'])\n", (168, 179), False, 'from langchain.agents import load_tools\n')] |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in ... | [
"langchain.llms.utils.enforce_stop_tokens"
] | [((5354, 5476), 'transformers.pipeline', 'hf_pipeline', ([], {'task': 'task', 'model': 'model', 'tokenizer': 'tokenizer', 'device': '"""cpu"""', 'model_kwargs': '_model_kwargs'}), "(task=task, model=model, tokenizer=tokenizer, device='cpu',\n model_kwargs=_model_kwargs, **_pipeline_kwargs)\n", (5365, 5476), True, 'f... |
from typing import AsyncGenerator, Optional, Tuple
from langchain import ConversationChain
import logging
from typing import Optional, Tuple
from pydantic.v1 import SecretStr
from vocode.streaming.agent.base_agent import RespondAgent
from vocode.streaming.agent.utils import get_sentence_from_buffer
from langchain im... | [
"langchain_community.chat_models.ChatAnthropic",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.memory.ConversationBufferMemory",
"langchain.prompts.MessagesPlaceholder",
"langchain.schema.HumanMessage",
"langchain.schema.AIMessage",
"langchain.ConversationChain"
] | [((2147, 2238), 'langchain_community.chat_models.ChatAnthropic', 'ChatAnthropic', ([], {'model_name': 'agent_config.model_name', 'anthropic_api_key': 'anthropic_api_key'}), '(model_name=agent_config.model_name, anthropic_api_key=\n anthropic_api_key)\n', (2160, 2238), False, 'from langchain_community.chat_models imp... |
from typing import Any, Dict
from langchain.base_language import BaseLanguageModel
from langchain.prompts import (
ChatPromptTemplate,
HumanMessagePromptTemplate,
MessagesPlaceholder,
SystemMessagePromptTemplate,
)
from langchain.chains import ConversationChain
from real_agents.adapters.exe... | [
"langchain.chains.ConversationChain",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.prompts.SystemMessagePromptTemplate.from_template",
"langchain.prompts.MessagesPlaceholder"
] | [((894, 940), 'real_agents.adapters.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'return_messages': '(True)'}), '(return_messages=True)\n', (918, 940), False, 'from real_agents.adapters.memory import ConversationBufferMemory\n'), ((1746, 1824), 'langchain.chains.ConversationChain', 'ConversationC... |
import os
from dotenv import load_dotenv, find_dotenv
from langchain import HuggingFaceHub
from langchain import PromptTemplate, LLMChain, OpenAI
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.chains.summarize import load_summarize_chain
from langchain.document_loaders import YoutubeL... | [
"langchain.chains.summarize.load_summarize_chain",
"langchain.LLMChain",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.OpenAI",
"langchain.document_loaders.YoutubeLoader.from_youtube_url",
"langchain.HuggingFaceHub",
"langchain.PromptTemplate"
] | [((955, 1048), 'langchain.HuggingFaceHub', 'HuggingFaceHub', ([], {'repo_id': 'repo_id', 'model_kwargs': "{'temperature': 0.1, 'max_new_tokens': 500}"}), "(repo_id=repo_id, model_kwargs={'temperature': 0.1,\n 'max_new_tokens': 500})\n", (969, 1048), False, 'from langchain import HuggingFaceHub\n'), ((1305, 1368), 'l... |
#
# Copyright (c) 2023 Airbyte, Inc., all rights reserved.
#
import json
import logging
from dataclasses import dataclass
from typing import Any, Dict, List, Mapping, Optional, Tuple
import dpath.util
from airbyte_cdk.destinations.vector_db_based.config import ProcessingConfigModel, SeparatorSplitterConfigModel, Text... | [
"langchain.document_loaders.base.Document",
"langchain.text_splitter.RecursiveCharacterTextSplitter.from_tiktoken_encoder",
"langchain.text_splitter.Language",
"langchain.utils.stringify_dict"
] | [((4888, 4935), 'logging.getLogger', 'logging.getLogger', (['"""airbyte.document_processor"""'], {}), "('airbyte.document_processor')\n", (4905, 4935), False, 'import logging\n'), ((6600, 6631), 'langchain.utils.stringify_dict', 'stringify_dict', (['relevant_fields'], {}), '(relevant_fields)\n', (6614, 6631), False, 'f... |
from waifu.llm.Brain import Brain
from waifu.llm.VectorDB import VectorDB
from waifu.llm.SentenceTransformer import STEmbedding
from langchain.chat_models import ChatOpenAI
from langchain.embeddings import OpenAIEmbeddings
from typing import Any, List, Mapping, Optional
from langchain.schema import BaseMessage
import o... | [
"langchain.embeddings.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI"
] | [((576, 690), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key', 'model_name': 'model', 'streaming': 'stream', 'callbacks': '[callback]', 'temperature': '(0.85)'}), '(openai_api_key=api_key, model_name=model, streaming=stream,\n callbacks=[callback], temperature=0.85)\n', (586, 690)... |
from time import sleep
import copy
import redis
import json
import pickle
import traceback
from flask import Response, request, stream_with_context
from typing import Dict, Union
import os
from langchain.schema import HumanMessage, SystemMessage
from backend.api.language_model import get_llm
from backend.main import ... | [
"langchain.schema.SystemMessage",
"langchain.schema.HumanMessage"
] | [((11305, 11357), 'backend.main.app.route', 'app.route', (['"""/api/chat_xlang_webot"""'], {'methods': "['POST']"}), "('/api/chat_xlang_webot', methods=['POST'])\n", (11314, 11357), False, 'from backend.main import app, message_id_register, message_pool, logger\n'), ((2664, 2689), 'real_agents.web_agent.WebBrowsingExec... |
from langchain.chains import LLMChain
from langchain_core.prompts import PromptTemplate
from langchain_openai import ChatOpenAI
from output_parsers import summary_parser, ice_breaker_parser, topics_of_interest_parser
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")
llm_creative = ChatOpenAI(temperature=1, ... | [
"langchain.chains.LLMChain",
"langchain_openai.ChatOpenAI"
] | [((225, 278), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)', 'model_name': '"""gpt-3.5-turbo"""'}), "(temperature=0, model_name='gpt-3.5-turbo')\n", (235, 278), False, 'from langchain_openai import ChatOpenAI\n'), ((294, 347), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '... |
import asyncio
import uvicorn
from typing import AsyncIterable, Awaitable
from dotenv import load_dotenv
from fastapi import FastAPI
from fastapi.responses import FileResponse, StreamingResponse
from langchain.callbacks import AsyncIteratorCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.sch... | [
"langchain.callbacks.AsyncIteratorCallbackHandler",
"langchain.schema.HumanMessage",
"langchain.chat_models.ChatOpenAI"
] | [((345, 358), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (356, 358), False, 'from dotenv import load_dotenv\n'), ((959, 968), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (966, 968), False, 'from fastapi import FastAPI\n'), ((616, 646), 'langchain.callbacks.AsyncIteratorCallbackHandler', 'AsyncIteratorCa... |
# -*- coding: UTF-8 -*-
"""
@Project : AI-Vtuber
@File : claude_model.py
@Author : HildaM
@Email : Hilda_quan@163.com
@Date : 2023/06/17 4:44 PM
@Description : Local vector database model configuration
"""
from langchain.embeddings import HuggingFaceEmbeddings
import os
# Project root path
TEC2VEC_MODELS_PATH = os.getcwd() + "\\" + "data" + "\\" + ... | [
"langchain.embeddings.HuggingFaceEmbeddings"
] | [((468, 542), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': '(TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)'}), '(model_name=TEC2VEC_MODELS_PATH + DEFAULT_MODEL_NAME)\n', (489, 542), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((908, 934), 'os.path.exists... |
""" Adapted from https://github.com/QwenLM/Qwen-7B/blob/main/examples/react_demo.py """
import json
import os
from langchain.llms import OpenAI
llm = OpenAI(
model_name="qwen",
temperature=0,
openai_api_base="http://192.168.0.53:7891/v1",
openai_api_key="xxx",
)
# Template for concatenating a plugin's key information into a single piece of text.
TOOL_DESC = ... | [
"langchain.SerpAPIWrapper",
"langchain.llms.OpenAI"
] | [((153, 267), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""qwen"""', 'temperature': '(0)', 'openai_api_base': '"""http://192.168.0.53:7891/v1"""', 'openai_api_key': '"""xxx"""'}), "(model_name='qwen', temperature=0, openai_api_base=\n 'http://192.168.0.53:7891/v1', openai_api_key='xxx')\n", (159, 267),... |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
fr... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from... |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
fr... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from... |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
fr... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from... |
"""Wrapper around Cohere APIs."""
from __future__ import annotations
import logging
from typing import Any, Callable, Dict, List, Optional
from pydantic import Extra, root_validator
from tenacity import (
before_sleep_log,
retry,
retry_if_exception_type,
stop_after_attempt,
wait_exponential,
)
fr... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((531, 558), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (548, 558), False, 'import logging\n'), ((3018, 3034), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (3032, 3034), False, 'from pydantic import Extra, root_validator\n'), ((3195, 3259), 'langchain.utils.get_from... |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = loggi... | [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836... |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = loggi... | [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836... |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = loggi... | [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836... |
"""Wrapper around GooseAI API."""
import logging
from typing import Any, Dict, List, Mapping, Optional
from pydantic import Extra, Field, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.utils import get_from_dict_or_env
logger = loggi... | [
"langchain.utils.get_from_dict_or_env"
] | [((315, 342), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (332, 342), False, 'import logging\n'), ((1675, 1702), 'pydantic.Field', 'Field', ([], {'default_factory': 'dict'}), '(default_factory=dict)\n', (1680, 1702), False, 'from pydantic import Extra, Field, root_validator\n'), ((1836... |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils ... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'any... |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils ... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'any... |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils ... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'any... |
"""Wrapper around Anyscale"""
from typing import Any, Dict, List, Mapping, Optional
import requests
from pydantic import Extra, root_validator
from langchain.callbacks.manager import CallbackManagerForLLMRun
from langchain.llms.base import LLM
from langchain.llms.utils import enforce_stop_tokens
from langchain.utils ... | [
"langchain.llms.utils.enforce_stop_tokens",
"langchain.utils.get_from_dict_or_env"
] | [((1679, 1695), 'pydantic.root_validator', 'root_validator', ([], {}), '()\n', (1693, 1695), False, 'from pydantic import Extra, root_validator\n'), ((1862, 1938), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""anyscale_service_url"""', '"""ANYSCALE_SERVICE_URL"""'], {}), "(values, 'any... |
from langchain.prompts import PromptTemplate
_symptom_extract_template = """Consider the following conversation patient note:
Patient note: {note}
Choose on of the symptoms to be the chief complaint (it is usually the first symptom mentioned).
Provide your response strictly in the following format, replacing only th... | [
"langchain.prompts.PromptTemplate.from_template"
] | [((830, 885), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['_symptom_extract_template'], {}), '(_symptom_extract_template)\n', (858, 885), False, 'from langchain.prompts import PromptTemplate\n'), ((904, 957), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_... |