| code (string, 141 to 79.4k chars) | apis (list, 1 to 23 items) | extract_api (string, 126 to 73.2k chars) |
|---|---|---|
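The three columns hold the source snippet, the list of langchain APIs it touches, and a serialized record of each extracted call. Below is a minimal parsing sketch, assuming (from the preview rows that follow) that each `extract_api` cell is a Python-literal list of tuples whose second element is the dotted API name and whose last element is the matching import statement; the helper name `parse_extract_api` is hypothetical.

```python
import ast

def parse_extract_api(cell: str):
    # Assumption: each entry is a tuple with the dotted API name at index 1
    # and the import statement as the last element.
    for entry in ast.literal_eval(cell):
        yield entry[1], entry[-1].strip()

example_cell = (
    "[((1312, 1333), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', "
    "([], {}), '()\\n', (1331, 1333), False, "
    "'from langchain.tools import DuckDuckGoSearchRun\\n')]"
)
for name, import_stmt in parse_extract_api(example_cell):
    print(name, "<-", import_stmt)
```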
# from __future__ import annotations
import os
import re
import itertools
import openai
import tiktoken
import json
from dotenv import load_dotenv
from typing import Any, Dict, List, Optional
from pydantic import Extra
from langchain.schema.language_model import BaseLanguageModel
from langchain.callbacks.manager im... | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.tools.DuckDuckGoSearchRun"
] | [((1312, 1333), 'langchain.tools.DuckDuckGoSearchRun', 'DuckDuckGoSearchRun', ([], {}), '()\n', (1331, 1333), False, 'from langchain.tools import DuckDuckGoSearchRun\n'), ((3808, 3877), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompts.EXECUTE_PLAN_PROMPT_SEARCH_TOOL'], {}), '... |
import langchain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.schema import AIMessage, HumanMessage, SystemMessage
from langchain.cache import InMemoryCache
from langchain import PromptTemplate
import os
import openai
from langchain.prompts import (
ChatPromptTemplat... | [
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.llms.OpenAI",
"langchain.prompts.AIMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.SystemMessagePromptTemplate.from_template"
] | [((734, 742), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (740, 742), False, 'from langchain.llms import OpenAI\n'), ((750, 784), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'openai_api_key': 'api_key'}), '(openai_api_key=api_key)\n', (760, 784), False, 'from langchain.chat_models import ChatOpenAI... |
import langchain_helper as lch
import streamlit as st
st.title("Pets Name Generator")
animal_type = st.sidebar.selectbox("What is your pet",
("Cat", "Dog", "Cow", "Hamster"))
if animal_type == "Cat":
pet_color = st.sidebar.text_area(label = "What color is your cat?", max_chars ... | [
"langchain_helper.generate_pet_name"
] | [((56, 87), 'streamlit.title', 'st.title', (['"""Pets Name Generator"""'], {}), "('Pets Name Generator')\n", (64, 87), True, 'import streamlit as st\n'), ((102, 176), 'streamlit.sidebar.selectbox', 'st.sidebar.selectbox', (['"""What is your pet"""', "('Cat', 'Dog', 'Cow', 'Hamster')"], {}), "('What is your pet', ('Cat'... |
"""Load html from files, clean up, split, ingest into Weaviate."""
import logging
import os
import re
# from parser import langchain_docs_extractor
import weaviate
import faiss
from bs4 import BeautifulSoup, SoupStrainer
from langchain_community.document_loaders import RecursiveUrlLoader, SitemapLoader, DirectoryLoade... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_community.document_loaders.DirectoryLoader",
"langchain.vectorstores.weaviate.Weaviate",
"langchain_community.document_loaders.RecursiveUrlLoader",
"langchain.indexes.SQLRecordManager"
] | [((722, 761), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (741, 761), False, 'import logging\n'), ((771, 798), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (788, 798), False, 'import logging\n'), ((3746, 3773), 'bs4.BeautifulS... |
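The row above ingests HTML into Weaviate; here is a minimal sketch of its load-and-split step, using the `DirectoryLoader` and `RecursiveCharacterTextSplitter` APIs it lists. The directory path, glob, and chunk sizes are placeholders, not the row's actual settings.

```python
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.document_loaders import DirectoryLoader

# Load raw files from a placeholder directory and split them into overlapping chunks.
docs = DirectoryLoader("./docs", glob="**/*.html").load()
splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=200)
chunks = splitter.split_documents(docs)
print(f"{len(docs)} documents -> {len(chunks)} chunks")
```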
import time
import os
from pathlib import Path
from typing import Dict, Any
import langchain
from langchain_community.llms import GPT4All, FakeListLLM, LlamaCpp
from langchain_community.callbacks import get_openai_callback
from langchain.callbacks.manager import CallbackManager
from langchain.callbacks.streaming_stdo... | [
"langchain_community.chat_models.ChatLiteLLM"
] | [((1626, 1688), 'langchain_community.chat_models.ChatLiteLLM', 'ChatLiteLLM', ([], {'model_name': 'modelname', 'temperature': '(0)', 'verbose': '(True)'}), '(model_name=modelname, temperature=0, verbose=True)\n', (1637, 1688), False, 'from langchain_community.chat_models import ChatLiteLLM\n'), ((2428, 2455), 'os.geten... |
import langchain
from langchain.llms import GooglePalm
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.prompts import PromptTemplate
from langchain.chains import RetrievalQA
import os
from dot... | [
"langchain.llms.GooglePalm",
"langchain.chains.RetrievalQA.from_chain_type",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.CSVLoader",
"langchain.embeddings.HuggingFaceInstructEmbeddings",
"langchain.prompts.PromptTemplate"
] | [((344, 357), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (355, 357), False, 'from dotenv import load_dotenv\n'), ((917, 989), 'langchain.llms.GooglePalm', 'GooglePalm', ([], {'google_api_key': "os.environ['GOOGLE_API_KEY']", 'temperature': '(0.7)'}), "(google_api_key=os.environ['GOOGLE_API_KEY'], temperatur... |
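The row above builds a CSV question-answering pipeline; here is a minimal end-to-end sketch of that pattern using the APIs it lists. The CSV path and query are placeholders; the `GooglePalm` arguments mirror the row's extract.

```python
import os
from langchain.llms import GooglePalm
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceInstructEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA

# Placeholder CSV; embed the rows and answer questions over them.
docs = CSVLoader(file_path="faqs.csv").load()
vectordb = FAISS.from_documents(docs, HuggingFaceInstructEmbeddings())
llm = GooglePalm(google_api_key=os.environ["GOOGLE_API_KEY"], temperature=0.7)
qa_chain = RetrievalQA.from_chain_type(llm=llm, retriever=vectordb.as_retriever())
print(qa_chain.run("your question here"))
```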
from pathlib import Path
import sys
import faiss
import langchain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import FAISS
from langchain.embeddings import OpenAIEmbeddings
import pickle
from langchain import LLMChain
from langchain.llms import OpenAIChat
from langchai... | [
"langchain.prompts.Prompt",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAIChat",
"langchain.vectorstores.FAISS.from_texts",
"langchain.embeddings.OpenAIEmbeddings"
] | [((631, 644), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (642, 644), False, 'from dotenv import load_dotenv\n'), ((8815, 8833), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (8831, 8833), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((891, 908),... |
import traceback
import logging
import json
import uvicorn
import aiohttp
import nest_asyncio
from typing import Dict, Tuple, Optional
from logging import FileHandler, StreamHandler
from logging.handlers import TimedRotatingFileHandler
from fastapi import FastAPI, Query, Request, File, Form, UploadFile
from fastapi.res... | [
"langchain_client.LangchainClient"
] | [((461, 481), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (479, 481), False, 'import nest_asyncio\n'), ((488, 497), 'fastapi.FastAPI', 'FastAPI', ([], {}), '()\n', (495, 497), False, 'from fastapi import FastAPI, Query, Request, File, Form, UploadFile\n'), ((515, 530), 'logging.StreamHandler', 'Stream... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
import streamlit as st
import langchain as lc
from typing import Callable
from utils import *
#####################################################
# This file contains everything reusable in the app #
#####################################################
def show_past_conversations():
conversations = get_conver... | [
"langchain.callbacks.get_openai_callback"
] | [((942, 1102), 'streamlit.number_input', 'st.number_input', (['"""Monthly limit ($)"""'], {'value': '(15.0)', 'min_value': '(1.0)', 'max_value': '(120.0)', 'step': '(1.0)', 'format': '"""%.2f"""', 'help': '"""The monthly limit for the OpenAI API"""'}), "('Monthly limit ($)', value=15.0, min_value=1.0, max_value=\n 1... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
Type... | [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainT... | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars i... |
"""Utility functions for mlflow.langchain."""
import json
import logging
import os
import shutil
import types
from functools import lru_cache
from importlib.util import find_spec
from typing import NamedTuple
import cloudpickle
import yaml
from packaging import version
import mlflow
from mlflow.utils.class_utils impo... | [
"langchain.schema.output_parser.StrOutputParser",
"langchain.agents.initialize_agent",
"langchain.chains.loading.load_chain"
] | [((2001, 2028), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2018, 2028), False, 'import logging\n'), ((10189, 10235), 'os.path.join', 'os.path.join', (['path', '_MODEL_DATA_YAML_FILE_NAME'], {}), '(path, _MODEL_DATA_YAML_FILE_NAME)\n', (10201, 10235), False, 'import os\n'), ((5685, 57... |
import langchain
from langchain.chains.llm import LLMChain
from langchain_openai import AzureChatOpenAI
from langchain.memory import ReadOnlySharedMemory, ConversationBufferMemory
from langchain.agents import BaseSingleActionAgent, Tool, AgentType, initialize_agent, AgentExecutor
from langchain.chat_models.base import... | [
"langchain.agents.initialize_agent",
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.schema.OutputParserException",
"langchain.prompts.chat.SystemMessagePromptTemplate.from_template",
"langchain.schema.AgentAction",
"langchain.chains.llm.LLMChain",
"langchain_openai.AzureChatOpenAI",
... | [((4432, 4548), 'langchain.chains.llm.LLMChain', 'LLMChain', ([], {'llm': 'self.chat_model', 'prompt': 'router_prompt_template', 'memory': 'self.readonly_memory', 'verbose': 'self.verbose'}), '(llm=self.chat_model, prompt=router_prompt_template, memory=self.\n readonly_memory, verbose=self.verbose)\n', (4440, 4548),... |
import os
import openai
import pinecone
from langchain.document_loaders import DirectoryLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores import Pinecone
from langchain.llms import OpenAI
from langchain.chat_mod... | [
"langchain.document_loaders.DirectoryLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.memory.ConversationBufferMemory",
"langchain.vectorstores.Pinecone.from_documents",
"langchain.llms.OpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((579, 592), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (590, 592), False, 'from dotenv import load_dotenv\n'), ((609, 636), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (618, 636), False, 'import os\n'), ((656, 685), 'os.getenv', 'os.getenv', (['"""PINECONE_API_KEY"""'... |
# Import langchain and azure cognitive search
import langchain
from typing import Dict, List
from pydantic import BaseModel, Extra, root_validator
from langchain.utils import get_from_dict_or_env
from langchain.tools.base import BaseTool
from azure.core.credentials import AzureKeyCredential
from azure.search.d... | [
"langchain.utils.get_from_dict_or_env"
] | [((1527, 1551), 'pydantic.root_validator', 'root_validator', ([], {'pre': '(True)'}), '(pre=True)\n', (1541, 1551), False, 'from pydantic import BaseModel, Extra, root_validator\n'), ((1721, 1813), 'langchain.utils.get_from_dict_or_env', 'get_from_dict_or_env', (['values', '"""azure_cognitive_search_key"""', '"""AZURE_... |
import streamlit as st
import os
import requests
import pickle
import functools
from requests_auth_aws_sigv4 import AWSSigV4
from langchain.callbacks import get_openai_callback
from .models_config import MODELS_JSON
from langchain_utils.utils import LangchainUtils
from exceptions.exceptions import (
LlmModelSelecti... | [
"langchain_utils.utils.LangchainUtils",
"langchain.callbacks.get_openai_callback"
] | [((570, 609), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEYS"""', 'None'], {}), "('OPENAI_API_KEYS', None)\n", (584, 609), False, 'import os\n'), ((643, 684), 'os.environ.get', 'os.environ.get', (['"""AWS_ACCESS_KEY_ID"""', 'None'], {}), "('AWS_ACCESS_KEY_ID', None)\n", (657, 684), False, 'import os\n'), ((7... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
Type... | [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainT... | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars i... |
from langchain.chat_models import ChatOpenAI
from langchain.agents import tool, load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
import langchain
langchain.debug = True
# llm
llm = ChatOpenAI(temperature=0)
# tools
@tool
def get_word_length(word: str) -> int:
"""Re... | [
"langchain.agents.initialize_agent",
"langchain.agents.load_tools",
"langchain.chat_models.ChatOpenAI"
] | [((231, 256), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0)'}), '(temperature=0)\n', (241, 256), False, 'from langchain.chat_models import ChatOpenAI\n'), ((381, 414), 'langchain.agents.load_tools', 'load_tools', (["['llm-math']"], {'llm': 'llm'}), "(['llm-math'], llm=llm)\n", (391, 414), ... |
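The row above wires a custom `@tool` and the `llm-math` tool into an agent; here is a minimal sketch of that setup. The tool docstring, agent type, and query are assumptions, since the row is truncated before the agent is created; `load_tools(['llm-math'], llm=llm)` comes from the row's extract.

```python
from langchain.chat_models import ChatOpenAI
from langchain.agents import tool, load_tools, initialize_agent, AgentType

llm = ChatOpenAI(temperature=0)

@tool
def get_word_length(word: str) -> int:
    """Return the number of characters in a word."""  # docstring reconstructed; the original is truncated
    return len(word)

# Combine the built-in math tool with the custom tool and run a zero-shot agent.
tools = load_tools(["llm-math"], llm=llm) + [get_word_length]
agent = initialize_agent(tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True)
print(agent.run("How many letters are in the word 'langchain'?"))
```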
from abc import ABC, abstractmethod
import chromadb
from chromadb.config import Settings
import requests, json
import uuid
# import langchain
# from langchain.cache import InMemoryCache
from langchain.vectorstores import Chroma
from langchain.embeddings import HuggingFaceInstructEmbeddings
# from langchain import Hug... | [
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.memory.ConversationBufferMemory",
"langchain.llms.HuggingFaceHub",
"langchain.embeddings.HuggingFaceInstructEmbeddings",
"langchain.vectorstores.Chroma"
] | [((1379, 1452), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'return_messages': '(True)'}), "(memory_key='chat_history', return_messages=True)\n", (1403, 1452), False, 'from langchain.memory import ConversationBufferMemory\n'), ((2211, 2386), 'chroma... |
import io
import logging
import asyncio
import traceback
import html
import json
from tempfile import NamedTemporaryFile
from PIL import Image
from datetime import datetime
import openai
import telegram
from telegram import (
Update,
User,
InlineKeyboardButton,
InlineKeyboardMarkup,
BotCommand,
... | [
"langchain_utils.VectorSotr",
"langchain_utils.set_vectors",
"langchain_utils.LANGCHAIN"
] | [((715, 734), 'database.Database', 'database.Database', ([], {}), '()\n', (732, 734), False, 'import database\n'), ((744, 771), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (761, 771), False, 'import logging\n'), ((14463, 14475), 'io.BytesIO', 'io.BytesIO', ([], {}), '()\n', (14473, 144... |
from langchain.agents import (
initialize_agent,
Tool,
AgentType
)
from llama_index.callbacks import (
CallbackManager,
LlamaDebugHandler
)
from llama_index.node_parser.simple import SimpleNodeParser
from llama_index import (
VectorStoreIndex,
SummaryIndex,
SimpleDirectoryReader,
ServiceConte... | [
"langchain.chat_models.ChatOpenAI"
] | [((398, 456), 'logging.basicConfig', 'logging.basicConfig', ([], {'stream': 'sys.stdout', 'level': 'logging.INFO'}), '(stream=sys.stdout, level=logging.INFO)\n', (417, 456), False, 'import logging\n'), ((529, 545), 'os.getenv', 'os.getenv', (['"""LLM"""'], {}), "('LLM')\n", (538, 545), False, 'import os\n'), ((1217, 12... |
'''
@Author: WANG Maonan
@Date: 2023-09-04 20:46:09
@Description: Traffic Light Control based on LLM-ReAct
1. There is a database; we search for the most similar scenario (how to define scenario similarity), which can then be stored in memory or placed in the query
2. Checks for the different actions
    - getAvailableActions, get all currently available actions
- get queue length of all phases
- get emergency vehicle
- check possible queu... | [
"langchain.chat_models.ChatOpenAI"
] | [((1268, 1290), 'tshub.utils.get_abs_path.get_abs_path', 'get_abs_path', (['__file__'], {}), '(__file__)\n', (1280, 1290), False, 'from tshub.utils.get_abs_path import get_abs_path\n'), ((1379, 1392), 'utils.readConfig.read_config', 'read_config', ([], {}), '()\n', (1390, 1392), False, 'from utils.readConfig import rea... |
import langchain
import requests
from pydantic import ValidationError
from langchain_core.prompts import ChatPromptTemplate
#from langchain import chains
from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
#from rmrkl import ChatZeroShotAgent, RetryAgentExecutor
from langchain.agents ... | [
"langchain_openai.ChatOpenAI",
"langchain.agents.AgentExecutor",
"langchain.agents.format_scratchpad.openai_tools.format_to_openai_tool_messages",
"langchain.callbacks.streaming_stdout.StreamingStdOutCallbackHandler",
"langchain.prompts.MessagesPlaceholder",
"langchain.agents.output_parsers.openai_tools.O... | [((1066, 1220), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': 'temp', 'model_name': 'model', 'request_timeout': '(1000)', 'streaming': '(False)', 'callbacks': 'callbacks', 'openai_api_key': 'api_key', 'verbose': '(False)'}), '(temperature=temp, model_name=model, request_timeout=1000,\n streaming=... |
"""Beta Feature: base interface for cache."""
import hashlib
import json
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm impo... | [
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.schema.Generation"
] | [((2037, 2055), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (2053, 2055), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2212, 2244), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (2218, 2244), Fal... |
import os
from dotenv import load_dotenv
from llama_index import PromptTemplate, SimpleDirectoryReader, VectorStoreIndex
from ragas.metrics import (
faithfulness,
answer_relevancy,
context_precision,
context_recall,
)
from ragas.metrics.critique import harmfulness
from ragas.llama_index import evaluate... | [
"langchain.embeddings.AzureOpenAIEmbeddings",
"langchain.chat_models.AzureChatOpenAI"
] | [((1361, 1411), 'llama_index.node_parser.SentenceSplitter', 'SentenceSplitter', ([], {'chunk_size': '(256)', 'chunk_overlap': '(50)'}), '(chunk_size=256, chunk_overlap=50)\n', (1377, 1411), False, 'from llama_index.node_parser import SentenceSplitter\n'), ((1440, 1491), 'llama_index.node_parser.SentenceSplitter', 'Sent... |
import os
import pickle
import langchain
import faiss
from langchain import HuggingFaceHub, PromptTemplate
from langchain.chains import ConversationalRetrievalChain, LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.llms import OpenAI
from langchain.document_loaders import DirectoryLoader, TextLoade... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.embeddings.HuggingFaceHubEmbeddings",
"langchain.cache.InMemoryCache",
"langchain.document_loaders.DirectoryLoader",
"langchain.memory.ConversationBufferWindowMemory",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.output_... | [((1981, 1996), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (1994, 1996), False, 'from langchain.cache import InMemoryCache\n'), ((2193, 2260), 'langchain.llms.OpenAI', 'OpenAI', ([], {'model_name': '"""gpt-3.5-turbo-16k"""', 'temperature': '(0)', 'verbose': '(True)'}), "(model_name='gpt-3.5-tur... |
from dotenv import load_dotenv
load_dotenv()
import logging
from dotenv import load_dotenv, find_dotenv
import os
from genai.extensions.langchain import LangChainInterface
from genai.schemas import GenerateParams as GenaiGenerateParams
from genai.credentials import Credentials
# from ibm_watson_machine_learning.met... | [
"langchain.chains.SequentialChain",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.schema.SystemMessage",
"langchain.chains.LLMChain"
] | [((31, 44), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (42, 44), False, 'from dotenv import load_dotenv, find_dotenv\n'), ((2003, 2071), 'opentelemetry.sdk._logs.LoggingHandler', 'LoggingHandler', ([], {'level': 'logging.DEBUG', 'logger_provider': 'logger_provider'}), '(level=logging.DEBUG, logger_provider=... |
# Needs to be in same directory as configs, data folder
# Imports
from _OpalLLM import OpalLLM
from _OpalLLM import OpalLLM
import sys
sys.path.append('/home/jovyan/.local/lib/python3.8/site-packages')
import torch
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langcha... | [
"langchain.chains.ConversationChain",
"langchain.LLMChain",
"langchain.llms.HuggingFacePipeline",
"langchain.PromptTemplate"
] | [((138, 204), 'sys.path.append', 'sys.path.append', (['"""/home/jovyan/.local/lib/python3.8/site-packages"""'], {}), "('/home/jovyan/.local/lib/python3.8/site-packages')\n", (153, 204), False, 'import sys\n'), ((3396, 3513), '_OpalLLM.OpalLLM', 'OpalLLM', ([], {'model': '"""lmsys/vicuna-33b"""', 'temperature': '(0.1)',... |
from gptcache import Cache
from gptcache.manager.factory import manager_factory
from gptcache.processor.pre import get_prompt
from langchain.cache import GPTCache
import hashlib
import openai
import os
import langchain
import yaml
import sys
from db.utils import VectorDB
sys.path.append('..')
openai.api_key = os.envi... | [
"langchain.cache.GPTCache"
] | [((273, 294), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (288, 294), False, 'import sys\n'), ((313, 345), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (327, 345), False, 'import os\n'), ((690, 713), 'langchain.cache.GPTCache', 'GPTCache', (['init... |
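The row above caches LLM calls with `GPTCache`; its init function is truncated in the extract, so the sketch below follows the documented LangChain-with-GPTCache pattern using an exact-match ("map") data manager. Treat it as an illustration, not the row's exact code.

```python
import hashlib
import langchain
from gptcache import Cache
from gptcache.manager.factory import manager_factory
from gptcache.processor.pre import get_prompt
from langchain.cache import GPTCache

def init_gptcache(cache_obj: Cache, llm_string: str):
    # One cache directory per LLM configuration, keyed by a hash of its settings.
    hashed = hashlib.sha256(llm_string.encode()).hexdigest()
    cache_obj.init(
        pre_embedding_func=get_prompt,
        data_manager=manager_factory(manager="map", data_dir=f"map_cache_{hashed}"),
    )

langchain.llm_cache = GPTCache(init_gptcache)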
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import itertools
import logging
from datetime import datetime
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
Optional,
Sequence,... | [
"langchain.schema.messages.messages_from_dict",
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.chat_models.openai.ChatOpenAI",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandl... | [((1366, 1393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1383, 1393), False, 'import logging\n'), ((1704, 1721), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (1712, 1721), False, 'from urllib.parse import urlparse, urlunparse\n'), ((24715, 24735), 'asyncio... |
import langchain_helper
import streamlit as st
st.header("Dumbledore: The PDF Wizard")
# query = st.text_input("Enter your Question here")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.markdo... | [
"langchain_helper.get_qa_chain"
] | [((48, 87), 'streamlit.header', 'st.header', (['"""Dumbledore: The PDF Wizard"""'], {}), "('Dumbledore: The PDF Wizard')\n", (57, 87), True, 'import streamlit as st\n'), ((352, 378), 'streamlit.chat_input', 'st.chat_input', (['"""Whats up?"""'], {}), "('Whats up?')\n", (365, 378), True, 'import streamlit as st\n'), ((4... |
import logging
import os
import openai
from langchain.chat_models import AzureChatOpenAI
import vishwa
from vishwa.mlmonitor.langchain.decorators.map_xpuls_project import MapXpulsProject
from vishwa.mlmonitor.langchain.decorators.telemetry_override_labels import TelemetryOverrideLabels
from vishwa.mlmonitor.langchain... | [
"langchain.chat_models.AzureChatOpenAI"
] | [((498, 525), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (515, 525), False, 'import logging\n'), ((544, 571), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (553, 571), False, 'import os\n'), ((616, 639), 'os.getenv', 'os.getenv', (['"""OPENAI_URL"""... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequenc... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((359... |
import ast
import copy
import json
import logging
from typing import List, Tuple, Dict, Callable
import langchain
from langchain.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, AIMessagePromptTemplate
from langchain.prompts.chat import BaseMessagePromptTemplate
from langchain.schema import LLMResult
fro... | [
"langchain.schema.LLMResult",
"langchain.prompts.HumanMessagePromptTemplate",
"langchain.LLMChain",
"langchain.PromptTemplate.from_template"
] | [((557, 584), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (574, 584), False, 'import logging\n'), ((2451, 2481), 'copy.deepcopy', 'copy.deepcopy', (['self.gen_kwargs'], {}), '(self.gen_kwargs)\n', (2464, 2481), False, 'import copy\n'), ((2738, 2774), 'langchain.LLMChain', 'LLMChain', (... |
import os
import openai
from langchain.llms import OpenAI
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.chains import SequentialChain
from dotenv import load_dotenv, find_dotenv
load_dotenv(find_dotenv())
openai.api_key = os.environ['OPENAI_API_KEY']
llm = OpenAI(te... | [
"langchain.chains.SequentialChain",
"langchain_helper.generate_restaurant_name_and_items",
"langchain.llms.OpenAI",
"langchain.chains.LLMChain",
"langchain.prompts.PromptTemplate"
] | [((311, 334), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)'}), '(temperature=0.7)\n', (317, 334), False, 'from langchain.llms import OpenAI\n'), ((384, 421), 'streamlit.title', 'st.title', (['"""Restaurant Name Generator"""'], {}), "('Restaurant Name Generator')\n", (392, 421), True, 'import streamlit... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from concurrent.futures import ThreadPoolExecutor
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.run_collector.RunCollectorCallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.cal... | [((1521, 1548), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1538, 1548), False, 'import logging\n'), ((1617, 1660), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1627, 1660), False, 'from contextvars i... |
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.docstore.document import Document
from langchain.prompts import PromptTemplate
from langchain.indexes.vectorstore import VectorstoreIndexCreator
fro... | [
"langchain.chains.question_answering.load_qa_chain",
"langchain.docstore.document.Document",
"langchain.OpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((568, 616), 'os.system', 'os.system', (["('cls' if os.name == 'nt' else 'clear')"], {}), "('cls' if os.name == 'nt' else 'clear')\n", (577, 616), False, 'import os\n'), ((666, 684), 'langchain.embeddings.openai.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (682, 684), False, 'from langchain.embeddings.open... |
import langchain
from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.text_splitter import CharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA
from langchain.cache import InMemoryCache
from dotenv import... | [
"langchain.text_splitter.CharacterTextSplitter",
"langchain.cache.InMemoryCache",
"langchain.chat_models.ChatOpenAI",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((468, 483), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (473, 483), False, 'from flask import Flask, request, jsonify\n'), ((484, 493), 'flask_cors.CORS', 'CORS', (['app'], {}), '(app)\n', (488, 493), False, 'from flask_cors import CORS\n'), ((516, 531), 'langchain.cache.InMemoryCache', 'InMemoryCache... |
# Copyright 2023-2024 ByteBrain AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in wri... | [
"langchain.llms.openai.OpenAI",
"langchain.prompts.PromptTemplate.from_template",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.prompts.PromptTemplate"
] | [((1500, 1518), 'langchain.embeddings.OpenAIEmbeddings', 'OpenAIEmbeddings', ([], {}), '()\n', (1516, 1518), False, 'from langchain.embeddings import OpenAIEmbeddings\n'), ((2219, 2294), 'langchain.prompts.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['page_content']", 'template': '"""{page_content}"""'... |
import os
import pandas as pd
import requests
import openai
import chromadb
import langchain
from langchain.chains import RetrievalQA, SimpleSequentialChain, LLMChain
from langchain.llms import OpenAI
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain.docstore.docum... | [
"langchain.chains.question_answering.load_qa_chain",
"langchain.text_splitter.CharacterTextSplitter",
"langchain.docstore.document.Document",
"langchain.embeddings.openai.OpenAIEmbeddings",
"langchain.chat_models.ChatOpenAI",
"langchain.vectorstores.Chroma"
] | [((591, 604), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (602, 604), False, 'from dotenv import load_dotenv\n'), ((612, 639), 'os.environ.get', 'os.environ.get', (['"""peace_dir"""'], {}), "('peace_dir')\n", (626, 639), False, 'import os\n'), ((657, 689), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API... |
import os
from dotenv import load_dotenv
import openai
import langchain
import azure.cognitiveservices.speech as speechsdk
import elevenlabs
import json
import requests
from langchain.agents.agent_toolkits import SQLDatabaseToolkit
from langchain.sql_database import SQLDatabase
from langchain.agents import AgentExecut... | [
"langchain.agents.initialize_agent",
"langchain.agents.agent_toolkits.SQLDatabaseToolkit",
"langchain.agents.create_sql_agent",
"langchain.chat_models.ChatOpenAI",
"langchain.SerpAPIWrapper",
"langchain.agents.Tool",
"langchain.SQLDatabase.from_uri",
"langchain.OpenAI"
] | [((968, 1038), 'azure.cognitiveservices.speech.SpeechConfig', 'speechsdk.SpeechConfig', ([], {'subscription': 'speech_key', 'region': 'service_region'}), '(subscription=speech_key, region=service_region)\n', (990, 1038), True, 'import azure.cognitiveservices.speech as speechsdk\n'), ((1059, 1114), 'azure.cognitiveservi... |
# main.py
#####################################################################
# Amazon Bedrock - boto3
#####################################################################
import boto3
# Setup bedrock
bedrock_runtime = boto3.client(
service_name="bedrock-runtime",
region_name="us-east-1",
)
#############... | [
"langchain.llms.Bedrock",
"langchain.embeddings.BedrockEmbeddings"
] | [((225, 294), 'boto3.client', 'boto3.client', ([], {'service_name': '"""bedrock-runtime"""', 'region_name': '"""us-east-1"""'}), "(service_name='bedrock-runtime', region_name='us-east-1')\n", (237, 294), False, 'import boto3\n'), ((760, 837), 'langchain.llms.Bedrock', 'Bedrock', ([], {'client': 'bedrock_runtime', 'mode... |
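The row above points LangChain at an Amazon Bedrock runtime client; here is a minimal sketch of the `Bedrock` LLM and `BedrockEmbeddings` setup it lists. The model ids are placeholders, since the row's own values are truncated.

```python
import boto3
from langchain.llms import Bedrock
from langchain.embeddings import BedrockEmbeddings

# Bedrock runtime client, as in the row above.
bedrock_runtime = boto3.client(service_name="bedrock-runtime", region_name="us-east-1")

# Placeholder model ids; substitute the ones enabled in your AWS account.
llm = Bedrock(client=bedrock_runtime, model_id="anthropic.claude-v2")
embeddings = BedrockEmbeddings(client=bedrock_runtime, model_id="amazon.titan-embed-text-v1")
```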
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.bas... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatResult",
"langchain.load.dump.dumps",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.schema.RunInfo",
"langchain.schema.messages.... | [((915, 952), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (920, 952), False, 'from pydantic import Field, root_validator\n'), ((1026, 1059), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (103... |
import os
import streamlit as st
import pickle
import time
import langchain
from langchain.llms import OpenAI
from langchain.document_loaders import UnstructuredURLLoader
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.chains.qa_with_sources.loading import load_qa_with_sources_chain
from langch... | [
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings"
] | [((468, 500), 'os.environ.get', 'os.environ.get', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (482, 500), False, 'import os\n'), ((508, 547), 'langchain.llms.OpenAI', 'OpenAI', ([], {'temperature': '(0.7)', 'max_tokens': '(500)'}), '(temperature=0.7, max_tokens=500)\n', (514, 547), False, 'from langchain.ll... |
#!/usr/bin/env python
# coding: utf-8
# Blackboard-PAGI - LLM Proto-AGI using the Blackboard Pattern
# Copyright (c) 2023. Andreas Kirsch
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Found... | [
"langchain.output_parsers.PydanticOutputParser",
"langchain.cache.SQLiteCache"
] | [((1950, 1998), 'langchain.cache.SQLiteCache', 'SQLiteCache', (['""".execution_llm_spike.langchain.db"""'], {}), "('.execution_llm_spike.langchain.db')\n", (1961, 1998), False, 'from langchain.cache import SQLiteCache\n'), ((2081, 2113), 'blackboard_pagi.cached_chat_model.CachedChatOpenAI', 'CachedChatOpenAI', ([], {'m... |
import httpcore
setattr(httpcore, 'SyncHTTPTransport', 'AsyncHTTPProxy')
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import speech_recognition as sr
import langid
from pydub import AudioSegment
import langchain
import subprocess
from langchain.chat_models im... | [
"langchain.prompts.PromptTemplate.from_template",
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI"
] | [((3911, 4381), 'langchain.prompts.PromptTemplate.from_template', 'PromptTemplate.from_template', (['"""You are a normal consulting nurse/doctor. You will recieve some keywords or sentences described by the patient as input. You have to ask the patient two follow up question so as to acquire the information important t... |
import streamlit as st
from streamlit_chat import message
import langchain_helper as lch
from langchain.schema import (SystemMessage, HumanMessage, AIMessage, messages)
def main():
st.set_page_config(
page_title="Iliad technical assessment",
page_icon="🤖",
)
st.header("ChatBot Free Assist... | [
"langchain.schema.AIMessage",
"langchain_helper.main",
"langchain.schema.HumanMessage"
] | [((187, 261), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Iliad technical assessment"""', 'page_icon': '"""🤖"""'}), "(page_title='Iliad technical assessment', page_icon='🤖')\n", (205, 261), True, 'import streamlit as st\n'), ((289, 325), 'streamlit.header', 'st.header', (['"""ChatBot Fr... |
from typing import ClassVar
from langchain.chains.base import Chain
from typing import Any, Type
import os
import langchain
from langchain.cache import SQLiteCache
langchain.llm_cache = SQLiteCache()
class BaseChain(Chain):
template_file: ClassVar[str]
generator_template: ClassVar[str]
normalizer_templa... | [
"langchain.cache.SQLiteCache"
] | [((188, 201), 'langchain.cache.SQLiteCache', 'SQLiteCache', ([], {}), '()\n', (199, 201), False, 'from langchain.cache import SQLiteCache\n')] |
import os
import time
import openai
import pickle
import langchain
import streamlit as st
from langchain import OpenAI
from langchain.chains import RetrievalQAWithSourcesChain
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.document_loaders import UnstructuredURLLoader
from langchain.e... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.document_loaders.UnstructuredURLLoader",
"langchain.vectorstores.FAISS.from_documents",
"langchain.embeddings.OpenAIEmbeddings",
"langchain.OpenAI"
] | [((487, 518), 'streamlit.title', 'st.title', (['"""News Research tool """'], {}), "('News Research tool ')\n", (495, 518), True, 'import streamlit as st\n'), ((519, 556), 'streamlit.sidebar.title', 'st.sidebar.title', (['"""News Article URLs"""'], {}), "('News Article URLs')\n", (535, 556), True, 'import streamlit as s... |
import json
import os
import uuid
from typing import Optional, Dict, Any
from langchain.callbacks import LangChainTracer
from langchain.chains.base import Chain
from langchain.load.dump import dumpd
from langchain.schema.runnable import RunnableConfig, RunnableSequence
from langchain.schema.runnable.base import Input
... | [
"langchain.callbacks.LangChainTracer",
"langchain.schema.runnable.config.ensure_config",
"langchain.load.dump.dumpd"
] | [((1302, 1330), 'vishwa.mlmonitor.langchain.patches.utils.get_scoped_override_labels', 'get_scoped_override_labels', ([], {}), '()\n', (1328, 1330), False, 'from vishwa.mlmonitor.langchain.patches.utils import get_scoped_override_labels, get_scoped_project_info\n'), ((1357, 1382), 'vishwa.mlmonitor.langchain.patches.ut... |
""" This example shows how to use the map-reduce chain to summarize a document. """
import os
import langchain
from langchain_openai import ChatOpenAI
from langchain.chains.summarize import load_summarize_chain
from langchain_community.document_loaders import PyPDFLoader
from dotenv import load_dotenv
lo... | [
"langchain_community.document_loaders.PyPDFLoader",
"langchain_openai.ChatOpenAI",
"langchain.chains.summarize.load_summarize_chain"
] | [((318, 331), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (329, 331), False, 'from dotenv import load_dotenv\n'), ((352, 379), 'os.getenv', 'os.getenv', (['"""OPENAI_API_KEY"""'], {}), "('OPENAI_API_KEY')\n", (361, 379), False, 'import os\n'), ((415, 479), 'langchain_openai.ChatOpenAI', 'ChatOpenAI', ([], {'... |
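The row above describes a map-reduce summarization chain; here is a minimal sketch of that pattern using the `PyPDFLoader`, `ChatOpenAI`, and `load_summarize_chain` APIs it lists. The PDF path and LLM settings are placeholders.

```python
from langchain_openai import ChatOpenAI
from langchain_community.document_loaders import PyPDFLoader
from langchain.chains.summarize import load_summarize_chain

# Load and page-split a placeholder PDF, then summarize it with map-reduce.
docs = PyPDFLoader("document.pdf").load_and_split()
llm = ChatOpenAI(temperature=0)
chain = load_summarize_chain(llm, chain_type="map_reduce")
print(chain.run(docs))
```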
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.l... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
from openai import OpenAI
import streamlit as st
import pandas as pd
from langchain.document_loaders.csv_loader import CSVLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain.chains im... | [
"langchain_openai.ChatOpenAI",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain_openai.OpenAIEmbeddings",
"langchain.vectorstores.Chroma.from_documents",
"langchain.document_loaders.csv_loader.CSVLoader"
] | [((380, 413), 'streamlit.title', 'st.title', (['"""RAG - Product Reviews"""'], {}), "('RAG - Product Reviews')\n", (388, 413), True, 'import streamlit as st\n'), ((424, 468), 'openai.OpenAI', 'OpenAI', ([], {'api_key': "st.secrets['OPENAI_API_KEY']"}), "(api_key=st.secrets['OPENAI_API_KEY'])\n", (430, 468), False, 'fro... |
"""LLM Chains for executing Retrival Augmented Generation."""
import base64
import os
from functools import lru_cache
from pathlib import Path
from typing import TYPE_CHECKING, Generator, List, Optional
import torch
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.llms import HuggingFaceTextGenInf... | [
"langchain.llms.HuggingFaceTextGenInference",
"langchain.embeddings.HuggingFaceEmbeddings"
] | [((3156, 3202), 'os.environ.get', 'os.environ.get', (['"""APP_CONFIG_FILE"""', '"""/dev/null"""'], {}), "('APP_CONFIG_FILE', '/dev/null')\n", (3170, 3202), False, 'import os\n'), ((3216, 3262), 'chain_server.configuration.AppConfig.from_file', 'configuration.AppConfig.from_file', (['config_file'], {}), '(config_file)\n... |
from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
from llama_index import LangchainEmbedding
from langchain.embeddings.huggingface import HuggingFaceEmbeddings
from llama_setup import llm
def setup_memory():
documents = SimpleDirectoryReader("./Data").load_data()
embed_model = Lan... | [
"langchain.embeddings.huggingface.HuggingFaceEmbeddings"
] | [((429, 507), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'chunk_size': '(256)', 'llm': 'llm', 'embed_model': 'embed_model'}), '(chunk_size=256, llm=llm, embed_model=embed_model)\n', (457, 507), False, 'from llama_index import VectorStoreIndex, SimpleDirectoryReader, ServiceContext... |
from modules.preprocessors import BasePreprocessor
from modules.templates import CONDENSE_QUESTION_TEMPLATE
from utils import create_collection, create_save_collection
import langchain
from typing import Optional, Any, Dict, Union
from langchain.schema import BaseDocumentTransformer
from langchain.schema.prompt_templa... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache",
"langchain.vectorstores.Chroma"
] | [((1674, 1689), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (1687, 1689), False, 'from langchain.cache import InMemoryCache\n'), ((3798, 3896), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""answer"""', 'return... |
import os
from dotenv import load_dotenv, find_dotenv
_ = load_dotenv(find_dotenv()) # read local .env file
from langchain.chains import RetrievalQA
from langchain.chat_models import ChatOpenAI
from langchain.document_loaders import CSVLoader
from langchain.indexes import VectorstoreIndexCreator
from langchain.vecto... | [
"langchain.evaluation.qa.QAEvalChain.from_llm",
"langchain.document_loaders.CSVLoader",
"langchain.indexes.VectorstoreIndexCreator",
"langchain.chat_models.ChatOpenAI"
] | [((409, 434), 'langchain.document_loaders.CSVLoader', 'CSVLoader', ([], {'file_path': 'file'}), '(file_path=file)\n', (418, 434), False, 'from langchain.document_loaders import CSVLoader\n'), ((565, 592), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([], {'temperature': '(0.0)'}), '(temperature=0.0)\n', (575, 592)... |
import os
import openai
import sys
import langchain
from langchain.document_loaders import PyPDFLoader
import pinecone
import numpy as np
from langchain.embeddings.openai import OpenAIEmbeddings
import tensorflow as tf
sys.path.append("../..")
sys.path.append("/path/to/pinecone-client")
from dotenv import load_doten... | [
"langchain.document_loaders.PyPDFLoader",
"langchain.embeddings.openai.OpenAIEmbeddings"
] | [((221, 245), 'sys.path.append', 'sys.path.append', (['"""../.."""'], {}), "('../..')\n", (236, 245), False, 'import sys\n'), ((246, 289), 'sys.path.append', 'sys.path.append', (['"""/path/to/pinecone-client"""'], {}), "('/path/to/pinecone-client')\n", (261, 289), False, 'import sys\n'), ((424, 453), 'langchain.documen... |
import langchain_visualizer # isort:skip # noqa: F401
from fvalues import FValue
from langchain import FewShotPromptTemplate, PromptTemplate
def test_few_shot_f():
examples = [
{"word": "happy", "antonym": "sad"},
{"word": "tall", "antonym": "short"},
# Should be able to handle extra ke... | [
"langchain.FewShotPromptTemplate",
"langchain.PromptTemplate"
] | [((455, 544), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['word', 'antonym']", 'template': '"""w={word},a={antonym}"""'}), "(input_variables=['word', 'antonym'], template=\n 'w={word},a={antonym}')\n", (469, 544), False, 'from langchain import FewShotPromptTemplate, PromptTemplate\n'), (... |
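The row above tests a few-shot prompt; here is a minimal sketch of the `FewShotPromptTemplate` it exercises, reusing the example pairs and example prompt visible in the row. The suffix and the formatted input are assumptions, since the test body is truncated.

```python
from langchain import FewShotPromptTemplate, PromptTemplate

examples = [
    {"word": "happy", "antonym": "sad"},
    {"word": "tall", "antonym": "short"},
]
example_prompt = PromptTemplate(
    input_variables=["word", "antonym"], template="w={word},a={antonym}"
)
# Assemble the few-shot prompt and format it for a new input word.
few_shot = FewShotPromptTemplate(
    examples=examples,
    example_prompt=example_prompt,
    suffix="w={input},a=",
    input_variables=["input"],
)
print(few_shot.format(input="big"))
```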
import langchain.utilities.opaqueprompts as op
from langchain import LLMChain, PromptTemplate
from langchain.llms import OpenAI
from langchain.llms.opaqueprompts import OpaquePrompts
from langchain.memory import ConversationBufferWindowMemory
from langchain.schema.output_parser import StrOutputParser
from langchain.sch... | [
"langchain.memory.ConversationBufferWindowMemory",
"langchain.schema.output_parser.StrOutputParser",
"langchain.llms.OpenAI",
"langchain.PromptTemplate.from_template",
"langchain.utilities.opaqueprompts.desanitize"
] | [((2863, 2871), 'langchain.llms.OpenAI', 'OpenAI', ([], {}), '()\n', (2869, 2871), False, 'from langchain.llms import OpenAI\n'), ((2805, 2850), 'langchain.PromptTemplate.from_template', 'PromptTemplate.from_template', (['prompt_template'], {}), '(prompt_template)\n', (2833, 2850), False, 'from langchain import LLMChai... |
from langchain.chat_models import ChatOpenAI
from dreamsboard.dreams.dreams_personality_chain.base import StoryBoardDreamsGenerationChain
import logging
import langchain
langchain.verbose = True
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Print to the console
handler = logging.StreamHandler()
handler.setLev... | [
"langchain.chat_models.ChatOpenAI"
] | [((205, 232), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (222, 232), False, 'import logging\n'), ((282, 305), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (303, 305), False, 'import logging\n'), ((782, 806), 'langchain.chat_models.ChatOpenAI', 'ChatOpenAI', ([],... |
from unittest.mock import MagicMock
import pytest
from langchain_core.callbacks.base import BaseCallbackHandler
from langchain_core.outputs import GenerationChunk
from genai import Client
from genai.extensions.langchain import LangChainInterface
from genai.schema import (
TextGenerationCreateEndpoint,
TextGen... | [
"langchain_core.callbacks.base.BaseCallbackHandler"
] | [((635, 661), 'genai.schema.TextGenerationParameters', 'TextGenerationParameters', ([], {}), '()\n', (659, 661), False, 'from genai.schema import TextGenerationCreateEndpoint, TextGenerationCreateResponse, TextGenerationParameters, TextGenerationStreamCreateEndpoint, TextGenerationStreamCreateResponse\n'), ((783, 868),... |
"""Test Upstash Redis cache functionality."""
import uuid
import pytest
import langchain
from langchain.cache import UpstashRedisCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_... | [
"langchain.llm_cache.clear",
"langchain.schema.Generation",
"langchain.llm_cache.redis.flushall",
"langchain.llm_cache.redis.pttl",
"langchain.llm_cache._key",
"langchain.llm_cache.lookup"
] | [((436, 473), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (456, 473), False, 'import pytest\n'), ((809, 846), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (829, 846), False, 'import pytest\n'), ((2491, 252... |
from uuid import UUID
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser, initialize_agent
from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate, ChatPromptTemplate, AIMessagePromptTemplate, PromptTemplate
from langchain import OpenAI, SerpAPIWrappe... | [
"langchain.agents.AgentExecutor.from_agent_and_tools",
"langchain.agents.LLMSingleActionAgent",
"langchain.LLMChain",
"langchain.schema.AgentAction",
"langchain.chat_models.ChatOpenAI",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.prompts.HumanMessagePromptTemplate",
"langchain.sch... | [((987, 1047), 'langchain.prompts.SystemMessagePromptTemplate', 'SystemMessagePromptTemplate', ([], {'prompt': 'test_human_system_prompt'}), '(prompt=test_human_system_prompt)\n', (1014, 1047), False, 'from langchain.prompts import HumanMessagePromptTemplate, SystemMessagePromptTemplate, ChatPromptTemplate, AIMessagePr... |
"""Test the LangChain+ client."""
import uuid
from datetime import datetime
from typing import Any, Dict, List, Optional, Union
from unittest import mock
import pytest
from langchainplus_sdk.client import LangChainPlusClient
from langchainplus_sdk.schemas import Dataset, Example
from langchain.base_language import Ba... | [
"langchainplus_sdk.client.LangChainPlusClient",
"langchain.client.runner_utils._get_prompts",
"langchain.client.runner_utils._get_messages",
"langchain.client.runner_utils.arun_on_dataset"
] | [((645, 674), 'datetime.datetime', 'datetime', (['(2015)', '(1)', '(1)', '(0)', '(0)', '(0)'], {}), '(2015, 1, 1, 0, 0, 0)\n', (653, 674), False, 'from datetime import datetime\n'), ((1406, 1456), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""inputs"""', '_VALID_MESSAGES'], {}), "('inputs', _VALID_MESSAGE... |
"""Langchain BaseHandler instrumentation"""
import logging
from typing import Collection
from opentelemetry.trace import get_tracer
from opentelemetry.instrumentation.langchain.version import __version__
from opentelemetry.semconv.ai import TraceloopSpanKindValues
from otel_lib.instrumentor import LangChainHandlerInst... | [
"langchain.chains.SequentialChain",
"langchain.prompts.HumanMessagePromptTemplate.from_template",
"langchain.chat_models.ChatOpenAI",
"langchain.schema.HumanMessage",
"langchain.prompts.ChatPromptTemplate.from_messages",
"langchain.schema.SystemMessage",
"langchain.chains.LLMChain"
] | [((340, 367), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (357, 367), False, 'import logging\n'), ((1248, 1312), 'opentelemetry.exporter.otlp.proto.grpc.trace_exporter.OTLPSpanExporter', 'OTLPSpanExporter', ([], {'endpoint': "(os.environ['OTLP_EXPORTER'] + ':4317')"}), "(endpoint=os.en... |
import time
import unittest.mock
from typing import Any
from uuid import UUID
from langchainplus_sdk import LangChainPlusClient
from langchain.callbacks.tracers.langchain import LangChainTracer
from langchain.callbacks.tracers.schemas import Run
from langchain.schema.output import LLMResult
def test_example_id_assi... | [
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchainplus_sdk.LangChainPlusClient",
"langchain.schema.output.LLMResult"
] | [((741, 762), 'langchainplus_sdk.LangChainPlusClient', 'LangChainPlusClient', ([], {}), '()\n', (760, 762), False, 'from langchainplus_sdk import LangChainPlusClient\n'), ((780, 810), 'langchain.callbacks.tracers.langchain.LangChainTracer', 'LangChainTracer', ([], {'client': 'client'}), '(client=client)\n', (795, 810),... |
"""
The ``mlflow.langchain`` module provides an API for logging and loading LangChain models.
This module exports multivariate LangChain models in the langchain flavor and univariate
LangChain models in the pyfunc flavor:
LangChain (native) format
This is the main flavor that can be accessed with LangChain APIs.
:... | [
"langchain.agents.initialize_agent",
"langchain.chains.loading.load_chain"
] | [((1906, 1940), 'logging.getLogger', 'logging.getLogger', (['mlflow.__name__'], {}), '(mlflow.__name__)\n', (1923, 1940), False, 'import logging\n'), ((6540, 6616), 'mlflow.utils.environment._validate_env_arguments', '_validate_env_arguments', (['conda_env', 'pip_requirements', 'extra_pip_requirements'], {}), '(conda_e... |
# Copyright (c) Meta Platforms, Inc. and affiliates.
# This software may be used and distributed according to the terms of the Llama 2 Community License Agreement.
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
os.environ[... | [
"langchain.llms.Replicate"
] | [((488, 595), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (497, 595), False, 'from langchain.llms import R... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""**Document Transformers** are classes to transform Documents.
**Document Transformers** usually used to transform a lot of Documents in a single run.
**Class hierarchy:**
.. code-block::
BaseDocumentTransformer --> <name> # Examples: DoctranQATransformer, DoctranTextTranslator
**Main helpers:**
.. code-bl... | [
"langchain.utils.interactive_env.is_interactive_env"
] | [((677, 697), 'langchain.utils.interactive_env.is_interactive_env', 'is_interactive_env', ([], {}), '()\n', (695, 697), False, 'from langchain.utils.interactive_env import is_interactive_env\n'), ((707, 1102), 'warnings.warn', 'warnings.warn', (['f"""Importing document transformers from langchain is deprecated. Importi... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequence,
Tuple,
... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((918, 945), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (935, 945), False, 'import logging\n'), ((3390, 3408), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3406, 3408), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((356... |
import logging
import os
import pickle
import tempfile
import streamlit as st
from dotenv import load_dotenv
from ibm_watson_machine_learning.metanames import GenTextParamsMetaNames as GenParams
from ibm_watson_machine_learning.foundation_models.utils.enums import ModelTypes
from langchain.callbacks import StdOutCallb... | [
"langchain.chains.question_answering.load_qa_chain",
"langchain.embeddings.HuggingFaceHubEmbeddings",
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.callbacks.StdOutCallbackHandler",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.PyPDFLoader"
] | [((861, 993), 'streamlit.set_page_config', 'st.set_page_config', ([], {'page_title': '"""Retrieval Augmented Generation"""', 'page_icon': '"""🧊"""', 'layout': '"""wide"""', 'initial_sidebar_state': '"""expanded"""'}), "(page_title='Retrieval Augmented Generation', page_icon=\n '🧊', layout='wide', initial_sidebar_s... |
# Import Langchain modules
from langchain.document_loaders import PyPDFLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.embeddings import OpenAIEmbeddings
from langchain.vectorstores import FAISS
from langchain.chains import RetrievalQA
from langchain.llms import OpenAI
# Impo... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.llms.OpenAI",
"langchain.vectorstores.FAISS.from_documents",
"langchain.document_loaders.PyPDFLoader",
"langchain.embeddings.OpenAIEmbeddings"
] | [((573, 606), 'warnings.filterwarnings', 'warnings.filterwarnings', (['"""ignore"""'], {}), "('ignore')\n", (596, 606), False, 'import warnings\n'), ((712, 808), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO', 'format': '"""%(asctime)s - %(levelname)s - %(message)s"""'}), "(level=logging.IN... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
import langchain
from langchain.llms import Replicate
from flask import Flask
from flask import request
import os
import requests
import json
class WhatsAppClient:
API_URL = "https://graph.facebook.com/v17.0/"
WHATSAPP_API_TOKEN = "<Temporary access token from your WhatsApp API Setup>"
WHATSAPP_CLOUD_NUM... | [
"langchain.llms.Replicate"
] | [((1337, 1444), 'langchain.llms.Replicate', 'Replicate', ([], {'model': 'llama2_13b_chat', 'model_kwargs': "{'temperature': 0.01, 'top_p': 1, 'max_new_tokens': 500}"}), "(model=llama2_13b_chat, model_kwargs={'temperature': 0.01, 'top_p':\n 1, 'max_new_tokens': 500})\n", (1346, 1444), False, 'from langchain.llms impo... |
"""Utility functions for mlflow.langchain."""
import json
import logging
import os
import shutil
import types
from functools import lru_cache
from importlib.util import find_spec
from typing import NamedTuple
import cloudpickle
import yaml
from packaging import version
import mlflow
from mlflow.utils.class_utils impo... | [
"langchain.schema.output_parser.StrOutputParser",
"langchain.agents.initialize_agent",
"langchain.chains.loading.load_chain"
] | [((2001, 2028), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (2018, 2028), False, 'import logging\n'), ((10189, 10235), 'os.path.join', 'os.path.join', (['path', '_MODEL_DATA_YAML_FILE_NAME'], {}), '(path, _MODEL_DATA_YAML_FILE_NAME)\n', (10201, 10235), False, 'import os\n'), ((5685, 57... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import warnings
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
Type,
Type... | [
"langchain.schema.get_buffer_string",
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainT... | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((1286, 1329), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1296, 1329), False, 'from contextvars i... |
"""Beta Feature: base interface for cache."""
import hashlib
import json
from abc import ABC, abstractmethod
from typing import Any, Callable, Dict, List, Optional, Tuple, Type, cast
from sqlalchemy import Column, Integer, String, create_engine, select
from sqlalchemy.engine.base import Engine
from sqlalchemy.orm impo... | [
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.schema.Generation"
] | [((2037, 2055), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (2053, 2055), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((2212, 2244), 'sqlalchemy.Column', 'Column', (['String'], {'primary_key': '(True)'}), '(String, primary_key=True)\n', (2218, 2244), Fal... |
# Needs to be in same directory as configs, data folder
# Imports
from _OpalLLM import OpalLLM
from _OpalLLM import OpalLLM
import sys
sys.path.append('/home/jovyan/.local/lib/python3.8/site-packages')
import torch
from langchain.agents import Tool, AgentExecutor, LLMSingleActionAgent, AgentOutputParser
from langcha... | [
"langchain.chains.ConversationChain",
"langchain.LLMChain",
"langchain.llms.HuggingFacePipeline",
"langchain.PromptTemplate"
] | [((138, 204), 'sys.path.append', 'sys.path.append', (['"""/home/jovyan/.local/lib/python3.8/site-packages"""'], {}), "('/home/jovyan/.local/lib/python3.8/site-packages')\n", (153, 204), False, 'import sys\n'), ((3396, 3513), '_OpalLLM.OpalLLM', 'OpalLLM', ([], {'model': '"""lmsys/vicuna-33b"""', 'temperature': '(0.1)',... |
"""Utilities for running language models or Chains over datasets."""
from __future__ import annotations
import asyncio
import functools
import itertools
import logging
from datetime import datetime
from typing import (
Any,
Callable,
Coroutine,
Dict,
Iterator,
List,
Optional,
Sequence,... | [
"langchain.schema.messages.messages_from_dict",
"langchain.smith.evaluation.string_run_evaluator.StringRunEvaluatorChain.from_run_and_data_type",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.chat_models.openai.ChatOpenAI",
"langchain.callbacks.tracers.evaluation.EvaluatorCallbackHandl... | [((1366, 1393), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1383, 1393), False, 'import logging\n'), ((1704, 1721), 'urllib.parse.urlparse', 'urlparse', (['api_url'], {}), '(api_url)\n', (1712, 1721), False, 'from urllib.parse import urlparse, urlunparse\n'), ((24715, 24735), 'asyncio... |
import langchain_helper
import streamlit as st
st.header("Dumbledore: The PDF Wizard")
# query = st.text_input("Enter your Question here")
if "messages" not in st.session_state:
st.session_state.messages = []
for message in st.session_state.messages:
with st.chat_message(message['role']):
st.markdo... | [
"langchain_helper.get_qa_chain"
] | [((48, 87), 'streamlit.header', 'st.header', (['"""Dumbledore: The PDF Wizard"""'], {}), "('Dumbledore: The PDF Wizard')\n", (57, 87), True, 'import streamlit as st\n'), ((352, 378), 'streamlit.chat_input', 'st.chat_input', (['"""Whats up?"""'], {}), "('Whats up?')\n", (365, 378), True, 'import streamlit as st\n'), ((4... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequenc... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((359... |
"""Beta Feature: base interface for cache."""
from __future__ import annotations
import hashlib
import inspect
import json
import logging
import warnings
from abc import ABC, abstractmethod
from datetime import timedelta
from typing import (
TYPE_CHECKING,
Any,
Callable,
Dict,
Optional,
Sequenc... | [
"langchain.utils.get_from_env",
"langchain.schema.Generation",
"langchain.load.dump.dumps",
"langchain.vectorstores.redis.Redis.from_existing_index",
"langchain.vectorstores.redis.Redis",
"langchain.load.load.loads"
] | [((950, 977), 'logging.getLogger', 'logging.getLogger', (['__file__'], {}), '(__file__)\n', (967, 977), False, 'import logging\n'), ((3422, 3440), 'sqlalchemy.ext.declarative.declarative_base', 'declarative_base', ([], {}), '()\n', (3438, 3440), False, 'from sqlalchemy.ext.declarative import declarative_base\n'), ((359... |
import asyncio
import inspect
import warnings
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Dict, List, Mapping, Optional, Sequence
from pydantic import Field, root_validator
import langchain
from langchain.base_language import BaseLanguageModel
from langchain.callbacks.bas... | [
"langchain.callbacks.manager.AsyncCallbackManager.configure",
"langchain.schema.messages.AIMessage",
"langchain.schema.ChatResult",
"langchain.load.dump.dumps",
"langchain.callbacks.manager.CallbackManager.configure",
"langchain.load.dump.dumpd",
"langchain.schema.RunInfo",
"langchain.schema.messages.... | [((915, 952), 'pydantic.Field', 'Field', ([], {'default_factory': '_get_verbosity'}), '(default_factory=_get_verbosity)\n', (920, 952), False, 'from pydantic import Field, root_validator\n'), ((1026, 1059), 'pydantic.Field', 'Field', ([], {'default': 'None', 'exclude': '(True)'}), '(default=None, exclude=True)\n', (103... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.l... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.l... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.l... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
from __future__ import annotations
import asyncio
import functools
import logging
import os
import uuid
from contextlib import asynccontextmanager, contextmanager
from contextvars import ContextVar
from typing import (
TYPE_CHECKING,
Any,
AsyncGenerator,
Dict,
Generator,
List,
Optional,
... | [
"langchain.callbacks.stdout.StdOutCallbackHandler",
"langchain.callbacks.tracers.wandb.WandbTracer",
"langchain.callbacks.openai_info.OpenAICallbackHandler",
"langchain.callbacks.tracers.stdout.ConsoleCallbackHandler",
"langchain.callbacks.tracers.langchain.LangChainTracer",
"langchain.callbacks.tracers.l... | [((1329, 1356), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1346, 1356), False, 'import logging\n'), ((1425, 1468), 'contextvars.ContextVar', 'ContextVar', (['"""openai_callback"""'], {'default': 'None'}), "('openai_callback', default=None)\n", (1435, 1468), False, 'from contextvars i... |
from modules.preprocessors import BasePreprocessor
from modules.templates import CONDENSE_QUESTION_TEMPLATE
from utils import create_collection, create_save_collection
import langchain
from typing import Optional, Any, Dict, Union
from langchain.schema import BaseDocumentTransformer
from langchain.schema.prompt_templa... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache",
"langchain.vectorstores.Chroma"
] | [((1674, 1689), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (1687, 1689), False, 'from langchain.cache import InMemoryCache\n'), ((3798, 3896), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""answer"""', 'return... |
from modules.preprocessors import BasePreprocessor
from modules.templates import CONDENSE_QUESTION_TEMPLATE
from utils import create_collection, create_save_collection
import langchain
from typing import Optional, Any, Dict, Union
from langchain.schema import BaseDocumentTransformer
from langchain.schema.prompt_templa... | [
"langchain.text_splitter.RecursiveCharacterTextSplitter",
"langchain.chains.ConversationalRetrievalChain.from_llm",
"langchain.memory.ConversationBufferMemory",
"langchain.chat_models.ChatOpenAI",
"langchain.cache.InMemoryCache",
"langchain.vectorstores.Chroma"
] | [((1674, 1689), 'langchain.cache.InMemoryCache', 'InMemoryCache', ([], {}), '()\n', (1687, 1689), False, 'from langchain.cache import InMemoryCache\n'), ((3798, 3896), 'langchain.memory.ConversationBufferMemory', 'ConversationBufferMemory', ([], {'memory_key': '"""chat_history"""', 'output_key': '"""answer"""', 'return... |
import langchain_visualizer # isort:skip # noqa: F401
from fvalues import FValue
from langchain import FewShotPromptTemplate, PromptTemplate
def test_few_shot_f():
examples = [
{"word": "happy", "antonym": "sad"},
{"word": "tall", "antonym": "short"},
# Should be able to handle extra ke... | [
"langchain.FewShotPromptTemplate",
"langchain.PromptTemplate"
] | [((455, 544), 'langchain.PromptTemplate', 'PromptTemplate', ([], {'input_variables': "['word', 'antonym']", 'template': '"""w={word},a={antonym}"""'}), "(input_variables=['word', 'antonym'], template=\n 'w={word},a={antonym}')\n", (469, 544), False, 'from langchain import FewShotPromptTemplate, PromptTemplate\n'), (... |
"""Test Upstash Redis cache functionality."""
import uuid
import pytest
import langchain
from langchain.cache import UpstashRedisCache
from langchain.schema import Generation, LLMResult
from tests.unit_tests.llms.fake_chat_model import FakeChatModel
from tests.unit_tests.llms.fake_llm import FakeLLM
URL = "<UPSTASH_... | [
"langchain.llm_cache.clear",
"langchain.schema.Generation",
"langchain.llm_cache.redis.flushall",
"langchain.llm_cache.redis.pttl",
"langchain.llm_cache._key",
"langchain.llm_cache.lookup"
] | [((436, 473), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (456, 473), False, 'import pytest\n'), ((809, 846), 'pytest.mark.requires', 'pytest.mark.requires', (['"""upstash_redis"""'], {}), "('upstash_redis')\n", (829, 846), False, 'import pytest\n'), ((2491, 252... |