Datasets:
File size: 173,988 bytes
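
Each row below is pipe-delimited: a row id, a row index, and a JSON payload describing the test contexts extracted from one repository (top-level fields `repo`, `n_pairs`, `version`, plus a `contexts` map keyed by `test_file::line`, whose entries carry `resolved_imports`, `used_names`, `enclosing_function`, `extracted_code`, and the `n_*` counters). A minimal Python sketch for loading such a row; the `" | "` separator and all field names are read off the record below, while `parse_row` and `summarize` are illustrative helpers, not part of any dataset tooling:

import json

def parse_row(line: str) -> tuple[str, int, dict]:
    # Assumed row layout: "<row_id> | <row_index> | <json_payload>",
    # matching the "cd15502 | 1 | {...}" record shown below.
    # maxsplit=2 keeps any " | " inside the JSON payload intact.
    row_id, row_index, payload = line.split(" | ", 2)
    return row_id.strip(), int(row_index), json.loads(payload)

def summarize(record: dict) -> None:
    # All field names below appear verbatim in the record.
    print(record["repo"], record["version"], record["n_pairs"])
    for test_loc, ctx in record["contexts"].items():
        print(f"{test_loc}: {ctx['enclosing_function']} "
              f"(imports_parsed={ctx['n_imports_parsed']}, "
              f"files_resolved={ctx['n_files_resolved']}, "
              f"chars_extracted={ctx['n_chars_extracted']})")

On the record below, `summarize` would print one line per test context, e.g. `tests/memory/test_buffer_memory.py::9: test_buffer_kv_memory (imports_parsed=2, files_resolved=2, chars_extracted=1215)`.
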
cd15502 | 1 | {"repo": "Forethought-Technologies/AutoChain", "n_pairs": 38, "version": "v2_function_scoped", "contexts": {"tests/memory/test_long_term_memory.py::18": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "LongTermMemory"], "enclosing_function": "test_long_term_kv_memory_chromadb", "extracted_code": "# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory = {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. 
Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3689}, "tests/memory/test_buffer_memory.py::9": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/buffer_memory.py"], "used_names": ["BufferMemory"], "enclosing_function": "test_buffer_kv_memory", "extracted_code": "# Source: autochain/memory/buffer_memory.py\nclass BufferMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if not key:\n return self.kv_memory\n\n return self.kv_memory.get(key, default)\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.kv_memory = {}", "n_imports_parsed": 2, "n_files_resolved": 2, "n_chars_extracted": 1215}, "tests/tools/test_chromadb_tool.py::13": {"resolved_imports": ["autochain/tools/internal_search/chromadb_tool.py"], "used_names": ["ChromaDBSearch", "ChromaDoc"], "enclosing_function": "test_chromadb_tool_run", "extracted_code": "# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDoc:\n doc: str\n metadata: Dict[str, Any]\n id: str = field(default_factory=lambda: str(uuid.uuid1()))\n\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. 
Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 1, "n_files_resolved": 1, "n_chars_extracted": 2035}, "tests/tools/test_base_tool.py::17": {"resolved_imports": ["autochain/tools/base.py"], "used_names": ["Tool"], "enclosing_function": "test_run_tool", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a 
positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 3566}, "tests/agent/test_openai_functions_agent.py::97": {"resolved_imports": ["autochain/agent/message.py", "autochain/agent/openai_functions_agent/openai_functions_agent.py", "autochain/agent/structs.py", "autochain/models/chat_openai.py"], "used_names": ["AgentAction", "ChatMessageHistory", "ChatOpenAI", "MessageType", "OpenAIFunctionsAgent"], "enclosing_function": "test_function_calling_plan", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\nclass ChatMessageHistory(BaseModel):\n messages: List[BaseMessage] = []\n\n def save_message(self, message: str, message_type: MessageType, **kwargs):\n if message_type == MessageType.AIMessage:\n self.messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n self.messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n self.messages.append(\n FunctionMessage(\n content=message,\n name=kwargs[\"name\"],\n conversational_message=kwargs[\"conversational_message\"],\n )\n )\n elif message_type == MessageType.SystemMessage:\n self.messages.append(SystemMessage(content=message))\n\n def format_message(self):\n string_messages = []\n if len(self.messages) > 0:\n for m in self.messages:\n if isinstance(m, FunctionMessage):\n string_messages.append(f\"Action: {m.conversational_message}\")\n continue\n\n if isinstance(m, UserMessage):\n role = \"User\"\n elif isinstance(m, AIMessage):\n role = \"Assistant\"\n elif isinstance(m, SystemMessage):\n role = \"System\"\n else:\n continue\n string_messages.append(f\"{role}: {m.content}\")\n return \"\\n\".join(string_messages) + \"\\n\"\n return \"\"\n\n def get_latest_user_message(self) -> UserMessage:\n for message in reversed(self.messages):\n if isinstance(message, UserMessage):\n return message\n return UserMessage(content=\"n/a\")\n\n def clear(self) -> None:\n self.messages = []\n\n\n# Source: autochain/agent/openai_functions_agent/openai_functions_agent.py\nclass OpenAIFunctionsAgent(BaseAgent):\n \"\"\"\n Agent supports function calling natively in OpenAI, which leverage function message to\n determine which tool should be used\n When tool is not selected, responds just like conversational agent\n Tool descriptions are generated from typing from the tool\n \"\"\"\n\n llm: BaseLanguageModel = None\n allowed_tools: Dict[str, Tool] = {}\n tools: List[Tool] = []\n prompt: Optional[str] = None\n min_confidence: int = 3\n\n @classmethod\n def from_llm_and_tools(\n cls,\n llm: BaseLanguageModel,\n tools: 
Optional[List[Tool]] = None,\n output_parser: Optional[OpenAIFunctionOutputParser] = None,\n prompt: str = None,\n min_confidence: int = 3,\n **kwargs: Any,\n ) -> OpenAIFunctionsAgent:\n tools = tools or []\n\n allowed_tools = {tool.name: tool for tool in tools}\n _output_parser = output_parser or OpenAIFunctionOutputParser()\n return cls(\n llm=llm,\n allowed_tools=allowed_tools,\n output_parser=_output_parser,\n tools=tools,\n prompt=prompt,\n min_confidence=min_confidence,\n **kwargs,\n )\n\n def plan(\n self,\n history: ChatMessageHistory,\n intermediate_steps: List[AgentAction],\n retries: int = 2,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n while retries > 0:\n print_with_color(\"Planning\", Fore.LIGHTYELLOW_EX)\n\n final_messages = []\n if self.prompt:\n final_messages.append(SystemMessage(content=self.prompt))\n final_messages += history.messages\n\n logger.info(f\"\\nPlanning Input: {[m.content for m in final_messages]} \\n\")\n full_output: Generation = self.llm.generate(\n final_messages, self.tools\n ).generations[0]\n\n agent_output: Union[AgentAction, AgentFinish] = self.output_parser.parse(\n full_output.message\n )\n print(\n f\"Planning output: \\nmessage content: {repr(full_output.message.content)}; \"\n f\"function_call: \"\n f\"{repr(full_output.message.function_call)}\",\n Fore.YELLOW,\n )\n if isinstance(agent_output, AgentAction):\n print_with_color(\n f\"Plan to take action '{agent_output.tool}'\", Fore.LIGHTYELLOW_EX\n )\n\n generation_is_confident = self.is_generation_confident(\n history=history,\n agent_output=agent_output,\n min_confidence=self.min_confidence,\n )\n if not generation_is_confident:\n retries -= 1\n print_with_color(\n f\"Generation is not confident, {retries} retries left\",\n Fore.LIGHTYELLOW_EX,\n )\n continue\n else:\n return agent_output\n\n def is_generation_confident(\n self,\n history: ChatMessageHistory,\n agent_output: Union[AgentAction, AgentFinish],\n min_confidence: int = 3,\n ) -> bool:\n \"\"\"\n Estimate the confidence of the generation\n Args:\n history: history of the conversation\n agent_output: the output from the agent\n min_confidence: minimum confidence score to be considered as confident\n \"\"\"\n\n def _format_assistant_message(action_output: Union[AgentAction, AgentFinish]):\n if isinstance(action_output, AgentFinish):\n assistant_message = f\"Assistant: {action_output.message}\"\n elif isinstance(action_output, AgentAction):\n assistant_message = f\"Action: {action_output.tool} with input: {action_output.tool_input}\"\n else:\n raise ValueError(\"Unsupported action for estimating confidence score\")\n\n return assistant_message\n\n prompt = Template(ESTIMATE_CONFIDENCE_PROMPT).substitute(\n policy=self.prompt,\n conversation_history=history.format_message(),\n assistant_message=_format_assistant_message(agent_output),\n )\n logger.info(f\"\\nEstimate confidence prompt: {prompt} \\n\")\n\n message = UserMessage(content=prompt)\n\n full_output: Generation = self.llm.generate([message], self.tools).generations[\n 0\n ]\n\n estimated_confidence = self.output_parser.parse_estimated_confidence(\n full_output.message\n )\n\n return estimated_confidence >= min_confidence\n\n\n# Source: autochain/agent/structs.py\nclass AgentAction(BaseModel):\n \"\"\"Agent's action to take.\"\"\"\n\n tool: str\n tool_input: Union[str, dict]\n \"\"\"tool outputs\"\"\"\n tool_output: str = \"\"\n\n \"\"\"log message for debugging\"\"\"\n log: str = \"\"\n\n \"\"\"model response or \"\"\"\n model_response: str = \"\"\n\n 
@property\n def response(self):\n \"\"\"message to be stored in memory and shared with next prompt\"\"\"\n if self.model_response and not self.tool_output:\n # share the model response or log message as output if tool fails to call\n return self.model_response\n return (\n f\"Outputs from using tool '{self.tool}' for inputs {self.tool_input} \"\n f\"is '{self.tool_output}'\\n\"\n )\n\n\n# Source: autochain/models/chat_openai.py\nclass ChatOpenAI(BaseLanguageModel):\n \"\"\"Wrapper around OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from autochain.models.chat_openai import ChatOpenAI\n openai = ChatOpenAI()\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0\n \"\"\"What sampling temperature to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n openai_api_key: Optional[str] = None\n openai_organization: Optional[str] = None\n api_type: Optional[str] = None\n \"\"\"OpenAI API type, it can be `openai` or `azure`.\"\"\"\n api_base: Optional[str] = None\n \"\"\"The OpenAI API base url or Azure OpenAI API base url.\"\"\"\n azure_api_version: Optional[str] = None\n \"\"\"Azure API version.\"\"\"\n azure_deployment_name: Optional[str] = None\n \"\"\"Azure deployment name.\"\"\"\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None\n \"\"\"Timeout for requests to OpenAI completion API. Default is 600 seconds.\"\"\"\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n # TODO: support streaming\n # streaming: bool = False\n # \"\"\"Whether to stream the results or not.\"\"\"\n # n: int = 1\n # \"\"\"Number of chat completions to generate for each prompt.\"\"\"\n max_tokens: Optional[int] = None\n \"\"\"Maximum number of tokens to generate.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.ignore\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n openai_api_type = os.environ.get(\"OPENAI_API_TYPE\", \"open_ai\")\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", None)\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n values[\"api_key\"] = openai.api_key = openai_api_key\n values[\"api_type\"] = openai.api_type = openai_api_type\n if openai_api_base:\n values[\"api_base\"] = openai.api_base = openai_api_base\n if openai_api_type == \"azure\":\n values[\"azure_api_version\"] = openai.api_version = os.environ.get(\"OPENAI_API_VERSION\", \"2023-05-15\")\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. 
Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n # if values[\"n\"] < 1:\n # raise ValueError(\"n must be at least 1.\")\n # if values[\"n\"] > 1 and values[\"streaming\"]:\n # raise ValueError(\"n must be 1 when streaming.\")\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n message_dicts, function_dicts, params = self._create_message_dicts(\n messages, functions, stop\n )\n\n generation_param = {\n \"messages\": message_dicts,\n **params,\n }\n if len(function_dicts) > 0:\n generation_param[\"functions\"] = function_dicts\n\n response = self.generate_with_retry(**generation_param)\n return self._create_llm_result(response)\n\n def _create_message_dicts(\n self,\n messages: List[BaseMessage],\n tools: Optional[List[Tool]],\n stop: Optional[List[str]],\n ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], Dict[str, Any]]:\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if self.azure_deployment_name and self.api_type == \"azure\":\n params[\"engine\"] = self.azure_deployment_name\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n message_dicts = [convert_message_to_dict(m) for m in messages]\n function_dicts = []\n if tools:\n function_dicts = [convert_tool_to_dict(t) for t in tools]\n return message_dicts, function_dicts, params\n\n def _create_llm_result(self, response: Mapping[str, Any]) -> LLMResult:\n generations = []\n for res in response[\"choices\"]:\n message = convert_dict_to_message(res[\"message\"])\n gen = Generation(message=message)\n generations.append(gen)\n llm_output = {\"token_usage\": response[\"usage\"], \"model_name\": self.model_name}\n result = LLMResult(generations=generations, llm_output=llm_output)\n return result", "n_imports_parsed": 6, "n_files_resolved": 4, "n_chars_extracted": 12937}, "tests/tools/test_google_search.py::24": {"resolved_imports": ["autochain/tools/google_search/util.py"], "used_names": ["GoogleSearchAPIWrapper", "os"], "enclosing_function": "test_google_search", "extracted_code": "# Source: autochain/tools/google_search/util.py\nclass GoogleSearchAPIWrapper(BaseModel):\n \"\"\"Wrapper for Google Search API.\n\n Adapted from: Instructions adapted from https://stackoverflow.com/questions/\n 37083058/\n programmatically-searching-google-in-python-using-custom-search\n\n TODO: DOCS for using it\n 1. Install google-api-python-client\n - If you don't already have a Google account, sign up.\n - If you have never created a Google APIs Console project,\n read the Managing Projects page and create a project in the Google API Console.\n - Install the library using pip install google-api-python-client\n The current version of the library is 2.70.0 at this time\n\n 2. To create an API key:\n - Navigate to the APIs & Services→Credentials panel in Cloud Console.\n - Select Create credentials, then select API key from the drop-down menu.\n - The API key created dialog box displays your newly created key.\n - You now have an API_KEY\n\n 3. Setup Custom Search Engine so you can search the entire web\n - Create a custom search engine in this link.\n - In Sites to search, add any valid URL (i.e. 
www.stackoverflow.com).\n - That’s all you have to fill up, the rest doesn’t matter.\n In the left-side menu, click Edit search engine → {your search engine name}\n → Setup Set Search the entire web to ON. Remove the URL you added from\n the list of Sites to search.\n - Under Search engine ID you’ll find the search-engine-ID.\n\n 4. Enable the Custom Search API\n - Navigate to the APIs & Services→Dashboard panel in Cloud Console.\n - Click Enable APIs and Services.\n - Search for Custom Search API and click on it.\n - Click Enable.\n URL for it: https://console.cloud.google.com/apis/library/customsearch.googleapis\n .com\n \"\"\"\n\n search_engine: Any #: :meta private:\n google_api_key: Optional[str] = None\n google_cse_id: Optional[str] = None\n k: int = 10\n siterestrict: bool = False\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n\n def _google_search_results(self, search_term: str, **kwargs: Any) -> List[dict]:\n cse = self.search_engine.cse()\n if self.siterestrict:\n cse = cse.siterestrict()\n res = cse.list(q=search_term, cx=self.google_cse_id, **kwargs).execute()\n return res.get(\"items\", [])\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n google_api_key = get_from_dict_or_env(\n values, \"google_api_key\", \"GOOGLE_API_KEY\"\n )\n values[\"google_api_key\"] = google_api_key\n\n google_cse_id = get_from_dict_or_env(values, \"google_cse_id\", \"GOOGLE_CSE_ID\")\n values[\"google_cse_id\"] = google_cse_id\n\n try:\n from googleapiclient.discovery import build\n\n except ImportError:\n raise ImportError(\n \"google-api-python-client is not installed. \"\n \"Please install it with `pip install google-api-python-client`\"\n )\n\n service = build(\"customsearch\", \"v1\", developerKey=google_api_key)\n values[\"search_engine\"] = service\n\n return values\n\n def run(self, query: str) -> str:\n \"\"\"Run query through GoogleSearch and parse result.\"\"\"\n snippets = []\n results = self._google_search_results(query, num=self.k)\n if len(results) == 0:\n return \"No good Google Search Result was found\"\n for result in results:\n if \"snippet\" in result:\n snippets.append(result[\"snippet\"])\n\n return \" \".join(snippets)\n\n def results(self, query: str, num_results: int) -> List[Dict]:\n \"\"\"Run query through GoogleSearch and return metadata.\n\n Args:\n query: The query to search for.\n num_results: The number of results to return.\n\n Returns:\n A list of dictionaries with the following keys:\n snippet - The description of the result.\n title - The title of the result.\n link - The link to the result.\n \"\"\"\n metadata_results = []\n results = self._google_search_results(query, num=num_results)\n if len(results) == 0:\n return [{\"Result\": \"No good Google Search Result was found\"}]\n for result in results:\n metadata_result = {\n \"title\": result[\"title\"],\n \"link\": result[\"link\"],\n }\n if \"snippet\" in result:\n metadata_result[\"snippet\"] = result[\"snippet\"]\n metadata_results.append(metadata_result)\n\n return metadata_results", "n_imports_parsed": 4, "n_files_resolved": 1, "n_chars_extracted": 4744}, "tests/agent/test_conversational_agent.py::102": {"resolved_imports": ["autochain/agent/conversational_agent/conversational_agent.py", "autochain/agent/message.py", "autochain/agent/structs.py", "autochain/models/chat_openai.py", "autochain/tools/simple_handoff/tool.py"], "used_names": ["AgentFinish", 
"ChatMessageHistory", "ChatOpenAI", "ConversationalAgent", "MessageType", "os"], "enclosing_function": "test_should_answer_prompt", "extracted_code": "# Source: autochain/agent/conversational_agent/conversational_agent.py\nclass ConversationalAgent(BaseAgent):\n \"\"\"\n Conversational agent who can use tools available to make a conversation by following\n the conversational planning prompt\n \"\"\"\n\n output_parser: ConvoJSONOutputParser = ConvoJSONOutputParser()\n llm: BaseLanguageModel = None\n prompt_template: JSONPromptTemplate = None\n allowed_tools: Dict[str, Tool] = {}\n tools: List[Tool] = []\n\n # Optionally you could set a prompt for this conversational agent or directly update the prompt\n prompt: str = \"\"\n\n @classmethod\n def from_llm_and_tools(\n cls,\n llm: BaseLanguageModel,\n tools: Optional[List[Tool]] = None,\n output_parser: Optional[ConvoJSONOutputParser] = None,\n prompt_template: str = PLANNING_PROMPT_TEMPLATE,\n input_variables: Optional[List[str]] = None,\n prompt: str = \"\",\n **kwargs: Any,\n ) -> ConversationalAgent:\n \"\"\"Construct an agent from an LLM and tools.\"\"\"\n tools = tools or []\n\n template = cls.get_prompt_template(\n template=prompt_template,\n input_variables=input_variables,\n )\n\n allowed_tools = {tool.name: tool for tool in tools}\n _output_parser = output_parser or ConvoJSONOutputParser()\n return cls(\n llm=llm,\n allowed_tools=allowed_tools,\n output_parser=_output_parser,\n prompt_template=template,\n tools=tools,\n prompt=prompt,\n **kwargs,\n )\n\n def should_answer(\n self,\n should_answer_prompt_template: str = SHOULD_ANSWER_PROMPT_TEMPLATE,\n **kwargs,\n ) -> Optional[AgentFinish]:\n \"\"\"Determine if agent should continue to answer user questions based on the latest user\n query\"\"\"\n if \"history\" not in kwargs or not kwargs[\"history\"]:\n return None\n\n history = kwargs.pop(\"history\")\n inputs = {\n \"history\": history.format_message(),\n **kwargs,\n }\n\n def _parse_response(res: str):\n if \"yes\" in res.lower():\n return AgentFinish(\n message=\"Thank your for contacting\",\n log=\"Thank your for contacting\",\n )\n else:\n return None\n\n prompt = Template(should_answer_prompt_template).substitute(**inputs)\n response = (\n self.llm.generate([UserMessage(content=prompt)])\n .generations[0]\n .message.content\n )\n return _parse_response(response)\n\n @staticmethod\n def format_prompt(\n template: JSONPromptTemplate,\n intermediate_steps: List[AgentAction],\n **kwargs: Any,\n ) -> List[BaseMessage]:\n def _construct_scratchpad(\n actions: List[AgentAction],\n ) -> Union[str, List[BaseMessage]]:\n scratchpad = \"\"\n for action in actions:\n scratchpad += action.response\n return scratchpad\n\n \"\"\"Create the planning inputs for the LLMChain from intermediate steps.\"\"\"\n thoughts = _construct_scratchpad(intermediate_steps)\n new_inputs = {\"agent_scratchpad\": thoughts}\n full_inputs = {**kwargs, **new_inputs}\n prompt = template.format_prompt(**full_inputs)\n return prompt\n\n @staticmethod\n def get_prompt_template(\n template: str = \"\",\n input_variables: Optional[List[str]] = None,\n ) -> JSONPromptTemplate:\n \"\"\"Create prompt in the style of the zero shot agent.\n\n Args:\n template: message to be injected between prefix and suffix.\n input_variables: List of input variables the final prompt will expect.\n\n Returns:\n A PromptTemplate with the template assembled from the pieces here.\n \"\"\"\n template = Template(template)\n\n if input_variables is None:\n input_variables = [\"input\", 
\"agent_scratchpad\"]\n return JSONPromptTemplate(template=template, input_variables=input_variables)\n\n def plan(\n self,\n history: ChatMessageHistory,\n intermediate_steps: List[AgentAction],\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n \"\"\"\n Plan the next step. either taking an action with AgentAction or respond to user with AgentFinish\n Args:\n history: entire chat conversation between user and agent including the latest query\n intermediate_steps: List of AgentAction that has been performed with outputs\n **kwargs: key value pairs from chain, which contains query and other stored memories\n\n Returns:\n AgentAction or AgentFinish\n \"\"\"\n print_with_color(\"Planning\", Fore.LIGHTYELLOW_EX)\n tool_names = \", \".join([tool.name for tool in self.tools])\n tool_strings = \"\\n\\n\".join(\n [f\"> {tool.name}: \\n{tool.description}\" for tool in self.tools]\n )\n inputs = {\n \"tool_names\": tool_names,\n \"tools\": tool_strings,\n \"history\": history.format_message(),\n \"prompt\": self.prompt,\n **kwargs,\n }\n final_prompt = self.format_prompt(\n self.prompt_template, intermediate_steps, **inputs\n )\n logger.info(f\"\\nPlanning Input: {final_prompt[0].content} \\n\")\n\n full_output: Generation = self.llm.generate(final_prompt).generations[0]\n agent_output: Union[AgentAction, AgentFinish] = self.output_parser.parse(\n full_output.message\n )\n\n print(f\"Planning output: \\n{repr(full_output.message.content)}\", Fore.YELLOW)\n if isinstance(agent_output, AgentAction):\n print_with_color(\n f\"Plan to take action '{agent_output.tool}'\", Fore.LIGHTYELLOW_EX\n )\n\n return agent_output\n\n def clarify_args_for_agent_action(\n self,\n agent_action: AgentAction,\n history: ChatMessageHistory,\n intermediate_steps: List[AgentAction],\n **kwargs: Any,\n ):\n \"\"\"\n Ask clarifying question if needed. 
When agent is about to perform an action, we could\n use this function with different prompt to ask clarifying question for input if needed.\n Sometimes the planning response would already have the clarifying question, but we found\n it is more precise if there is a different prompt just for clarifying args\n\n Args:\n agent_action: agent action about to take\n history: conversation history including the latest query\n intermediate_steps: list of agent action taken so far\n **kwargs:\n\n Returns:\n Either a clarifying question (AgentFinish) or take the planned action (AgentAction)\n \"\"\"\n print_with_color(\"Deciding if need clarification\", Fore.LIGHTYELLOW_EX)\n if not self.allowed_tools.get(agent_action.tool):\n return agent_action\n else:\n inputs = {\n \"tool_name\": agent_action.tool,\n \"tool_desp\": self.allowed_tools.get(agent_action.tool).description,\n \"history\": history.format_message(),\n **kwargs,\n }\n\n clarifying_template = self.get_prompt_template(\n template=CLARIFYING_QUESTION_PROMPT_TEMPLATE\n )\n\n final_prompt = self.format_prompt(\n clarifying_template, intermediate_steps, **inputs\n )\n logger.info(f\"\\nClarification inputs: {final_prompt[0].content}\")\n full_output: Generation = self.llm.generate(final_prompt).generations[0]\n print(f\"Clarification outputs: {repr(full_output.message.content)}\")\n return self.output_parser.parse_clarification(\n full_output.message, agent_action=agent_action\n )\n\n def fix_action_input(\n self, tool: Tool, action: AgentAction, error: str\n ) -> AgentAction:\n \"\"\"If the tool failed due to error, what should be the fix for inputs\"\"\"\n prompt = FIX_TOOL_INPUT_PROMPT_TEMPLATE.format(\n tool_description=tool.description, inputs=action.tool_input, error=error\n )\n\n logger.info(f\"\\nFixing tool input prompt: {prompt}\")\n messages = UserMessage(content=prompt)\n output = self.llm.generate([messages]).generations[0]\n new_tool_inputs = self.output_parser.load_json_output(output.message)\n\n logger.info(f\"\\nFixed tool output: {new_tool_inputs}\")\n new_action = AgentAction(tool=action.tool, tool_input=new_tool_inputs)\n return new_action\n\n\n# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\nclass ChatMessageHistory(BaseModel):\n messages: List[BaseMessage] = []\n\n def save_message(self, message: str, message_type: MessageType, **kwargs):\n if message_type == MessageType.AIMessage:\n self.messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n self.messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n self.messages.append(\n FunctionMessage(\n content=message,\n name=kwargs[\"name\"],\n conversational_message=kwargs[\"conversational_message\"],\n )\n )\n elif message_type == MessageType.SystemMessage:\n self.messages.append(SystemMessage(content=message))\n\n def format_message(self):\n string_messages = []\n if len(self.messages) > 0:\n for m in self.messages:\n if isinstance(m, FunctionMessage):\n string_messages.append(f\"Action: {m.conversational_message}\")\n continue\n\n if isinstance(m, UserMessage):\n role = \"User\"\n elif isinstance(m, AIMessage):\n role = \"Assistant\"\n elif isinstance(m, SystemMessage):\n role = \"System\"\n else:\n continue\n string_messages.append(f\"{role}: {m.content}\")\n return \"\\n\".join(string_messages) + \"\\n\"\n return \"\"\n\n def get_latest_user_message(self) 
-> UserMessage:\n for message in reversed(self.messages):\n if isinstance(message, UserMessage):\n return message\n return UserMessage(content=\"n/a\")\n\n def clear(self) -> None:\n self.messages = []\n\n\n# Source: autochain/agent/structs.py\nclass AgentFinish(BaseModel):\n \"\"\"Agent's return value.\"\"\"\n\n message: str\n log: str\n intermediate_steps: List[AgentAction] = []\n\n def format_output(self) -> Dict[str, Any]:\n final_output = {\n \"message\": self.message,\n constants.INTERMEDIATE_STEPS: self.intermediate_steps,\n }\n return final_output\n\n\n# Source: autochain/models/chat_openai.py\nclass ChatOpenAI(BaseLanguageModel):\n \"\"\"Wrapper around OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from autochain.models.chat_openai import ChatOpenAI\n openai = ChatOpenAI()\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0\n \"\"\"What sampling temperature to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n openai_api_key: Optional[str] = None\n openai_organization: Optional[str] = None\n api_type: Optional[str] = None\n \"\"\"OpenAI API type, it can be `openai` or `azure`.\"\"\"\n api_base: Optional[str] = None\n \"\"\"The OpenAI API base url or Azure OpenAI API base url.\"\"\"\n azure_api_version: Optional[str] = None\n \"\"\"Azure API version.\"\"\"\n azure_deployment_name: Optional[str] = None\n \"\"\"Azure deployment name.\"\"\"\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None\n \"\"\"Timeout for requests to OpenAI completion API. Default is 600 seconds.\"\"\"\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n # TODO: support streaming\n # streaming: bool = False\n # \"\"\"Whether to stream the results or not.\"\"\"\n # n: int = 1\n # \"\"\"Number of chat completions to generate for each prompt.\"\"\"\n max_tokens: Optional[int] = None\n \"\"\"Maximum number of tokens to generate.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.ignore\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n openai_api_type = os.environ.get(\"OPENAI_API_TYPE\", \"open_ai\")\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", None)\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. 
\"\n \"Please install it with `pip install openai`.\"\n )\n values[\"api_key\"] = openai.api_key = openai_api_key\n values[\"api_type\"] = openai.api_type = openai_api_type\n if openai_api_base:\n values[\"api_base\"] = openai.api_base = openai_api_base\n if openai_api_type == \"azure\":\n values[\"azure_api_version\"] = openai.api_version = os.environ.get(\"OPENAI_API_VERSION\", \"2023-05-15\")\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n # if values[\"n\"] < 1:\n # raise ValueError(\"n must be at least 1.\")\n # if values[\"n\"] > 1 and values[\"streaming\"]:\n # raise ValueError(\"n must be 1 when streaming.\")\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n message_dicts, function_dicts, params = self._create_message_dicts(\n messages, functions, stop\n )\n\n generation_param = {\n \"messages\": message_dicts,\n **params,\n }\n if len(function_dicts) > 0:\n generation_param[\"functions\"] = function_dicts\n\n response = self.generate_with_retry(**generation_param)\n return self._create_llm_result(response)\n\n def _create_message_dicts(\n self,\n messages: List[BaseMessage],\n tools: Optional[List[Tool]],\n stop: Optional[List[str]],\n ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], Dict[str, Any]]:\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if self.azure_deployment_name and self.api_type == \"azure\":\n params[\"engine\"] = self.azure_deployment_name\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n message_dicts = [convert_message_to_dict(m) for m in messages]\n function_dicts = []\n if tools:\n function_dicts = [convert_tool_to_dict(t) for t in tools]\n return message_dicts, function_dicts, params\n\n def _create_llm_result(self, response: Mapping[str, Any]) -> LLMResult:\n generations = []\n for res in response[\"choices\"]:\n message = convert_dict_to_message(res[\"message\"])\n gen = Generation(message=message)\n generations.append(gen)\n llm_output = {\"token_usage\": response[\"usage\"], \"model_name\": self.model_name}\n result = LLMResult(generations=generations, llm_output=llm_output)\n return result", "n_imports_parsed": 9, "n_files_resolved": 5, "n_chars_extracted": 16534}, "tests/memory/test_redis_memory.py::49": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/redis_memory.py"], "used_names": ["AIMessage", "MagicMock", "MessageType", "Redis", "RedisMemory", "UserMessage", "pickle"], "enclosing_function": "test_redis_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\nclass UserMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the human.\"\"\"\n\n example: bool = False\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"user\"\n\nclass AIMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the AI.\"\"\"\n\n example: bool = False\n function_call: Dict[str, Any] = {}\n\n @property\n def type(self) -> 
str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"ai\"\n\n\n# Source: autochain/memory/redis_memory.py\nclass RedisMemory(BaseMemory):\n \"\"\"Store conversation info in redis memory.\"\"\"\n\n expire_time: int = ONE_HOUR\n redis_key_prefix: str\n redis_client: Redis\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Get the key's corresponding value from redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = self.redis_client.get(key)\n if not pickled:\n return default\n return pickle.loads(pickled)\n\n def load_conversation(self, **kwargs: Dict[str, Any]) -> ChatMessageHistory:\n \"\"\"Return chat message history.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n return ChatMessageHistory(messages=self.load_memory(redis_key, []))\n\n def save_memory(self, key: str, value: Any) -> None:\n \"\"\"Save the key value pair to redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = pickle.dumps(value)\n self.redis_client.set(key, pickled, ex=self.expire_time)\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to redis.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n pickled = self.redis_client.get(redis_key)\n if pickled:\n messages: list[BaseMessage] = pickle.loads(pickled)\n else:\n messages = []\n if message_type == MessageType.AIMessage:\n messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n messages.append(FunctionMessage(content=message, name=kwargs[\"name\"]))\n elif message_type == MessageType.SystemMessage:\n messages.append(SystemMessage(content=message))\n else:\n raise ValueError(f\"Unsupported message type: {message_type}\")\n self.save_memory(redis_key, messages)\n\n def clear(self) -> None:\n \"\"\"Clear redis memory.\"\"\"\n for key in self.redis_client.keys(f\"{self.redis_key_prefix}:*\"):\n self.redis_client.delete(key)", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 3239}, "tests/memory/test_redis_memory.py::24": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/redis_memory.py"], "used_names": ["MagicMock", "Redis", "RedisMemory", "pickle"], "enclosing_function": "test_redis_kv_memory", "extracted_code": "# Source: autochain/memory/redis_memory.py\nclass RedisMemory(BaseMemory):\n \"\"\"Store conversation info in redis memory.\"\"\"\n\n expire_time: int = ONE_HOUR\n redis_key_prefix: str\n redis_client: Redis\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Get the key's corresponding value from redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = self.redis_client.get(key)\n if not pickled:\n return default\n return pickle.loads(pickled)\n\n def load_conversation(self, **kwargs: Dict[str, Any]) -> ChatMessageHistory:\n \"\"\"Return chat message history.\"\"\"\n redis_key = self.redis_key_prefix 
+ f\":{ChatMessageHistory.__name__}\"\n return ChatMessageHistory(messages=self.load_memory(redis_key, []))\n\n def save_memory(self, key: str, value: Any) -> None:\n \"\"\"Save the key value pair to redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = pickle.dumps(value)\n self.redis_client.set(key, pickled, ex=self.expire_time)\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to redis.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n pickled = self.redis_client.get(redis_key)\n if pickled:\n messages: list[BaseMessage] = pickle.loads(pickled)\n else:\n messages = []\n if message_type == MessageType.AIMessage:\n messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n messages.append(FunctionMessage(content=message, name=kwargs[\"name\"]))\n elif message_type == MessageType.SystemMessage:\n messages.append(SystemMessage(content=message))\n else:\n raise ValueError(f\"Unsupported message type: {message_type}\")\n self.save_memory(redis_key, messages)\n\n def clear(self) -> None:\n \"\"\"Clear redis memory.\"\"\"\n for key in self.redis_client.keys(f\"{self.redis_key_prefix}:*\"):\n self.redis_client.delete(key)", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 2538}, "tests/tools/test_pinecone_tool.py::17": {"resolved_imports": ["autochain/tools/internal_search/pinecone_tool.py"], "used_names": ["DummyEncoder", "PineconeDoc", "PineconeSearch"], "enclosing_function": "test_pinecone_search", "extracted_code": "# Source: autochain/tools/internal_search/pinecone_tool.py\nclass PineconeDoc:\n doc: str\n vector: List[float] = None\n id: str = field(default_factory=lambda: str(uuid.uuid1()))\n\nclass PineconeSearch(Tool, BaseSearchTool):\n \"\"\"\n Use Pinecone as the internal search tool\n \"\"\"\n\n docs: List[PineconeDoc]\n index_name: str = \"index\"\n index: Optional[Any] = None\n dimension: int = 8\n metric: str = \"euclidean\"\n encoder: BaseLanguageModel = None # such as OpenAIAdaEncoder\n id2doc: Dict[str, str] = {}\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n pinecone.create_index(\n self.index_name, dimension=self.dimension, metric=self.metric\n )\n self.index = pinecone.Index(self.index_name)\n\n self.add_docs(self.docs)\n\n def _encode(self, doc: PineconeDoc) -> None:\n if not doc.vector and self.encoder:\n # TODO: encoder over batches\n doc.vector = self.encoder.encode([doc.doc]).embeddings[0]\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n include_values: bool = False,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_response: QueryResponse) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_response.get(\"matches\", [])\n if len(documents) == 0:\n return \"\"\n\n return \"\\n\".join(\n [\n f\"Doc {i}: {self.id2doc[doc['id']]}\"\n for i, doc in enumerate(documents)\n ]\n )\n\n encoding = self.encoder.encode([query]).embeddings[0]\n\n response: QueryResponse = self.index.query(\n vector=encoding, top_k=top_k, include_values=include_values\n )\n return _format_output(response)\n\n def add_docs(self, docs: List[PineconeDoc], **kwargs):\n if not len(docs):\n return\n\n for doc in docs:\n self._encode(doc)\n self.id2doc[doc.id] = 
doc.doc\n\n self.index.upsert([(d.id, d.vector) for d in docs])\n\n def clear_index(self):\n pinecone.delete_index(self.index_name)\n pinecone.create_index(\n self.index_name, dimension=self.dimension, metric=self.metric\n )\n self.index = pinecone.Index(self.index_name)", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 2389}, "tests/memory/test_long_term_memory.py::47": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "ChromaDoc", "LongTermMemory"], "enclosing_function": "test_long_term_memory", "extracted_code": "# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory = {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDoc:\n doc: str\n metadata: Dict[str, Any]\n id: str = field(default_factory=lambda: str(uuid.uuid1()))\n\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. 
Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3812}, "tests/agent/test_openai_functions_agent.py::129": {"resolved_imports": ["autochain/agent/message.py", "autochain/agent/openai_functions_agent/openai_functions_agent.py", "autochain/agent/structs.py", "autochain/models/chat_openai.py"], "used_names": ["AgentAction", "AgentFinish", "ChatMessageHistory", "ChatOpenAI", "MessageType", "OpenAIFunctionsAgent"], "enclosing_function": "test_estimate_confidence", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\nclass ChatMessageHistory(BaseModel):\n messages: List[BaseMessage] = []\n\n def save_message(self, message: str, message_type: MessageType, **kwargs):\n if message_type == MessageType.AIMessage:\n self.messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n self.messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n self.messages.append(\n FunctionMessage(\n content=message,\n name=kwargs[\"name\"],\n conversational_message=kwargs[\"conversational_message\"],\n )\n )\n elif message_type == MessageType.SystemMessage:\n self.messages.append(SystemMessage(content=message))\n\n def format_message(self):\n string_messages = []\n if len(self.messages) > 0:\n for m in self.messages:\n if isinstance(m, FunctionMessage):\n string_messages.append(f\"Action: {m.conversational_message}\")\n continue\n\n if isinstance(m, UserMessage):\n role = \"User\"\n elif isinstance(m, AIMessage):\n role = \"Assistant\"\n elif isinstance(m, SystemMessage):\n role = \"System\"\n else:\n continue\n string_messages.append(f\"{role}: {m.content}\")\n return \"\\n\".join(string_messages) + \"\\n\"\n return \"\"\n\n def get_latest_user_message(self) -> UserMessage:\n for message in reversed(self.messages):\n if isinstance(message, UserMessage):\n return message\n return UserMessage(content=\"n/a\")\n\n def clear(self) -> None:\n self.messages = []\n\n\n# Source: autochain/agent/openai_functions_agent/openai_functions_agent.py\nclass OpenAIFunctionsAgent(BaseAgent):\n \"\"\"\n Agent supports function calling natively in OpenAI, which leverage function message to\n determine which tool should be used\n When tool is not selected, responds just like conversational agent\n Tool descriptions are generated from typing from the tool\n \"\"\"\n\n llm: BaseLanguageModel = None\n 
allowed_tools: Dict[str, Tool] = {}\n tools: List[Tool] = []\n prompt: Optional[str] = None\n min_confidence: int = 3\n\n @classmethod\n def from_llm_and_tools(\n cls,\n llm: BaseLanguageModel,\n tools: Optional[List[Tool]] = None,\n output_parser: Optional[OpenAIFunctionOutputParser] = None,\n prompt: str = None,\n min_confidence: int = 3,\n **kwargs: Any,\n ) -> OpenAIFunctionsAgent:\n tools = tools or []\n\n allowed_tools = {tool.name: tool for tool in tools}\n _output_parser = output_parser or OpenAIFunctionOutputParser()\n return cls(\n llm=llm,\n allowed_tools=allowed_tools,\n output_parser=_output_parser,\n tools=tools,\n prompt=prompt,\n min_confidence=min_confidence,\n **kwargs,\n )\n\n def plan(\n self,\n history: ChatMessageHistory,\n intermediate_steps: List[AgentAction],\n retries: int = 2,\n **kwargs: Any,\n ) -> Union[AgentAction, AgentFinish]:\n while retries > 0:\n print_with_color(\"Planning\", Fore.LIGHTYELLOW_EX)\n\n final_messages = []\n if self.prompt:\n final_messages.append(SystemMessage(content=self.prompt))\n final_messages += history.messages\n\n logger.info(f\"\\nPlanning Input: {[m.content for m in final_messages]} \\n\")\n full_output: Generation = self.llm.generate(\n final_messages, self.tools\n ).generations[0]\n\n agent_output: Union[AgentAction, AgentFinish] = self.output_parser.parse(\n full_output.message\n )\n print(\n f\"Planning output: \\nmessage content: {repr(full_output.message.content)}; \"\n f\"function_call: \"\n f\"{repr(full_output.message.function_call)}\",\n Fore.YELLOW,\n )\n if isinstance(agent_output, AgentAction):\n print_with_color(\n f\"Plan to take action '{agent_output.tool}'\", Fore.LIGHTYELLOW_EX\n )\n\n generation_is_confident = self.is_generation_confident(\n history=history,\n agent_output=agent_output,\n min_confidence=self.min_confidence,\n )\n if not generation_is_confident:\n retries -= 1\n print_with_color(\n f\"Generation is not confident, {retries} retries left\",\n Fore.LIGHTYELLOW_EX,\n )\n continue\n else:\n return agent_output\n\n def is_generation_confident(\n self,\n history: ChatMessageHistory,\n agent_output: Union[AgentAction, AgentFinish],\n min_confidence: int = 3,\n ) -> bool:\n \"\"\"\n Estimate the confidence of the generation\n Args:\n history: history of the conversation\n agent_output: the output from the agent\n min_confidence: minimum confidence score to be considered as confident\n \"\"\"\n\n def _format_assistant_message(action_output: Union[AgentAction, AgentFinish]):\n if isinstance(action_output, AgentFinish):\n assistant_message = f\"Assistant: {action_output.message}\"\n elif isinstance(action_output, AgentAction):\n assistant_message = f\"Action: {action_output.tool} with input: {action_output.tool_input}\"\n else:\n raise ValueError(\"Unsupported action for estimating confidence score\")\n\n return assistant_message\n\n prompt = Template(ESTIMATE_CONFIDENCE_PROMPT).substitute(\n policy=self.prompt,\n conversation_history=history.format_message(),\n assistant_message=_format_assistant_message(agent_output),\n )\n logger.info(f\"\\nEstimate confidence prompt: {prompt} \\n\")\n\n message = UserMessage(content=prompt)\n\n full_output: Generation = self.llm.generate([message], self.tools).generations[\n 0\n ]\n\n estimated_confidence = self.output_parser.parse_estimated_confidence(\n full_output.message\n )\n\n return estimated_confidence >= min_confidence\n\n\n# Source: autochain/agent/structs.py\nclass AgentAction(BaseModel):\n \"\"\"Agent's action to take.\"\"\"\n\n tool: str\n tool_input: 
Union[str, dict]\n \"\"\"tool outputs\"\"\"\n tool_output: str = \"\"\n\n \"\"\"log message for debugging\"\"\"\n log: str = \"\"\n\n \"\"\"model response or \"\"\"\n model_response: str = \"\"\n\n @property\n def response(self):\n \"\"\"message to be stored in memory and shared with next prompt\"\"\"\n if self.model_response and not self.tool_output:\n # share the model response or log message as output if tool fails to call\n return self.model_response\n return (\n f\"Outputs from using tool '{self.tool}' for inputs {self.tool_input} \"\n f\"is '{self.tool_output}'\\n\"\n )\n\nclass AgentFinish(BaseModel):\n \"\"\"Agent's return value.\"\"\"\n\n message: str\n log: str\n intermediate_steps: List[AgentAction] = []\n\n def format_output(self) -> Dict[str, Any]:\n final_output = {\n \"message\": self.message,\n constants.INTERMEDIATE_STEPS: self.intermediate_steps,\n }\n return final_output\n\n\n# Source: autochain/models/chat_openai.py\nclass ChatOpenAI(BaseLanguageModel):\n \"\"\"Wrapper around OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from autochain.models.chat_openai import ChatOpenAI\n openai = ChatOpenAI()\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0\n \"\"\"What sampling temperature to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n openai_api_key: Optional[str] = None\n openai_organization: Optional[str] = None\n api_type: Optional[str] = None\n \"\"\"OpenAI API type, it can be `openai` or `azure`.\"\"\"\n api_base: Optional[str] = None\n \"\"\"The OpenAI API base url or Azure OpenAI API base url.\"\"\"\n azure_api_version: Optional[str] = None\n \"\"\"Azure API version.\"\"\"\n azure_deployment_name: Optional[str] = None\n \"\"\"Azure deployment name.\"\"\"\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None\n \"\"\"Timeout for requests to OpenAI completion API. Default is 600 seconds.\"\"\"\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n # TODO: support streaming\n # streaming: bool = False\n # \"\"\"Whether to stream the results or not.\"\"\"\n # n: int = 1\n # \"\"\"Number of chat completions to generate for each prompt.\"\"\"\n max_tokens: Optional[int] = None\n \"\"\"Maximum number of tokens to generate.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.ignore\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n openai_api_type = os.environ.get(\"OPENAI_API_TYPE\", \"open_ai\")\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", None)\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. 
\"\n \"Please install it with `pip install openai`.\"\n )\n values[\"api_key\"] = openai.api_key = openai_api_key\n values[\"api_type\"] = openai.api_type = openai_api_type\n if openai_api_base:\n values[\"api_base\"] = openai.api_base = openai_api_base\n if openai_api_type == \"azure\":\n values[\"azure_api_version\"] = openai.api_version = os.environ.get(\"OPENAI_API_VERSION\", \"2023-05-15\")\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n # if values[\"n\"] < 1:\n # raise ValueError(\"n must be at least 1.\")\n # if values[\"n\"] > 1 and values[\"streaming\"]:\n # raise ValueError(\"n must be 1 when streaming.\")\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n message_dicts, function_dicts, params = self._create_message_dicts(\n messages, functions, stop\n )\n\n generation_param = {\n \"messages\": message_dicts,\n **params,\n }\n if len(function_dicts) > 0:\n generation_param[\"functions\"] = function_dicts\n\n response = self.generate_with_retry(**generation_param)\n return self._create_llm_result(response)\n\n def _create_message_dicts(\n self,\n messages: List[BaseMessage],\n tools: Optional[List[Tool]],\n stop: Optional[List[str]],\n ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], Dict[str, Any]]:\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if self.azure_deployment_name and self.api_type == \"azure\":\n params[\"engine\"] = self.azure_deployment_name\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n message_dicts = [convert_message_to_dict(m) for m in messages]\n function_dicts = []\n if tools:\n function_dicts = [convert_tool_to_dict(t) for t in tools]\n return message_dicts, function_dicts, params\n\n def _create_llm_result(self, response: Mapping[str, Any]) -> LLMResult:\n generations = []\n for res in response[\"choices\"]:\n message = convert_dict_to_message(res[\"message\"])\n gen = Generation(message=message)\n generations.append(gen)\n llm_output = {\"token_usage\": response[\"usage\"], \"model_name\": self.model_name}\n result = LLMResult(generations=generations, llm_output=llm_output)\n return result", "n_imports_parsed": 6, "n_files_resolved": 4, "n_chars_extracted": 13293}, "tests/tools/test_base_tool.py::42": {"resolved_imports": ["autochain/tools/base.py"], "used_names": ["Tool", "pytest"], "enclosing_function": "test_arg_description", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n 
args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 3566}, "tests/models/test_chat_openai.py::87": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["Tool", "convert_tool_to_dict"], "enclosing_function": "test_convert_tool_to_dict", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n 
\"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output\n\n\n# Source: autochain/models/chat_openai.py\ndef convert_tool_to_dict(tool: Tool):\n \"\"\"Convert tool into function parameter for openai\"\"\"\n inspection = inspect.getfullargspec(tool.func)\n arg_description = tool.arg_description or {}\n\n def _type_to_string(t: type) -> str:\n prog = re.compile(r\"<class '(\\w+)'>\")\n cls = prog.findall(str(t))\n\n primary_type_map = {\"str\": \"string\"}\n\n if len(cls) > 0:\n cls_name = cls[0].split(\".\")[-1]\n return primary_type_map.get(cls_name, cls_name)\n\n if issubclass(t, enum.Enum):\n return \"enum\"\n\n return str(t)\n\n def _format_property(t: type, arg_desp: str):\n p = {\"type\": _type_to_string(t)}\n if arg_desp:\n p[\"description\"] = arg_desp\n\n return p\n\n arg_annotations = inspection.annotations\n if arg_annotations:\n properties = {\n arg: _format_property(t, arg_description.get(arg))\n for arg, t in arg_annotations.items()\n }\n else:\n properties = {\n arg: _format_property(str, arg_description.get(arg))\n for arg in inspection.args\n }\n\n default_args = inspection.defaults or []\n required_args = inspection.args[: len(inspection.args) - len(default_args)]\n\n output = {\n \"name\": tool.name,\n \"description\": 
tool.description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": properties,\n \"required\": required_args,\n },\n }\n\n return output", "n_imports_parsed": 7, "n_files_resolved": 4, "n_chars_extracted": 5104}, "tests/memory/test_buffer_memory.py::12": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/buffer_memory.py"], "used_names": ["BufferMemory"], "enclosing_function": "test_buffer_kv_memory", "extracted_code": "# Source: autochain/memory/buffer_memory.py\nclass BufferMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if not key:\n return self.kv_memory\n\n return self.kv_memory.get(key, default)\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.kv_memory = {}", "n_imports_parsed": 2, "n_files_resolved": 2, "n_chars_extracted": 1215}, "tests/memory/test_buffer_memory.py::15": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/buffer_memory.py"], "used_names": ["BufferMemory"], "enclosing_function": "test_buffer_kv_memory", "extracted_code": "# Source: autochain/memory/buffer_memory.py\nclass BufferMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if not key:\n return self.kv_memory\n\n return self.kv_memory.get(key, default)\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.kv_memory = {}", "n_imports_parsed": 2, "n_files_resolved": 2, "n_chars_extracted": 1215}, "tests/memory/test_long_term_memory.py::15": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "LongTermMemory"], "enclosing_function": "test_long_term_kv_memory_chromadb", "extracted_code": "# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory = {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. 
Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3689}, "tests/tools/test_simple_handoff.py::7": {"resolved_imports": ["autochain/tools/simple_handoff/tool.py"], "used_names": ["HandOffToAgent"], "enclosing_function": "test_simple_handoff", "extracted_code": "# Source: autochain/tools/simple_handoff/tool.py\nclass HandOffToAgent(Tool):\n name = \"Hand off\"\n description = \"Hand off to a human agent\"\n handoff_msg = \"Let me hand you off to an agent now\"\n\n def _run(self, *args: Any, **kwargs: Any) -> str:\n return self.handoff_msg", "n_imports_parsed": 1, "n_files_resolved": 1, "n_chars_extracted": 287}, "tests/tools/test_base_tool.py::28": {"resolved_imports": ["autochain/tools/base.py"], "used_names": ["Tool"], "enclosing_function": "test_tool_name_override", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert 
tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 3566}, "tests/models/test_chat_openai.py::44": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["ChatOpenAI", "LLMResult", "UserMessage", "os"], "enclosing_function": "test_chat_completion", "extracted_code": "# Source: autochain/agent/message.py\nclass UserMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the human.\"\"\"\n\n example: bool = False\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"user\"\n\n\n# Source: autochain/models/base.py\nclass LLMResult(BaseModel):\n \"\"\"Class that contains all relevant information for an LLM Result.\"\"\"\n\n generations: List[Generation]\n \"\"\"List of the things generated. This is List[List[]] because\n each input could have multiple generations.\"\"\"\n llm_output: Optional[dict] = None\n \"\"\"For arbitrary LLM provider specific output.\"\"\"\n\n\n# Source: autochain/models/chat_openai.py\nclass ChatOpenAI(BaseLanguageModel):\n \"\"\"Wrapper around OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. 
code-block:: python\n\n from autochain.models.chat_openai import ChatOpenAI\n openai = ChatOpenAI()\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0\n \"\"\"What sampling temperature to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n openai_api_key: Optional[str] = None\n openai_organization: Optional[str] = None\n api_type: Optional[str] = None\n \"\"\"OpenAI API type, it can be `openai` or `azure`.\"\"\"\n api_base: Optional[str] = None\n \"\"\"The OpenAI API base url or Azure OpenAI API base url.\"\"\"\n azure_api_version: Optional[str] = None\n \"\"\"Azure API version.\"\"\"\n azure_deployment_name: Optional[str] = None\n \"\"\"Azure deployment name.\"\"\"\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None\n \"\"\"Timeout for requests to OpenAI completion API. Default is 600 seconds.\"\"\"\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n # TODO: support streaming\n # streaming: bool = False\n # \"\"\"Whether to stream the results or not.\"\"\"\n # n: int = 1\n # \"\"\"Number of chat completions to generate for each prompt.\"\"\"\n max_tokens: Optional[int] = None\n \"\"\"Maximum number of tokens to generate.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.ignore\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n openai_api_type = os.environ.get(\"OPENAI_API_TYPE\", \"open_ai\")\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", None)\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n values[\"api_key\"] = openai.api_key = openai_api_key\n values[\"api_type\"] = openai.api_type = openai_api_type\n if openai_api_base:\n values[\"api_base\"] = openai.api_base = openai_api_base\n if openai_api_type == \"azure\":\n values[\"azure_api_version\"] = openai.api_version = os.environ.get(\"OPENAI_API_VERSION\", \"2023-05-15\")\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. 
Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n # if values[\"n\"] < 1:\n # raise ValueError(\"n must be at least 1.\")\n # if values[\"n\"] > 1 and values[\"streaming\"]:\n # raise ValueError(\"n must be 1 when streaming.\")\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n message_dicts, function_dicts, params = self._create_message_dicts(\n messages, functions, stop\n )\n\n generation_param = {\n \"messages\": message_dicts,\n **params,\n }\n if len(function_dicts) > 0:\n generation_param[\"functions\"] = function_dicts\n\n response = self.generate_with_retry(**generation_param)\n return self._create_llm_result(response)\n\n def _create_message_dicts(\n self,\n messages: List[BaseMessage],\n tools: Optional[List[Tool]],\n stop: Optional[List[str]],\n ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], Dict[str, Any]]:\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if self.azure_deployment_name and self.api_type == \"azure\":\n params[\"engine\"] = self.azure_deployment_name\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n message_dicts = [convert_message_to_dict(m) for m in messages]\n function_dicts = []\n if tools:\n function_dicts = [convert_tool_to_dict(t) for t in tools]\n return message_dicts, function_dicts, params\n\n def _create_llm_result(self, response: Mapping[str, Any]) -> LLMResult:\n generations = []\n for res in response[\"choices\"]:\n message = convert_dict_to_message(res[\"message\"])\n gen = Generation(message=message)\n generations.append(gen)\n llm_output = {\"token_usage\": response[\"usage\"], \"model_name\": self.model_name}\n result = LLMResult(generations=generations, llm_output=llm_output)\n return result", "n_imports_parsed": 7, "n_files_resolved": 4, "n_chars_extracted": 6095}, "tests/tools/test_pinecone_tool.py::22": {"resolved_imports": ["autochain/tools/internal_search/pinecone_tool.py"], "used_names": ["DummyEncoder", "PineconeDoc", "PineconeSearch"], "enclosing_function": "test_pinecone_search", "extracted_code": "# Source: autochain/tools/internal_search/pinecone_tool.py\nclass PineconeDoc:\n doc: str\n vector: List[float] = None\n id: str = field(default_factory=lambda: str(uuid.uuid1()))\n\nclass PineconeSearch(Tool, BaseSearchTool):\n \"\"\"\n Use Pinecone as the internal search tool\n \"\"\"\n\n docs: List[PineconeDoc]\n index_name: str = \"index\"\n index: Optional[Any] = None\n dimension: int = 8\n metric: str = \"euclidean\"\n encoder: BaseLanguageModel = None # such as OpenAIAdaEncoder\n id2doc: Dict[str, str] = {}\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n pinecone.create_index(\n self.index_name, dimension=self.dimension, metric=self.metric\n )\n self.index = pinecone.Index(self.index_name)\n\n self.add_docs(self.docs)\n\n def _encode(self, doc: PineconeDoc) -> None:\n if not doc.vector and self.encoder:\n # TODO: encoder over batches\n doc.vector = self.encoder.encode([doc.doc]).embeddings[0]\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n include_values: bool = False,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_response: QueryResponse) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_response.get(\"matches\", [])\n if len(documents) == 0:\n 
return \"\"\n\n return \"\\n\".join(\n [\n f\"Doc {i}: {self.id2doc[doc['id']]}\"\n for i, doc in enumerate(documents)\n ]\n )\n\n encoding = self.encoder.encode([query]).embeddings[0]\n\n response: QueryResponse = self.index.query(\n vector=encoding, top_k=top_k, include_values=include_values\n )\n return _format_output(response)\n\n def add_docs(self, docs: List[PineconeDoc], **kwargs):\n if not len(docs):\n return\n\n for doc in docs:\n self._encode(doc)\n self.id2doc[doc.id] = doc.doc\n\n self.index.upsert([(d.id, d.vector) for d in docs])\n\n def clear_index(self):\n pinecone.delete_index(self.index_name)\n pinecone.create_index(\n self.index_name, dimension=self.dimension, metric=self.metric\n )\n self.index = pinecone.Index(self.index_name)", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 2389}, "tests/tools/test_lancedb_tool.py::14": {"resolved_imports": ["autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["DummyEncoder", "LanceDBDoc", "LanceDBSeach"], "enclosing_function": "test_lancedb_search", "extracted_code": "# Source: autochain/tools/internal_search/lancedb_tool.py\nclass LanceDBDoc:\n doc: str\n vector: List[float] = None\n\nclass LanceDBSeach(Tool, BaseSearchTool):\n \"\"\"\n Use LanceDB as the internal search tool\n\n LanceDB is a vector database that supports vector search.\n\n Args:\n uri: the uri of the database. Default to \"lancedb\"\n table_name: the name of the table. Default to \"table\"\n metric: the metric used for vector search. Default to \"cosine\"\n encoder: the encoder used to encode the documents. Default to None\n docs: the documents to be indexed. Default to None\n \"\"\"\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n docs: List[LanceDBDoc]\n uri: str = \"lancedb\"\n table_name: str = \"table\"\n metric: str = \"cosine\"\n encoder: BaseLanguageModel = None\n db: lancedb.db.DBConnection = None\n table: lancedb.table.Table = None\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.db = lancedb.connect(self.uri)\n if self.docs:\n self._encode_docs(self.docs)\n self._create_table(self.docs)\n \n def _create_table(self, docs: List[LanceDBDoc]) -> None:\n self.table = self.db.create_table(self.table_name, self._docs_to_dataframe(docs), mode=\"overwrite\")\n\n def _encode_docs(self, docs: List[LanceDBDoc]) -> None:\n for doc in docs:\n if not doc.vector:\n if not self.encoder:\n raise ValueError(\"Encoder is not provided for encoding docs\")\n doc.vector = self.encoder.encode([doc.doc]).embeddings[0]\n \n def _docs_to_dataframe(self, docs: List[LanceDBDoc]) -> pd.DataFrame:\n return pd.DataFrame(\n [\n {\"doc\": doc.doc, \"vector\": doc.vector}\n for doc in docs\n ]\n )\n \n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n if self.table is None:\n return \"\"\n\n embeddings = self.encoder.encode([query]).embeddings[0]\n result = self.table.search(embeddings).limit(top_k).to_df()[\"doc\"].to_list()\n\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(result)])\n\n def add_docs(self, docs: List[LanceDBDoc], **kwargs):\n if not len(docs):\n return\n\n self._encode_docs(docs)\n self.table.add(self._docs_to_dataframe(docs)) if self.table else self._create_table(docs)\n \n def clear_index(self):\n if self.table_name in self.db.table_names():\n self.db.drop_table(self.table_name)\n self.table = None", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 2713}, 
"tests/tools/test_lancedb_tool.py::19": {"resolved_imports": ["autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["DummyEncoder", "LanceDBDoc", "LanceDBSeach"], "enclosing_function": "test_lancedb_search", "extracted_code": "# Source: autochain/tools/internal_search/lancedb_tool.py\nclass LanceDBDoc:\n doc: str\n vector: List[float] = None\n\nclass LanceDBSeach(Tool, BaseSearchTool):\n \"\"\"\n Use LanceDB as the internal search tool\n\n LanceDB is a vector database that supports vector search.\n\n Args:\n uri: the uri of the database. Default to \"lancedb\"\n table_name: the name of the table. Default to \"table\"\n metric: the metric used for vector search. Default to \"cosine\"\n encoder: the encoder used to encode the documents. Default to None\n docs: the documents to be indexed. Default to None\n \"\"\"\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n docs: List[LanceDBDoc]\n uri: str = \"lancedb\"\n table_name: str = \"table\"\n metric: str = \"cosine\"\n encoder: BaseLanguageModel = None\n db: lancedb.db.DBConnection = None\n table: lancedb.table.Table = None\n def __init__(self, **kwargs) -> None:\n super().__init__(**kwargs)\n self.db = lancedb.connect(self.uri)\n if self.docs:\n self._encode_docs(self.docs)\n self._create_table(self.docs)\n \n def _create_table(self, docs: List[LanceDBDoc]) -> None:\n self.table = self.db.create_table(self.table_name, self._docs_to_dataframe(docs), mode=\"overwrite\")\n\n def _encode_docs(self, docs: List[LanceDBDoc]) -> None:\n for doc in docs:\n if not doc.vector:\n if not self.encoder:\n raise ValueError(\"Encoder is not provided for encoding docs\")\n doc.vector = self.encoder.encode([doc.doc]).embeddings[0]\n \n def _docs_to_dataframe(self, docs: List[LanceDBDoc]) -> pd.DataFrame:\n return pd.DataFrame(\n [\n {\"doc\": doc.doc, \"vector\": doc.vector}\n for doc in docs\n ]\n )\n \n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n if self.table is None:\n return \"\"\n\n embeddings = self.encoder.encode([query]).embeddings[0]\n result = self.table.search(embeddings).limit(top_k).to_df()[\"doc\"].to_list()\n\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(result)])\n\n def add_docs(self, docs: List[LanceDBDoc], **kwargs):\n if not len(docs):\n return\n\n self._encode_docs(docs)\n self.table.add(self._docs_to_dataframe(docs)) if self.table else self._create_table(docs)\n \n def clear_index(self):\n if self.table_name in self.db.table_names():\n self.db.drop_table(self.table_name)\n self.table = None", "n_imports_parsed": 2, "n_files_resolved": 1, "n_chars_extracted": 2713}, "tests/memory/test_redis_memory.py::21": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/redis_memory.py"], "used_names": ["MagicMock", "Redis", "RedisMemory", "pickle"], "enclosing_function": "test_redis_kv_memory", "extracted_code": "# Source: autochain/memory/redis_memory.py\nclass RedisMemory(BaseMemory):\n \"\"\"Store conversation info in redis memory.\"\"\"\n\n expire_time: int = ONE_HOUR\n redis_key_prefix: str\n redis_client: Redis\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Get the key's corresponding value from redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = 
self.redis_client.get(key)\n if not pickled:\n return default\n return pickle.loads(pickled)\n\n def load_conversation(self, **kwargs: Dict[str, Any]) -> ChatMessageHistory:\n \"\"\"Return chat message history.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n return ChatMessageHistory(messages=self.load_memory(redis_key, []))\n\n def save_memory(self, key: str, value: Any) -> None:\n \"\"\"Save the key value pair to redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = pickle.dumps(value)\n self.redis_client.set(key, pickled, ex=self.expire_time)\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to redis.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n pickled = self.redis_client.get(redis_key)\n if pickled:\n messages: list[BaseMessage] = pickle.loads(pickled)\n else:\n messages = []\n if message_type == MessageType.AIMessage:\n messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n messages.append(FunctionMessage(content=message, name=kwargs[\"name\"]))\n elif message_type == MessageType.SystemMessage:\n messages.append(SystemMessage(content=message))\n else:\n raise ValueError(f\"Unsupported message type: {message_type}\")\n self.save_memory(redis_key, messages)\n\n def clear(self) -> None:\n \"\"\"Clear redis memory.\"\"\"\n for key in self.redis_client.keys(f\"{self.redis_key_prefix}:*\"):\n self.redis_client.delete(key)", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 2538}, "tests/models/test_chat_openai.py::55": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["Tool", "convert_tool_to_dict"], "enclosing_function": "test_convert_tool_to_dict", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if 
len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output\n\n\n# Source: autochain/models/chat_openai.py\ndef convert_tool_to_dict(tool: Tool):\n \"\"\"Convert tool into function parameter for openai\"\"\"\n inspection = inspect.getfullargspec(tool.func)\n arg_description = tool.arg_description or {}\n\n def _type_to_string(t: type) -> str:\n prog = re.compile(r\"<class '(\\w+)'>\")\n cls = prog.findall(str(t))\n\n primary_type_map = {\"str\": \"string\"}\n\n if len(cls) > 0:\n cls_name = cls[0].split(\".\")[-1]\n return primary_type_map.get(cls_name, cls_name)\n\n if issubclass(t, enum.Enum):\n return \"enum\"\n\n return str(t)\n\n def _format_property(t: type, arg_desp: str):\n p = {\"type\": _type_to_string(t)}\n if arg_desp:\n p[\"description\"] = arg_desp\n\n return p\n\n arg_annotations = inspection.annotations\n if arg_annotations:\n properties = {\n arg: _format_property(t, arg_description.get(arg))\n for arg, t in arg_annotations.items()\n }\n else:\n properties = {\n arg: _format_property(str, arg_description.get(arg))\n for arg in inspection.args\n }\n\n default_args = inspection.defaults or []\n required_args = inspection.args[: len(inspection.args) - len(default_args)]\n\n output = {\n \"name\": tool.name,\n \"description\": tool.description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": properties,\n \"required\": required_args,\n },\n }\n\n return output", "n_imports_parsed": 7, "n_files_resolved": 4, "n_chars_extracted": 5104}, "tests/memory/test_redis_memory.py::18": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/redis_memory.py"], "used_names": ["MagicMock", "Redis", "RedisMemory", "pickle"], "enclosing_function": "test_redis_kv_memory", "extracted_code": "# Source: autochain/memory/redis_memory.py\nclass RedisMemory(BaseMemory):\n \"\"\"Store conversation info in redis memory.\"\"\"\n\n expire_time: int = ONE_HOUR\n redis_key_prefix: str\n redis_client: Redis\n\n 
class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Get the key's corresponding value from redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = self.redis_client.get(key)\n if not pickled:\n return default\n return pickle.loads(pickled)\n\n def load_conversation(self, **kwargs: Dict[str, Any]) -> ChatMessageHistory:\n \"\"\"Return chat message history.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n return ChatMessageHistory(messages=self.load_memory(redis_key, []))\n\n def save_memory(self, key: str, value: Any) -> None:\n \"\"\"Save the key value pair to redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = pickle.dumps(value)\n self.redis_client.set(key, pickled, ex=self.expire_time)\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to redis.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n pickled = self.redis_client.get(redis_key)\n if pickled:\n messages: list[BaseMessage] = pickle.loads(pickled)\n else:\n messages = []\n if message_type == MessageType.AIMessage:\n messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n messages.append(FunctionMessage(content=message, name=kwargs[\"name\"]))\n elif message_type == MessageType.SystemMessage:\n messages.append(SystemMessage(content=message))\n else:\n raise ValueError(f\"Unsupported message type: {message_type}\")\n self.save_memory(redis_key, messages)\n\n def clear(self) -> None:\n \"\"\"Clear redis memory.\"\"\"\n for key in self.redis_client.keys(f\"{self.redis_key_prefix}:*\"):\n self.redis_client.delete(key)", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 2538}, "tests/models/test_chat_openai.py::104": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["Tool", "convert_tool_to_dict"], "enclosing_function": "test_convert_tool_to_dict", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if 
func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output\n\n\n# Source: autochain/models/chat_openai.py\ndef convert_tool_to_dict(tool: Tool):\n \"\"\"Convert tool into function parameter for openai\"\"\"\n inspection = inspect.getfullargspec(tool.func)\n arg_description = tool.arg_description or {}\n\n def _type_to_string(t: type) -> str:\n prog = re.compile(r\"<class '(\\w+)'>\")\n cls = prog.findall(str(t))\n\n primary_type_map = {\"str\": \"string\"}\n\n if len(cls) > 0:\n cls_name = cls[0].split(\".\")[-1]\n return primary_type_map.get(cls_name, cls_name)\n\n if issubclass(t, enum.Enum):\n return \"enum\"\n\n return str(t)\n\n def _format_property(t: type, arg_desp: str):\n p = {\"type\": _type_to_string(t)}\n if arg_desp:\n p[\"description\"] = arg_desp\n\n return p\n\n arg_annotations = inspection.annotations\n if arg_annotations:\n properties = {\n arg: _format_property(t, arg_description.get(arg))\n for arg, t in arg_annotations.items()\n }\n else:\n properties = {\n arg: _format_property(str, arg_description.get(arg))\n for arg in inspection.args\n }\n\n default_args = inspection.defaults or []\n required_args = inspection.args[: len(inspection.args) - len(default_args)]\n\n output = {\n \"name\": tool.name,\n \"description\": tool.description,\n \"parameters\": {\n \"type\": \"object\",\n \"properties\": properties,\n \"required\": required_args,\n },\n }\n\n return output", "n_imports_parsed": 7, "n_files_resolved": 4, "n_chars_extracted": 5104}, "tests/memory/test_buffer_memory.py::24": {"resolved_imports": ["autochain/agent/message.py", 
"autochain/memory/buffer_memory.py"], "used_names": ["BufferMemory", "MessageType"], "enclosing_function": "test_buffer_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\n\n# Source: autochain/memory/buffer_memory.py\nclass BufferMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if not key:\n return self.kv_memory\n\n return self.kv_memory.get(key, default)\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.kv_memory = {}", "n_imports_parsed": 2, "n_files_resolved": 2, "n_chars_extracted": 1408}, "tests/memory/test_long_term_memory.py::21": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "LongTermMemory"], "enclosing_function": "test_long_term_kv_memory_chromadb", "extracted_code": "# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory 
= {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3689}, "tests/models/test_openai_ada_encoder.py::45": {"resolved_imports": ["autochain/models/ada_embedding.py", "autochain/models/base.py"], "used_names": ["EmbeddingResult", "OpenAIAdaEncoder", "os"], "enclosing_function": "test_ada_encoder", "extracted_code": "# Source: autochain/models/ada_embedding.py\nclass OpenAIAdaEncoder(BaseLanguageModel):\n \"\"\"\n Text encoder using OpenAI Model\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"text-embedding-ada-002\"\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n openai.api_key = openai_api_key\n try:\n values[\"client\"] = openai.Embedding\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. 
Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n pass\n\n def encode(self, texts: List[str]) -> EmbeddingResult:\n def _format_response(texts, resp) -> EmbeddingResult:\n embeddings = [d.get(\"embedding\") for d in resp.get(\"data\", [])]\n return EmbeddingResult(texts=texts, embeddings=embeddings)\n\n params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"input\": texts,\n **self._default_params,\n }\n\n response = self.generate_with_retry(**params)\n return _format_response(texts=texts, resp=response)\n\n\n# Source: autochain/models/base.py\nclass EmbeddingResult(BaseModel):\n texts: List[str]\n embeddings: List[List[float]]", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 1930}, "tests/memory/test_redis_memory.py::45": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/redis_memory.py"], "used_names": ["AIMessage", "MagicMock", "MessageType", "Redis", "RedisMemory", "UserMessage", "pickle"], "enclosing_function": "test_redis_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\nclass UserMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the human.\"\"\"\n\n example: bool = False\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"user\"\n\nclass AIMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the AI.\"\"\"\n\n example: bool = False\n function_call: Dict[str, Any] = {}\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"ai\"\n\n\n# Source: autochain/memory/redis_memory.py\nclass RedisMemory(BaseMemory):\n \"\"\"Store conversation info in redis memory.\"\"\"\n\n expire_time: int = ONE_HOUR\n redis_key_prefix: str\n redis_client: Redis\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n arbitrary_types_allowed = True\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Get the key's corresponding value from redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = self.redis_client.get(key)\n if not pickled:\n return default\n return pickle.loads(pickled)\n\n def load_conversation(self, **kwargs: Dict[str, Any]) -> ChatMessageHistory:\n \"\"\"Return chat message history.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n return ChatMessageHistory(messages=self.load_memory(redis_key, []))\n\n def save_memory(self, key: str, value: Any) -> None:\n \"\"\"Save the key value pair to redis.\"\"\"\n if not key.startswith(self.redis_key_prefix):\n key = self.redis_key_prefix + f\":{key}\"\n pickled = pickle.dumps(value)\n self.redis_client.set(key, pickled, ex=self.expire_time)\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to redis.\"\"\"\n redis_key = self.redis_key_prefix + f\":{ChatMessageHistory.__name__}\"\n pickled = self.redis_client.get(redis_key)\n if pickled:\n messages: list[BaseMessage] = pickle.loads(pickled)\n else:\n messages = []\n if message_type == 
MessageType.AIMessage:\n messages.append(AIMessage(content=message))\n elif message_type == MessageType.UserMessage:\n messages.append(UserMessage(content=message))\n elif message_type == MessageType.FunctionMessage:\n messages.append(FunctionMessage(content=message, name=kwargs[\"name\"]))\n elif message_type == MessageType.SystemMessage:\n messages.append(SystemMessage(content=message))\n else:\n raise ValueError(f\"Unsupported message type: {message_type}\")\n self.save_memory(redis_key, messages)\n\n def clear(self) -> None:\n \"\"\"Clear redis memory.\"\"\"\n for key in self.redis_client.keys(f\"{self.redis_key_prefix}:*\"):\n self.redis_client.delete(key)", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 3239}, "tests/models/test_chat_openai.py::43": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["ChatOpenAI", "LLMResult", "UserMessage", "os"], "enclosing_function": "test_chat_completion", "extracted_code": "# Source: autochain/agent/message.py\nclass UserMessage(BaseMessage):\n \"\"\"Type of message that is spoken by the human.\"\"\"\n\n example: bool = False\n\n @property\n def type(self) -> str:\n \"\"\"Type of the message, used for serialization.\"\"\"\n return \"user\"\n\n\n# Source: autochain/models/base.py\nclass LLMResult(BaseModel):\n \"\"\"Class that contains all relevant information for an LLM Result.\"\"\"\n\n generations: List[Generation]\n \"\"\"List of the things generated. This is List[List[]] because\n each input could have multiple generations.\"\"\"\n llm_output: Optional[dict] = None\n \"\"\"For arbitrary LLM provider specific output.\"\"\"\n\n\n# Source: autochain/models/chat_openai.py\nclass ChatOpenAI(BaseLanguageModel):\n \"\"\"Wrapper around OpenAI Chat large language models.\n\n To use, you should have the ``openai`` python package installed, and the\n environment variable ``OPENAI_API_KEY`` set with your API key.\n\n Any parameters that are valid to be passed to the openai.create call can be passed\n in, even if not explicitly saved on this class.\n\n Example:\n .. code-block:: python\n\n from autochain.models.chat_openai import ChatOpenAI\n openai = ChatOpenAI()\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"gpt-3.5-turbo\"\n \"\"\"Model name to use.\"\"\"\n temperature: float = 0\n \"\"\"What sampling temperature to use.\"\"\"\n model_kwargs: Dict[str, Any] = Field(default_factory=dict)\n \"\"\"Holds any model parameters valid for `create` call not explicitly specified.\"\"\"\n openai_api_key: Optional[str] = None\n openai_organization: Optional[str] = None\n api_type: Optional[str] = None\n \"\"\"OpenAI API type, it can be `openai` or `azure`.\"\"\"\n api_base: Optional[str] = None\n \"\"\"The OpenAI API base url or Azure OpenAI API base url.\"\"\"\n azure_api_version: Optional[str] = None\n \"\"\"Azure API version.\"\"\"\n azure_deployment_name: Optional[str] = None\n \"\"\"Azure deployment name.\"\"\"\n request_timeout: Optional[Union[float, Tuple[float, float]]] = None\n \"\"\"Timeout for requests to OpenAI completion API. 
Default is 600 seconds.\"\"\"\n max_retries: int = 6\n \"\"\"Maximum number of retries to make when generating.\"\"\"\n # TODO: support streaming\n # streaming: bool = False\n # \"\"\"Whether to stream the results or not.\"\"\"\n # n: int = 1\n # \"\"\"Number of chat completions to generate for each prompt.\"\"\"\n max_tokens: Optional[int] = None\n \"\"\"Maximum number of tokens to generate.\"\"\"\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.ignore\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n openai_api_type = os.environ.get(\"OPENAI_API_TYPE\", \"open_ai\")\n openai_api_base = os.environ.get(\"OPENAI_API_BASE\", None)\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n values[\"api_key\"] = openai.api_key = openai_api_key\n values[\"api_type\"] = openai.api_type = openai_api_type\n if openai_api_base:\n values[\"api_base\"] = openai.api_base = openai_api_base\n if openai_api_type == \"azure\":\n values[\"azure_api_version\"] = openai.api_version = os.environ.get(\"OPENAI_API_VERSION\", \"2023-05-15\")\n try:\n values[\"client\"] = openai.ChatCompletion\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n # if values[\"n\"] < 1:\n # raise ValueError(\"n must be at least 1.\")\n # if values[\"n\"] > 1 and values[\"streaming\"]:\n # raise ValueError(\"n must be 1 when streaming.\")\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n message_dicts, function_dicts, params = self._create_message_dicts(\n messages, functions, stop\n )\n\n generation_param = {\n \"messages\": message_dicts,\n **params,\n }\n if len(function_dicts) > 0:\n generation_param[\"functions\"] = function_dicts\n\n response = self.generate_with_retry(**generation_param)\n return self._create_llm_result(response)\n\n def _create_message_dicts(\n self,\n messages: List[BaseMessage],\n tools: Optional[List[Tool]],\n stop: Optional[List[str]],\n ) -> Tuple[List[Dict[str, Any]], List[Dict[str, Any]], Dict[str, Any]]:\n params: Dict[str, Any] = {**{\"model\": self.model_name}, **self._default_params}\n if self.azure_deployment_name and self.api_type == \"azure\":\n params[\"engine\"] = self.azure_deployment_name\n if stop is not None:\n if \"stop\" in params:\n raise ValueError(\"`stop` found in both the input and default params.\")\n params[\"stop\"] = stop\n message_dicts = [convert_message_to_dict(m) for m in messages]\n function_dicts = []\n if tools:\n function_dicts = [convert_tool_to_dict(t) for t in tools]\n return message_dicts, function_dicts, params\n\n def _create_llm_result(self, response: Mapping[str, Any]) -> LLMResult:\n generations = []\n for res in response[\"choices\"]:\n message = convert_dict_to_message(res[\"message\"])\n gen = Generation(message=message)\n generations.append(gen)\n llm_output = {\"token_usage\": response[\"usage\"], \"model_name\": self.model_name}\n result = LLMResult(generations=generations, llm_output=llm_output)\n return result", "n_imports_parsed": 7, "n_files_resolved": 4, 
"n_chars_extracted": 6095}, "tests/models/test_openai_ada_encoder.py::44": {"resolved_imports": ["autochain/models/ada_embedding.py", "autochain/models/base.py"], "used_names": ["EmbeddingResult", "OpenAIAdaEncoder", "os"], "enclosing_function": "test_ada_encoder", "extracted_code": "# Source: autochain/models/ada_embedding.py\nclass OpenAIAdaEncoder(BaseLanguageModel):\n \"\"\"\n Text encoder using OpenAI Model\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"text-embedding-ada-002\"\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n openai.api_key = openai_api_key\n try:\n values[\"client\"] = openai.Embedding\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n pass\n\n def encode(self, texts: List[str]) -> EmbeddingResult:\n def _format_response(texts, resp) -> EmbeddingResult:\n embeddings = [d.get(\"embedding\") for d in resp.get(\"data\", [])]\n return EmbeddingResult(texts=texts, embeddings=embeddings)\n\n params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"input\": texts,\n **self._default_params,\n }\n\n response = self.generate_with_retry(**params)\n return _format_response(texts=texts, resp=response)\n\n\n# Source: autochain/models/base.py\nclass EmbeddingResult(BaseModel):\n texts: List[str]\n embeddings: List[List[float]]", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 1930}, "tests/memory/test_long_term_memory.py::32": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "LongTermMemory", "MessageType"], "enclosing_function": "test_buffer_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\n\n# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return 
self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory = {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3882}, "tests/models/test_chat_openai.py::71": {"resolved_imports": ["autochain/tools/base.py", "autochain/agent/message.py", "autochain/models/base.py", "autochain/models/chat_openai.py"], "used_names": ["Tool", "convert_tool_to_dict"], "enclosing_function": "test_convert_tool_to_dict", "extracted_code": "# Source: autochain/tools/base.py\nclass Tool(ABC, BaseModel):\n \"\"\"Interface AutoChain tools must implement.\"\"\"\n\n name: Optional[str] = None\n \"\"\"The unique name of the tool that clearly communicates its purpose.\n If not provided, it will be named after the func name.\n The more descriptive it is, the easier it would be for model to call the right tool\n \"\"\"\n\n description: str\n \"\"\"Used to tell the model how/when/why to use the tool.\n You can provide few-shot examples as a part of the description.\n \"\"\"\n\n arg_description: Optional[Dict[str, Any]] = None\n \"\"\"Dictionary of arg name and description when using OpenAIFunctionsAgent to provide \n additional argument information\"\"\"\n\n args_schema: Optional[Type[BaseModel]] = None\n \"\"\"Pydantic model class to validate and parse the tool's input 
arguments.\"\"\"\n\n func: Union[Callable[..., str], None] = None\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n func = values.get(\"func\")\n if func and not values.get(\"name\"):\n values[\"name\"] = values[\"func\"].__name__\n\n # check if all args from arg_description exist in func args\n if values.get(\"arg_description\") and func:\n inspection = inspect.getfullargspec(func)\n override_args = set(values[\"arg_description\"].keys())\n args = set(inspection.args)\n override_without_args = override_args - args\n if len(override_without_args) > 0:\n raise ValueError(\n f\"Provide arg description for not existed args: {override_without_args}\"\n )\n\n return values\n\n def _parse_input(\n self,\n tool_input: Union[str, Dict],\n ) -> Union[str, Dict[str, Any]]:\n \"\"\"Convert tool input to pydantic model.\"\"\"\n input_args = self.args_schema\n if isinstance(tool_input, str):\n if input_args is not None:\n key_ = next(iter(input_args.__fields__.keys()))\n input_args.validate({key_: tool_input})\n return tool_input\n else:\n if input_args is not None:\n result = input_args.parse_obj(tool_input)\n return {k: v for k, v in result.dict().items() if k in tool_input}\n return tool_input\n\n def _to_args_and_kwargs(self, tool_input: Union[str, Dict]) -> Tuple[Tuple, Dict]:\n # For backwards compatibility, if run_input is a string,\n # pass as a positional argument.\n if isinstance(tool_input, str):\n return (tool_input,), {}\n else:\n return (), tool_input\n\n def _run(\n self,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n return self.func(*args, **kwargs)\n\n def run(\n self,\n tool_input: Union[str, Dict] = \"\",\n **kwargs: Any,\n ) -> str:\n \"\"\"Run the tool.\"\"\"\n try:\n parsed_input = self._parse_input(tool_input)\n except ValueError as e:\n # return exception as tool output\n raise ToolRunningError(message=f\"Tool input args value Error: {e}\") from e\n\n try:\n tool_args, tool_kwargs = self._to_args_and_kwargs(parsed_input)\n tool_output = self._run(*tool_args, **tool_kwargs)\n except (Exception, KeyboardInterrupt) as e:\n raise ToolRunningError(\n message=f\"Failed to run tool {self.name} due to {e}\"\n ) from e\n\n return tool_output\n\n\n# Source: autochain/models/chat_openai.py\ndef convert_tool_to_dict(tool: Tool):\n \"\"\"Convert tool into function parameter for openai\"\"\"\n inspection = inspect.getfullargspec(tool.func)\n arg_description = tool.arg_description or {}\n\n def _type_to_string(t: type) -> str:\n prog = re.compile(r\"<class '(\\w+)'>\")\n cls = prog.findall(str(t))\n\n primary_type_map = {\"str\": \"string\"}\n\n if len(cls) > 0:\n cls_name = cls[0].split(\".\")[-1]\n return primary_type_map.get(cls_name, cls_name)\n\n if issubclass(t, enum.Enum):\n return \"enum\"\n\n return str(t)\n\n def _format_property(t: type, arg_desp: str):\n p = {\"type\": _type_to_string(t)}\n if arg_desp:\n p[\"description\"] = arg_desp\n\n return p\n\n arg_annotations = inspection.annotations\n if arg_annotations:\n properties = {\n arg: _format_property(t, arg_description.get(arg))\n for arg, t in arg_annotations.items()\n }\n else:\n properties = {\n arg: _format_property(str, arg_description.get(arg))\n for arg in inspection.args\n }\n\n default_args = inspection.defaults or []\n required_args = inspection.args[: len(inspection.args) - len(default_args)]\n\n output = {\n \"name\": tool.name,\n \"description\": tool.description,\n \"parameters\": {\n \"type\": \"object\",\n 
\"properties\": properties,\n \"required\": required_args,\n },\n }\n\n return output", "n_imports_parsed": 7, "n_files_resolved": 4, "n_chars_extracted": 5104}, "tests/memory/test_long_term_memory.py::36": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/long_term_memory.py", "autochain/tools/internal_search/chromadb_tool.py", "autochain/tools/internal_search/pinecone_tool.py", "autochain/tools/internal_search/lancedb_tool.py"], "used_names": ["ChromaDBSearch", "LongTermMemory", "MessageType"], "enclosing_function": "test_buffer_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\n\n# Source: autochain/memory/long_term_memory.py\nclass LongTermMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n long_term_memory: BaseSearchTool = None\n\n class Config:\n keep_untouched = SEARCH_PROVIDERS\n\n def load_memory(\n self,\n key: Optional[str] = None,\n default: Optional[Any] = None,\n top_k: int = 1,\n **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if key in self.kv_memory:\n return self.kv_memory[key]\n\n # else try to retrieve from long term memory\n result = self.long_term_memory.run({\"query\": key, \"top_k\": top_k})\n return result or default\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n if (\n isinstance(value, list)\n and len(value) > 0\n and (isinstance(value[0], SEARCH_DOC_TYPES))\n ):\n self.long_term_memory.add_docs(docs=value)\n elif key:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.long_term_memory.clear_index()\n self.kv_memory = {}\n\n\n# Source: autochain/tools/internal_search/chromadb_tool.py\nclass ChromaDBSearch(Tool, BaseSearchTool):\n \"\"\"\n Use ChromaDB as internal search tool\n \"\"\"\n\n collection_name: str = \"index\"\n collection: Optional[Any] = None\n\n class Config:\n \"\"\"Configuration for this pydantic object.\"\"\"\n\n extra = Extra.forbid\n arbitrary_types_allowed = True\n\n def __init__(self, docs: List[ChromaDoc], **kwargs):\n super().__init__(**kwargs)\n client = chromadb.Client()\n\n collection = client.create_collection(self.collection_name)\n self.collection = collection\n\n # Add docs to the collection. Can also update and delete. 
Row-based API coming soon!\n self.add_docs(docs=docs)\n\n def _run(\n self,\n query: str,\n top_k: int = 2,\n *args: Any,\n **kwargs: Any,\n ) -> str:\n def _format_output(query_result: QueryResult) -> str:\n \"\"\"Only return the document since they are likely to be passed to prompt\"\"\"\n documents = query_result.get(\"documents\", [])\n if len(documents) == 0:\n return \"\"\n\n docs = documents[0]\n return \"\\n\".join([f\"Doc {i}: {doc}\" for i, doc in enumerate(docs)])\n\n result = self.collection.query(\n query_texts=[query],\n n_results=top_k,\n )\n return _format_output(result)\n\n def add_docs(self, docs: List[ChromaDoc], **kwargs):\n \"\"\"Add a list of documents to collection\"\"\"\n if docs:\n self.collection.add(\n documents=[d.doc for d in docs],\n # we embed for you, or bring your own\n metadatas=[d.metadata for d in docs],\n # filter on arbitrary metadata!\n ids=[d.id for d in docs], # must be unique for each doc\n )\n\n def clear_index(self):\n self.collection.delete()", "n_imports_parsed": 6, "n_files_resolved": 5, "n_chars_extracted": 3882}, "tests/models/test_openai_ada_encoder.py::43": {"resolved_imports": ["autochain/models/ada_embedding.py", "autochain/models/base.py"], "used_names": ["EmbeddingResult", "OpenAIAdaEncoder", "os"], "enclosing_function": "test_ada_encoder", "extracted_code": "# Source: autochain/models/ada_embedding.py\nclass OpenAIAdaEncoder(BaseLanguageModel):\n \"\"\"\n Text encoder using OpenAI Model\n \"\"\"\n\n client: Any #: :meta private:\n model_name: str = \"text-embedding-ada-002\"\n\n @root_validator()\n def validate_environment(cls, values: Dict) -> Dict:\n \"\"\"Validate that api key and python package exists in environment.\"\"\"\n openai_api_key = os.environ[\"OPENAI_API_KEY\"]\n try:\n import openai\n\n except ImportError:\n raise ValueError(\n \"Could not import openai python package. \"\n \"Please install it with `pip install openai`.\"\n )\n openai.api_key = openai_api_key\n try:\n values[\"client\"] = openai.Embedding\n except AttributeError:\n raise ValueError(\n \"`openai` has no `ChatCompletion` attribute, this is likely \"\n \"due to an old version of the openai package. 
Try upgrading it \"\n \"with `pip install --upgrade openai`.\"\n )\n return values\n\n def generate(\n self,\n messages: List[BaseMessage],\n functions: Optional[List[Tool]] = None,\n stop: Optional[List[str]] = None,\n ) -> LLMResult:\n pass\n\n def encode(self, texts: List[str]) -> EmbeddingResult:\n def _format_response(texts, resp) -> EmbeddingResult:\n embeddings = [d.get(\"embedding\") for d in resp.get(\"data\", [])]\n return EmbeddingResult(texts=texts, embeddings=embeddings)\n\n params: Dict[str, Any] = {\n \"model\": self.model_name,\n \"input\": texts,\n **self._default_params,\n }\n\n response = self.generate_with_retry(**params)\n return _format_response(texts=texts, resp=response)\n\n\n# Source: autochain/models/base.py\nclass EmbeddingResult(BaseModel):\n texts: List[str]\n embeddings: List[List[float]]", "n_imports_parsed": 5, "n_files_resolved": 2, "n_chars_extracted": 1930}, "tests/memory/test_buffer_memory.py::28": {"resolved_imports": ["autochain/agent/message.py", "autochain/memory/buffer_memory.py"], "used_names": ["BufferMemory", "MessageType"], "enclosing_function": "test_buffer_conversation_memory", "extracted_code": "# Source: autochain/agent/message.py\nclass MessageType(enum.Enum):\n UserMessage = enum.auto()\n AIMessage = enum.auto()\n SystemMessage = enum.auto()\n FunctionMessage = enum.auto()\n\n\n# Source: autochain/memory/buffer_memory.py\nclass BufferMemory(BaseMemory):\n \"\"\"Buffer for storing conversation memory and an in-memory kv store.\"\"\"\n\n conversation_history = ChatMessageHistory()\n kv_memory = {}\n\n def load_memory(\n self, key: Optional[str] = None, default: Optional[Any] = None, **kwargs\n ) -> Any:\n \"\"\"Return history buffer by key or all memories.\"\"\"\n if not key:\n return self.kv_memory\n\n return self.kv_memory.get(key, default)\n\n def load_conversation(self, **kwargs) -> ChatMessageHistory:\n \"\"\"Return history buffer and format it into a conversational string format.\"\"\"\n return self.conversation_history\n\n def save_memory(self, key: str, value: Any) -> None:\n self.kv_memory[key] = value\n\n def save_conversation(\n self, message: str, message_type: MessageType, **kwargs\n ) -> None:\n \"\"\"Save context from this conversation to buffer.\"\"\"\n self.conversation_history.save_message(\n message=message, message_type=message_type, **kwargs\n )\n\n def clear(self) -> None:\n \"\"\"Clear memory contents.\"\"\"\n self.conversation_history.clear()\n self.kv_memory = {}", "n_imports_parsed": 2, "n_files_resolved": 2, "n_chars_extracted": 1408}}} |