import json
import logging
from pathlib import Path

from langchain.chat_models import ChatOpenAI
from langchain.schema import SystemMessage

# OPEN_AI_MODEL is defined in the project's configuration; a placeholder value is assumed here.
OPEN_AI_MODEL = "gpt-4"


class DeepResearchWriterTool:
    """Writes a single markdown research summary from pre-collected topic notes."""

    def __init__(self) -> None:
        # Temperature 0 for deterministic, reproducible summaries.
        self.llm = ChatOpenAI(model=OPEN_AI_MODEL, temperature=0)

    def _execute(self, desired_output_format: str | None = None) -> str:
        # Debug fixtures: read inputs from sample_output/ and write the result locally.
        SAMPLE_DIR = Path("sample_output")
        USER_QUERY_FILE = Path("user_query.txt")
        TOPICS_FILE = Path("topics.json")
        SINGLE_FILE_OUTPUT_FILE = Path("SUMMARY_DEBUG_OUTPUT.md")
        assert self.llm
        self.llm.temperature = 0
        # user_query = self.resource_manager.read_file(USER_QUERY_FILE)
        # topics = self.resource_manager.read_file(TOPICS_FILE)
        with (SAMPLE_DIR / USER_QUERY_FILE).open("r") as f:
            user_query = f.read()
        with (SAMPLE_DIR / TOPICS_FILE).open("r") as f:
            topics = f.read()

        topics_str_list = []
        for topic in json.loads(topics):
            notes_file = SAMPLE_DIR / topic["notes_file"]
            with notes_file.open("r") as f:
                notes = f.read()
            # Topic fields: name, description, notes_file, relevant_because, researched.
            topic_str = f"""
Topic name: {topic["name"]}
Topic description: {topic["description"]}
Relevant because: {topic["relevant_because"]}
Notes: {notes}
"""
            topics_str_list.append(topic_str)
markdown_prompt = f"""
The user query is: {user_query}
###
Given the following topics and notes about the topic, write an article addressing the user query
the best you can. If there is an question, try to answer it. If the user query has incorrect
facts or assumptions, address that.
Start with a problem statement of some sort based on the user query, then follow up with a conclusion.
After the conclusion, explain how that conclusion was derived from the
topics researched. If needed, create a section for relevant topic, if it is important enough,
and explain how the topic contributes to the conclusion. You do not need to specifically mention
the conclusion when describing topics.
When you can, cite your sources
### The topics are:
{" # next topic # ".join(topics_str_list)}
# Reminder! The conclusion should be helpful and specific. If there are upper and lower bounds or circumstances where something
may be true or false, then define it. If you cannot, then identify further research needed to get there. Do not make anything up!
If you do not know why you know something, then do not mention it, or identify further research needed to confirm it.
Use inline citations.
Markdown file contents:
"""
        logging.warning(markdown_prompt)
        # content = self.llm.chat_completion([{"role": "system", "content": markdown_prompt}])[
        #     "content"
        # ]
        system_message_prompt = SystemMessage(content=markdown_prompt)
        response = self.llm([system_message_prompt])
        content = response.content
        print(content)
        with SINGLE_FILE_OUTPUT_FILE.open("w") as f:
            f.write(content)
        return f"Deep research completed! Check the resource manager for {SINGLE_FILE_OUTPUT_FILE} to view the result!"
if __name__ == "__main__":
    DeepResearchWriterTool()._execute("test")
# <FILESEP>
import torch
import torch.nn as nn
# from .utils import load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',