    messages.append({"role": "assistant", "content": example["assistant"]})

    # Fall back to prompt-level JSON enforcement when the model has no native JSON mode.
    force_json = self.output_json and not model_supports_json_output(self.model_name)

    def message_processor(msg):
        # msg["content"] = stringify_content(msg["content"])
        if isinstance(msg["content"], dict):
            msg["content"] = json.dumps(msg["content"])
        if msg["role"] == "user" and force_json:
            msg["content"] += "\nRespond with a valid JSON object."
        return msg

    messages += context.get_messages(
        include_fields=["role", "content"],
        processor=message_processor
    )

    if force_json and messages[-1]["role"] == "user":
        # Prefill technique:
        # https://docs.anthropic.com/en/docs/build-with-claude/prompt-engineering/prefill-claudes-response#example-maintaining-character-without-role-prompting
        messages.append({"role": "assistant", "content": "{"})

    logger.debug("LLM context:\n{}", self._messages_to_text(messages))
    response = litellm.completion(
        model=self.model_name,
        messages=messages,
        response_format={"type": "json_object"} if self.output_json else None,
        temperature=temperature,
        stream=stream
    )

    if stream:
        # Hand back a sentence-level stream; the preprocessor extracts each delta's text.
        return SentenceStream(response, preprocessor=lambda x: x.choices[0].delta.content)

    content = response.choices[0].message.content
    logger.debug("Response content: {}", content)

    if self.output_json:
        try:
            content = json.loads(content)
        except json.JSONDecodeError:
            logger.error("Failed to parse JSON response: {}", content)
            # Ask the model to repair its own malformed JSON before giving up.
            content = self.llm_json_corrector(content)
            if content is None:
                logger.error("Failed to fix JSON response, using fallback response")
                content = json_parse_error_response

    return content

def llm_json_corrector(self, content):
    messages = [
        {
            "role": "system",
            "content": (
                "You are a JSON error corrector. "
                "You are given a JSON object that was generated by an LLM and failed to parse. "
                "You must fix it and return a valid JSON object. "
                "If the input is plain text, wrap it in a JSON object according to the system prompt of the LLM:\n\n"
                "<start of system prompt>\n"
                f"{self.system_prompt}\n"
                "<end of system prompt>"
            )
        },
        {
            "role": "user",
            "content": (
                "<start of input>\n"
                f"{content}\n"
                "<end of input>\n"
                "Reply only with the fixed JSON object."
            )
        },
        {
            # Prefill the assistant turn so the model continues from an opening brace.
            "role": "assistant",
            "content": "{"
        }
    ]
    logger.debug("JSON corrector context:\n{}", self._messages_to_text(messages))
    response = litellm.completion(
        model=self.model_name,
        messages=messages
    )
    content = response.choices[0].message.content
    # Providers that honor the assistant prefill do not echo the leading "{",
    # so restore it before parsing; this is a no-op when it is already present.
    if not content.lstrip().startswith("{"):
        content = "{" + content
    try:
        content = json.loads(content)
    except json.JSONDecodeError:
        logger.error("Failed to parse JSON response: {}", content)
        return None
    logger.debug("Fixed JSON response: {}", content)
    return content

def _extra_context_to_text(self, context):
    # Override this in a child class.
    return ""