import json
import locale
import os
import re
import shutil
import subprocess
import sys
from typing import Optional

import chardet
import requests
from openai import OpenAI, APIConnectionError, AuthenticationError

import config
from diff import apply_patch
from log_writer import logger
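
# The config module consumed below is expected to expose (per the attribute
# accesses in this file): VERSION_NUMBER, API_KEY, BASE_URL, VISION_API_KEY,
# VISION_BASE_URL, and USE_DIFFERENT_APIKEY_FOR_VISION_MODEL.
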
def initialize() -> None:
    """
    Initializes the software.

    This function logs the software launch, including the version number and platform.

    Args:
        None

    Returns:
        None
    """
    # en_US.UTF-8 may be unavailable (e.g. on Windows or minimal Linux
    # images); fall back to the system default locale in that case.
    try:
        locale.setlocale(locale.LC_ALL, "en_US.UTF-8")
    except locale.Error:
        locale.setlocale(locale.LC_ALL, "")
    logger(f"Launch. Software version {config.VERSION_NUMBER}, platform {sys.platform}")

def askgpt(
    system_prompt: str,
    user_prompt: str,
    model_name: str,
    disable_json_mode: bool = False,
    image_url: Optional[str] = None,
) -> str:
    """
    Interacts with the LLM using the specified prompts.

    Args:
        system_prompt (str): The system prompt.
        user_prompt (str): The user prompt.
        model_name (str): The model name to use.
        disable_json_mode (bool): Whether to disable JSON mode.
        image_url (Optional[str]): URL of an image to include, if the model supports vision.

    Returns:
        str: The response from the LLM.
    """
    if image_url is not None and config.USE_DIFFERENT_APIKEY_FOR_VISION_MODEL:
        logger("Using different API key for vision model.")
        client = OpenAI(api_key=config.VISION_API_KEY, base_url=config.VISION_BASE_URL)
    else:
        client = OpenAI(api_key=config.API_KEY, base_url=config.BASE_URL)
    logger("Initialized the OpenAI client.")
    # Define the messages for the conversation.
    if image_url is not None:
        # Vision request: the user content is a list of typed parts
        # (text + image_url), per the OpenAI multimodal message format.
        messages = [
            {"role": "system", "content": system_prompt},
            {
                "role": "user",
                "content": [
                    {"type": "text", "text": user_prompt},
                    {"type": "image_url", "image_url": {"url": image_url}},
                ],
            },
        ]
    elif model_name in ("o1-preview", "o1-mini"):
        # o1-series models do not accept a system role, so the system
        # prompt is sent as a plain user message instead. (Checking the
        # model_name argument here, since that is the model actually used
        # for the request below.)
        messages = [
            {"role": "user", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
    else:
        messages = [
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": user_prompt},
        ]
logger(f"askgpt: system {system_prompt}")
logger(f"askgpt: user {user_prompt}")
    # Create a chat completion. o1-series models reject the legacy max_tokens
    # parameter and require max_completion_tokens instead, so only one of the
    # two is sent; they also lack JSON mode, so response_format is only set
    # for other models (and only when the caller has not disabled it).
    extra_kwargs = {}
    if model_name in ("o1-preview", "o1-mini"):
        extra_kwargs["max_completion_tokens"] = 10000
    else:
        extra_kwargs["max_tokens"] = 10000
        if not disable_json_mode:
            extra_kwargs["response_format"] = {"type": "json_object"}
    try:
        response = client.chat.completions.create(
            model=model_name,
            messages=messages,
            extra_headers={
                "HTTP-Referer": "https://cubegpt.org",
                "X-Title": "CubeGPT",
            },
            **extra_kwargs,
        )
    except APIConnectionError as e:
        raise Exception(
            "Failed to connect to your LLM provider. Please check your configuration "
            "(make sure the BASE_URL ends with /v1) and your internet connection. "
            "IT IS NOT A BUG OF BUKKITGPT."
        ) from e