# utils.py — updated by AniseF (commit cd59240, verified)
import os
import re
import requests
# KEEP THESE IMPORTS (essential for reading config.py)
from config import DOCS, MODEL_PRIORITY_A, MODEL_PRIORITY_B
def detect_language(passage):
    """Classify *passage* as written in 'greek' or 'latin' script.

    The text counts as Greek when more than 10% of its characters fall
    inside the Greek (U+0370–U+03FF) or Greek Extended (U+1F00–U+1FFF)
    Unicode blocks; otherwise it is treated as Latin.
    """
    hits = re.findall(r'[\u0370-\u03FF\u1F00-\u1FFF]', passage)
    if len(hits) > 0.1 * len(passage):
        return 'greek'
    return 'latin'
def _fetch_questions(url_doc):
    """Download a Google Docs export and return the lines ending in '?'.

    Raises requests.RequestException (or TypeError if url_doc is None) on
    failure; the caller turns that into a user-facing error message.
    """
    resp = requests.get(url_doc, timeout=15)
    resp.raise_for_status()
    return [l.strip() for l in resp.text.splitlines() if l.strip().endswith('?')]


def _select_working_model(model_chain, api_key):
    """Return the first model in *model_chain* that answers a 1-token probe.

    Sends a minimal chat-completion request to OpenRouter per candidate;
    returns None when every candidate fails or errors out.
    """
    for candidate in model_chain:
        try:
            probe = requests.post(
                url="https://openrouter.ai/api/v1/chat/completions",
                headers={"Authorization": f"Bearer {api_key}"},
                json={"model": candidate, "messages": [{"role": "user", "content": "test"}], "max_tokens": 1},
                timeout=10,
            )
        # Narrow catch: a bare `except:` here also swallowed
        # KeyboardInterrupt/SystemExit, making the probe loop uninterruptible.
        except requests.RequestException:
            continue
        if probe.status_code == 200:
            return candidate
    return None


def call_openrouter(passage, mode, category):
    """Run a batched philological analysis of *passage* via OpenRouter.

    Parameters:
        passage:  the Greek/Latin text to analyse.
        mode:     priority selector; any string containing "Alta" uses
                  MODEL_PRIORITY_A, otherwise MODEL_PRIORITY_B.
        category: key into DOCS mapping to a Google Docs URL whose
                  '?'-terminated lines are the questions to answer.

    Returns:
        (report_text, model_name) on success. On failure the second element
        is "Nenhum" (question doc unreachable) or "Falha" (no model online).
    """
    api_key = os.getenv("OPENROUTER_API_KEY")
    lang = detect_language(passage)

    # Pull the question list; any network/HTTP problem (or an unknown
    # category yielding url_doc=None) is reported back, not raised.
    url_doc = DOCS.get(category)
    try:
        questions = _fetch_questions(url_doc)
    except Exception as e:
        return f"Erro ao acessar Google Docs: {str(e)}", "Nenhum"

    # --- Model selection with fallback ---
    model_chain = MODEL_PRIORITY_A if "Alta" in mode else MODEL_PRIORITY_B
    working_model = _select_working_model(model_chain, api_key)
    if not working_model:
        return "Erro: Nenhum modelo da lista está disponível no momento (OpenRouter Offline?).", "Falha"

    full_report = [f"--- ANÁLISE FILOLÓGICA: {category.upper()} ---", f"Texto: {passage}\n"]
    batch_size = 5

    # --- Run the questions in batches against the working model ---
    for i in range(0, len(questions), batch_size):
        batch = questions[i:i + batch_size]
        # NOTE: the prompt's continuation lines stay at column 0 so the
        # literal text sent to the model is unchanged.
        prompt = f"""Atue como um Filólogo especialista em {lang}.
Passagem: "{passage}"
Responda detalhadamente em PORTUGUÊS.
OBRIGATÓRIO: Escreva a PERGUNTA completa antes de cada resposta.
QUESTÕES:
{chr(10).join(batch)}"""
        try:
            response = requests.post(
                url="https://openrouter.ai/api/v1/chat/completions",
                headers={"Authorization": f"Bearer {api_key}"},
                json={
                    "model": working_model,
                    "messages": [{"role": "user", "content": prompt}],
                    "temperature": 0.1,
                    "max_tokens": 4000,
                },
                timeout=120,
            )
            if response.status_code == 200:
                full_report.append(response.json()['choices'][0]['message']['content'])
            else:
                full_report.append(f"\n[Erro no lote {i//batch_size + 1}: Status {response.status_code}]")
        # Best-effort per batch: record the failure in the report and keep going.
        except Exception as e:
            full_report.append(f"\n[Falha de conexão: {str(e)}]")

    return "\n\n".join(full_report), working_model