"""
AgentDebuggerEnv Baseline Inference Script
==========================================
Baseline evaluation script for testing agent performance in the
AgentDebugger environment.
System Configuration:
- API_BASE_URL: LLM API endpoint
- MODEL_NAME: Model identifier for evaluation
- HF_TOKEN: Authentication token
"""
import os
import json
import time
import re
import random
from openai import OpenAI, APIError, RateLimitError, APIConnectionError, APITimeoutError
import requests
# ── Environment variables (never hardcode these) ─────────────────────────────
API_BASE_URL = os.environ.get("API_BASE_URL", "https://router.huggingface.co/v1")
MODEL_NAME = os.environ.get("MODEL_NAME", "meta-llama/Llama-3.1-70B-Instruct")
HF_TOKEN = os.environ.get("HF_TOKEN") or os.environ.get("OPENAI_API_KEY", "")
ENV_BASE_URL = os.environ.get("ENV_BASE_URL", "http://localhost:8000")
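# The OpenAI SDK is endpoint-agnostic: pointing base_url at any OpenAI-compatible
# server (the HF router, vLLM, a local proxy) reuses the same chat.completions API.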
client = OpenAI(base_url=API_BASE_URL, api_key=HF_TOKEN or "EMPTY")
SYSTEM_PROMPT = """You are an expert software debugger. You will be given broken code and a
failing test suite. Your job is to:
1. Analyze the error output carefully
2. Form a hypothesis about the root cause (required for every fix attempt)
3. Submit a corrected version of the complete code
4. Observe the new test results and update your hypothesis if needed
5. Repeat until all tests pass or you run out of attempts
You must ALWAYS respond with a valid JSON action object. Available actions:
Submit a fix:
{
"action_type": "submit_fix",
"fixed_code": "<complete corrected Python code as a string>",
"hypothesis": "<your hypothesis about what the bug is and where>"
}
Query for more context (use sparingly; the first one is free):
{
"action_type": "query_context",
"query_type": "error_explanation" | "function_signature" | "related_code" | "test_details",
"query_target": "<function name or line number or test name>"
}
Give up (if you cannot find the bug):
{
"action_type": "give_up",
"final_diagnosis": "<your best guess at what the bug was>"
}
Analyze the error output carefully and provide a corrected version of the complete code.
You must always include a hypothesis explaining the root cause of the bug before
submitting your fix.
Guidelines:
- Submit complete source code files, not partial snippets or diffs.
- Incorporate all feedback from previous execution attempts.
- For concurrent tasks, ensure atomic operations and proper synchronization.
"""
# ── Robust API Completion Helper ──────────────────────────────────────────────
def get_completion(messages: list, model: str = MODEL_NAME, max_retries: int = 5) -> str:
"""Gets LLM completion with exponential backoff and retry logic."""
for attempt in range(max_retries):
try:
completion = client.chat.completions.create(
model=model,
messages=messages,
max_tokens=1200,
temperature=0.2,
timeout=60.0 # Add a timeout to prevent hanging forever
)
return completion.choices[0].message.content
except (RateLimitError, APIConnectionError, APITimeoutError) as e:
if attempt == max_retries - 1:
return ""
wait_time = (2 ** attempt) + random.random()
print(f" [!] API Error ({type(e).__name__}). Retrying in {wait_time:.1f}s... (Attempt {attempt+1}/{max_retries})")
time.sleep(wait_time)
except APIError as e:
            # Other API errors: log, then retry after a short fixed delay
            # (the sleep below retries unconditionally, not just on 5xx responses).
print(f" [!] OpenAI API Error: {e}")
if attempt == max_retries - 1:
return ""
time.sleep(2)
except Exception as e:
print(f" [!] Unexpected error during completion: {e}")
return ""
return ""
def parse_action(raw: str) -> dict:
"""Parse LLM response to action dict. Handle markdown code blocks."""
raw = raw.strip()
# Strip markdown code blocks if present
raw = re.sub(r'^```(?:json)?\s*', '', raw, flags=re.MULTILINE)
raw = re.sub(r'\s*```$', '', raw, flags=re.MULTILINE)
try:
return json.loads(raw)
except json.JSONDecodeError:
# Try to extract first JSON object
match = re.search(r'\{.*\}', raw, re.DOTALL)
if match:
try:
return json.loads(match.group())
except json.JSONDecodeError:
pass
# Fallback: give up
return {
"action_type": "give_up",
"final_diagnosis": f"Failed to parse response: {raw[:200]}"
}
def build_initial_message(obs: dict) -> str:
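    """Render the reset observation into the opening user message."""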
return (
f"=== DEBUGGING TASK: {obs['task_id'].upper()} ===\n\n"
f"TASK DESCRIPTION:\n{obs['task_description']}\n\n"
f"BUGGY CODE:\n```python\n{obs['buggy_code']}\n```\n\n"
f"TEST SUITE:\n```python\n{obs['test_suite']}\n```\n\n"
f"INITIAL ERROR OUTPUT:\n{obs['initial_error_output']}\n\n"
f"Attempts remaining: {obs['attempts_remaining']}\n"
f"Max steps: {obs['max_steps']}\n\n"
f"Analyze the error and submit your first fix attempt."
)
def build_step_message(obs: dict, reward: dict, info: dict) -> str:
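    """Summarize one step (reward, test counts, fresh output) for the next LLM turn."""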
last_attempt = obs['previous_attempts'][-1] if obs['previous_attempts'] else None
msg = f"Step {obs['step_number']} result:\n"
msg += f"Step reward: {reward['step_reward']:+.3f} | Cumulative: {reward['cumulative_reward']:.3f}\n"
msg += f"Tests passing: {obs['tests_passed']}/{obs['tests_total']}\n"
msg += f"Attempts remaining: {obs['attempts_remaining']}\n"
if info.get("error"):
msg += f"ERROR: {info['error']}\n"
if info.get("query_result"):
msg += f"\nQUERY RESULT:\n{info['query_result']}\n"
if last_attempt and last_attempt.get("execution_output"):
output = last_attempt["execution_output"]
# Truncate long outputs to stay within token budget
if len(output) > 1500:
output = output[:750] + "\n...[truncated]...\n" + output[-750:]
msg += f"\nNEW TEST OUTPUT:\n{output}\n"
if obs['tests_passed'] == obs['tests_total']:
msg += "\nβ ALL TESTS PASS! Episode solved."
else:
msg += f"\nContinue debugging. {obs['tests_total'] - obs['tests_passed']} tests still failing."
return msg
def run_episode(task_id: str) -> dict:
"""Run one complete debugging episode. Returns result dict."""
# Reset environment
reset_resp = requests.post(f"{ENV_BASE_URL}/reset", json={"task_id": task_id}, timeout=60)
reset_resp.raise_for_status()
obs = reset_resp.json()
# [START] task=NAME
print(f"\n[START] task={task_id}", flush=True)
print(f" Description: {obs['task_description'][:100]}...", flush=True)
messages = [
{"role": "system", "content": SYSTEM_PROMPT},
{"role": "user", "content": build_initial_message(obs)}
]
done = False
last_result = {"reward": {"grader_score": 0.0, "cumulative_reward": 0.0}, "observation": obs}
action = {}
    max_steps = obs.get("max_steps", 25)
    step_count = 0
    while not done:
        step_count += 1
        if step_count > max_steps + 5:
            print("[!] Safety limit reached, breaking loop")
            break
try:
raw = get_completion(messages)
if not raw:
raise ValueError("Empty response from LLM")
action = parse_action(raw)
except Exception as e:
print(f" [β] Failed to get response from LLM after retries: {e}")
# Fallback action to avoid crashing the whole episode
action = {
"action_type": "give_up",
"final_diagnosis": f"Inference system failure: {str(e)}"
}
raw = json.dumps(action)
# Submit action to environment
step_resp = requests.post(f"{ENV_BASE_URL}/step", json=action, timeout=60)
step_resp.raise_for_status()
result = step_resp.json()
obs = result["observation"]
reward = result["reward"]
done = result["done"]
info = result["info"]
last_result = result
# [STEP] step=N reward=R
print(f" [STEP {obs['step_number']}] Action: {action.get('action_type')} | Tests: {obs['tests_passed']}/{obs['tests_total']} | Reward: {reward['step_reward']:+.3f}", flush=True)
# Build context for next LLM call
step_msg = build_step_message(obs, reward, info)
messages.append({"role": "assistant", "content": raw})
messages.append({"role": "user", "content": step_msg})
if done:
break
final_obs = last_result["observation"]
result = {
"task_id": task_id,
"grader_score": last_result["reward"]["grader_score"],
"cumulative_reward": last_result["reward"]["cumulative_reward"],
"steps_taken": final_obs["step_number"],
"attempts_used": final_obs["max_attempts"] - final_obs["attempts_remaining"],
"tests_passed": final_obs["tests_passed"],
"tests_total": final_obs["tests_total"],
"solved": final_obs["tests_passed"] == final_obs["tests_total"],
"final_action_type": action.get("action_type", "unknown")
}
# [END] task=NAME score=S steps=N
print(f"[END] task={task_id} score={result['grader_score']} steps={result['steps_taken']}", flush=True)
return result
def main():
print("AgentDebuggerEnv β Baseline Inference")
    # ── Environment validation ────────────────────────────────────────────────
has_token = bool(HF_TOKEN and len(HF_TOKEN) > 5)
masked_token = f"{HF_TOKEN[:4]}...{HF_TOKEN[-4:]}" if has_token else "MISSING"
print(f"Model: {MODEL_NAME}")
print(f"API: {API_BASE_URL}")
print(f"Token: {masked_token}")
print(f"Env: {ENV_BASE_URL}")
    if not has_token:
        print("WARNING: HF_TOKEN/OPENAI_API_KEY is missing. API calls will likely fail.")
print("=" * 55)
results = []
start_time = time.time()
for task_id in ["easy", "medium", "hard"]:
print(f"\nTask: {task_id}")
t0 = time.time()
try:
result = run_episode(task_id)
except Exception as e:
print(f" [β] Error running episode '{task_id}': {e}")
result = {
"task_id": task_id,
"grader_score": 0.0,
"cumulative_reward": 0.0,
"steps_taken": 0,
"attempts_used": 0,
"tests_passed": 0,
"tests_total": 0,
"solved": False,
"final_action_type": "error"
}
elapsed = time.time() - t0
solved_str = "β SOLVED" if result["solved"] else "β UNSOLVED"
print(f" Score: {result['grader_score']:.3f}")
print(f" Outcome: {solved_str}")
print(f" Attempts: {result['attempts_used']}")
print(f" Tests: {result['tests_passed']}/{result['tests_total']}")
print(f" Time: {elapsed:.1f}s")
results.append(result)
total_time = time.time() - start_time
mean_score = sum(r["grader_score"] for r in results) / len(results)
print("\n" + "=" * 55)
print(f"Mean Score: {mean_score:.3f}")
print(f"Total Time: {total_time:.1f}s (limit: 1200s)")
print("=" * 55)
output = {
"model": MODEL_NAME,
"api_base_url": API_BASE_URL,
"results": results,
"mean_score": mean_score,
"total_time_seconds": round(total_time, 1)
}
with open("baseline_results.json", "w") as f:
json.dump(output, f, indent=2)
print("\nSaved β baseline_results.json")
if __name__ == "__main__":
    main()