"""
WM Bench — Example Submission Script
=====================================

Any world model can participate in WM Bench using this template.
No 3D environment needed — text input/output only.

Usage:
    python example_submission.py --api_url YOUR_MODEL_API --api_key YOUR_KEY --model YOUR_MODEL_NAME
"""

import json
import argparse
import requests
import time
from pathlib import Path

# ── Configuration ──────────────────────────────────────────────────
DATASET_PATH = Path(__file__).parent.parent / "data" / "wm_bench_dataset.json"

SYSTEM_PROMPT = """You are a world model. Given scene_context as JSON, respond in exactly 2 lines:
Line 1: PREDICT: left=(), right=(), fwd=(), back=()
Line 2: MOTION:
Respond ONLY these 2 lines. No explanation."""


# ── Main evaluation function ───────────────────────────────────────
def run_evaluation(api_url: str, api_key: str, model: str,
                   output_path: str = "my_submission.json") -> str:
    """Run the WM Bench evaluation and write a submission file.

    Parameters:
        api_url: OpenAI-compatible API URL (e.g. https://api.openai.com/v1/chat/completions)
        api_key: API key
        model: model name
        output_path: path of the submission file to write

    Returns:
        The path of the written submission file.
    """
    # Load the dataset.
    with open(DATASET_PATH, "r", encoding="utf-8") as f:
        dataset = json.load(f)
    scenarios = dataset["scenarios"]
    total = len(scenarios)

    print(f"✅ 데이터셋 로드: {len(scenarios)}개 시나리오")
    print(f"🤖 모델: {model}")
    print(f"🔗 API: {api_url}\n")

    results = []
    errors = 0

    for i, scenario in enumerate(scenarios):
        sc_id = scenario["id"]
        cat = scenario["cat"]
        scene = scenario["scene_context"]

        # call_api measures its own latency; no separate timer is needed here
        # (a previously unused `t0 = time.time()` was removed).
        response_text, latency_ms = call_api(api_url, api_key, model, scene)

        if response_text is None:
            errors += 1
            print(f" ❌ {sc_id} ({cat}): API 오류")
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": None,
                "latency_ms": latency_ms,
                "error": True
            })
        else:
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": response_text,
                "latency_ms": round(latency_ms, 1),
                "error": False
            })

        if (i + 1) % 10 == 0:
            # Fixed: progress used to hard-code "/100" regardless of dataset size.
            print(f" ✓ {i+1}/{total} 완료 ({cat})")

    # Build the submission file.
    submission = {
        "model": model,
        "api_url": api_url,
        "track": "A",  # Text-Only Track
        "submitted_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "total_scenarios": len(scenarios),
        "errors": errors,
        "results": results
    }

    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(submission, f, ensure_ascii=False, indent=2)

    print(f"\n✅ 제출 파일 생성: {output_path}")
    print(f" 총 시나리오: {len(scenarios)}, 오류: {errors}")
    print(f"\n📤 다음 단계: WM Bench Space에 제출 파일 업로드")
    print(f" https://huggingface.co/spaces/FINAL-Bench/worldmodel-bench")

    return output_path


def call_api(api_url: str, api_key: str, model: str, scene_context: dict):
    """Call an OpenAI-compatible chat-completions API.

    Parameters:
        api_url: endpoint URL
        api_key: bearer token
        model: model name sent in the payload
        scene_context: scenario context, serialized into the user message

    Returns:
        (response_text, latency_ms) — response_text is None on failure;
        latency_ms always reflects wall-clock time of the attempt.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}"
    }
    payload = {
        "model": model,
        "max_tokens": 200,
        "temperature": 0.0,
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"scene_context: {json.dumps(scene_context)}"}
        ]
    }
    t0 = time.time()
    try:
        r = requests.post(api_url, headers=headers, json=payload, timeout=30)
        r.raise_for_status()
        text = r.json()["choices"][0]["message"]["content"]
        return text, (time.time() - t0) * 1000
    except (requests.RequestException, KeyError, IndexError, ValueError):
        # Narrowed from a bare `except Exception`: network/HTTP failures
        # (RequestException) and malformed response bodies (JSON decode ->
        # ValueError, missing keys/indices) are expected and reported as an
        # error result; genuine programming errors should surface instead.
        return None, (time.time() - t0) * 1000


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="WM Bench Submission Script")
    parser.add_argument("--api_url", required=True, help="OpenAI-compatible API URL")
    parser.add_argument("--api_key", required=True, help="API Key")
    parser.add_argument("--model", required=True, help="Model name")
    parser.add_argument("--output", default="my_submission.json", help="Output file path")
    args = parser.parse_args()

    run_evaluation(args.api_url, args.api_key, args.model, args.output)