"""
WM Bench — Example Submission Script
=====================================
Any world model can participate in WM Bench using this template.
No 3D environment needed — text input/output only.

Usage:
    python example_submission.py --api_url YOUR_MODEL_API --api_key YOUR_KEY --model YOUR_MODEL_NAME
"""
import json
import argparse
import requests
import time
from pathlib import Path

# ── Configuration ──────────────────────────────────────────────
# Dataset location, resolved relative to this file (repo-root/data/...).
DATASET_PATH = Path(__file__).parent.parent / "data" / "wm_bench_dataset.json"
# System prompt sent with every request; pins the model to a strict
# 2-line output format so responses can be scored mechanically.
SYSTEM_PROMPT = """You are a world model. Given scene_context as JSON, respond in exactly 2 lines:
Line 1: PREDICT: left=<safe|danger>(<reason>), right=<safe|danger>(<reason>), fwd=<safe|danger>(<reason>), back=<safe|danger>(<reason>)
Line 2: MOTION: <describe the character's physical motion and emotional state in one sentence>
Respond ONLY these 2 lines. No explanation."""
# ── Main evaluation function ───────────────────────────────────
def run_evaluation(api_url: str, api_key: str, model: str, output_path: str = "my_submission.json"):
    """Run the WM Bench evaluation and write a submission file.

    Parameters:
        api_url: OpenAI-compatible API URL (e.g. https://api.openai.com/v1/chat/completions)
        api_key: API key
        model: model name
        output_path: path of the submission JSON to write

    Returns:
        output_path, after the submission JSON has been written.
    """
    # Load the dataset.
    with open(DATASET_PATH, "r", encoding="utf-8") as f:
        dataset = json.load(f)
    scenarios = dataset["scenarios"]

    print(f"Dataset loaded: {len(scenarios)} scenarios")
    print(f"Model: {model}")
    print(f"API: {api_url}\n")

    results = []
    errors = 0
    for i, scenario in enumerate(scenarios):
        sc_id = scenario["id"]
        cat = scenario["cat"]
        scene = scenario["scene_context"]

        # Latency is measured inside call_api (the original also started an
        # unused timer here; removed).
        response_text, latency_ms = call_api(api_url, api_key, model, scene)
        if response_text is None:
            errors += 1
            print(f"  [ERROR] {sc_id} ({cat}): API error")
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": None,
                "latency_ms": latency_ms,
                "error": True,
            })
        else:
            results.append({
                "id": sc_id,
                "cat": cat,
                "response": response_text,
                "latency_ms": round(latency_ms, 1),
                "error": False,
            })
        if (i + 1) % 10 == 0:
            # Fix: progress used a hard-coded "/100"; report the real total.
            print(f"  [{i + 1}/{len(scenarios)}] done ({cat})")

    # Build and write the submission file.
    submission = {
        "model": model,
        "api_url": api_url,
        "track": "A",  # Text-Only Track
        "submitted_at": time.strftime("%Y-%m-%dT%H:%M:%SZ", time.gmtime()),
        "total_scenarios": len(scenarios),
        "errors": errors,
        "results": results,
    }
    with open(output_path, "w", encoding="utf-8") as f:
        json.dump(submission, f, ensure_ascii=False, indent=2)

    print(f"\nSubmission file written: {output_path}")
    print(f"  Total scenarios: {len(scenarios)}, errors: {errors}")
    print("\nNext step: upload the submission file to the WM Bench Space")
    print("  https://huggingface.co/spaces/FINAL-Bench/worldmodel-bench")
    return output_path
def call_api(api_url: str, api_key: str, model: str, scene_context: dict):
    """Call an OpenAI-compatible chat-completions endpoint once.

    Parameters:
        api_url: endpoint URL
        api_key: bearer token
        model: model name to request
        scene_context: scenario dict, serialized into the user message

    Returns:
        (response_text, latency_ms); response_text is None on any failure.
    """
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": model,
        "max_tokens": 200,
        "temperature": 0.0,  # deterministic output for reproducible scoring
        "messages": [
            {"role": "system", "content": SYSTEM_PROMPT},
            {"role": "user", "content": f"scene_context: {json.dumps(scene_context)}"},
        ],
    }
    t0 = time.time()
    try:
        r = requests.post(api_url, headers=headers, json=payload, timeout=30)
        r.raise_for_status()
        text = r.json()["choices"][0]["message"]["content"]
        return text, (time.time() - t0) * 1000
    except (requests.RequestException, KeyError, IndexError, TypeError, ValueError):
        # Narrowed from a bare `except Exception`: catch network/HTTP failures
        # (RequestException, which raise_for_status raises) and malformed
        # response bodies (json decode -> ValueError; missing keys/indices).
        # Anything else is a programming error and should surface.
        return None, (time.time() - t0) * 1000
if __name__ == "__main__":
    # Command-line entry point: collect connection details and run the bench.
    cli = argparse.ArgumentParser(description="WM Bench Submission Script")
    cli.add_argument("--api_url", required=True, help="OpenAI-compatible API URL")
    cli.add_argument("--api_key", required=True, help="API Key")
    cli.add_argument("--model", required=True, help="Model name")
    cli.add_argument("--output", default="my_submission.json", help="Output file path")
    opts = cli.parse_args()
    run_evaluation(opts.api_url, opts.api_key, opts.model, opts.output)