# data_factory/generators/generate_cups_cot_batch.py
# NOTE: upload-page artifact header preserved as a comment (marcuskwan,
# "Add files using upload-large-folder tool", commit 7d93608 verified) —
# the raw lines were not valid Python and broke the file.
#!/usr/bin/env python3
"""Generate train samples for cups_cot with structured, parse-friendly CoT text."""
from __future__ import annotations
import argparse
import json
import os
import random
import re
from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor, as_completed
from pathlib import Path
from generators.cups_shuffle import (
CupsShuffleConfig,
_jitter_color as _cups_jitter_color,
_shuffle_keyframes as _cups_shuffle_keyframes,
generate_cups_shuffle,
)
# Project root: this file lives in <root>/generators/, so parents[1] is the repo root.
PROJECT_ROOT = Path(__file__).resolve().parents[1]
ROOT = PROJECT_ROOT  # alias used throughout for dataset-relative path handling
CATEGORY = "cups_cot"  # dataset category name; also the video subdirectory under videos/
# Extracts the integer seed from generated video filenames, e.g. "cups_cot_train_s95000.mp4".
SEED_PATTERN = re.compile(r"cups_cot_train_s(\d+)\.mp4$")
def _label_from_index(index: int) -> str:
return chr(ord("A") + int(index))
def _default_id_from_rel_path(rel_path: str) -> str:
value = Path(rel_path).with_suffix("").as_posix().replace("/", "_").strip("_")
return value or "sample"
def _split_category(name: str) -> tuple[str, str]:
if name == "cups_cot":
return "cups_cot", "cups_cot"
if name.startswith("hidden_container_"):
return "hidden_container", name.replace("hidden_container_", "", 1)
if name == "random_dots":
return "biological", "random_dots"
if "_" in name:
major, sub = name.split("_", 1)
return major, sub
return name, name
def _make_cups_cfg(seed: int) -> CupsShuffleConfig:
    """Build the canonical render config for one cups_cot sample.

    Everything except the seed is fixed so that a seed alone fully
    determines the rendered video.
    """
    params = dict(
        width=1280,
        height=720,
        fps=30,
        intro_duration=2.0,
        cover_duration=2.0,
        shuffle_duration=16.0,
        swaps=4,
        seed=int(seed),
        ball_radius=18,
        cup_count=4,
    )
    return CupsShuffleConfig(**params)
def _consume_style_rng(cfg: CupsShuffleConfig, rng: random.Random) -> None:
    """Draw the style-randomization values from *rng*, mutating *cfg* in place.

    The draw order is significant: it must stay exactly as written so that
    subsequent draws from the same RNG stream line up with the renderer's.
    No-op when cfg.randomize_style is false (no RNG state is consumed).
    """
    if not cfg.randomize_style:
        return
    cfg.size_scale = rng.uniform(0.92, 1.08)
    cfg.arc_scale = rng.uniform(0.9, 1.2)
    cfg.lane_scale = rng.uniform(0.9, 1.15)
    # Jitter each cup color attribute by its amount, in this fixed order.
    jitter_plan = (
        ("cup_color", 14),
        ("cup_shade", 12),
        ("cup_outline", 12),
        ("cup_rim", 14),
        ("cup_inner", 8),
        ("cup_highlight", 16),
    )
    for attr, amount in jitter_plan:
        setattr(cfg, attr, _cups_jitter_color(getattr(cfg, attr), rng, amount))
def _derive_trace(seed: int) -> dict:
    """Re-derive the ground-truth swap trace for a seed without rendering.

    Rebuilds the same config and RNG stream as the renderer (style draws
    first, then ball placement, then keyframes) — presumably mirroring
    generate_cups_shuffle's draw order; _generate_one cross-checks the
    answers to catch drift. TODO confirm against cups_shuffle.

    Returns:
        dict with ball_start_under, swap_count, swaps (sorted label pairs),
        final_order (labels left-to-right), and ball_end_under.

    Raises:
        RuntimeError: if the keyframe count or a per-step diff does not
            match the expected shuffle structure.
    """
    cfg = _make_cups_cfg(seed)
    rng = random.Random(cfg.seed)
    # Consume the style draws first so the following draws align with rendering.
    _consume_style_rng(cfg, rng)
    ball_cup_id = rng.randint(0, cfg.cup_count - 1)
    keyframes = _cups_shuffle_keyframes(cfg, cfg.cup_count)
    # One keyframe per swap plus the initial ordering.
    if len(keyframes) != cfg.swaps + 1:
        raise RuntimeError(
            f"Unexpected keyframe length for seed={seed}: "
            f"{len(keyframes)} (expected {cfg.swaps + 1})"
        )
    swaps: list[tuple[str, str]] = []
    for step in range(cfg.swaps):
        prev_order = keyframes[step]
        next_order = keyframes[step + 1]
        # Positions whose occupant changed between consecutive keyframes.
        diff_positions = [
            idx
            for idx, (prev_cup, next_cup) in enumerate(zip(prev_order, next_order))
            if prev_cup != next_cup
        ]
        # A single swap must change exactly two positions.
        if len(diff_positions) != 2:
            raise RuntimeError(
                f"Seed {seed} step {step + 1}: expected 2 changed positions, got "
                f"{len(diff_positions)}"
            )
        left_label = _label_from_index(prev_order[diff_positions[0]])
        right_label = _label_from_index(prev_order[diff_positions[1]])
        # Sort so the pair is reported in a canonical (A-before-B) order.
        cup_a, cup_b = sorted((left_label, right_label))
        swaps.append((cup_a, cup_b))
    final_order = keyframes[-1]
    final_order_labels = [_label_from_index(cup_id) for cup_id in final_order]
    # The answer label is derived from the final POSITION of the ball's cup.
    final_pos = final_order.index(ball_cup_id)
    answer = _label_from_index(final_pos)
    return {
        "ball_start_under": _label_from_index(ball_cup_id),
        "swap_count": int(cfg.swaps),
        "swaps": swaps,
        "final_order": final_order_labels,
        "ball_end_under": answer,
    }
def _build_cot_text(trace: dict) -> str:
lines = [
"COT_VERSION:1",
"TASK:cups_cot",
"CUP_LABELS:A,B,C,D",
"LABEL_SEMANTICS:IDENTITY_FIXED",
"INIT_ORDER_LEFT_TO_RIGHT:A,B,C,D",
f"BALL_START_UNDER:{trace['ball_start_under']}",
f"SWAP_COUNT:{trace['swap_count']}",
]
for idx, (cup_a, cup_b) in enumerate(trace["swaps"], start=1):
lines.append(f"SWAP_{idx:02d}:{cup_a}<->{cup_b}")
lines.extend(
[
f"FINAL_ORDER_LEFT_TO_RIGHT:{','.join(trace['final_order'])}",
f"BALL_END_UNDER:{trace['ball_end_under']}",
f"**Answer:{trace['ball_end_under']}**",
]
)
return "\n".join(lines)
def _build_question() -> str:
return (
"Based on the video, track cup swaps step by step and determine which cup "
"covers the red ball at the end. Cups are labeled A, B, C, and D from left "
"to right at the start. Answer in the format **Answer:X**."
)
def _build_entry(rel_path: str, text: str, question: str, answer: str) -> dict:
    """Assemble one train-dataset record for a rendered video.

    The record keeps both the dataset-relative path and the resolved
    absolute video path; image fields are present but empty for this task.
    """
    major, sub = _split_category(CATEGORY)
    record = {
        "id": _default_id_from_rel_path(rel_path),
        "path": rel_path,
        "category": CATEGORY,
        "major": major,
        "sub": sub,
        "text": text,
        "question": question,
        "answer": answer,
        "data_split": "train",
        "video_path": str((ROOT / rel_path).resolve()),
        "image_gt": "",
        "image_paths": [],
    }
    return record
def _load_dataset(path: Path) -> list[dict]:
if not path.exists():
return []
try:
raw = json.loads(path.read_text(encoding="utf-8"))
except json.JSONDecodeError:
return []
if isinstance(raw, dict) and isinstance(raw.get("items"), list):
raw = raw["items"]
if not isinstance(raw, list):
return []
return [dict(item) for item in raw if isinstance(item, dict)]
def _existing_seed_set(existing_entries: list[dict]) -> set[int]:
    """Collect seeds already used, from both dataset entries and files on disk."""
    seen: set[int] = set()

    def _collect(text: str) -> None:
        # Record the seed if the text matches the generated-filename pattern.
        found = SEED_PATTERN.search(text)
        if found is not None:
            seen.add(int(found.group(1)))

    for record in existing_entries:
        # Only entries of this category can carry one of our seeds.
        if str(record.get("category") or "").strip() != CATEGORY:
            continue
        _collect(str(record.get("path") or "").strip())
    videos_dir = ROOT / "videos" / CATEGORY
    if videos_dir.exists():
        # Also count videos on disk that never made it into the dataset JSON.
        for mp4 in videos_dir.glob("*.mp4"):
            _collect(mp4.name)
    return seen
def _pick_seed_list(count: int, seed_start: int, used: set[int]) -> list[int]:
seeds: list[int] = []
cursor = int(seed_start)
while len(seeds) < int(count):
if cursor not in used:
seeds.append(cursor)
cursor += 1
return seeds
def _upsert_entries(existing_entries: list[dict], new_entries: list[dict]) -> list[dict]:
new_by_path = {
str(entry.get("path") or "").strip(): entry
for entry in new_entries
if str(entry.get("path") or "").strip()
}
out: list[dict] = []
replaced_paths: set[str] = set()
for item in existing_entries:
path_text = str(item.get("path") or "").strip()
if path_text and path_text in new_by_path:
out.append(new_by_path[path_text])
replaced_paths.add(path_text)
else:
out.append(item)
for entry in new_entries:
path_text = str(entry.get("path") or "").strip()
if path_text and path_text not in replaced_paths:
out.append(entry)
return out
def _write_json(path: Path, payload: object) -> None:
path.write_text(json.dumps(payload, ensure_ascii=False, indent=2) + "\n", encoding="utf-8")
def _generate_one(seed: int, force_render: bool = False) -> dict:
    """Render (or reuse) one video for *seed* and build its dataset entry.

    Existing non-empty videos are reused unless force_render is set. The
    renderer's reported answer is cross-checked against the independently
    derived trace so RNG drift between the two paths fails loudly.

    Returns:
        dict with seed, status ("rendered"/"reused"), the dataset entry,
        and the dataset-relative video path.

    Raises:
        RuntimeError: if rendered and derived answers disagree.
    """
    out_path = ROOT / "videos" / CATEGORY / f"{CATEGORY}_train_s{int(seed)}.mp4"
    out_path.parent.mkdir(parents=True, exist_ok=True)
    # A zero-byte file counts as missing (e.g. a previously interrupted render).
    should_render = bool(force_render) or not (out_path.exists() and out_path.stat().st_size > 0)
    rendered_answer = ""
    if should_render:
        render_cfg = _make_cups_cfg(seed)
        _, rendered_answer = generate_cups_shuffle(
            str(out_path),
            render_cfg,
            return_answer=True,
        )
    trace = _derive_trace(seed)
    answer = str(trace["ball_end_under"])
    # rendered_answer is only available when we actually rendered; reused
    # videos skip the cross-check.
    if rendered_answer and rendered_answer != answer:
        raise RuntimeError(
            f"Answer mismatch for seed={seed}: rendered={rendered_answer}, "
            f"trace={answer}"
        )
    rel_path = out_path.relative_to(ROOT).as_posix()
    entry = _build_entry(
        rel_path=rel_path,
        text=_build_cot_text(trace),
        question=_build_question(),
        answer=answer,
    )
    return {
        "seed": int(seed),
        "status": "rendered" if should_render else "reused",
        "entry": entry,
        "rel_path": rel_path,
    }
def main() -> None:
    """CLI entry point: plan seeds, render samples in parallel, merge dataset JSON."""
    parser = argparse.ArgumentParser(description="Generate cups_cot train samples.")
    parser.add_argument("--count", type=int, default=50, help="Number of new samples to generate.")
    parser.add_argument("--seed-start", type=int, default=95000, help="Seed start for this batch.")
    parser.add_argument("--workers", type=int, default=0, help="Parallel workers; 0 means auto.")
    parser.add_argument(
        "--executor",
        choices=("thread", "process"),
        default="thread",
        help="Parallel backend.",
    )
    parser.add_argument(
        "--dataset",
        default="video_dataset_train.json",
        help="Train dataset JSON path.",
    )
    parser.add_argument("--force-render", action="store_true", help="Re-render even if target video already exists.")
    parser.add_argument("--dry-run", action="store_true", help="Build plan only.")
    parser.add_argument(
        "--plan-output",
        default="tmp_cups_cot_plan.json",
        help="Write planned seeds/tasks to this JSON file.",
    )
    parser.add_argument(
        "--records-output",
        default="tmp_cups_cot_records.json",
        help="Write generation records to this JSON file.",
    )
    args = parser.parse_args()
    if args.count <= 0:
        raise SystemExit("--count must be > 0")
    # Relative output paths are resolved against the project root, not the CWD.
    dataset_path = Path(args.dataset)
    if not dataset_path.is_absolute():
        dataset_path = ROOT / dataset_path
    plan_path = Path(args.plan_output)
    if not plan_path.is_absolute():
        plan_path = ROOT / plan_path
    records_path = Path(args.records_output)
    if not records_path.is_absolute():
        records_path = ROOT / records_path
    # Skip seeds already present in the dataset JSON or as videos on disk.
    existing_entries = _load_dataset(dataset_path)
    used_seeds = _existing_seed_set(existing_entries)
    seeds = _pick_seed_list(args.count, args.seed_start, used_seeds)
    # Persist the plan first so a dry run (or crash) still leaves an audit trail.
    plan = {
        "category": CATEGORY,
        "count": int(args.count),
        "seed_start_arg": int(args.seed_start),
        "seed_list": seeds,
        "dataset_path": str(dataset_path),
        "executor": args.executor,
    }
    _write_json(plan_path, plan)
    print(f"Existing train entries: {len(existing_entries)}")
    print(f"Planned new {CATEGORY} items: {len(seeds)}")
    # NOTE: this prints the endpoints only; the list may skip already-used seeds.
    print(f"Seed list: {seeds[0]}..{seeds[-1]}")
    print(f"Plan written to: {plan_path}")
    if args.dry_run:
        return
    # Auto mode caps at 8 workers; os.cpu_count() may return None, hence the guard.
    max_workers = int(args.workers) if int(args.workers) > 0 else min(8, max(1, os.cpu_count() or 1))
    print(f"Using workers: {max_workers} ({args.executor})")
    executor_cls = ThreadPoolExecutor if args.executor == "thread" else ProcessPoolExecutor
    results: list[dict] = []
    failures: list[dict] = []
    with executor_cls(max_workers=max_workers) as executor:
        future_map = {
            executor.submit(_generate_one, seed, bool(args.force_render)): seed
            for seed in seeds
        }
        for done_count, future in enumerate(as_completed(future_map), start=1):
            seed = future_map[future]
            try:
                result = future.result()
            except Exception as exc:  # noqa: BLE001
                # Collect failures instead of aborting so the batch finishes;
                # they are reported (and fail the run) after the pool drains.
                failures.append({"seed": int(seed), "error": repr(exc)})
            else:
                results.append(result)
            if done_count % 10 == 0 or done_count == len(seeds):
                print(f"Progress: {done_count}/{len(seeds)}, failures={len(failures)}")
    if failures:
        # Any failure aborts before the dataset is touched, keeping it consistent.
        fail_path = ROOT / "tmp_cups_cot_failures.json"
        _write_json(fail_path, failures)
        raise SystemExit(f"Generation failed for {len(failures)} seeds. See {fail_path}")
    # as_completed yields in completion order; sort by seed for stable output.
    results.sort(key=lambda item: int(item["seed"]))
    new_entries = [dict(item["entry"]) for item in results]
    merged_entries = _upsert_entries(existing_entries, new_entries)
    _write_json(dataset_path, merged_entries)
    records = {
        "category": CATEGORY,
        "count": len(results),
        "seed_list": [int(item["seed"]) for item in results],
        "status_counter": {
            "rendered": sum(1 for item in results if item["status"] == "rendered"),
            "reused": sum(1 for item in results if item["status"] == "reused"),
        },
        "records": results,
    }
    _write_json(records_path, records)
    print(f"Wrote dataset: {dataset_path}")
    print(f"Wrote records: {records_path}")
    print(f"Train size: {len(existing_entries)} -> {len(merged_entries)}")


if __name__ == "__main__":
    main()