Datasets:
CarolinePascal committed
chore(clean): cleaning up python files
fix_episode_indices.py  +0 -227

fix_episode_indices.py
DELETED
@@ -1,227 +0,0 @@
"""Renumber episode_index so every recording becomes a unique, contiguous episode.

See README / chat context. Operates in-place on ./data, ./meta, ./meta/info.json.
Run from the dataset root. Backup is made under ./.backup.
"""

from __future__ import annotations

import glob
import json
import os
from pathlib import Path

import pyarrow as pa
import pyarrow.parquet as pq

ROOT = Path(__file__).resolve().parent
DATA_DIR = ROOT / "data"
META_EP_DIR = ROOT / "meta" / "episodes"
INFO_PATH = ROOT / "meta" / "info.json"
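For context, this is the on-disk layout implied by the constants above and the globs and format strings used throughout the script; the directory names come from the code itself, while the specific file names are illustrative:

# Layout relative to the dataset root (file names illustrative):
#   data/chunk-000/file-000.parquet           # frame rows: `index`, `episode_index`, ...
#   meta/episodes/chunk-000/file-000.parquet  # one row per episode, incl. stats/* columns
#   meta/info.json                            # total_episodes, total_frames, splits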
def read_all_episode_meta():
    """Return list of dict rows across every meta/episodes file (small number of columns)."""
    cols = [
        "episode_index",
        "length",
        "data/chunk_index",
        "data/file_index",
        "dataset_from_index",
        "dataset_to_index",
    ]
    rows = []
    for f in sorted(glob.glob(str(META_EP_DIR / "chunk-*" / "*.parquet"))):
        t = pq.read_table(f, columns=cols)
        d = t.to_pydict()
        for i in range(t.num_rows):
            rows.append({
                "episode_index": d["episode_index"][i],
                "length": d["length"][i],
                "chunk_index": d["data/chunk_index"][i],
                "file_index": d["data/file_index"][i],
                "dataset_from_index": d["dataset_from_index"][i],
                "dataset_to_index": d["dataset_to_index"][i],
                "meta_file": f,
            })
    return rows
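For orientation, each returned row is a plain dict shaped like the sketch below; all values here are hypothetical, not taken from the dataset:

# Hypothetical example of a single row returned by read_all_episode_meta().
example_row = {
    "episode_index": 0,          # old id; only unique within its source recording
    "length": 412,               # number of frames in the episode
    "chunk_index": 0,            # copied from data/chunk_index
    "file_index": 3,             # copied from data/file_index
    "dataset_from_index": 1250,  # first frame's global compact index
    "dataset_to_index": 1662,    # one past the last frame (from + length)
    "meta_file": "meta/episodes/chunk-000/file-003.parquet",
}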
def build_mapping(rows):
    """Sort rows by (chunk_index, file_index, dataset_from_index) and assign new ids 0..N-1.

    Also compute the new compact `index` range per (chunk, file, old_ep) so we can
    update the data parquet `index` column. The meta's dataset_from/to_index already
    lives in the compact space and matches this ordering, so we use it directly.
    """
    rows_sorted = sorted(
        rows,
        key=lambda r: (r["chunk_index"], r["file_index"], r["dataset_from_index"]),
    )
    mapping = {}  # (chunk, file, old_ep) -> dict(new_ep, new_from, new_to, old_from, old_to)
    for new_ep, r in enumerate(rows_sorted):
        key = (r["chunk_index"], r["file_index"], r["episode_index"])
        assert key not in mapping, f"duplicate key {key}"
        mapping[key] = {
            "new_ep": new_ep,
            "new_from": r["dataset_from_index"],
            "new_to": r["dataset_to_index"],
            "old_from": r["dataset_from_index"],  # kept for reference; unused downstream
            "old_to": r["dataset_to_index"],
            "length": r["length"],
        }
    return mapping, rows_sorted
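A minimal, self-contained sketch of the renumbering, using synthetic rows rather than real dataset values: two recordings both reuse old episode_index 0, and build_mapping assigns globally unique ids in (chunk, file, dataset_from_index) order.

# Synthetic example: two files each start their episodes at old id 0.
_rows = [
    {"episode_index": 0, "length": 10, "chunk_index": 0, "file_index": 1,
     "dataset_from_index": 10, "dataset_to_index": 20, "meta_file": "b"},
    {"episode_index": 0, "length": 10, "chunk_index": 0, "file_index": 0,
     "dataset_from_index": 0, "dataset_to_index": 10, "meta_file": "a"},
]
_mapping, _sorted = build_mapping(_rows)
assert _mapping[(0, 0, 0)]["new_ep"] == 0  # earlier file comes first
assert _mapping[(0, 1, 0)]["new_ep"] == 1  # later file gets the next id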
def fix_data_files(mapping):
    """Rewrite every data parquet: remap episode_index and re-number index column.

    `index` for each data file starts at `cumulative_frames` seen so far (chronological,
    numeric file order), matching the meta's compact `dataset_from_index` space.
    """
    data_files = sorted(glob.glob(str(DATA_DIR / "chunk-*" / "*.parquet")))
    cum = 0
    for df in data_files:
        # Parse chunk/file from path
        chunk = int(Path(df).parent.name.split("-")[1])
        file_idx = int(Path(df).stem.split("-")[1])

        table = pq.read_table(df)
        schema = table.schema
        d = table.to_pydict()
        n = table.num_rows

        # New index column: cum .. cum + n - 1
        new_index = list(range(cum, cum + n))

        # New episode_index: map each row's old ep -> new ep using (chunk, file, old_ep)
        old_eps = d["episode_index"]
        new_eps = []
        for old_ep in old_eps:
            key = (chunk, file_idx, old_ep)
            m = mapping[key]
            new_eps.append(m["new_ep"])

        d["index"] = new_index
        d["episode_index"] = new_eps

        new_table = pa.Table.from_pydict(d, schema=schema)
        pq.write_table(new_table, df, compression="snappy")
        print(
            f"data/chunk-{chunk:03d}/file-{file_idx:03d}.parquet: "
            f"rows={n} index={cum}..{cum + n - 1} eps={sorted(set(new_eps))[0]}..{sorted(set(new_eps))[-1]}"
        )
        cum += n

    return cum
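A sanity check one could run afterwards (a sketch, not part of the original script; it reuses the module's imports and constants): the rewritten `index` column should be globally contiguous across data files in sorted order.

# Hypothetical post-run check: `index` must run 0..total_frames-1 across files.
def check_index_contiguous(data_dir=DATA_DIR):
    expected = 0
    for f in sorted(glob.glob(str(data_dir / "chunk-*" / "*.parquet"))):
        idx = pq.read_table(f, columns=["index"]).to_pydict()["index"]
        assert idx == list(range(expected, expected + len(idx))), f"gap in {f}"
        expected += len(idx)
    return expected  # equals total_frames when everything lines up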
def fix_meta_files(mapping):
    """For each meta/episodes parquet file, update episode_index and index-related stats."""
    meta_files = sorted(glob.glob(str(META_EP_DIR / "chunk-*" / "*.parquet")))
    index_stat_cols = [
        "stats/index/min",
        "stats/index/max",
        "stats/index/mean",
        "stats/index/q01",
        "stats/index/q10",
        "stats/index/q50",
        "stats/index/q90",
        "stats/index/q99",
        # std is invariant to translation, count is invariant
    ]

    for mf in meta_files:
        chunk = int(Path(mf).parent.name.split("-")[1])
        # meta file index: derive file_index from the data/file_index column of the first row
        table = pq.read_table(mf)
        schema = table.schema
        d = table.to_pydict()
        n = table.num_rows

        new_ep_col = []
        # Stats: translate index stats by (new_from - old_from). All rows in this file
        # may correspond to different data files in theory, but in practice each meta file
        # maps 1:1 to a data file. Regardless, we translate per-row.
        for i in range(n):
            old_ep = d["episode_index"][i]
            ch = d["data/chunk_index"][i]
            fi = d["data/file_index"][i]
            key = (ch, fi, old_ep)
            m = mapping[key]
            new_ep_col.append(m["new_ep"])

            # Translate index-based stats
            old_start = d["stats/index/min"][i][0]
            new_start = m["new_from"]
            delta = new_start - old_start
            if delta != 0:
                for col in index_stat_cols:
                    v = d[col][i]
                    # v is a list with one element (or possibly a scalar inside)
                    d[col][i] = [v[0] + delta]

            # Replace episode_index stats with the single new value
            for col, val in [
                ("stats/episode_index/min", [new_ep_col[-1]]),
                ("stats/episode_index/max", [new_ep_col[-1]]),
                ("stats/episode_index/mean", [float(new_ep_col[-1])]),
                ("stats/episode_index/q01", [float(new_ep_col[-1])]),
                ("stats/episode_index/q10", [float(new_ep_col[-1])]),
                ("stats/episode_index/q50", [float(new_ep_col[-1])]),
                ("stats/episode_index/q90", [float(new_ep_col[-1])]),
                ("stats/episode_index/q99", [float(new_ep_col[-1])]),
            ]:
                if col in d:
                    d[col][i] = val
            # std=0 and count=length are already correct (single-valued per episode)

        d["episode_index"] = new_ep_col

        # Sort rows within the meta file by new episode_index for cleanliness
        order = sorted(range(n), key=lambda k: new_ep_col[k])
        for col in list(d.keys()):
            d[col] = [d[col][k] for k in order]

        new_table = pa.Table.from_pydict(d, schema=schema)
        pq.write_table(new_table, mf, compression="snappy")
        print(
            f"meta/episodes/chunk-{chunk:03d}/{Path(mf).name}: "
            f"rows={n} new_eps={d['episode_index'][0]}..{d['episode_index'][-1]}"
        )
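A consistency check in the same hedged spirit (hypothetical, again reusing the script's constants): after the translation, each meta row's stats/index/min should land exactly on its dataset_from_index, since new min = old min + (new_from - old min) = new_from.

# Hypothetical post-run check: translated index stats line up with the
# compact frame ranges stored in the same meta rows.
def check_meta_stats(meta_dir=META_EP_DIR):
    for f in sorted(glob.glob(str(meta_dir / "chunk-*" / "*.parquet"))):
        d = pq.read_table(f, columns=["dataset_from_index", "stats/index/min"]).to_pydict()
        for start, mn in zip(d["dataset_from_index"], d["stats/index/min"]):
            assert mn[0] == start, f"stats/index/min mismatch in {f}"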
def update_info_json(total_episodes: int, total_frames: int):
    with open(INFO_PATH) as f:
        info = json.load(f)
    info["total_episodes"] = total_episodes
    info["total_frames"] = total_frames
    info["splits"] = {"train": f"0:{total_episodes}"}
    with open(INFO_PATH, "w") as f:
        json.dump(info, f, indent=4)
    print(f"info.json updated: total_episodes={total_episodes} total_frames={total_frames}")
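For reference, only three fields are rewritten; with a hypothetical 42 episodes and 17000 frames the touched part of meta/info.json would carry these values:

# Illustrative values; every other key in info.json is preserved as-is.
updated_fields = {
    "total_episodes": 42,
    "total_frames": 17000,
    "splits": {"train": "0:42"},
}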
def main():
    os.chdir(ROOT)
    rows = read_all_episode_meta()
    print(f"Loaded {len(rows)} episode metadata rows")

    mapping, rows_sorted = build_mapping(rows)
    print(f"Built mapping for {len(mapping)} unique (chunk, file, old_ep) triples")

    # Sanity preview
    preview = [
        (r["chunk_index"], r["file_index"], r["episode_index"],
         mapping[(r["chunk_index"], r["file_index"], r["episode_index"])]["new_ep"])
        for r in rows_sorted[:5]
    ]
    print("First 5 (chunk, file, old_ep, new_ep):", preview)
    preview_end = [
        (r["chunk_index"], r["file_index"], r["episode_index"],
         mapping[(r["chunk_index"], r["file_index"], r["episode_index"])]["new_ep"])
        for r in rows_sorted[-5:]
    ]
    print("Last 5 (chunk, file, old_ep, new_ep):", preview_end)

    total_frames = fix_data_files(mapping)
    fix_meta_files(mapping)
    update_info_json(total_episodes=len(mapping), total_frames=total_frames)


if __name__ == "__main__":
    main()
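Finally, an end-to-end check a cautious user might run after the script completes (hypothetical; not in the original file): every episode id in the rewritten data should appear in 0..N-1 with no gaps.

# Hypothetical final check: remapped episode ids form a contiguous 0..N-1 range.
def check_episodes_contiguous():
    eps = set()
    for f in sorted(glob.glob(str(DATA_DIR / "chunk-*" / "*.parquet"))):
        col = pq.read_table(f, columns=["episode_index"]).to_pydict()["episode_index"]
        eps.update(col)
    assert eps == set(range(len(eps))), "episode_index is not contiguous from 0"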