arashakb committed
Commit fe2e77b · verified · Parent: 1eadd29

Upload get_llm_calib_data.py with huggingface_hub

Files changed (1)
  1. get_llm_calib_data.py +347 -0
get_llm_calib_data.py ADDED
@@ -0,0 +1,347 @@
+ """
+ get_llm_calib_data.py
+
+ Script to extract LLM input embeddings from an OpenVLA-OFT forward pass, for use as calibration data in quantization.
+ This script captures the multimodal embeddings (vision + text + proprio) that are fed to the LLM.
+
+ The script randomly samples episodes from the dataset (stratified so that each task is equally represented) and
+ captures ALL frames within each selected episode.
+
+ Run with:
+     python vla-scripts/get_llm_calib_data.py \
+         --vla_path <PATH/TO/CHECKPOINT> \
+         --dataset_name libero_spatial_no_noops \
+         --output_path ./calib_data/libero_spatial.bin \
+         --num_episodes 10
+ """
+
+ import json
+ import os
+ import random
+ import struct
+ from dataclasses import dataclass
+ from pathlib import Path
+ from typing import Dict, List
+
+ import draccus
+ import numpy as np
+ import torch
+ import tqdm
+ from transformers import AutoConfig, AutoImageProcessor, AutoModelForVision2Seq, AutoProcessor
+
+ from prismatic.extern.hf.configuration_prismatic import OpenVLAConfig
+ from prismatic.extern.hf.modeling_prismatic import OpenVLAForActionPrediction
+ from prismatic.extern.hf.processing_prismatic import PrismaticImageProcessor, PrismaticProcessor
+ from prismatic.models.backbones.llm.prompting import PurePromptBuilder
+ from prismatic.util.data_utils import PaddedCollatorForActionPrediction
+ from prismatic.vla.action_tokenizer import ActionTokenizer
+ from prismatic.vla.datasets import EpisodicRLDSDataset, RLDSBatchTransform
+
+ os.environ["TOKENIZERS_PARALLELISM"] = "false"
+
+ # Binary format constants
+ CALIB_MAGIC = b"OPENVLA_CALIB\0\0\0"  # 16 bytes
+ CALIB_VERSION = 2
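+
+ # On-disk layout, as written by save_embeddings_to_binary() below:
+ #   48-byte header: magic (16 B) | version (u32) | num_samples (u32) | hidden_dim (u32)
+ #                   | 4x reserved u32 | 1x padding u32
+ #   num_samples x u32: per-sample sequence lengths
+ #   num_samples x u64: byte offsets, relative to the start of the data section
+ #   data section: one float32 [seq_len, hidden_dim] matrix per sample, back to back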
+
+
+ @dataclass
+ class CalibrationConfig:
+     # fmt: off
+     vla_path: str = "openvla/openvla-7b"                     # HuggingFace Hub ID or local path
+     data_root_dir: Path = Path("modified_libero_rlds_data")  # Path to RLDS dataset directory
+     dataset_name: str = "libero_spatial_no_noops"            # Name of dataset (task suite)
+     output_path: Path = Path("calibration_data.bin")         # Output binary file path
+     num_episodes: int = -1                                   # Number of episodes to sample (-1 = all)
+     num_images_in_input: int = 2                             # Number of images (1 or 2)
+     use_proprio: bool = True                                 # Use proprioception
+     batch_size: int = 1                                      # Batch size for processing
+     seed: int = 42                                           # Random seed
+     targets_only: bool = False                               # Only extract action targets (no model needed)
+     # fmt: on
+
+
+ class EpisodeTaskTransform:
+     """Minimal transform to extract task language per episode."""
+
+     def __call__(self, rlds_batch: Dict) -> Dict[str, str]:
+         lang = rlds_batch["task"]["language_instruction"].decode().lower()
+         return {"language_instruction": lang}
+
+
+ def select_episode_indices_stratified(
+     cfg: CalibrationConfig,
+     image_sizes,
+ ) -> set:
+     """Select episode indices with equal per-task sampling."""
+     # Build a lightweight episodic dataset to map episode index -> task description.
+     # We keep the same dataset construction so episode indexing matches the main pass.
+     index_dataset = EpisodicRLDSDataset(
+         cfg.data_root_dir,
+         cfg.dataset_name,
+         EpisodeTaskTransform(),
+         resize_resolution=image_sizes,
+         shuffle_buffer_size=1,
+         image_aug=False,
+     )
+
+     task_to_indices: Dict[str, List[int]] = {}
+     num_total = len(index_dataset)
+     for ep_idx, episode_frames in enumerate(tqdm.tqdm(index_dataset, total=num_total, desc="Indexing episodes by task")):
+         if len(episode_frames) == 0:
+             continue
+         task = episode_frames[0]["language_instruction"]
+         task_to_indices.setdefault(task, []).append(ep_idx)
+
+     if cfg.num_episodes == -1 or cfg.num_episodes >= num_total:
+         selected = set(range(num_total))
+         print(f"[*] Collecting all episodes: {len(selected)}")
+         return selected
+
+     num_tasks = len(task_to_indices)
+     if num_tasks == 0:
+         raise ValueError("No tasks found while indexing episodes.")
+     if cfg.num_episodes % num_tasks != 0:
+         raise ValueError(
+             f"num_episodes={cfg.num_episodes} must be divisible by number of tasks={num_tasks} "
+             f"for balanced per-task sampling."
+         )
+
+     per_task = cfg.num_episodes // num_tasks
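+     # Example (hypothetical numbers): num_episodes=10 with 10 distinct task strings
+     # gives per_task = 1, i.e. exactly one randomly chosen episode per task.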
+     selected = set()
+     print(f"[*] Stratified sampling: {per_task} episode(s) per task across {num_tasks} tasks")
+
+     for task in sorted(task_to_indices.keys()):
+         indices = task_to_indices[task]
+         if per_task > len(indices):
+             raise ValueError(
+                 f"Requested {per_task} episodes for task '{task}', but only {len(indices)} available."
+             )
+         chosen = random.sample(indices, per_task)
+         selected.update(chosen)
+         print(f"  - {task}: selected {len(chosen)} / {len(indices)}")
+
+     print(f"[*] Selected {len(selected)} episodes total (balanced)")
+     return selected
+
+
+ def save_embeddings_to_binary(embeddings_list: List[np.ndarray], output_path: Path, config: dict) -> None:
+     """Save embeddings to binary format for llama.cpp imatrix calibration."""
+     num_samples = len(embeddings_list)
+     if num_samples == 0:
+         raise ValueError("No embeddings to save!")
+
+     hidden_dim = embeddings_list[0].shape[1]
+     print(f"\nSaving {num_samples} samples to {output_path}")
+     print(f"  Hidden dim: {hidden_dim}")
+
+     # Compute sequence lengths and offsets
+     sequence_lengths = [emb.shape[0] for emb in embeddings_list]
+     offsets = []
+     current_offset = 0
+     for emb in embeddings_list:
+         offsets.append(current_offset)
+         current_offset += emb.shape[0] * hidden_dim * 4  # float32
+
+     seq_lens_array = np.array(sequence_lengths)
+     print(f"  Seq lengths: min={seq_lens_array.min()}, max={seq_lens_array.max()}, mean={seq_lens_array.mean():.1f}")
+     print(f"  Total size: {current_offset / (1024**2):.2f} MB")
+
+     # Create output directory
+     output_path.parent.mkdir(parents=True, exist_ok=True)
+
+     # Write binary file
+     with open(output_path, 'wb') as f:
+         f.write(CALIB_MAGIC)
+         f.write(struct.pack('<I', CALIB_VERSION))
+         f.write(struct.pack('<I', num_samples))
+         f.write(struct.pack('<I', hidden_dim))
+         f.write(struct.pack('<IIII', 0, 0, 0, 0))  # reserved
+         f.write(struct.pack('<I', 0))  # padding to 48 bytes
+
+         for seq_len in sequence_lengths:
+             f.write(struct.pack('<I', seq_len))
+         for offset in offsets:
+             f.write(struct.pack('<Q', offset))
+         for emb in embeddings_list:
+             f.write(emb.astype(np.float32).tobytes())
+
+     # Write metadata JSON
+     metadata = {
+         "format": "openvla_oft_calibration",
+         "version": CALIB_VERSION,
+         "num_frames": num_samples,
+         "hidden_dim": hidden_dim,
+         "dataset": config['dataset_name'],
+         "model": config['vla_path'],
+         "num_episodes": config['num_episodes'],
+         "sequence_length_stats": {
+             "min": int(seq_lens_array.min()),
+             "max": int(seq_lens_array.max()),
+             "mean": float(seq_lens_array.mean()),
+         },
+     }
+     with open(output_path.with_suffix('.json'), 'w') as f:
+         json.dump(metadata, f, indent=2)
+
+     print(f"Saved to {output_path}")
+
+
+ @draccus.wrap()
+ def collect_calibration_data(cfg: CalibrationConfig) -> None:
+     print(f"Collecting calibration data from `{cfg.vla_path}` using `{cfg.dataset_name}`")
+     if cfg.targets_only:
+         print("[*] targets_only mode: skipping model loading, only extracting action labels")
+
+     random.seed(cfg.seed)
+
+     # Register model classes
+     AutoConfig.register("openvla", OpenVLAConfig)
+     AutoImageProcessor.register(OpenVLAConfig, PrismaticImageProcessor)
+     AutoProcessor.register(OpenVLAConfig, PrismaticProcessor)
+     AutoModelForVision2Seq.register(OpenVLAConfig, OpenVLAForActionPrediction)
+
+     # Load processor (always needed for tokenizer/dataset)
+     print("[*] Loading processor...")
+     processor = AutoProcessor.from_pretrained(cfg.vla_path, trust_remote_code=True)
+
+     # Load model only if we need embeddings
+     vla = None
+     image_sizes = None
+     if not cfg.targets_only:
+         device_id = 0
+         torch.cuda.set_device(device_id)
+         print("[*] Loading model...")
+         vla = AutoModelForVision2Seq.from_pretrained(
+             cfg.vla_path,
+             torch_dtype=torch.bfloat16,
+             low_cpu_mem_usage=True,
+             trust_remote_code=True,
+         ).to(device_id)
+         vla.vision_backbone.set_num_images_in_input(cfg.num_images_in_input)
+         vla.eval()
+
+         # Monkey-patch forward method to use our local version with calibration_mode support
+         from prismatic.extern.hf.modeling_prismatic import PrismaticForConditionalGeneration
+         vla.forward = PrismaticForConditionalGeneration.forward.__get__(vla, type(vla))
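+         # With the patched forward, calling the model with calibration_mode=True returns the
+         # fused multimodal embeddings under output["multimodal_embeddings"]; the collection
+         # loop below reads exactly that key.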
+
+         print(f"  Hidden dim: {vla.llm_dim}")
+         print(f"  Num patches: {vla.vision_backbone.get_num_patches()}")
+         image_sizes = tuple(vla.config.image_sizes)
+     else:
+         # Load config to get image_sizes without loading the full model
+         model_config = AutoConfig.from_pretrained(cfg.vla_path, trust_remote_code=True)
+         image_sizes = tuple(model_config.image_sizes)
+
+     # Create dataset
+     print(f"[*] Loading dataset: {cfg.dataset_name}")
+     action_tokenizer = ActionTokenizer(processor.tokenizer)
+     batch_transform = RLDSBatchTransform(
+         action_tokenizer,
+         processor.tokenizer,
+         image_transform=processor.image_processor.apply_transform,
+         prompt_builder_fn=PurePromptBuilder,
+         use_wrist_image=(cfg.num_images_in_input > 1),
+         use_proprio=cfg.use_proprio,
+     )
+
+     dataset = EpisodicRLDSDataset(
+         cfg.data_root_dir,
+         cfg.dataset_name,
+         batch_transform,
+         resize_resolution=image_sizes,
+         shuffle_buffer_size=1,
+         image_aug=False,
+     )
+     print(f"  Total episodes: {len(dataset)}")
+
+     collator = PaddedCollatorForActionPrediction(
+         processor.tokenizer.model_max_length,
+         processor.tokenizer.pad_token_id,
+         padding_side="right",
+     )
+
+     # Stratified episode sampling: equal number from each task description.
+     selected = select_episode_indices_stratified(cfg, image_sizes)
+     num_total = len(dataset)
+     print(f"[*] Collecting {len(selected)} episodes (all frames per episode)")
+
+     # Collect embeddings, token-ID labels, and continuous OFT actions
+     embeddings_list: List[np.ndarray] = []
+     labels_list: List[np.ndarray] = []
+     oft_actions_list: List[np.ndarray] = []  # continuous (8, 7) actions for AMF
+     episodes_done = 0
+     IGNORE_INDEX = -100
+
+     with torch.no_grad():
+         for ep_idx, episode_frames in enumerate(tqdm.tqdm(dataset, total=num_total)):
+             if ep_idx not in selected:
+                 continue
+
+             # Process ALL frames in this episode
+             for i in range(0, len(episode_frames), cfg.batch_size):
+                 batch_frames = episode_frames[i:i + cfg.batch_size]
+                 batch = collator(batch_frames)
+
+                 if not cfg.targets_only:
+                     # Forward pass with calibration_mode=True to get multimodal embeddings
+                     with torch.autocast("cuda", dtype=torch.bfloat16):
+                         output = vla(
+                             input_ids=batch["input_ids"].to(device_id),
+                             attention_mask=batch["attention_mask"].to(device_id),
+                             pixel_values=batch["pixel_values"].to(torch.bfloat16).to(device_id),
+                             labels=batch["labels"].to(device_id),
+                             calibration_mode=True,
+                         )
+
+                     # Extract multimodal embeddings
+                     mm_embeds = output["multimodal_embeddings"]  # [B, seq_len, hidden_dim]
+                     for j in range(mm_embeds.shape[0]):
+                         embeddings_list.append(mm_embeds[j].float().cpu().numpy())
+
+                 # Extract action token ID labels (for NLL Fisher)
+                 frame_labels = batch["labels"]  # [B, seq_len]
+                 for j in range(frame_labels.shape[0]):
+                     lbl = frame_labels[j].cpu().numpy()
+                     labels_list.append(lbl[lbl != IGNORE_INDEX])
+
+                 # Extract continuous OFT actions (for Action-Mahalanobis Fisher / AMF)
+                 # shape: (B, 8, 7), normalized actions in approximately [-1, 1]
+                 if "actions" in batch:
+                     actions = batch["actions"].float().numpy()  # (B, 8, 7)
+                     for j in range(actions.shape[0]):
+                         oft_actions_list.append(actions[j])  # (8, 7)
+
+             episodes_done += 1
+             if episodes_done >= len(selected):
+                 break
+
+     print(f"\n[*] Collected {len(labels_list)} frames from {episodes_done} episodes")
+
+     # Save embeddings (skip in targets_only mode)
+     if not cfg.targets_only:
+         if embeddings_list:
+             print(f"  Sample shape: {embeddings_list[0].shape}")
+         config_dict = {
+             "dataset_name": cfg.dataset_name,
+             "vla_path": cfg.vla_path,
+             "num_episodes": episodes_done,
+         }
+         save_embeddings_to_binary(embeddings_list, cfg.output_path, config_dict)
+
+     # Save action token ID targets (for NLL Fisher / imatrix)
+     # (note: np.stack requires every frame to yield the same number of non-ignored label tokens)
+     targets_array = np.stack(labels_list, axis=0)
+     targets_path = cfg.output_path.with_name(cfg.output_path.stem + "_targets.npy")
+     np.save(targets_path, targets_array)
+     print(f"Saved token-ID targets: shape={targets_array.shape} to {targets_path}")
+
+     # Save continuous OFT action targets (for AMF, i.e., Action-Mahalanobis Fisher)
+     if oft_actions_list:
+         oft_targets_array = np.stack(oft_actions_list, axis=0)  # (N, 8, 7)
+         oft_targets_path = cfg.output_path.with_name(cfg.output_path.stem + "_oft_targets.npy")
+         np.save(oft_targets_path, oft_targets_array)
+         print(f"Saved OFT action targets: shape={oft_targets_array.shape} to {oft_targets_path}")
+
+     print("\nDone!")
+
+
+ if __name__ == "__main__":
+     collect_calibration_data()
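
For reference, a minimal sketch of a consumer for the binary format above. This is not part of the upload: the function name load_calib_embeddings is illustrative, and it assumes exactly the header layout that save_embeddings_to_binary writes.

import struct

import numpy as np


def load_calib_embeddings(path):
    """Read a calibration .bin produced by save_embeddings_to_binary (sketch)."""
    with open(path, "rb") as f:
        header = f.read(48)
        assert header[:16] == b"OPENVLA_CALIB\0\0\0", "bad magic"
        version, num_samples, hidden_dim = struct.unpack_from("<III", header, 16)
        # The remaining 20 header bytes are the 4 reserved u32s plus 1 padding u32.
        seq_lens = struct.unpack(f"<{num_samples}I", f.read(4 * num_samples))
        _offsets = struct.unpack(f"<{num_samples}Q", f.read(8 * num_samples))
        # Samples are written back to back, so sequential reads suffice here; the
        # offset table exists to allow random access into the data section instead.
        embeddings = []
        for n in seq_lens:
            buf = f.read(n * hidden_dim * 4)
            embeddings.append(np.frombuffer(buf, dtype=np.float32).reshape(n, hidden_dim))
    return version, embeddings

Note that the embeddings are stored as float32 even though the model runs in bfloat16, matching the emb.astype(np.float32) write path.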