davanstrien (HF Staff) committed
Commit 7d98f3f · verified · 1 Parent(s): 1e52e71

Upload qwen3vl-detect-fewshot.py with huggingface_hub

Files changed (1): qwen3vl-detect-fewshot.py (+657, -0)
qwen3vl-detect-fewshot.py ADDED
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets>=4.0.0",
#     "huggingface-hub",
#     "pillow",
#     "toolz",
#     "torch",
#     "tqdm",
#     "transformers",
#     "vllm>=0.15.1",
# ]
# ///

"""
[FEW-SHOT VARIANT] Instruction-oriented object detection with Qwen3-VL via vLLM.

This is the v1.1+few-shot variant: it forks `qwen3vl-detect.py` to add support
for a single in-context example. Pass `--example-image PATH` and
`--example-answer-file PATH`, and each target image is then prefixed with the
example and its labelled output before the detection request. The script is
otherwise identical.

Takes an HF dataset with an image column, runs each image through Qwen3-VL with
a free-form detection prompt, parses bounding-box JSON from the response, and
pushes a labelled dataset back to the Hub. Designed as a VLM-as-labeller
primitive for bootstrapping object-detection datasets.

The prompt is free-form: "detect all components and identify their reference
designators with bbox_2d, label, sub_label" or "detect every car and its
colour". Qwen3-VL emits bbox JSON on a 0-1000 normalised scale; we extract,
denormalise to original-image pixel coords, and store as ints ready for
downstream labelling tools (Label Studio, FiftyOne, COCO conversion, etc.).

Sibling: `uv-scripts/sam3/detect-objects.py` does class-prompted detection.
This script is the instruction-prompted counterpart.

Output columns added:
- detections: list[{bbox: [x1,y1,x2,y2] in ORIGINAL-IMAGE PIXELS, label, sub_label}]
- raw_response: full model text (for QA, re-parsing, audit)
- inference_info: JSON with model, prompt, image_size, min/max pixels, timestamp
"""

import argparse
import io
import json
import logging
import os
import re
import sys
from datetime import datetime
from typing import Any, Optional, Union

import torch
from datasets import load_dataset
from huggingface_hub import login
from PIL import Image
from toolz import partition_all
from tqdm.auto import tqdm
from transformers import AutoProcessor
from vllm import LLM, SamplingParams

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)


DEFAULT_MODEL = "Qwen/Qwen3.6-35B-A3B"

# Qwen-VL bbox prompt template — used when no --prompt is supplied. The
# "bbox_2d / label / sub_label" schema matches the original demo (app.py) so
# the same prompts work here.
DEFAULT_PROMPT = (
    "Detect every distinct object in the image. For each object, output a JSON "
    "object with keys: bbox_2d (an array of four numbers [x1, y1, x2, y2]), "
    "label (the object category), and sub_label (a short descriptive attribute "
    'or "" if none applies). Return a JSON array of these objects. Example: '
    '[{"bbox_2d": [x1, y1, x2, y2], "label": "car", "sub_label": "red"}].'
)


def eval_pixel_string(s: str) -> int:
    """Evaluate a pixel expression like '1024*32*32' to an integer.

    Lifted from app.py:284-289. Lets users write `--min-image-tokens 1024`
    and have it become 1024*32*32 pixels (Qwen-VL uses 32x32 patches).
    """
    try:
        return int(eval(s.strip().replace(" ", ""), {"__builtins__": {}}))
    except Exception as e:
        raise ValueError(f"Invalid pixel expression: {s}") from e

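# Illustrative behaviour of eval_pixel_string above (not executed anywhere;
# values are hypothetical):
#   eval_pixel_string("1024")       -> 1024
#   eval_pixel_string("1024*32*32") -> 1048576
# main() multiplies the returned token count by 32*32 to get a pixel budget.
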
def to_pil(image: Union[Image.Image, dict, str]) -> Image.Image:
    """Convert HF dataset image entries (PIL / dict-with-bytes / path) to PIL."""
    if isinstance(image, Image.Image):
        return image
    if isinstance(image, dict) and "bytes" in image:
        return Image.open(io.BytesIO(image["bytes"]))
    if isinstance(image, str):
        return Image.open(image)
    raise ValueError(f"Unsupported image type: {type(image)}")


def parse_bboxes_from_response(text: str) -> list[dict[str, Any]]:
    """Extract bbox objects from a model response.

    Lifted from app.py:147-194. Tolerates fenced code blocks, comments,
    trailing commas, and malformed JSON. Falls back to regex bbox extraction
    if no parseable objects are found.
    """
    results: list[dict[str, Any]] = []

    pattern = r'\{[^{}]*"bbox_2d"\s*:\s*\[[\d\s.,\-]+\][^{}]*\}'
    for match in re.findall(pattern, text, re.DOTALL):
        try:
            obj = json.loads(match)
            if "bbox_2d" in obj:
                results.append(obj)
                continue
        except json.JSONDecodeError:
            pass
        try:
            cleaned = re.sub(r"#.*$", "", match, flags=re.MULTILINE).strip()
            cleaned = re.sub(r",\s*}", "}", cleaned)
            cleaned = re.sub(r",\s*\]", "]", cleaned)
            obj = json.loads(cleaned)
            if "bbox_2d" in obj:
                results.append(obj)
        except json.JSONDecodeError:
            continue

    if not results:
        bbox_pattern = (
            r'"bbox_2d"\s*:\s*\[\s*([\d.\-]+)\s*,\s*([\d.\-]+)\s*,\s*'
            r"([\d.\-]+)\s*,\s*([\d.\-]+)\s*\]"
        )
        for i, (x1, y1, x2, y2) in enumerate(re.findall(bbox_pattern, text)):
            results.append(
                {
                    "bbox_2d": [float(x1), float(y1), float(x2), float(y2)],
                    "label": f"object_{i + 1}",
                    "sub_label": "",
                }
            )

    return results

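# Illustrative parse (hypothetical response text): a fenced reply containing
#   {"bbox_2d": [120, 80, 540, 660], "label": "car", "sub_label": "red",}
# fails strict json.loads, survives the trailing-comma cleanup pass, and yields
# one dict; the bbox values are still on Qwen's 0-1000 normalised scale here.
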

def denormalize_bbox(bbox: list[float], width: int, height: int) -> list[int]:
    """Convert a Qwen-VL 0-1000 normalised bbox to original-image pixel coords.

    Empirically confirmed (smoke runs on cppe-5 and Beyond Words, May 2026): the
    instruction-tuned Qwen3-VL family emits `bbox_2d` values in the 0-1000
    normalised space regardless of input image size or smart-resize behaviour.
    Multiply by W/1000 and H/1000 to recover pixel coords on the original image,
    then round to ints and clip to image bounds for safety.
    """
    if len(bbox) != 4:
        return []
    sx, sy = width / 1000.0, height / 1000.0
    x1, y1, x2, y2 = bbox
    return [
        max(0, min(width, round(x1 * sx))),
        max(0, min(height, round(y1 * sy))),
        max(0, min(width, round(x2 * sx))),
        max(0, min(height, round(y2 * sy))),
    ]

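# Worked example (illustrative numbers): bbox_2d [250, 100, 750, 900] on a
# 640x480 image scales by 640/1000 and 480/1000 to [160, 48, 480, 432].
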

def normalise_detection(obj: dict[str, Any], width: int, height: int) -> dict[str, Any]:
    """Coerce a parsed bbox dict to the canonical detection schema.

    Output: {bbox: list[int] length 4 in ORIGINAL-IMAGE PIXELS, label, sub_label}
    """
    raw = obj.get("bbox_2d", [])
    raw = [float(x) for x in raw] if isinstance(raw, list) and len(raw) == 4 else []
    return {
        "bbox": denormalize_bbox(raw, width, height),
        "label": str(obj.get("label", "")),
        "sub_label": str(obj.get("sub_label") or ""),
    }


def main(
    input_dataset: str,
    output_dataset: str,
    prompt: str,
    image_column: str = "image",
    model: str = DEFAULT_MODEL,
    batch_size: int = 8,
    max_samples: Optional[int] = None,
    split: str = "train",
    # Defaults below assume Qwen3.6-35B-A3B on a100-large (80 GB) via the
    # vllm/vllm-openai image. For smaller GPUs use Qwen/Qwen3.5-9B and lower
    # budgets, e.g.:
    #   --max-model-len 12288 --max-image-tokens 4096 --gpu-memory-utilization 0.92
    max_model_len: int = 32768,
    max_tokens: int = 8192,
    min_image_tokens: int = 1024,
    max_image_tokens: int = 9800,
    gpu_memory_utilization: float = 0.90,
    tensor_parallel_size: Optional[int] = None,
    temperature: float = 0.0,
    repetition_penalty: float = 1.05,
    grayscale: bool = False,
    example_image: Optional[str] = None,
    example_answer_file: Optional[str] = None,
    hf_token: Optional[str] = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    create_pr: bool = False,
) -> None:
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("For cloud execution: hf jobs uv run --flavor l4x1 ...")
        sys.exit(1)
    logger.info("CUDA OK — GPU: %s", torch.cuda.get_device_name(0))

    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    start_time = datetime.now()

    min_pixels = eval_pixel_string(str(min_image_tokens)) * 32 * 32
    max_pixels = eval_pixel_string(str(max_image_tokens)) * 32 * 32
    logger.info(
        "Pixel budget: min=%d (%d tokens), max=%d (%d tokens)",
        min_pixels,
        min_image_tokens,
        max_pixels,
        max_image_tokens,
    )

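    # With the defaults this works out to min = 1024 * 32 * 32 = 1,048,576 px
    # (~1024x1024) and max = 9800 * 32 * 32 = 10,035,200 px (~3168x3168).
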
    logger.info("Loading dataset: %s (split=%s)", input_dataset, split)
    dataset = load_dataset(input_dataset, split=split)

    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    if shuffle:
        logger.info("Shuffling dataset (seed=%d)", seed)
        dataset = dataset.shuffle(seed=seed)

    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info("Limited to %d samples", len(dataset))

    if tensor_parallel_size is None:
        tensor_parallel_size = torch.cuda.device_count()
        logger.info(
            "Auto-detected %d GPU(s) for tensor parallelism", tensor_parallel_size
        )

    # Few-shot setup: load example image + its expected answer once.
    fewshot_pil: Optional[Image.Image] = None
    fewshot_answer: Optional[str] = None
    if example_image and example_answer_file:
        logger.info(
            "Few-shot mode: example=%s answer=%s", example_image, example_answer_file
        )
        fewshot_pil = Image.open(example_image).convert("RGB")
        if grayscale:
            fewshot_pil = fewshot_pil.convert("L").convert("RGB")
        with open(example_answer_file, "r", encoding="utf-8") as f:
            fewshot_answer = f.read().strip()
        logger.info(
            "Example image: %dx%d answer length: %d chars",
            fewshot_pil.width,
            fewshot_pil.height,
            len(fewshot_answer),
        )
    elif example_image or example_answer_file:
        raise ValueError(
            "--example-image and --example-answer-file must be provided together"
        )

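    # The answer file is simply the JSON the model should have produced for the
    # example image, e.g. (hypothetical content):
    #   [{"bbox_2d": [104, 212, 880, 760], "label": "photograph", "sub_label": ""}]
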
    logger.info("Loading vLLM model: %s", model)
    # Cap max_num_seqs to our batch_size (with a floor of 8): hybrid-Mamba
    # architectures (Qwen3.6's qwen3_5_moe uses Gated DeltaNet) allocate
    # per-sequence SSM cache blocks separately from the KV cache; vLLM's
    # default of 256 can exceed the available blocks and crash CUDA graph
    # capture. We never decode more than batch_size sequences concurrently.
    llm_kwargs: dict[str, Any] = {
        "model": model,
        "trust_remote_code": True,
        "gpu_memory_utilization": gpu_memory_utilization,
        "tensor_parallel_size": tensor_parallel_size,
        "limit_mm_per_prompt": {"image": 2 if fewshot_pil else 1},
        "mm_processor_kwargs": {
            "min_pixels": min_pixels,
            "max_pixels": max_pixels,
        },
        "max_num_seqs": max(batch_size, 8),
    }
    if max_model_len:
        llm_kwargs["max_model_len"] = max_model_len

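    # Context-budget sanity check (illustrative): in few-shot mode each request
    # carries two images, so at the default --max-image-tokens 9800 the images
    # alone can consume up to 19,600 of the 32,768-token context, leaving ~13k
    # tokens for the prompt text, the example answer, and generation.
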
    llm = LLM(**llm_kwargs)
    processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)

    # repetition_penalty > 1.0 prevents the catastrophic loop where the model
    # emits the same detection JSON object hundreds of times until it hits the
    # token cap (observed on Qwen3-VL-30B-A3B-Instruct on cppe-5 row 3/5).
    sampling_params = SamplingParams(
        temperature=temperature,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )
    logger.info(
        "Sampling: temperature=%.2f repetition_penalty=%.2f max_tokens=%d",
        temperature,
        repetition_penalty,
        max_tokens,
    )

    logger.info("Processing %d images in batches of %d", len(dataset), batch_size)
    logger.info("Prompt: %s", prompt[:200] + ("..." if len(prompt) > 200 else ""))

    all_detections: list[list[dict[str, Any]]] = []
    all_raw: list[str] = []
    all_sizes: list[list[int]] = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="Qwen3-VL detection",
    ):
        batch_indices = list(batch_indices)
        batch_inputs: list[dict[str, Any]] = []
        batch_sizes: list[list[int]] = []

        for idx in batch_indices:
            pil_img = to_pil(dataset[idx][image_column]).convert("RGB")
            if grayscale:
                # Convert to luminance and replicate to 3 channels; useful for
                # discoloured/sepia historical scans where the colour channel
                # is misleading noise rather than signal.
                pil_img = pil_img.convert("L").convert("RGB")
            batch_sizes.append([pil_img.width, pil_img.height])

            if fewshot_pil is not None:
                # Single user turn with the example inline: clearer than a fake
                # assistant turn (which could be mistaken for prior model
                # output to continue from).
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "text", "text": prompt},
                            {
                                "type": "text",
                                "text": "Here is an example image and the JSON output I want:",
                            },
                            {"type": "image"},
                            {
                                "type": "text",
                                "text": f"Expected output for the example image:\n{fewshot_answer}",
                            },
                            {
                                "type": "text",
                                "text": "Now produce the same JSON detections for this image:",
                            },
                            {"type": "image"},
                        ],
                    },
                ]
                mm_data = {"image": [fewshot_pil, pil_img]}
            else:
                messages = [
                    {
                        "role": "user",
                        "content": [
                            {"type": "image"},
                            {"type": "text", "text": prompt},
                        ],
                    }
                ]
                mm_data = {"image": pil_img}

            templated_prompt = processor.apply_chat_template(
                messages, add_generation_prompt=True, tokenize=False
            )
            batch_inputs.append(
                {
                    "prompt": templated_prompt,
                    "multi_modal_data": mm_data,
                }
            )

        try:
            outputs = llm.generate(batch_inputs, sampling_params, use_tqdm=False)
        except Exception as e:
            logger.error("Batch failed: %s", e)
            for size in batch_sizes:
                all_detections.append([])
                all_raw.append(f"[ERROR] {e}")
                all_sizes.append(size)
            continue

        for output, size in zip(outputs, batch_sizes):
            text = output.outputs[0].text if output.outputs else ""
            parsed = parse_bboxes_from_response(text)
            W, H = size
            all_detections.append([normalise_detection(o, W, H) for o in parsed])
            all_raw.append(text)
            all_sizes.append(size)

    processing_duration = datetime.now() - start_time
    logger.info(
        "Processing complete in %.1f min", processing_duration.total_seconds() / 60
    )

    # Per-row inference_info so the image_size travels with the row (bbox
    # coordinate-frame work in v2 needs this).
    inference_info_rows: list[str] = []
    for size in all_sizes:
        inference_info_rows.append(
            json.dumps(
                {
                    "model_id": model,
                    "prompt": prompt,
                    "image_size": size,
                    "min_pixels": min_pixels,
                    "max_pixels": max_pixels,
                    "script": "qwen3vl-detect-fewshot.py",
                    "timestamp": datetime.now().isoformat(timespec="seconds"),
                }
            )
        )

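    # Each stored row looks roughly like (illustrative values):
    #   {"model_id": "...", "prompt": "...", "image_size": [640, 480],
    #    "min_pixels": 1048576, "max_pixels": 10035200,
    #    "script": "qwen3vl-detect-fewshot.py", "timestamp": "..."}
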
    logger.info("Adding columns to dataset")
    dataset = dataset.add_column("detections", all_detections)
    dataset = dataset.add_column("raw_response", all_raw)
    dataset = dataset.add_column("inference_info", inference_info_rows)

    logger.info("Pushing to %s", output_dataset)
    dataset.push_to_hub(
        output_dataset,
        private=private,
        token=HF_TOKEN,
        create_pr=create_pr,
        commit_message=f"Qwen3-VL detection: {model} on {len(dataset)} samples",
    )

    n_with_dets = sum(1 for d in all_detections if d)
    total_dets = sum(len(d) for d in all_detections)
    logger.info("Done.")
    logger.info("  Rows with >=1 detection: %d / %d", n_with_dets, len(all_detections))
    logger.info("  Total detections: %d", total_dets)
    logger.info("  Output: https://huggingface.co/datasets/%s", output_dataset)


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        description="Instruction-oriented object detection with Qwen3-VL (vLLM batch).",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Generic detection
  uv run qwen3vl-detect-fewshot.py \\
    username/input-dataset username/output-dataset \\
    --prompt "Detect all visible objects, return JSON with bbox_2d, label, sub_label"

  # Component detection (matches the demo's example 1)
  uv run qwen3vl-detect-fewshot.py \\
    pcb-images username/labelled-pcbs \\
    --prompt-file prompts/components.txt \\
    --max-samples 20

  # HF Jobs (recommended config)
  hf jobs uv run --image vllm/vllm-openai:latest --flavor a100-large \\
    --python /usr/bin/python3 \\
    -e PYTHONPATH=/usr/local/lib/python3.12/dist-packages \\
    -s HF_TOKEN \\
    ./qwen3vl-detect-fewshot.py \\
    username/input-dataset username/output-dataset \\
    --prompt "detect all photographs" --max-samples 5

Notes:
  Default --model is Qwen/Qwen3.6-35B-A3B (needs A100/80GB via the
  vllm/vllm-openai:latest image). For smaller GPUs use Qwen/Qwen3.5-9B
  and lower --max-model-len / --max-image-tokens.
""",
    )
    parser.add_argument("input_dataset", help="Input dataset ID on the Hub")
    parser.add_argument("output_dataset", help="Output dataset ID on the Hub")

    prompt_group = parser.add_mutually_exclusive_group()
    prompt_group.add_argument(
        "--prompt",
        default=None,
        help="Detection instruction (free-form). Defaults to a generic JSON-bbox prompt.",
    )
    prompt_group.add_argument(
        "--prompt-file",
        default=None,
        help="Path to a text file containing the detection prompt.",
    )

    parser.add_argument(
        "--image-column", default="image", help="Image column name (default: image)"
    )
    parser.add_argument(
        "--model", default=DEFAULT_MODEL, help=f"VLM (default: {DEFAULT_MODEL})"
    )
    parser.add_argument(
        "--batch-size", type=int, default=8, help="Batch size (default: 8)"
    )
    parser.add_argument(
        "--max-samples", type=int, default=None, help="Cap rows processed (for testing)"
    )
    parser.add_argument(
        "--split", default="train", help="Dataset split (default: train)"
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=32768,
        help="vLLM max_model_len (default: 32768, tuned for A100/80GB). "
        "Lower to ~12288 on L4-class GPUs.",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=8192,
        help="Max generation tokens (default: 8192)",
    )
    parser.add_argument(
        "--min-image-tokens",
        type=int,
        default=1024,
        help="Min image tokens (32x32 patches); default 1024",
    )
    parser.add_argument(
        "--max-image-tokens",
        type=int,
        default=9800,
        help="Max image tokens (32x32 patches); default 9800 (A100/80GB). "
        "Lower to ~4096 on L4-class GPUs.",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.90,
        help="GPU memory utilisation (default: 0.90)",
    )
    parser.add_argument(
        "--tensor-parallel-size",
        type=int,
        default=None,
        help="Tensor-parallel GPUs (default: auto)",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.0,
        help="Sampling temperature (default: 0.0)",
    )
    parser.add_argument(
        "--repetition-penalty",
        type=float,
        default=1.05,
        help="vLLM repetition_penalty (default: 1.05). Prevents the duplicate-detection "
        "loop observed on Qwen3-VL-30B-A3B. Set to 1.0 to disable.",
    )
    parser.add_argument(
        "--grayscale",
        action="store_true",
        help="Convert each image to greyscale (L channel replicated to RGB) before "
        "inference. Useful for sepia/discoloured historical scans where the colour "
        "channel is noise rather than signal.",
    )
    parser.add_argument(
        "--example-image",
        default=None,
        help="Path to a single in-context example image (PNG/JPG). When set together "
        "with --example-answer-file, each target is preceded by this example + its "
        "labelled output to demonstrate the task before the real ask.",
    )
    parser.add_argument(
        "--example-answer-file",
        default=None,
        help="Path to a text file containing the expected JSON detection output for "
        "--example-image (used inline in the prompt to demonstrate the task).",
    )
    parser.add_argument(
        "--hf-token", default=None, help="HF token (or set HF_TOKEN env)"
    )
    parser.add_argument(
        "--private", action="store_true", help="Push output dataset as private"
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle before processing"
    )
    parser.add_argument(
        "--seed", type=int, default=42, help="Shuffle seed (default: 42)"
    )
    parser.add_argument(
        "--create-pr",
        action="store_true",
        help="Push as PR instead of direct commit (useful for parallel runs)",
    )
    return parser


if __name__ == "__main__":
    parser = build_parser()

    if len(sys.argv) == 1:
        parser.print_help()
        print("\n" + "=" * 60)
        print("Example HF Jobs command:")
        print("=" * 60)
        print(
            """
hf jobs uv run \\
    --image vllm/vllm-openai:latest \\
    --flavor a100-large \\
    --python /usr/bin/python3 \\
    -e PYTHONPATH=/usr/local/lib/python3.12/dist-packages \\
    -s HF_TOKEN \\
    ./qwen3vl-detect-fewshot.py \\
    INPUT_DATASET OUTPUT_DATASET \\
    --prompt "detect all visible objects" \\
    --max-samples 5
"""
        )
        sys.exit(0)

    args = parser.parse_args()

    if args.prompt_file:
        with open(args.prompt_file, "r", encoding="utf-8") as f:
            prompt = f.read().strip()
    elif args.prompt:
        prompt = args.prompt
    else:
        prompt = DEFAULT_PROMPT

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        prompt=prompt,
        image_column=args.image_column,
        model=args.model,
        batch_size=args.batch_size,
        max_samples=args.max_samples,
        split=args.split,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        min_image_tokens=args.min_image_tokens,
        max_image_tokens=args.max_image_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        tensor_parallel_size=args.tensor_parallel_size,
        temperature=args.temperature,
        repetition_penalty=args.repetition_penalty,
        grayscale=args.grayscale,
        example_image=args.example_image,
        example_answer_file=args.example_answer_file,
        hf_token=args.hf_token,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
        create_pr=args.create_pr,
    )