davanstrien committed (verified) · Commit 1e52e71 · 1 Parent(s): a44b30b

Upload qwen3vl-detect.py with huggingface_hub

Files changed (1):
  1. qwen3vl-detect.py +591 -0
qwen3vl-detect.py ADDED
@@ -0,0 +1,591 @@
# /// script
# requires-python = ">=3.11"
# dependencies = [
#     "datasets>=4.0.0",
#     "huggingface-hub",
#     "pillow",
#     "toolz",
#     "torch",
#     "tqdm",
#     "transformers",
#     "vllm>=0.15.1",
# ]
# ///

"""
Instruction-oriented object detection with Qwen3-VL via vLLM.

Takes an HF dataset with an image column, runs each image through Qwen3-VL with
a free-form detection prompt, parses bounding-box JSON from the response, and
pushes a labelled dataset back to the Hub. Designed as a VLM-as-labeller
primitive for bootstrapping object-detection datasets.

The prompt is free-form: "detect all components and identify their reference
designators with bbox_2d, label, sub_label" or "detect every car and its
colour". Qwen3-VL emits bbox JSON on a 0-1000 normalised scale; we extract,
denormalise to original-image pixel coords, and store as ints ready for
downstream labelling tools (Label Studio, FiftyOne, COCO conversion, etc.).

Sibling: `uv-scripts/sam3/detect-objects.py` does class-prompted detection.
This script is the instruction-prompted counterpart.

Output columns added:
- detections: list[{bbox: [x1,y1,x2,y2] in ORIGINAL-IMAGE PIXELS, label, sub_label}]
- raw_response: full model text (for QA, re-parsing, audit)
- inference_info: JSON with model, prompt, image_size, min/max pixels, timestamp
"""

import argparse
import io
import json
import logging
import os
import re
import sys
from datetime import datetime
from typing import Any, Optional, Union

import torch
from datasets import load_dataset
from huggingface_hub import login
from PIL import Image
from toolz import partition_all
from tqdm.auto import tqdm
from transformers import AutoProcessor
from vllm import LLM, SamplingParams

logging.basicConfig(
    level=logging.INFO, format="%(asctime)s [%(levelname)s] %(message)s"
)
logger = logging.getLogger(__name__)

DEFAULT_MODEL = "Qwen/Qwen3-VL-30B-A3B-Instruct"

# Qwen-VL bbox prompt template — used when no --prompt is supplied. The
# "bbox_2d / label / sub_label" schema matches the original demo (app.py) so
# the same prompts work here.
DEFAULT_PROMPT = (
    "Detect every distinct object in the image. For each object, output a JSON "
    "object with keys: bbox_2d (an array of four numbers [x1, y1, x2, y2]), "
    "label (the object category), and sub_label (a short descriptive attribute "
    'or "" if none applies). Return a JSON array of these objects. Example: '
    '[{"bbox_2d": [x1, y1, x2, y2], "label": "car", "sub_label": "red"}].'
)


def eval_pixel_string(s: str) -> int:
    """Evaluate a pixel expression like '1024*32*32' to an integer.

    Lifted from app.py:284-289. Lets users write `--min-image-tokens 1024`
    and have it become 1024*32*32 pixels (Qwen-VL uses 32x32 patches).
    """
    try:
        return int(eval(s.strip().replace(" ", ""), {"__builtins__": {}}))
    except Exception as e:
        raise ValueError(f"Invalid pixel expression: {s}") from e
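# For example, eval_pixel_string("1024") == 1024; main() then multiplies the
# result by 32*32, so 1024 image tokens correspond to 1,048,576 pixels.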


def to_pil(image: Union[Image.Image, dict, str]) -> Image.Image:
    """Convert HF dataset image entries (PIL / dict-with-bytes / path) to PIL."""
    if isinstance(image, Image.Image):
        return image
    if isinstance(image, dict) and "bytes" in image:
        return Image.open(io.BytesIO(image["bytes"]))
    if isinstance(image, str):
        return Image.open(image)
    raise ValueError(f"Unsupported image type: {type(image)}")


def parse_bboxes_from_response(text: str) -> list[dict[str, Any]]:
    """Extract bbox objects from a model response.

    Lifted from app.py:147-194. Tolerates fenced code blocks, comments,
    trailing commas, and malformed JSON. Falls back to regex bbox extraction
    if no parseable objects are found.
    """
    results: list[dict[str, Any]] = []

    pattern = r'\{[^{}]*"bbox_2d"\s*:\s*\[[\d\s.,\-]+\][^{}]*\}'
    for match in re.findall(pattern, text, re.DOTALL):
        try:
            obj = json.loads(match)
            if "bbox_2d" in obj:
                results.append(obj)
                continue
        except json.JSONDecodeError:
            pass
        try:
            cleaned = re.sub(r"#.*$", "", match, flags=re.MULTILINE).strip()
            cleaned = re.sub(r",\s*}", "}", cleaned)
            cleaned = re.sub(r",\s*\]", "]", cleaned)
            obj = json.loads(cleaned)
            if "bbox_2d" in obj:
                results.append(obj)
        except json.JSONDecodeError:
            continue

    if not results:
        bbox_pattern = (
            r'"bbox_2d"\s*:\s*\[\s*([\d.\-]+)\s*,\s*([\d.\-]+)\s*,\s*'
            r"([\d.\-]+)\s*,\s*([\d.\-]+)\s*\]"
        )
        for i, (x1, y1, x2, y2) in enumerate(re.findall(bbox_pattern, text)):
            results.append(
                {
                    "bbox_2d": [float(x1), float(y1), float(x2), float(y2)],
                    "label": f"object_{i + 1}",
                    "sub_label": "",
                }
            )

    return results
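# For instance (hypothetical model output), a fenced response with a trailing comma such as
#   '```json\n[{"bbox_2d": [10, 20, 30, 40], "label": "cat", "sub_label": "",}]\n```'
# parses to [{"bbox_2d": [10, 20, 30, 40], "label": "cat", "sub_label": ""}]; a response
# with no parseable objects falls back to bare "bbox_2d" regex matches labelled
# object_1, object_2, ...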


def denormalize_bbox(bbox: list[float], width: int, height: int) -> list[int]:
    """Convert a Qwen-VL 0-1000 normalised bbox to original-image pixel coords.

    Empirically confirmed (smoke runs on cppe-5 and Beyond Words, May 2026): the
    instruction-tuned Qwen3-VL family emits `bbox_2d` values in the 0-1000
    normalised space regardless of input image size or smart-resize behaviour.
    Multiply by W/1000 and H/1000 to recover pixel coords on the original image,
    then round to ints and clip to image bounds for safety.
    """
    if len(bbox) != 4:
        return []
    sx, sy = width / 1000.0, height / 1000.0
    x1, y1, x2, y2 = bbox
    return [
        max(0, min(width, round(x1 * sx))),
        max(0, min(height, round(y1 * sy))),
        max(0, min(width, round(x2 * sx))),
        max(0, min(height, round(y2 * sy))),
    ]
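# Worked example (hypothetical numbers): on a 2000x1500 image,
#   denormalize_bbox([100, 200, 500, 800], 2000, 1500)
# scales x by 2.0 and y by 1.5, giving [200, 300, 1000, 1200] pixels.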


def normalise_detection(obj: dict[str, Any], width: int, height: int) -> dict[str, Any]:
    """Coerce a parsed bbox dict to the canonical detection schema.

    Output: {bbox: list[int] length 4 in ORIGINAL-IMAGE PIXELS, label, sub_label}
    """
    raw = obj.get("bbox_2d", [])
    raw = [float(x) for x in raw] if isinstance(raw, list) and len(raw) == 4 else []
    return {
        "bbox": denormalize_bbox(raw, width, height),
        "label": str(obj.get("label", "")),
        "sub_label": str(obj.get("sub_label") or ""),
    }


def main(
    input_dataset: str,
    output_dataset: str,
    prompt: str,
    image_column: str = "image",
    model: str = DEFAULT_MODEL,
    batch_size: int = 8,
    max_samples: Optional[int] = None,
    split: str = "train",
    # Defaults below assume Qwen3-VL-30B-A3B-Instruct on a100-large (80 GB) via
    # the vllm/vllm-openai image. For smaller GPUs use a smaller Qwen3-VL
    # checkpoint (e.g. Qwen/Qwen3-VL-8B-Instruct) and lower budgets, e.g.:
    # --max-model-len 12288 --max-image-tokens 4096 --gpu-memory-utilization 0.92
    max_model_len: int = 32768,
    max_tokens: int = 8192,
    min_image_tokens: int = 1024,
    max_image_tokens: int = 9800,
    gpu_memory_utilization: float = 0.90,
    tensor_parallel_size: Optional[int] = None,
    temperature: float = 0.0,
    repetition_penalty: float = 1.05,
    grayscale: bool = False,
    hf_token: Optional[str] = None,
    private: bool = False,
    shuffle: bool = False,
    seed: int = 42,
    create_pr: bool = False,
) -> None:
    if not torch.cuda.is_available():
        logger.error("CUDA is not available. This script requires a GPU.")
        logger.error("For cloud execution: hf jobs uv run --flavor l4x1 ...")
        sys.exit(1)
    logger.info("CUDA OK — GPU: %s", torch.cuda.get_device_name(0))

    HF_TOKEN = hf_token or os.environ.get("HF_TOKEN")
    if HF_TOKEN:
        login(token=HF_TOKEN)

    start_time = datetime.now()

    min_pixels = eval_pixel_string(str(min_image_tokens)) * 32 * 32
    max_pixels = eval_pixel_string(str(max_image_tokens)) * 32 * 32
    logger.info(
        "Pixel budget: min=%d (%d tokens), max=%d (%d tokens)",
        min_pixels,
        min_image_tokens,
        max_pixels,
        max_image_tokens,
    )
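    # With the defaults, min 1024 tokens -> 1,048,576 px and max 9800 tokens ->
    # 10,035,200 px (roughly a 3168x3168 image); larger inputs are resized down
    # into this budget by the processor's min/max_pixels handling.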

    logger.info("Loading dataset: %s (split=%s)", input_dataset, split)
    dataset = load_dataset(input_dataset, split=split)

    if image_column not in dataset.column_names:
        raise ValueError(
            f"Column '{image_column}' not found. Available: {dataset.column_names}"
        )

    if shuffle:
        logger.info("Shuffling dataset (seed=%d)", seed)
        dataset = dataset.shuffle(seed=seed)

    if max_samples:
        dataset = dataset.select(range(min(max_samples, len(dataset))))
        logger.info("Limited to %d samples", len(dataset))

    if tensor_parallel_size is None:
        tensor_parallel_size = torch.cuda.device_count()
        logger.info(
            "Auto-detected %d GPU(s) for tensor parallelism", tensor_parallel_size
        )

    logger.info("Loading vLLM model: %s", model)
    # Cap max_num_seqs near our batch_size: hybrid-Mamba architectures that use
    # Gated DeltaNet layers allocate per-sequence SSM cache blocks separately
    # from the KV cache, and vLLM's default of 256 can exceed the available
    # blocks and crash CUDA graph capture. We never decode > batch_size anyway.
    llm_kwargs: dict[str, Any] = {
        "model": model,
        "trust_remote_code": True,
        "gpu_memory_utilization": gpu_memory_utilization,
        "tensor_parallel_size": tensor_parallel_size,
        "limit_mm_per_prompt": {"image": 1},
        "mm_processor_kwargs": {
            "min_pixels": min_pixels,
            "max_pixels": max_pixels,
        },
        "max_num_seqs": max(batch_size, 8),
    }
    if max_model_len:
        llm_kwargs["max_model_len"] = max_model_len

    llm = LLM(**llm_kwargs)
    processor = AutoProcessor.from_pretrained(model, trust_remote_code=True)

    # repetition_penalty > 1.0 prevents the catastrophic loop where the model
    # emits the same detection JSON object hundreds of times until it hits the
    # token cap (observed on Qwen3-VL-30B-A3B-Instruct on cppe-5 row 3/5).
    sampling_params = SamplingParams(
        temperature=temperature,
        max_tokens=max_tokens,
        repetition_penalty=repetition_penalty,
    )
    logger.info(
        "Sampling: temperature=%.2f repetition_penalty=%.2f max_tokens=%d",
        temperature,
        repetition_penalty,
        max_tokens,
    )

    logger.info("Processing %d images in batches of %d", len(dataset), batch_size)
    logger.info("Prompt: %s", prompt[:200] + ("..." if len(prompt) > 200 else ""))

    all_detections: list[list[dict[str, Any]]] = []
    all_raw: list[str] = []
    all_sizes: list[list[int]] = []

    for batch_indices in tqdm(
        partition_all(batch_size, range(len(dataset))),
        total=(len(dataset) + batch_size - 1) // batch_size,
        desc="Qwen3-VL detection",
    ):
        batch_indices = list(batch_indices)
        batch_inputs: list[dict[str, Any]] = []
        batch_sizes: list[list[int]] = []

        for idx in batch_indices:
            pil_img = to_pil(dataset[idx][image_column]).convert("RGB")
            if grayscale:
                # Convert to luminance and replicate to 3 channels; useful for
                # discoloured/sepia historical scans where the colour channel
                # is misleading noise rather than signal.
                pil_img = pil_img.convert("L").convert("RGB")
            batch_sizes.append([pil_img.width, pil_img.height])
            messages = [
                {
                    "role": "user",
                    "content": [
                        {"type": "image"},
                        {"type": "text", "text": prompt},
                    ],
                }
            ]
            templated_prompt = processor.apply_chat_template(
                messages, add_generation_prompt=True, tokenize=False
            )
            batch_inputs.append(
                {
                    "prompt": templated_prompt,
                    "multi_modal_data": {"image": pil_img},
                }
            )

        try:
            outputs = llm.generate(batch_inputs, sampling_params, use_tqdm=False)
        except Exception as e:
            logger.error("Batch failed: %s", e)
            for size in batch_sizes:
                all_detections.append([])
                all_raw.append(f"[ERROR] {e}")
                all_sizes.append(size)
            continue

        for output, size in zip(outputs, batch_sizes):
            text = output.outputs[0].text if output.outputs else ""
            parsed = parse_bboxes_from_response(text)
            W, H = size
            all_detections.append([normalise_detection(o, W, H) for o in parsed])
            all_raw.append(text)
            all_sizes.append(size)

    processing_duration = datetime.now() - start_time
    logger.info(
        "Processing complete in %.1f min", processing_duration.total_seconds() / 60
    )

    # Per-row inference_info so the image_size travels with the row (bbox
    # coordinate-frame work in v2 needs this).
    inference_info_rows: list[str] = []
    for size in all_sizes:
        inference_info_rows.append(
            json.dumps(
                {
                    "model_id": model,
                    "prompt": prompt,
                    "image_size": size,
                    "min_pixels": min_pixels,
                    "max_pixels": max_pixels,
                    "script": "qwen3vl-detect.py",
                    "timestamp": datetime.now().isoformat(timespec="seconds"),
                }
            )
        )

    logger.info("Adding columns to dataset")
    dataset = dataset.add_column("detections", all_detections)
    dataset = dataset.add_column("raw_response", all_raw)
    dataset = dataset.add_column("inference_info", inference_info_rows)

    logger.info("Pushing to %s", output_dataset)
    dataset.push_to_hub(
        output_dataset,
        private=private,
        token=HF_TOKEN,
        create_pr=create_pr,
        commit_message=(f"Qwen3-VL detection: {model} on {len(dataset)} samples"),
    )

    n_with_dets = sum(1 for d in all_detections if d)
    total_dets = sum(len(d) for d in all_detections)
    logger.info("Done.")
    logger.info(" Rows with >=1 detection: %d / %d", n_with_dets, len(all_detections))
    logger.info(" Total detections: %d", total_dets)
    logger.info(" Output: https://huggingface.co/datasets/%s", output_dataset)


def build_parser() -> argparse.ArgumentParser:
    parser = argparse.ArgumentParser(
        description="Instruction-oriented object detection with Qwen3-VL (vLLM batch).",
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog="""
Examples:
  # Generic detection
  uv run qwen3vl-detect.py \\
    input-dataset username/output-dataset \\
    --prompt "Detect all visible objects, return JSON with bbox_2d, label, sub_label"

  # Component detection (matches the demo's example 1)
  uv run qwen3vl-detect.py \\
    pcb-images username/labelled-pcbs \\
    --prompt-file prompts/components.txt \\
    --max-samples 20

  # HF Jobs (local script upload)
  hf jobs uv run --flavor l4x1 \\
    -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\
    ./qwen3vl-detect.py \\
    input-dataset username/output-dataset \\
    --prompt "detect all photographs" --max-samples 5

Notes:
  Default --model is Qwen/Qwen3-VL-30B-A3B-Instruct (needs A100/80GB via the
  vllm/vllm-openai:latest image). For smaller GPUs use a smaller Qwen3-VL
  checkpoint (e.g. Qwen/Qwen3-VL-8B-Instruct) and lower --max-model-len /
  --max-image-tokens.
""",
    )
    parser.add_argument("input_dataset", help="Input dataset ID on the Hub")
    parser.add_argument("output_dataset", help="Output dataset ID on the Hub")

    prompt_group = parser.add_mutually_exclusive_group()
    prompt_group.add_argument(
        "--prompt",
        default=None,
        help="Detection instruction (free-form). Defaults to a generic JSON-bbox prompt.",
    )
    prompt_group.add_argument(
        "--prompt-file",
        default=None,
        help="Path to a text file containing the detection prompt.",
    )

    parser.add_argument(
        "--image-column", default="image", help="Image column name (default: image)"
    )
    parser.add_argument(
        "--model", default=DEFAULT_MODEL, help=f"VLM (default: {DEFAULT_MODEL})"
    )
    parser.add_argument(
        "--batch-size", type=int, default=8, help="Batch size (default: 8)"
    )
    parser.add_argument(
        "--max-samples", type=int, default=None, help="Cap rows processed (for testing)"
    )
    parser.add_argument(
        "--split", default="train", help="Dataset split (default: train)"
    )
    parser.add_argument(
        "--max-model-len",
        type=int,
        default=32768,
        help="vLLM max_model_len (default: 32768, tuned for A100/80GB). "
        "Lower to ~12288 on L4-class GPUs.",
    )
    parser.add_argument(
        "--max-tokens",
        type=int,
        default=8192,
        help="Max generation tokens (default: 8192)",
    )
    parser.add_argument(
        "--min-image-tokens",
        type=int,
        default=1024,
        help="Min image tokens (32x32 patches); default 1024",
    )
    parser.add_argument(
        "--max-image-tokens",
        type=int,
        default=9800,
        help="Max image tokens (32x32 patches); default 9800 (A100/80GB). "
        "Lower to ~4096 on L4-class GPUs.",
    )
    parser.add_argument(
        "--gpu-memory-utilization",
        type=float,
        default=0.90,
        help="GPU memory utilisation (default: 0.90)",
    )
    parser.add_argument(
        "--tensor-parallel-size",
        type=int,
        default=None,
        help="Tensor-parallel GPUs (default: auto)",
    )
    parser.add_argument(
        "--temperature",
        type=float,
        default=0.0,
        help="Sampling temperature (default: 0.0)",
    )
    parser.add_argument(
        "--repetition-penalty",
        type=float,
        default=1.05,
        help="vLLM repetition_penalty (default: 1.05). Prevents the duplicate-detection loop "
        "observed on Qwen3-VL-30B-A3B. Set to 1.0 to disable.",
    )
    parser.add_argument(
        "--grayscale",
        action="store_true",
        help="Convert each image to greyscale (L channel replicated to RGB) before "
        "inference. Useful for sepia/discoloured historical scans where the colour "
        "channel is noise rather than signal.",
    )
    parser.add_argument(
        "--hf-token", default=None, help="HF token (or set HF_TOKEN env)"
    )
    parser.add_argument(
        "--private", action="store_true", help="Push output dataset as private"
    )
    parser.add_argument(
        "--shuffle", action="store_true", help="Shuffle before processing"
    )
    parser.add_argument(
        "--seed", type=int, default=42, help="Shuffle seed (default: 42)"
    )
    parser.add_argument(
        "--create-pr",
        action="store_true",
        help="Push as PR instead of direct commit (useful for parallel runs)",
    )
    return parser


if __name__ == "__main__":
    parser = build_parser()

    if len(sys.argv) == 1:
        parser.print_help()
        print("\n" + "=" * 60)
        print("Example HF Jobs command:")
        print("=" * 60)
        print(
            """
hf jobs uv run \\
    --image vllm/vllm-openai:latest \\
    --flavor a100-large \\
    --python /usr/bin/python3 \\
    -e PYTHONPATH=/usr/local/lib/python3.12/dist-packages \\
    -s HF_TOKEN \\
    ./qwen3vl-detect.py \\
    INPUT_DATASET OUTPUT_DATASET \\
    --prompt "detect all visible objects" \\
    --max-samples 5
"""
        )
        sys.exit(0)

    args = parser.parse_args()

    if args.prompt_file:
        with open(args.prompt_file, "r", encoding="utf-8") as f:
            prompt = f.read().strip()
    elif args.prompt:
        prompt = args.prompt
    else:
        prompt = DEFAULT_PROMPT

    main(
        input_dataset=args.input_dataset,
        output_dataset=args.output_dataset,
        prompt=prompt,
        image_column=args.image_column,
        model=args.model,
        batch_size=args.batch_size,
        max_samples=args.max_samples,
        split=args.split,
        max_model_len=args.max_model_len,
        max_tokens=args.max_tokens,
        min_image_tokens=args.min_image_tokens,
        max_image_tokens=args.max_image_tokens,
        gpu_memory_utilization=args.gpu_memory_utilization,
        tensor_parallel_size=args.tensor_parallel_size,
        temperature=args.temperature,
        repetition_penalty=args.repetition_penalty,
        grayscale=args.grayscale,
        hf_token=args.hf_token,
        private=args.private,
        shuffle=args.shuffle,
        seed=args.seed,
        create_pr=args.create_pr,
    )
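# Downstream sketch (illustrative; dataset name and values are hypothetical):
#   from datasets import load_dataset
#   ds = load_dataset("username/labelled-pcbs", split="train")
#   for det in ds[0]["detections"]:
#       x1, y1, x2, y2 = det["bbox"]  # original-image pixel coords
#       print(det["label"], det["sub_label"], (x1, y1, x2, y2))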