davanstrien HF Staff Claude Opus 4.6 committed on
Commit
796ef91
·
1 Parent(s): 9642a35

Pin rolm-ocr.py to stable vLLM (>=0.15.1) + datasets>=4.0.0

Browse files

RolmOCR (Qwen2.5-VL fine-tune) is in stable vLLM. Note: the 8B model
needs an A100 or larger GPU (OOMs on L4 with default max_model_len=16384).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (1) hide show
  1. rolm-ocr.py +65 -45
rolm-ocr.py CHANGED
@@ -1,10 +1,10 @@
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
- # "datasets",
5
  # "huggingface-hub",
6
  # "pillow",
7
- # "vllm",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch", # Added for CUDA check
@@ -104,7 +104,7 @@ def create_dataset_card(
104
  ) -> str:
105
  """Create a dataset card documenting the OCR process."""
106
  model_name = model.split("/")[-1]
107
-
108
  return f"""---
109
  viewer: false
110
  tags:
@@ -207,12 +207,13 @@ def main(
207
  output_column: str = None,
208
  shuffle: bool = False,
209
  seed: int = 42,
 
210
  ):
211
  """Process images from HF dataset through OCR model."""
212
 
213
  # Check CUDA availability first
214
  check_cuda_availability()
215
-
216
  # Track processing start time
217
  start_time = datetime.now()
218
 
@@ -227,13 +228,10 @@ def main(
227
  # Load dataset
228
  logger.info(f"Loading dataset: {input_dataset}")
229
  dataset = load_dataset(input_dataset, split=split)
230
-
231
- # Set output column name dynamically if not provided
232
  if output_column is None:
233
- # Extract model name from path (e.g., "reducto/RolmOCR" -> "rolmocr")
234
- model_name = model.split("/")[-1].lower().replace("-", "_")
235
- output_column = f"{model_name}_text"
236
- logger.info(f"Using dynamic output column name: {output_column}")
237
 
238
  # Validate image column
239
  if image_column not in dataset.column_names:
@@ -300,52 +298,53 @@ def main(
300
  # Add text column to dataset
301
  logger.info(f"Adding {output_column} column to dataset")
302
  dataset = dataset.add_column(output_column, all_text)
303
-
304
  # Handle inference_info tracking
305
  logger.info("Updating inference_info...")
306
-
307
- # Check for existing inference_info
308
- if "inference_info" in dataset.column_names:
309
- # Parse existing info from first row (all rows have same info)
310
- try:
311
- existing_info = json.loads(dataset[0]["inference_info"])
312
- if not isinstance(existing_info, list):
313
- existing_info = [existing_info] # Convert old format to list
314
- except (json.JSONDecodeError, TypeError):
315
- existing_info = []
316
- # Remove old column to update it
317
- dataset = dataset.remove_columns(["inference_info"])
318
- else:
319
- existing_info = []
320
-
321
- # Add new inference info
322
- new_info = {
323
- "column_name": output_column,
324
  "model_id": model,
325
- "processing_date": datetime.now().isoformat(),
 
 
326
  "batch_size": batch_size,
327
  "max_tokens": max_tokens,
328
  "gpu_memory_utilization": gpu_memory_utilization,
329
  "max_model_len": max_model_len,
330
  "script": "rolm-ocr.py",
331
- "script_version": "1.0.0",
332
- "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/rolm-ocr.py"
333
  }
334
- existing_info.append(new_info)
335
-
336
- # Add updated inference_info column
337
- info_json = json.dumps(existing_info, ensure_ascii=False)
338
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
339
 
340
  # Push to hub
341
  logger.info(f"Pushing to {output_dataset}")
342
  dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
343
-
344
  # Calculate processing time
345
  end_time = datetime.now()
346
  processing_duration = end_time - start_time
347
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
348
-
349
  # Create and push dataset card
350
  logger.info("Creating dataset card...")
351
  card_content = create_dataset_card(
@@ -361,7 +360,7 @@ def main(
361
  image_column=image_column,
362
  split=split,
363
  )
364
-
365
  card = DatasetCard(card_content)
366
  card.push_to_hub(output_dataset, token=HF_TOKEN)
367
  logger.info("✅ Dataset card created and pushed!")
@@ -371,6 +370,17 @@ def main(
371
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
372
  )
373
 
 
 
 
 
 
 
 
 
 
 
 
374
 
375
  if __name__ == "__main__":
376
  # Show example usage if no arguments
@@ -396,10 +406,14 @@ if __name__ == "__main__":
396
  print("\n3. Process a subset for testing:")
397
  print(" uv run rolm-ocr.py large-dataset test-output --max-samples 10")
398
  print("\n4. Random sample from ordered dataset:")
399
- print(" uv run rolm-ocr.py ordered-dataset random-test --max-samples 50 --shuffle")
 
 
400
  print("\n5. Running on HF Jobs:")
401
  print(" hf jobs uv run --flavor l4x1 \\")
402
- print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
 
 
403
  print(
404
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/rolm-ocr.py \\"
405
  )
@@ -426,7 +440,7 @@ Examples:
426
  # Random sample of 100 images
427
  uv run rolm-ocr.py ordered-dataset random-sample --max-samples 100 --shuffle
428
 
429
- # Custom output column name (default: rolmocr_text)
430
  uv run rolm-ocr.py images texts --output-column ocr_text
431
  """,
432
  )
@@ -482,7 +496,7 @@ Examples:
482
  parser.add_argument(
483
  "--output-column",
484
  default=None,
485
- help="Name of the output column for extracted text (default: auto-generated from model name)",
486
  )
487
  parser.add_argument(
488
  "--shuffle",
@@ -495,6 +509,11 @@ Examples:
495
  default=42,
496
  help="Random seed for shuffling (default: 42)",
497
  )
 
 
 
 
 
498
 
499
  args = parser.parse_args()
500
 
@@ -514,4 +533,5 @@ Examples:
514
  output_column=args.output_column,
515
  shuffle=args.shuffle,
516
  seed=args.seed,
517
- )
 
 
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
+ # "datasets>=4.0.0",
5
  # "huggingface-hub",
6
  # "pillow",
7
+ # "vllm>=0.15.1",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch", # Added for CUDA check
 
104
  ) -> str:
105
  """Create a dataset card documenting the OCR process."""
106
  model_name = model.split("/")[-1]
107
+
108
  return f"""---
109
  viewer: false
110
  tags:
 
207
  output_column: str = None,
208
  shuffle: bool = False,
209
  seed: int = 42,
210
+ verbose: bool = False,
211
  ):
212
  """Process images from HF dataset through OCR model."""
213
 
214
  # Check CUDA availability first
215
  check_cuda_availability()
216
+
217
  # Track processing start time
218
  start_time = datetime.now()
219
 
 
228
  # Load dataset
229
  logger.info(f"Loading dataset: {input_dataset}")
230
  dataset = load_dataset(input_dataset, split=split)
231
+
232
+ # Default output column is 'markdown' for consistency across scripts
233
  if output_column is None:
234
+ output_column = "markdown"
 
 
 
235
 
236
  # Validate image column
237
  if image_column not in dataset.column_names:
 
298
  # Add text column to dataset
299
  logger.info(f"Adding {output_column} column to dataset")
300
  dataset = dataset.add_column(output_column, all_text)
301
+
302
  # Handle inference_info tracking
303
  logger.info("Updating inference_info...")
304
+
305
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
306
  "model_id": model,
307
+ "model_name": "RolmOCR",
308
+ "column_name": output_column,
309
+ "timestamp": datetime.now().isoformat(),
310
  "batch_size": batch_size,
311
  "max_tokens": max_tokens,
312
  "gpu_memory_utilization": gpu_memory_utilization,
313
  "max_model_len": max_model_len,
314
  "script": "rolm-ocr.py",
315
+ "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/rolm-ocr.py",
 
316
  }
317
+
318
+ if "inference_info" in dataset.column_names:
319
+ logger.info("Updating existing inference_info column")
320
+
321
+ def update_inference_info(example):
322
+ try:
323
+ existing_info = (
324
+ json.loads(example["inference_info"])
325
+ if example["inference_info"]
326
+ else []
327
+ )
328
+ except (json.JSONDecodeError, TypeError):
329
+ existing_info = []
330
+ existing_info.append(inference_entry)
331
+ return {"inference_info": json.dumps(existing_info)}
332
+
333
+ dataset = dataset.map(update_inference_info)
334
+ else:
335
+ logger.info("Creating new inference_info column")
336
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
337
+ dataset = dataset.add_column("inference_info", inference_list)
338
 
339
  # Push to hub
340
  logger.info(f"Pushing to {output_dataset}")
341
  dataset.push_to_hub(output_dataset, private=private, token=HF_TOKEN)
342
+
343
  # Calculate processing time
344
  end_time = datetime.now()
345
  processing_duration = end_time - start_time
346
  processing_time = f"{processing_duration.total_seconds() / 60:.1f} minutes"
347
+
348
  # Create and push dataset card
349
  logger.info("Creating dataset card...")
350
  card_content = create_dataset_card(
 
360
  image_column=image_column,
361
  split=split,
362
  )
363
+
364
  card = DatasetCard(card_content)
365
  card.push_to_hub(output_dataset, token=HF_TOKEN)
366
  logger.info("✅ Dataset card created and pushed!")
 
370
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
371
  )
372
 
373
+ if verbose:
374
+ import importlib.metadata
375
+
376
+ logger.info("--- Resolved package versions ---")
377
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
378
+ try:
379
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
380
+ except importlib.metadata.PackageNotFoundError:
381
+ logger.info(f" {pkg}: not installed")
382
+ logger.info("--- End versions ---")
383
+
384
 
385
  if __name__ == "__main__":
386
  # Show example usage if no arguments
 
406
  print("\n3. Process a subset for testing:")
407
  print(" uv run rolm-ocr.py large-dataset test-output --max-samples 10")
408
  print("\n4. Random sample from ordered dataset:")
409
+ print(
410
+ " uv run rolm-ocr.py ordered-dataset random-test --max-samples 50 --shuffle"
411
+ )
412
  print("\n5. Running on HF Jobs:")
413
  print(" hf jobs uv run --flavor l4x1 \\")
414
+ print(
415
+ ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
416
+ )
417
  print(
418
  " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/rolm-ocr.py \\"
419
  )
 
440
  # Random sample of 100 images
441
  uv run rolm-ocr.py ordered-dataset random-sample --max-samples 100 --shuffle
442
 
443
+ # Custom output column name (default: markdown)
444
  uv run rolm-ocr.py images texts --output-column ocr_text
445
  """,
446
  )
 
496
  parser.add_argument(
497
  "--output-column",
498
  default=None,
499
+ help="Name of the output column for extracted text (default: markdown)",
500
  )
501
  parser.add_argument(
502
  "--shuffle",
 
509
  default=42,
510
  help="Random seed for shuffling (default: 42)",
511
  )
512
+ parser.add_argument(
513
+ "--verbose",
514
+ action="store_true",
515
+ help="Log resolved package versions after processing (useful for pinning deps)",
516
+ )
517
 
518
  args = parser.parse_args()
519
 
 
533
  output_column=args.output_column,
534
  shuffle=args.shuffle,
535
  seed=args.seed,
536
+ verbose=args.verbose,
537
+ )