davanstrien HF Staff Claude Opus 4.6 committed on
Commit
6cae11c
·
1 Parent(s): c29334d

Pin nanonets-ocr2.py + paddleocr-vl.py to stable vLLM (>=0.15.1)

Browse files

Both scripts use model architectures supported by stable vLLM. Also pins datasets>=4.0.0
and drops the nightly wheel index from paddleocr-vl.py.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>

Files changed (2) hide show
  1. nanonets-ocr2.py +54 -31
  2. paddleocr-vl.py +2 -8
nanonets-ocr2.py CHANGED
@@ -1,10 +1,10 @@
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
- # "datasets",
5
  # "huggingface-hub",
6
  # "pillow",
7
- # "vllm",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch",
@@ -211,7 +211,7 @@ def main(
211
  batch_size: int = 16,
212
  model: str = "nanonets/Nanonets-OCR2-3B",
213
  max_model_len: int = 8192,
214
- max_tokens: int = 4096,
215
  gpu_memory_utilization: float = 0.8,
216
  hf_token: str = None,
217
  split: str = "train",
@@ -219,6 +219,7 @@ def main(
219
  private: bool = False,
220
  shuffle: bool = False,
221
  seed: int = 42,
 
222
  ):
223
  """Process images from HF dataset through Nanonets-OCR2-3B model."""
224
 
@@ -309,38 +310,39 @@ def main(
309
  # Handle inference_info tracking
310
  logger.info("Updating inference_info...")
311
 
312
- # Check for existing inference_info
313
- if "inference_info" in dataset.column_names:
314
- # Parse existing info from first row (all rows have same info)
315
- try:
316
- existing_info = json.loads(dataset[0]["inference_info"])
317
- if not isinstance(existing_info, list):
318
- existing_info = [existing_info] # Convert old format to list
319
- except (json.JSONDecodeError, TypeError):
320
- existing_info = []
321
- # Remove old column to update it
322
- dataset = dataset.remove_columns(["inference_info"])
323
- else:
324
- existing_info = []
325
-
326
- # Add new inference info
327
- new_info = {
328
- "column_name": "markdown",
329
  "model_id": model,
330
- "processing_date": datetime.now().isoformat(),
 
 
331
  "batch_size": batch_size,
332
  "max_tokens": max_tokens,
333
  "gpu_memory_utilization": gpu_memory_utilization,
334
  "max_model_len": max_model_len,
335
  "script": "nanonets-ocr2.py",
336
- "script_version": "1.0.0",
337
- "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py"
338
  }
339
- existing_info.append(new_info)
340
 
341
- # Add updated inference_info column
342
- info_json = json.dumps(existing_info, ensure_ascii=False)
343
- dataset = dataset.add_column("inference_info", [info_json] * len(dataset))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
344
 
345
  # Push to hub
346
  logger.info(f"Pushing to {output_dataset}")
@@ -375,6 +377,17 @@ def main(
375
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
376
  )
377
 
 
 
 
 
 
 
 
 
 
 
 
378
 
379
  if __name__ == "__main__":
380
  # Show example usage if no arguments
@@ -407,8 +420,12 @@ if __name__ == "__main__":
407
  print(" --max-samples 50 --shuffle")
408
  print("\n5. Running on HF Jobs:")
409
  print(" hf jobs uv run --flavor l4x1 \\")
410
- print(" -e HF_TOKEN=$(python3 -c \"from huggingface_hub import get_token; print(get_token())\") \\")
411
- print(" https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\")
 
 
 
 
412
  print(" your-document-dataset \\")
413
  print(" your-markdown-output")
414
  print("\n" + "=" * 80)
@@ -461,8 +478,8 @@ Examples:
461
  parser.add_argument(
462
  "--max-tokens",
463
  type=int,
464
- default=4096,
465
- help="Maximum tokens to generate (default: 4096)",
466
  )
467
  parser.add_argument(
468
  "--gpu-memory-utilization",
@@ -493,6 +510,11 @@ Examples:
493
  default=42,
494
  help="Random seed for shuffling (default: 42)",
495
  )
 
 
 
 
 
496
 
497
  args = parser.parse_args()
498
 
@@ -511,4 +533,5 @@ Examples:
511
  private=args.private,
512
  shuffle=args.shuffle,
513
  seed=args.seed,
 
514
  )
 
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
+ # "datasets>=4.0.0",
5
  # "huggingface-hub",
6
  # "pillow",
7
+ # "vllm>=0.15.1",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch",
 
211
  batch_size: int = 16,
212
  model: str = "nanonets/Nanonets-OCR2-3B",
213
  max_model_len: int = 8192,
214
+ max_tokens: int = 15000,
215
  gpu_memory_utilization: float = 0.8,
216
  hf_token: str = None,
217
  split: str = "train",
 
219
  private: bool = False,
220
  shuffle: bool = False,
221
  seed: int = 42,
222
+ verbose: bool = False,
223
  ):
224
  """Process images from HF dataset through Nanonets-OCR2-3B model."""
225
 
 
310
  # Handle inference_info tracking
311
  logger.info("Updating inference_info...")
312
 
313
+ inference_entry = {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
314
  "model_id": model,
315
+ "model_name": "Nanonets-OCR2-3B",
316
+ "column_name": "markdown",
317
+ "timestamp": datetime.now().isoformat(),
318
  "batch_size": batch_size,
319
  "max_tokens": max_tokens,
320
  "gpu_memory_utilization": gpu_memory_utilization,
321
  "max_model_len": max_model_len,
322
  "script": "nanonets-ocr2.py",
323
+ "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py",
 
324
  }
 
325
 
326
+ if "inference_info" in dataset.column_names:
327
+ logger.info("Updating existing inference_info column")
328
+
329
+ def update_inference_info(example):
330
+ try:
331
+ existing_info = (
332
+ json.loads(example["inference_info"])
333
+ if example["inference_info"]
334
+ else []
335
+ )
336
+ except (json.JSONDecodeError, TypeError):
337
+ existing_info = []
338
+ existing_info.append(inference_entry)
339
+ return {"inference_info": json.dumps(existing_info)}
340
+
341
+ dataset = dataset.map(update_inference_info)
342
+ else:
343
+ logger.info("Creating new inference_info column")
344
+ inference_list = [json.dumps([inference_entry])] * len(dataset)
345
+ dataset = dataset.add_column("inference_info", inference_list)
346
 
347
  # Push to hub
348
  logger.info(f"Pushing to {output_dataset}")
 
377
  f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
378
  )
379
 
380
+ if verbose:
381
+ import importlib.metadata
382
+
383
+ logger.info("--- Resolved package versions ---")
384
+ for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
385
+ try:
386
+ logger.info(f" {pkg}=={importlib.metadata.version(pkg)}")
387
+ except importlib.metadata.PackageNotFoundError:
388
+ logger.info(f" {pkg}: not installed")
389
+ logger.info("--- End versions ---")
390
+
391
 
392
  if __name__ == "__main__":
393
  # Show example usage if no arguments
 
420
  print(" --max-samples 50 --shuffle")
421
  print("\n5. Running on HF Jobs:")
422
  print(" hf jobs uv run --flavor l4x1 \\")
423
+ print(
424
+ ' -e HF_TOKEN=$(python3 -c "from huggingface_hub import get_token; print(get_token())") \\'
425
+ )
426
+ print(
427
+ " https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr2.py \\"
428
+ )
429
  print(" your-document-dataset \\")
430
  print(" your-markdown-output")
431
  print("\n" + "=" * 80)
 
478
  parser.add_argument(
479
  "--max-tokens",
480
  type=int,
481
+ default=15000,
482
+ help="Maximum tokens to generate (default: 15000, per model card recommendation)",
483
  )
484
  parser.add_argument(
485
  "--gpu-memory-utilization",
 
510
  default=42,
511
  help="Random seed for shuffling (default: 42)",
512
  )
513
+ parser.add_argument(
514
+ "--verbose",
515
+ action="store_true",
516
+ help="Log resolved package versions after processing (useful for pinning deps)",
517
+ )
518
 
519
  args = parser.parse_args()
520
 
 
533
  private=args.private,
534
  shuffle=args.shuffle,
535
  seed=args.seed,
536
+ verbose=args.verbose,
537
  )
paddleocr-vl.py CHANGED
@@ -1,22 +1,16 @@
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
- # "datasets",
5
  # "huggingface-hub",
6
  # "pillow",
7
- # "vllm",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch",
11
  # "pyarrow",
12
  # "transformers",
13
  # ]
14
- #
15
- # [[tool.uv.index]]
16
- # url = "https://wheels.vllm.ai/nightly/cu129"
17
- #
18
- # [tool.uv]
19
- # prerelease = "allow"
20
  # ///
21
 
22
  """
 
1
  # /// script
2
  # requires-python = ">=3.11"
3
  # dependencies = [
4
+ # "datasets>=4.0.0",
5
  # "huggingface-hub",
6
  # "pillow",
7
+ # "vllm>=0.15.1",
8
  # "tqdm",
9
  # "toolz",
10
  # "torch",
11
  # "pyarrow",
12
  # "transformers",
13
  # ]
 
 
 
 
 
 
14
  # ///
15
 
16
  """