Commit c29334d
Parent(s): 796ef91
Pin nanonets-ocr.py to stable vLLM (>=0.15.1) + datasets>=4.0.0
Nanonets-OCR-s (Qwen2.5-VL fine-tune) is supported in stable vLLM.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
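The new constraints live in the script's inline dependency block (the "# /// script" header in the first hunk below), which uv resolves on every run; the diff also adds a --verbose flag that logs the versions uv actually resolved. As a separate, hypothetical sanity check (not part of nanonets-ocr.py, and assuming the packaging library is importable in the same environment), the pins can be verified like this:

# Hypothetical check, not part of nanonets-ocr.py.
from importlib.metadata import version
from packaging.version import Version  # assumes "packaging" is installed

assert Version(version("vllm")) >= Version("0.15.1"), "vllm older than the pinned minimum"
assert Version(version("datasets")) >= Version("4.0.0"), "datasets older than the pinned minimum"
print("vllm", version("vllm"), "| datasets", version("datasets"))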
nanonets-ocr.py CHANGED (+47 -28)
@@ -1,10 +1,10 @@
 # /// script
 # requires-python = ">=3.11"
 # dependencies = [
-#     "datasets",
+#     "datasets>=4.0.0",
 #     "huggingface-hub",
 #     "pillow",
-#     "vllm",
+#     "vllm>=0.15.1",
 #     "tqdm",
 #     "toolz",
 #     "torch", # Added for CUDA check
@@ -205,7 +205,7 @@ def main(
     batch_size: int = 32,
     model: str = "nanonets/Nanonets-OCR-s",
     max_model_len: int = 8192,
-    max_tokens: int =
+    max_tokens: int = 15000,
     gpu_memory_utilization: float = 0.8,
     hf_token: str = None,
     split: str = "train",
@@ -213,6 +213,7 @@ def main(
     private: bool = False,
     shuffle: bool = False,
     seed: int = 42,
+    verbose: bool = False,
 ):
     """Process images from HF dataset through OCR model."""
 
@@ -303,38 +304,39 @@ def main(
     # Handle inference_info tracking
     logger.info("Updating inference_info...")
 
-
-    if "inference_info" in dataset.column_names:
-        # Parse existing info from first row (all rows have same info)
-        try:
-            existing_info = json.loads(dataset[0]["inference_info"])
-            if not isinstance(existing_info, list):
-                existing_info = [existing_info]  # Convert old format to list
-        except (json.JSONDecodeError, TypeError):
-            existing_info = []
-        # Remove old column to update it
-        dataset = dataset.remove_columns(["inference_info"])
-    else:
-        existing_info = []
-
-    # Add new inference info
-    new_info = {
-        "column_name": "markdown",
+    inference_entry = {
         "model_id": model,
-        "
+        "model_name": "Nanonets-OCR-s",
+        "column_name": "markdown",
+        "timestamp": datetime.now().isoformat(),
         "batch_size": batch_size,
         "max_tokens": max_tokens,
         "gpu_memory_utilization": gpu_memory_utilization,
         "max_model_len": max_model_len,
         "script": "nanonets-ocr.py",
-        "script_version": "1.0.0",
         "script_url": "https://huggingface.co/datasets/uv-scripts/ocr/raw/main/nanonets-ocr.py",
     }
-    existing_info.append(new_info)
 
-
-
-
+    if "inference_info" in dataset.column_names:
+        logger.info("Updating existing inference_info column")
+
+        def update_inference_info(example):
+            try:
+                existing_info = (
+                    json.loads(example["inference_info"])
+                    if example["inference_info"]
+                    else []
+                )
+            except (json.JSONDecodeError, TypeError):
+                existing_info = []
+            existing_info.append(inference_entry)
+            return {"inference_info": json.dumps(existing_info)}
+
+        dataset = dataset.map(update_inference_info)
+    else:
+        logger.info("Creating new inference_info column")
+        inference_list = [json.dumps([inference_entry])] * len(dataset)
+        dataset = dataset.add_column("inference_info", inference_list)
 
     # Push to hub
     logger.info(f"Pushing to {output_dataset}")
@@ -369,6 +371,17 @@ def main(
         f"Dataset available at: https://huggingface.co/datasets/{output_dataset}"
     )
 
+    if verbose:
+        import importlib.metadata
+
+        logger.info("--- Resolved package versions ---")
+        for pkg in ["vllm", "transformers", "torch", "datasets", "pyarrow", "pillow"]:
+            try:
+                logger.info(f"  {pkg}=={importlib.metadata.version(pkg)}")
+            except importlib.metadata.PackageNotFoundError:
+                logger.info(f"  {pkg}: not installed")
+        logger.info("--- End versions ---")
+
 
 if __name__ == "__main__":
     # Show example usage if no arguments
@@ -455,8 +468,8 @@ Examples:
     parser.add_argument(
         "--max-tokens",
         type=int,
-        default=
-        help="Maximum tokens to generate (default:
+        default=15000,
+        help="Maximum tokens to generate (default: 15000, per model card recommendation)",
     )
     parser.add_argument(
         "--gpu-memory-utilization",
@@ -487,6 +500,11 @@ Examples:
         default=42,
         help="Random seed for shuffling (default: 42)",
     )
+    parser.add_argument(
+        "--verbose",
+        action="store_true",
+        help="Log resolved package versions after processing (useful for pinning deps)",
+    )
 
     args = parser.parse_args()
 
@@ -505,4 +523,5 @@ Examples:
         private=args.private,
         shuffle=args.shuffle,
         seed=args.seed,
+        verbose=args.verbose,
     )