#!/usr/bin/env python3
# /// script
# dependencies = [
#     "trl>=0.12.0",
#     "peft>=0.7.0",
#     "transformers>=4.41.0",  # eval_strategy (renamed from evaluation_strategy) needs >= 4.41
#     "accelerate>=0.24.0",
#     "trackio",
# ]
# ///

import os

from datasets import load_dataset
from peft import LoraConfig
from trl import SFTConfig, SFTTrainer
import trackio

print("šŸš€ Medium-Scale SFT Training with Trackio")
print("=" * 60)

# Initialize Trackio with Space sync
print("\nšŸ“Š Initializing Trackio...")
trackio.init(
    project="medium-sft-training",
    space_id="evalstate/trl-trackio-dashboard",
    config={
        "model": "Qwen/Qwen2.5-0.5B",
        "dataset": "trl-lib/Capybara",
        "dataset_size": 1000,
        "num_epochs": 3,
        "learning_rate": 2e-5,
        "batch_size": 4,
        "gradient_accumulation": 4,
        "lora_r": 16,
        "lora_alpha": 32,
        "hardware": "a10g-large",
    },
)
print("āœ… Trackio initialized!")
print("šŸ“ˆ Dashboard: https://huggingface.co/spaces/evalstate/trl-trackio-dashboard")

# Load dataset - 1000 examples
print("\nšŸ“Š Loading dataset...")
dataset = load_dataset("trl-lib/Capybara", split="train[:1000]")
print(f"āœ… Dataset loaded: {len(dataset)} examples")

# Hub username for pushed artifacts (override via the HF_USERNAME env var)
username = os.environ.get("HF_USERNAME", "evalstate")

# Training configuration - production settings
print("\nāš™ļø Configuring training...")
config = SFTConfig(
    # Output and Hub settings
    output_dir="qwen-capybara-medium",
    push_to_hub=True,
    hub_model_id=f"{username}/qwen-capybara-medium",
    hub_strategy="every_save",  # Push every checkpoint, not just the final model
    # Training parameters - 3 epochs on 1K examples
    num_train_epochs=3,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,  # Effective batch size = 4 * 4 = 16
    # Learning rate and schedule
    learning_rate=2e-5,
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",
    # Logging and checkpointing
    logging_steps=10,  # Log metrics every 10 steps
    save_strategy="steps",
    save_steps=50,  # Save a checkpoint every 50 steps
    save_total_limit=3,  # Keep only the 3 most recent checkpoints
    # Evaluation
    eval_strategy="steps",
    eval_steps=50,
    # Optimization
    bf16=True,  # bfloat16 is well supported on the A10G
    gradient_checkpointing=True,  # Trade compute for memory
    # Trackio monitoring
    report_to="trackio",
)

# LoRA configuration - larger than a quick demo run
print("šŸ”§ Setting up LoRA (r=16)...")
peft_config = LoraConfig(
    r=16,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj"],  # All attention projections
)

# Create eval split
print("\nšŸ”€ Creating train/eval split...")
dataset_split = dataset.train_test_split(test_size=0.1, seed=42)
train_dataset = dataset_split["train"]
eval_dataset = dataset_split["test"]
print(f"   Train: {len(train_dataset)} examples")
print(f"   Eval: {len(eval_dataset)} examples")

# Initialize trainer (passing the model as a string lets SFTTrainer load it)
print("\nšŸŽÆ Initializing trainer...")
trainer = SFTTrainer(
    model="Qwen/Qwen2.5-0.5B",
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    args=config,
    peft_config=peft_config,
)

# Rough step count: samples / (batch size * grad accumulation) * epochs
total_steps = (len(train_dataset) // (4 * 4)) * 3
print("\nšŸ“Š Training Info:")
print(f"   Total steps: ~{total_steps}")
print("   Epochs: 3")
print("   Effective batch size: 16")
print("   Expected time: ~45-60 minutes")
print("   Checkpoints saved every 50 steps")

# Train!
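# If the job is interrupted, training can be resumed from the latest checkpoint
# in output_dir via the standard transformers Trainer argument (an optional
# alternative to the plain trainer.train() call below):
#
#   trainer.train(resume_from_checkpoint=True)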
print("\nšŸƒ Starting training...") print("šŸ“ˆ Watch live metrics: https://huggingface.co/spaces/evalstate/trl-trackio-dashboard") print("-" * 60) trainer.train() # Save to Hub print("\nšŸ’¾ Pushing final model to Hub...") trainer.push_to_hub() # Finish Trackio print("\nšŸ“Š Finalizing Trackio metrics...") trackio.finish() print("\n" + "=" * 60) print("āœ… Training complete!") print(f"šŸ“¦ Model: https://huggingface.co/{username}/qwen-capybara-medium") print(f"šŸ“Š Metrics: https://huggingface.co/spaces/evalstate/trl-trackio-dashboard") print(f"šŸ’” Try the model with:") print(f' from transformers import pipeline') print(f' generator = pipeline("text-generation", model="{username}/qwen-capybara-medium")') print("=" * 60)