| | """ |
| | Agent Zero SFT v2: LiquidAI/LFM2.5-1.2B-Instruct |
| | LoRA fine-tuning on mixed agent-zero-sft-v2 dataset. |
| | |
| | Changes from v1: |
| | - Mixed dataset: 40% agent + 40% math (MetaMathQA) + 20% general (OpenHermes) |
| | - LoRA r=8 (was 16), alpha=16 (was 32) — reduced rank to prevent overfitting |
| | - 2 epochs (was 3) |
| | - lr=1e-4 (was 2e-4) — gentler updates |
| | """ |

import os

import trackio
from datasets import load_dataset
from huggingface_hub import login
from peft import LoraConfig
from trl import SFTTrainer, SFTConfig

# Authenticate with the Hub when HF_TOKEN is set (needed to push checkpoints
# to the private model repo).
token = os.getenv("HF_TOKEN")
if token:
    login(token=token)

print("Loading dataset...")
train_ds = load_dataset("wheattoast11/agent-zero-sft-v2", split="train")
val_ds = load_dataset("wheattoast11/agent-zero-sft-v2", split="validation")
print(f"Train: {len(train_ds)}, Val: {len(val_ds)}")

config = SFTConfig(
    output_dir="agent-zero-lfm-1.2b-v2",
    # Hub settings: push every saved checkpoint to a private model repo.
    push_to_hub=True,
    hub_model_id="wheattoast11/agent-zero-lfm-1.2b-v2",
    hub_strategy="every_save",
    hub_private_repo=True,

    # Core training settings; effective batch size = 4 x 4 = 16.
    num_train_epochs=2,
    per_device_train_batch_size=4,
    gradient_accumulation_steps=4,
    learning_rate=1e-4,
    bf16=True,

    # Logging and checkpointing.
    logging_steps=10,
    save_strategy="steps",
    save_steps=200,
    save_total_limit=2,

    # Evaluate on the validation split at the same cadence as saves.
    eval_strategy="steps",
    eval_steps=200,

    # LR schedule: 10% linear warmup, then cosine decay.
    warmup_ratio=0.1,
    lr_scheduler_type="cosine",

    # Experiment tracking.
    report_to="trackio",
    project="agent-zero-finetune",
    run_name="lfm-1.2b-sft-v2",
)

peft_config = LoraConfig(
    r=8,
    lora_alpha=16,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
)
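
# Adapters are attached to the attention projections only; the MLP stays
# frozen. A common variant for more capacity would also target the MLP
# projections (assuming the LFM checkpoint uses Llama-style module names --
# verify against the model's named_modules() before using):
# target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
#                 "gate_proj", "up_proj", "down_proj"]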

print("Initializing trainer...")
# SFTTrainer loads the base model from the Hub by name and wraps it with the
# LoRA adapter defined above.
trainer = SFTTrainer(
    model="LiquidAI/LFM2.5-1.2B-Instruct",
    train_dataset=train_ds,
    eval_dataset=val_ds,
    args=config,
    peft_config=peft_config,
)
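
# Optional sanity check: with peft_config supplied, trainer.model should be a
# PeftModel, so only the adapter weights are trainable.
# trainer.model.print_trainable_parameters()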

print("Starting training...")
trainer.train()
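
# If the run is interrupted, it can be resumed from the latest checkpoint in
# output_dir with:
# trainer.train(resume_from_checkpoint=True)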

print("Pushing to Hub...")
trainer.push_to_hub()
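
# Note: because training used a PEFT adapter, push_to_hub uploads the LoRA
# adapter weights rather than a merged full model. A merged export would look
# roughly like this (sketch only; the "-merged" repo name is hypothetical):
# merged = trainer.model.merge_and_unload()
# merged.push_to_hub("wheattoast11/agent-zero-lfm-1.2b-v2-merged")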

trackio.finish()
print("Done! Model at: https://huggingface.co/wheattoast11/agent-zero-lfm-1.2b-v2")