---
seed: 2048
cudnn_deterministic: false
train_data_jsons:
- data_reason.ALL.json
batch_scale: 4000
text_batch_scale: 3000
max_length: 1024
min_length: 10
n_worker: 4
local_rank: 0
minibatch_debug: -1
audio_semantic_card: 8206
audio_reason_card: 4110
local_model: meta-llama/Llama-3.2-300M
parallel_number: 9
reason_pad_token: 4097
semantic_pad_token: 8193
llm_pretrained_model: understand_v3/ep1.checkpoint
llm_name: meta-llama/Llama-3.2-3B
text_tokenizer_path: tools/tokenizer/Text2ID/llama3_2_tokenizer
semantic_eos: 8194
semantic_bos: 8195
reason_bos: 4098
reason_eos: 4099
text_pad_token: 0
exp_dir: generation_stage2
print_freq: 100
save_interval: 5000
training_stage: 2
resume: null
prompt_token_path: MusicLLM2/prompts/task_prompt.pt
audio_embeddings_path: extracted_params/audio_embeddings.pth
audio_understanding_expert_path: extracted_params/audio_expert.pth
n_epoch: 2
grad_accum: 1
learning_rate: 0.0002
grad_clip: 1.0
warmup_steps: 10000
schedule: cosine
weight_decay: 0.05
beta1: 0.9
beta2: 0.95
data_parallel: fsdp
mixed_precision: bf16
grad_precision: bf16
activation_checkpointing: true
no_wandb: true
audio_tokenizer: reasoningCodec
text_tokenizer: llama3-3B
rank: 0