{
  "lr_mp": 0.0001,
  "lr_vision_backbone": 0.0,
  "lr_language_backbone": 0.0001,
  "lr_right_tower": 0.0001,
  "lr_kv_bridge": 0.0,
  "lr_activation_bridge": 0.0,
  "batch_size": 32,
  "gradient_accumulation_steps": 4,
  "max_grad_norm": 1.0,
  "max_training_steps": 10000,
  "stop_after_step": 5100,
  "warmup_ratio": 0.03,
  "stats_log_interval": 100,
  "precision": "bf16",
  "compile": false,
  "do_eval": true,
  "eval_interval": 1000,
  "max_val_batches": 64,
  "max_images_per_example": 1,
  "max_sample_length": 1024,
  "train_dataset_path": "patrickamadeus/the_cauldron",
  "train_dataset_name": [
    "all"
  ],
  "train_split": "train",
  "val_split": "validation",
  "stream_dataset": false,
  "enable_source_filter": true,
  "allowed_dataset_sources": [
    "aokvqa",
    "chart2text",
    "chartqa",
    "docvqa",
    "figureqa",
    "iconqa",
    "infographic_vqa",
    "ocrvqa",
    "robut_sqa",
    "scienceqa",
    "textcaps",
    "textvqa",
    "vistext",
    "visual7w",
    "visualmrc",
    "vqav2",
    "vsr"
  ],
  "relevance_min_rating": 1,
  "image_correspondence_min_rating": 1,
  "visual_dependency_min_rating": 1,
  "formatting_min_rating": 1,
  "wandb_entity": "HuggingFace",
  "log_wandb": false,
  "push_checkpoints_to_hub": true,
  "save_training_state_to_hub": false,
  "checkpoint_repo_pattern": "patrickamadeus/dt-memory-full-replace-{i}",
  "hf_private": false,
  "push_final_model_to_hub": true,
  "resume_from_vlm_checkpoint": true,
  "resume_checkpoint_path": null
}