DatPySci committed
Commit 4a09e97 · verified · 1 Parent(s): bdb430c

Upload folder using huggingface_hub

EvoLM-1B-160BT-Warmup-LoRA-RL-step100/adapter_config.json ADDED
@@ -0,0 +1,49 @@
+ {
+     "task_type": "CAUSAL_LM",
+     "peft_type": "LORA",
+     "auto_mapping": null,
+     "peft_version": "0.18.1",
+     "base_model_name_or_path": "/dev/shm/verl-cache/f94a13b34a9873ada45a6fd30683b0f9/evolm-1B-160BT-MixedFW8FM42-Ep1",
+     "revision": null,
+     "inference_mode": false,
+     "r": 32,
+     "target_modules": [
+         "gate_proj",
+         "o_proj",
+         "up_proj",
+         "down_proj",
+         "v_proj",
+         "k_proj",
+         "q_proj"
+     ],
+     "exclude_modules": null,
+     "lora_alpha": 64,
+     "lora_dropout": 0.0,
+     "fan_in_fan_out": false,
+     "bias": "none",
+     "use_rslora": false,
+     "modules_to_save": null,
+     "init_lora_weights": true,
+     "layers_to_transform": null,
+     "layers_pattern": null,
+     "rank_pattern": {},
+     "alpha_pattern": {},
+     "megatron_config": null,
+     "megatron_core": "megatron.core",
+     "trainable_token_indices": null,
+     "loftq_config": {},
+     "eva_config": null,
+     "corda_config": null,
+     "use_dora": false,
+     "alora_invocation_tokens": null,
+     "use_qalora": false,
+     "qalora_group_size": 16,
+     "layer_replication": null,
+     "runtime_config": {
+         "ephemeral_gpu_offload": false
+     },
+     "lora_bias": false,
+     "target_parameters": null,
+     "arrow_config": null,
+     "ensure_weight_tying": false
+ }
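
This config describes a standard PEFT LoRA adapter (rank r=32, lora_alpha=64, applied to all attention and MLP projections of a causal LM). A minimal sketch of attaching it with the `peft` library follows; note that the `base_model_name_or_path` above points at a training-machine cache under /dev/shm, so the base checkpoint path below is an assumption and must be replaced with a real local or Hub path.

```python
# Minimal sketch: loading this LoRA adapter with peft.
# Assumptions: the adapter folder was downloaded locally, and
# "path/to/evolm-1B-160BT-base" is a placeholder for the actual
# base checkpoint (the /dev/shm path in the config will not exist here).
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("path/to/evolm-1B-160BT-base")  # hypothetical path
tokenizer = AutoTokenizer.from_pretrained("path/to/evolm-1B-160BT-base")    # hypothetical path

# Attach the rank-32 adapter weights on top of the frozen base model.
model = PeftModel.from_pretrained(base, "EvoLM-1B-160BT-Warmup-LoRA-RL-step100")

# Optionally fold the low-rank update W + (alpha/r) * B @ A back into the
# base weights for adapter-free inference.
model = model.merge_and_unload()
```

With `lora_alpha=64` and `r=32`, the effective scaling on each low-rank update is alpha/r = 2.
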
EvoLM-1B-160BT-Warmup-LoRA-RL-step100/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2fe74096312d514808bc45682fba586bdef05dc58a79ad3335d5a553498338b9
+ size 94748560
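
The safetensors weights are stored via Git LFS, so the file checked into the repository is only a pointer: `oid` is the SHA-256 of the actual payload and `size` is its length in bytes. A minimal sketch for verifying a downloaded copy against this pointer (the local path is an assumption):

```python
# Minimal sketch: verify a downloaded adapter_model.safetensors against
# the git-lfs pointer above (oid = sha256 of the payload, size in bytes).
import hashlib
import os

# Assumption: the adapter was downloaded to this relative path.
path = "EvoLM-1B-160BT-Warmup-LoRA-RL-step100/adapter_model.safetensors"
expected_sha256 = "2fe74096312d514808bc45682fba586bdef05dc58a79ad3335d5a553498338b9"
expected_size = 94748560

assert os.path.getsize(path) == expected_size, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
assert h.hexdigest() == expected_sha256, "checksum mismatch"
```
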