BikoRiko committed (verified)
Commit: dfa9ac4 · Parent: d911abc

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,29 @@
+
+ ---
+ language: en
+ license: mit
+ base_model: BikoRiko/GPT-2.4-High-Pro
+ tags:
+ - gpt2
+ - math
+ - fine-tuned
+ ---
+
+ # GPT-2.5-Math
+
+ GPT-2.5-Math is an upgraded version of **BikoRiko/GPT-2.4-High-Pro**, featuring an expanded architecture and specialized fine-tuning for mathematical reasoning.
+
+ ## Model Details
+ - **Architecture:** GPT-2 with 6 additional layers (~0.2B total parameters).
+ - **Training Hardware:** NVIDIA H100 (via Modal.com).
+ - **Dataset:** 5% subset of `microsoft/orca-math-word-problems-200k`.
+ - **Objective:** Fine-tuned to solve math word problems and logical queries.
+
+ ## Performance
+ The model is trained for mathematical reasoning. While it is only a ~0.2B-parameter model, it shows early signs of logical grounding on basic word problems.
+
+ ## Training Details
+ - **Optimizer:** AdamW
+ - **Precision:** Mixed precision (torch.amp)
+ - **Epochs:** 3
+ - **Learning Rate:** 5e-5
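
A minimal usage sketch for the card above, assuming the repo id `BikoRiko/GPT-2.5-Math` (inferred from the model name; the diff itself does not state it):

```python
# Minimal inference sketch. The repo id below is an assumption inferred
# from the card title; adjust it if the actual repo path differs.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "BikoRiko/GPT-2.5-Math"  # assumed repo id
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

prompt = "Q: A book costs 12 dollars. How much do 3 books cost?\nA:"
inputs = tokenizer(prompt, return_tensors="pt")
# Explicit length cap; other defaults come from the uploaded configs below.
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```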
config.json ADDED
@@ -0,0 +1,41 @@
+ {
+   "activation_function": "gelu_new",
+   "add_cross_attention": false,
+   "architectures": [
+     "GPT2LMHeadModel"
+   ],
+   "attn_pdrop": 0.1,
+   "bos_token_id": 50256,
+   "dtype": "float32",
+   "embd_pdrop": 0.1,
+   "eos_token_id": 50256,
+   "initializer_range": 0.02,
+   "layer_norm_epsilon": 1e-05,
+   "model_type": "gpt2",
+   "n_ctx": 2048,
+   "n_embd": 768,
+   "n_head": 12,
+   "n_inner": null,
+   "n_layer": 18,
+   "n_positions": 2048,
+   "pad_token_id": null,
+   "reorder_and_upcast_attn": false,
+   "resid_pdrop": 0.1,
+   "scale_attn_by_inverse_layer_idx": false,
+   "scale_attn_weights": true,
+   "summary_activation": null,
+   "summary_first_dropout": 0.1,
+   "summary_proj_to_labels": true,
+   "summary_type": "cls_index",
+   "summary_use_proj": true,
+   "task_specific_params": {
+     "text-generation": {
+       "do_sample": true,
+       "max_length": 50
+     }
+   },
+   "tie_word_embeddings": true,
+   "transformers_version": "5.8.1",
+   "use_cache": false,
+   "vocab_size": 50257
+ }
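
As a sanity check on the "~0.2B parameters" claim in the README, the count can be estimated from the fields above alone (a rough sketch; per-layer biases and layer norms are ignored, and input/output embeddings are tied):

```python
# Rough GPT-2 parameter estimate from the config above (tied embeddings).
n_layer, n_embd, n_positions, vocab = 18, 768, 2048, 50257

embed = vocab * n_embd + n_positions * n_embd     # token + position embeddings
per_layer = 12 * n_embd ** 2                      # attention 4*d^2 + MLP 8*d^2
total = embed + n_layer * per_layer + 2 * n_embd  # + final layer norm
print(f"~{total / 1e6:.0f}M parameters")          # -> ~168M
```

This agrees with the float32 checkpoint size recorded in `model.safetensors` below (671,036,264 bytes / 4 bytes per weight ≈ 167.8M parameters).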
generation_config.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "_from_model_config": true,
+   "bos_token_id": 50256,
+   "eos_token_id": 50256,
+   "output_attentions": false,
+   "output_hidden_states": false,
+   "transformers_version": "5.8.1",
+   "use_cache": false
+ }
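
These defaults are applied automatically by `model.generate()`; a small sketch for inspecting them (same assumed repo id as above):

```python
# Inspect the generation defaults shipped with the model
# (repo id BikoRiko/GPT-2.5-Math is assumed, as in the earlier sketch).
from transformers import GenerationConfig

gen_config = GenerationConfig.from_pretrained("BikoRiko/GPT-2.5-Math")
print(gen_config.eos_token_id, gen_config.use_cache)  # 50256 False
```

Note that `"use_cache": false` disables the KV cache, which slows autoregressive generation; it can be overridden per call, e.g. `model.generate(..., use_cache=True)`.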
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81eb10c958e28a8515f1c01579a363e7edc93e6fab22004e787fd663a64f7c19
+ size 671036264
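
This is a Git LFS pointer, not the weights themselves. A sketch for verifying a downloaded checkpoint against the pointer's hash and size:

```python
# Verify a downloaded model.safetensors against the LFS pointer above.
import hashlib

EXPECTED_OID = "81eb10c958e28a8515f1c01579a363e7edc93e6fab22004e787fd663a64f7c19"
EXPECTED_SIZE = 671036264

h = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # stream in 1 MiB chunks
        h.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE, "size mismatch"
assert h.hexdigest() == EXPECTED_OID, "sha256 mismatch"
print("checkpoint verified")
```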
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "add_prefix_space": false,
+   "backend": "tokenizers",
+   "bos_token": "<|endoftext|>",
+   "eos_token": "<|endoftext|>",
+   "errors": "replace",
+   "is_local": false,
+   "local_files_only": false,
+   "max_length": 128,
+   "model_max_length": 1024,
+   "pad_to_multiple_of": null,
+   "pad_token": "<|endoftext|>",
+   "pad_token_type_id": 0,
+   "padding_side": "right",
+   "stride": 0,
+   "tokenizer_class": "GPT2Tokenizer",
+   "truncation_side": "right",
+   "truncation_strategy": "longest_first",
+   "unk_token": "<|endoftext|>"
+ }
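
One detail worth noting: `model_max_length` is 1024 here while `n_positions` in `config.json` is 2048, so the tokenizer truncates at 1024 tokens by default. A short sketch of the padding/truncation behavior these settings imply (same assumed repo id as above):

```python
# Padding/truncation behavior implied by the tokenizer config above
# (repo id BikoRiko/GPT-2.5-Math is assumed, as in the earlier sketches).
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("BikoRiko/GPT-2.5-Math")

batch = tok(
    ["2 + 2 = ?", "A train travels 60 km in 1.5 hours. What is its speed?"],
    padding=True,     # right-padded with <|endoftext|> (pad_token == eos_token)
    truncation=True,  # right-truncated at model_max_length (1024)
    return_tensors="pt",
)
print(batch.input_ids.shape, tok.model_max_length)
```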