CompactAI committed on
Commit afcef29 · verified · 1 Parent(s): b05663a

Upload folder using huggingface_hub

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md ADDED
@@ -0,0 +1,80 @@
+ ---
+ license: apache-2.0
+ tags:
+ - pruned
+ - python
+ - optimized
+ - wanda
+ - activation-pruning
+ base_model: Qwen/Qwen2.5-0.5B
+ pipeline_tag: text-generation
+ ---
+
+ # Qwen2.5-0.5B-python-safe
+
+ > 🎯 **Python-optimized** | 📦 **Safe** pruning | ⚡ **1% of weights pruned**
+
+ This model is a **conservatively pruned** version of [Qwen/Qwen2.5-0.5B](https://huggingface.co/Qwen/Qwen2.5-0.5B), specialized for **Python** tasks using activation-aware weight pruning (Wanda-style).
+
+ ## ✨ Key Features
+
+ - **Specialization**: Optimized for Python tasks
+ - **Pruning Method**: Wanda-style (|W| × |activation|) importance scoring (see the sketch below)
+ - **Size Reduction**: 1% of weights pruned
+ - **Use Case**: High accuracy retention, ideal for production use
+
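The importance score named above is simple to state in code. Below is a minimal, hypothetical sketch of Wanda-style scoring and masking for a single linear layer; PyTorch is assumed, the function name and the per-row pruning granularity are illustrative, and this is not the exact ZANNPS procedure:

```python
import torch

def wanda_prune_linear(weight: torch.Tensor, calib_acts: torch.Tensor, sparsity: float = 0.01) -> torch.Tensor:
    """Zero out the `sparsity` fraction of weights with the lowest Wanda importance.

    weight:     (out_features, in_features) weight matrix of one linear layer
    calib_acts: (n_tokens, in_features) inputs to that layer, collected on calibration data
    """
    # Per-input-channel L2 norm of the calibration activations: shape (in_features,)
    act_norm = calib_acts.float().norm(p=2, dim=0)
    # Wanda importance: |W_ij| scaled by the activation norm of input channel j.
    importance = weight.abs().float() * act_norm.unsqueeze(0)
    # Number of weights to drop in each output row.
    n_prune = int(weight.shape[1] * sparsity)
    if n_prune == 0:
        return weight
    # Indices of the n_prune least-important weights per row.
    prune_idx = torch.topk(importance, n_prune, dim=1, largest=False).indices
    mask = torch.ones_like(weight)
    mask.scatter_(1, prune_idx, 0.0)
    return weight * mask
```

With the 1% "safe" setting described above, `sparsity` would be roughly 0.01.
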
+ ## 📊 Performance Comparison
+
+ | Category | Original | Pruned | Change |
+ |----------|----------|--------|--------|
+ | **Python** | 0.0% | 0.0% ⭐ | → |
+ | HTML | 0.0% | 0.0% | → |
+ | Trivia | 60.0% | 60.0% | → |
+ | Math | 53.3% | 53.3% | → |
+ | Reasoning | 13.3% | 6.7% | ↓ 6.7% |
+ | Medical | 33.3% | 46.7% | ↑ 13.3% |
+ | Linux | 0.0% | 0.0% | → |
+ | Writing | 20.0% | 26.7% | ↑ 6.7% |
+
+ **Average**: 22.5% → 24.2% (+1.7%)
+
+ ![Comparison Graph](comparison_graph.png)
+
+ ## 🚀 Quick Start
+
+ ```python
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+
+ model = AutoModelForCausalLM.from_pretrained("CompactAI/Qwen2.5-0.5B-python-safe")
+ tokenizer = AutoTokenizer.from_pretrained("CompactAI/Qwen2.5-0.5B-python-safe")
+
+ # Tokenize a prompt and generate up to 100 new tokens
+ inputs = tokenizer("Your prompt here", return_tensors="pt")
+ outputs = model.generate(**inputs, max_new_tokens=100)
+ print(tokenizer.decode(outputs[0], skip_special_tokens=True))
+ ```
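The checkpoint is stored in float16 (see the `"dtype": "float16"` entry in config.json further down). To load the weights in that half precision explicitly, one possible variant is shown below; `torch_dtype` is a standard `from_pretrained` argument, but treat this as a sketch rather than the authors' recommended loading code:

```python
import torch
from transformers import AutoModelForCausalLM

# Load the pruned checkpoint in the half precision it was saved in.
model = AutoModelForCausalLM.from_pretrained(
    "CompactAI/Qwen2.5-0.5B-python-safe",
    torch_dtype=torch.float16,
)
```
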
+
+ ## 📋 Technical Details
+
+ | Property | Value |
+ |----------|-------|
+ | Base Model | [Qwen/Qwen2.5-0.5B](https://huggingface.co/Qwen/Qwen2.5-0.5B) |
+ | Specialization | Python |
+ | Prune Mode | Safe |
+ | Pruning Method | Activation-based weight pruning (Wanda) |
+ | Weight Reduction | 1% of weights pruned |
+
+ ## 🔗 Related Models
+
+ This model is part of the **Qwen2.5-0.5B** pruned model collection. Available variants:
+ - **Safe** - Conservative pruning (~10-20%), high accuracy retention
+ - **Aggressive** - Maximum compression (~40-50%), best for edge deployment
+
+ ## 📜 License
+
+ This model inherits the license from the base model [Qwen/Qwen2.5-0.5B](https://huggingface.co/Qwen/Qwen2.5-0.5B).
+
+ ---
+ *Generated by ZANNPS [Zeto Automatic Neural Network Pruning System]*
chat_template.jinja ADDED
@@ -0,0 +1,54 @@
+ {%- if tools %}
+ {{- '<|im_start|>system\n' }}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- messages[0]['content'] }}
+ {%- else %}
+ {{- 'You are a helpful assistant.' }}
+ {%- endif %}
+ {{- "\n\n# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+ {%- for tool in tools %}
+ {{- "\n" }}
+ {{- tool | tojson }}
+ {%- endfor %}
+ {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+ {%- if messages[0]['role'] == 'system' %}
+ {{- '<|im_start|>system\n' + messages[0]['content'] + '<|im_end|>\n' }}
+ {%- else %}
+ {{- '<|im_start|>system\nYou are a helpful assistant.<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- for message in messages %}
+ {%- if (message.role == "user") or (message.role == "system" and not loop.first) or (message.role == "assistant" and not message.tool_calls) %}
+ {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+ {%- elif message.role == "assistant" %}
+ {{- '<|im_start|>' + message.role }}
+ {%- if message.content %}
+ {{- '\n' + message.content }}
+ {%- endif %}
+ {%- for tool_call in message.tool_calls %}
+ {%- if tool_call.function is defined %}
+ {%- set tool_call = tool_call.function %}
+ {%- endif %}
+ {{- '\n<tool_call>\n{"name": "' }}
+ {{- tool_call.name }}
+ {{- '", "arguments": ' }}
+ {{- tool_call.arguments | tojson }}
+ {{- '}\n</tool_call>' }}
+ {%- endfor %}
+ {{- '<|im_end|>\n' }}
+ {%- elif message.role == "tool" %}
+ {%- if (loop.index0 == 0) or (messages[loop.index0 - 1].role != "tool") %}
+ {{- '<|im_start|>user' }}
+ {%- endif %}
+ {{- '\n<tool_response>\n' }}
+ {{- message.content }}
+ {{- '\n</tool_response>' }}
+ {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+ {{- '<|im_end|>\n' }}
+ {%- endif %}
+ {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+ {{- '<|im_start|>assistant\n' }}
+ {%- endif %}
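For reference, this is the standard Qwen2.5 ChatML-style template. A minimal sketch of rendering it through the tokenizer (assuming a transformers version with chat-template support; the example messages are made up):

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("CompactAI/Qwen2.5-0.5B-python-safe")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Write a Python function that reverses a string."},
]

# Renders the Jinja template above into a single prompt string and appends
# '<|im_start|>assistant\n' so the model continues as the assistant.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```
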
comparison_graph.png ADDED
config.json ADDED
@@ -0,0 +1,58 @@
+ {
+ "architectures": [
+ "Qwen2ForCausalLM"
+ ],
+ "attention_dropout": 0.0,
+ "bos_token_id": 151643,
+ "dtype": "float16",
+ "eos_token_id": 151643,
+ "hidden_act": "silu",
+ "hidden_size": 896,
+ "initializer_range": 0.02,
+ "intermediate_size": 4864,
+ "layer_types": [
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention",
+ "full_attention"
+ ],
+ "max_position_embeddings": 32768,
+ "max_window_layers": 24,
+ "model_type": "qwen2",
+ "num_attention_heads": 14,
+ "num_hidden_layers": 24,
+ "num_key_value_heads": 2,
+ "pad_token_id": null,
+ "rms_norm_eps": 1e-06,
+ "rope_parameters": {
+ "rope_theta": 1000000.0,
+ "rope_type": "default"
+ },
+ "sliding_window": null,
+ "tie_word_embeddings": true,
+ "transformers_version": "5.0.0",
+ "use_cache": true,
+ "use_mrope": false,
+ "use_sliding_window": false,
+ "vocab_size": 151936
+ }
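A note on the attention layout these values imply: a hidden size of 896 over 14 query heads gives a head dimension of 64, and with only 2 key/value heads the model uses grouped-query attention (7 query heads per KV head). A quick check using the standard `AutoConfig` API; the arithmetic only restates values from the file above:

```python
from transformers import AutoConfig

cfg = AutoConfig.from_pretrained("CompactAI/Qwen2.5-0.5B-python-safe")

head_dim = cfg.hidden_size // cfg.num_attention_heads            # 896 // 14 = 64
gqa_groups = cfg.num_attention_heads // cfg.num_key_value_heads  # 14 // 2 = 7
print(f"head_dim={head_dim}, query heads per KV head={gqa_groups}")
```
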
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "bos_token_id": 151643,
+ "do_sample": false,
+ "eos_token_id": 151643,
+ "max_new_tokens": 2048,
+ "transformers_version": "5.0.0"
+ }
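These defaults mean greedy decoding with up to 2048 new tokens per call. A small sketch of inspecting them with the standard `GenerationConfig` class; per-call arguments to `generate()` override these file-level defaults:

```python
from transformers import GenerationConfig

gen_cfg = GenerationConfig.from_pretrained("CompactAI/Qwen2.5-0.5B-python-safe")
print(gen_cfg.do_sample, gen_cfg.max_new_tokens)  # False 2048, per the file above

# Per-call overrides take precedence, e.g.:
# model.generate(**inputs, do_sample=True, temperature=0.7, max_new_tokens=256)
```
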
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2093ca63936cc27dcc4eabb582f0be0acbedc80cedce4af42ae11938b1104909
+ size 988097536
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51354673edf4300eb841665e1fb684cc1badea87c49d5de6ef09981151683508
+ size 11422159
tokenizer_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "add_prefix_space": false,
+ "backend": "tokenizers",
+ "bos_token": null,
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<|endoftext|>",
+ "errors": "replace",
+ "extra_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>",
+ "<|object_ref_start|>",
+ "<|object_ref_end|>",
+ "<|box_start|>",
+ "<|box_end|>",
+ "<|quad_start|>",
+ "<|quad_end|>",
+ "<|vision_start|>",
+ "<|vision_end|>",
+ "<|vision_pad|>",
+ "<|image_pad|>",
+ "<|video_pad|>"
+ ],
+ "is_local": false,
+ "model_max_length": 131072,
+ "pad_token": "<|endoftext|>",
+ "split_special_tokens": false,
+ "tokenizer_class": "Qwen2Tokenizer",
+ "unk_token": null
+ }