Commit ad68f0f (verified) · 1 parent: b0b9a2f
autoprogrammer committed

Initial upload: SDAR-4B trace SFT on ESFT-intent
added_tokens.json ADDED
@@ -0,0 +1,29 @@
+ {
+   "</think>": 151668,
+   "</tool_call>": 151658,
+   "</tool_response>": 151666,
+   "<think>": 151667,
+   "<tool_call>": 151657,
+   "<tool_response>": 151665,
+   "<|MASK|>": 151669,
+   "<|box_end|>": 151649,
+   "<|box_start|>": 151648,
+   "<|endoftext|>": 151643,
+   "<|file_sep|>": 151664,
+   "<|fim_middle|>": 151660,
+   "<|fim_pad|>": 151662,
+   "<|fim_prefix|>": 151659,
+   "<|fim_suffix|>": 151661,
+   "<|im_end|>": 151645,
+   "<|im_start|>": 151644,
+   "<|image_pad|>": 151655,
+   "<|object_ref_end|>": 151647,
+   "<|object_ref_start|>": 151646,
+   "<|quad_end|>": 151651,
+   "<|quad_start|>": 151650,
+   "<|repo_name|>": 151663,
+   "<|video_pad|>": 151656,
+   "<|vision_end|>": 151653,
+   "<|vision_pad|>": 151654,
+   "<|vision_start|>": 151652
+ }
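
Note: this map extends the Qwen2-style vocabulary with reasoning/tool-call tags and SDAR's <|MASK|> token, whose id 151669 matches mask_token_id in config.json below. A minimal sanity-check sketch; the repo id is a hypothetical placeholder, not confirmed by this commit:

# Sketch: confirm the added special tokens resolve to the ids listed above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("autoprogrammer/SDAR-4B-ESFT-intent", trust_remote_code=True)
assert tok.convert_tokens_to_ids("<|MASK|>") == 151669   # diffusion mask token
assert tok.convert_tokens_to_ids("<|im_end|>") == 151645  # ChatML end-of-turn
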
chat_template.jinja ADDED
@@ -0,0 +1,85 @@
+ {%- if tools %}
+     {{- '<|im_start|>system\n' }}
+     {%- if messages[0].role == 'system' %}
+         {{- messages[0].content + '\n\n' }}
+     {%- endif %}
+     {{- "# Tools\n\nYou may call one or more functions to assist with the user query.\n\nYou are provided with function signatures within <tools></tools> XML tags:\n<tools>" }}
+     {%- for tool in tools %}
+         {{- "\n" }}
+         {{- tool | tojson }}
+     {%- endfor %}
+     {{- "\n</tools>\n\nFor each function call, return a json object with function name and arguments within <tool_call></tool_call> XML tags:\n<tool_call>\n{\"name\": <function-name>, \"arguments\": <args-json-object>}\n</tool_call><|im_end|>\n" }}
+ {%- else %}
+     {%- if messages[0].role == 'system' %}
+         {{- '<|im_start|>system\n' + messages[0].content + '<|im_end|>\n' }}
+     {%- endif %}
+ {%- endif %}
+ {%- set ns = namespace(multi_step_tool=true, last_query_index=messages|length - 1) %}
+ {%- for message in messages[::-1] %}
+     {%- set index = (messages|length - 1) - loop.index0 %}
+     {%- if ns.multi_step_tool and message.role == "user" and not(message.content.startswith('<tool_response>') and message.content.endswith('</tool_response>')) %}
+         {%- set ns.multi_step_tool = false %}
+         {%- set ns.last_query_index = index %}
+     {%- endif %}
+ {%- endfor %}
+ {%- for message in messages %}
+     {%- if (message.role == "user") or (message.role == "system" and not loop.first) %}
+         {{- '<|im_start|>' + message.role + '\n' + message.content + '<|im_end|>' + '\n' }}
+     {%- elif message.role == "assistant" %}
+         {%- set content = message.content %}
+         {%- set reasoning_content = '' %}
+         {%- if message.reasoning_content is defined and message.reasoning_content is not none %}
+             {%- set reasoning_content = message.reasoning_content %}
+         {%- else %}
+             {%- if '</think>' in message.content %}
+                 {%- set content = message.content.split('</think>')[-1].lstrip('\n') %}
+                 {%- set reasoning_content = message.content.split('</think>')[0].rstrip('\n').split('<think>')[-1].lstrip('\n') %}
+             {%- endif %}
+         {%- endif %}
+         {%- if loop.index0 > ns.last_query_index %}
+             {%- if loop.last or (not loop.last and reasoning_content) %}
+                 {{- '<|im_start|>' + message.role + '\n<think>\n' + reasoning_content.strip('\n') + '\n</think>\n\n' + content.lstrip('\n') }}
+             {%- else %}
+                 {{- '<|im_start|>' + message.role + '\n' + content }}
+             {%- endif %}
+         {%- else %}
+             {{- '<|im_start|>' + message.role + '\n' + content }}
+         {%- endif %}
+         {%- if message.tool_calls %}
+             {%- for tool_call in message.tool_calls %}
+                 {%- if (loop.first and content) or (not loop.first) %}
+                     {{- '\n' }}
+                 {%- endif %}
+                 {%- if tool_call.function %}
+                     {%- set tool_call = tool_call.function %}
+                 {%- endif %}
+                 {{- '<tool_call>\n{"name": "' }}
+                 {{- tool_call.name }}
+                 {{- '", "arguments": ' }}
+                 {%- if tool_call.arguments is string %}
+                     {{- tool_call.arguments }}
+                 {%- else %}
+                     {{- tool_call.arguments | tojson }}
+                 {%- endif %}
+                 {{- '}\n</tool_call>' }}
+             {%- endfor %}
+         {%- endif %}
+         {{- '<|im_end|>\n' }}
+     {%- elif message.role == "tool" %}
+         {%- if loop.first or (messages[loop.index0 - 1].role != "tool") %}
+             {{- '<|im_start|>user' }}
+         {%- endif %}
+         {{- '\n<tool_response>\n' }}
+         {{- message.content }}
+         {{- '\n</tool_response>' }}
+         {%- if loop.last or (messages[loop.index0 + 1].role != "tool") %}
+             {{- '<|im_end|>\n' }}
+         {%- endif %}
+     {%- endif %}
+ {%- endfor %}
+ {%- if add_generation_prompt %}
+     {{- '<|im_start|>assistant\n' }}
+     {%- if enable_thinking is defined and enable_thinking is false %}
+         {{- '<think>\n\n</think>\n\n' }}
+     {%- endif %}
+ {%- endif %}
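
This is a Qwen3-style ChatML template: tool schemas are injected into the system turn, assistant <think>...</think> spans are kept only from the last real user query onward, and tool results are folded into user turns as <tool_response> blocks. A minimal rendering sketch (same placeholder repo id as above):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("autoprogrammer/SDAR-4B-ESFT-intent", trust_remote_code=True)
messages = [{"role": "user", "content": "What is 2 + 2?"}]

# add_generation_prompt=True appends '<|im_start|>assistant\n'; with
# enable_thinking=False the template also emits an empty <think></think> block.
prompt = tok.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True, enable_thinking=False
)
print(prompt)
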
config.json ADDED
@@ -0,0 +1,43 @@
+ {
+   "architectures": [
+     "SDARForCausalLM"
+   ],
+   "attention_bias": false,
+   "attention_dropout": 0.0,
+   "auto_map": {
+     "AutoConfig": "configuration_sdar.SDARConfig",
+     "AutoModel": "modeling_sdar.SDARModel",
+     "AutoModelForCausalLM": "modeling_sdar.SDARForCausalLM"
+   },
+   "block_size": 4,
+   "bos_token_id": 151643,
+   "debug": false,
+   "eos_token_id": 151643,
+   "ep_size": 1,
+   "fuse_cross_entropy": false,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 2560,
+   "initializer_range": 0.02,
+   "intermediate_size": 9728,
+   "mask_token_id": 151669,
+   "max_position_embeddings": 32768,
+   "max_window_layers": 36,
+   "micro_forward": false,
+   "model_type": "sdar",
+   "num_attention_heads": 32,
+   "num_hidden_layers": 36,
+   "num_key_value_heads": 8,
+   "rms_norm_eps": 1e-06,
+   "rope_scaling": null,
+   "rope_theta": 1000000,
+   "skip_checkpoint": false,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.52.4",
+   "use_cache": false,
+   "use_deepep": false,
+   "use_sliding_window": false,
+   "vocab_size": 151936
+ }
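
Since auto_map routes the Auto* classes to custom code shipped in this repo (configuration_sdar.py, modeling_sdar.py), loading requires trust_remote_code=True. A sketch with the same placeholder repo id; note the GQA layout the config implies (32 query heads sharing 8 KV heads, i.e. groups of 4):

import torch
from transformers import AutoConfig, AutoModelForCausalLM

repo = "autoprogrammer/SDAR-4B-ESFT-intent"  # placeholder repo id
cfg = AutoConfig.from_pretrained(repo, trust_remote_code=True)
assert cfg.num_attention_heads // cfg.num_key_value_heads == 4  # GQA group size

model = AutoModelForCausalLM.from_pretrained(
    repo, torch_dtype=torch.bfloat16, trust_remote_code=True
)
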
configuration_sdar.py ADDED
@@ -0,0 +1,212 @@
+ # coding=utf-8
+ # Copyright 2024 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ #     http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ """SDAR model configuration"""
+
+ from transformers.configuration_utils import PretrainedConfig
+ from transformers.modeling_rope_utils import rope_config_validation
+ from transformers.utils import logging
+
+
+ logger = logging.get_logger(__name__)
+
+
+ class SDARConfig(PretrainedConfig):
+     r"""
+     This is the configuration class to store the configuration of a [`SDARModel`]. It is used to instantiate a
+     SDAR model according to the specified arguments, defining the model architecture. Instantiating a configuration
+     with the defaults will yield a similar configuration to that of
+     SDAR-1.7B [DiffuOpen/SDAR-1.7B-Chat](https://huggingface.co/DiffuOpen/SDAR-1.7B-Chat/).
+
+     Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
+     documentation from [`PretrainedConfig`] for more information.
+
+
+     Args:
+         vocab_size (`int`, *optional*, defaults to 151936):
+             Vocabulary size of the SDAR model. Defines the number of different tokens that can be represented by the
+             `input_ids` passed when calling [`SDARModel`].
+         hidden_size (`int`, *optional*, defaults to 4096):
+             Dimension of the hidden representations.
+         intermediate_size (`int`, *optional*, defaults to 22016):
+             Dimension of the MLP representations.
+         num_hidden_layers (`int`, *optional*, defaults to 32):
+             Number of hidden layers in the Transformer encoder.
+         num_attention_heads (`int`, *optional*, defaults to 32):
+             Number of attention heads for each attention layer in the Transformer encoder.
+         num_key_value_heads (`int`, *optional*, defaults to 32):
+             This is the number of key_value heads that should be used to implement Grouped Query Attention. If
+             `num_key_value_heads=num_attention_heads`, the model will use Multi Head Attention (MHA); if
+             `num_key_value_heads=1`, the model will use Multi Query Attention (MQA); otherwise GQA is used. When
+             converting a multi-head checkpoint to a GQA checkpoint, each group key and value head should be constructed
+             by meanpooling all the original heads within that group. For more details, check out [this
+             paper](https://arxiv.org/pdf/2305.13245.pdf). If it is not specified, will default to `32`.
+         head_dim (`int`, *optional*, defaults to 128):
+             The attention head dimension.
+         hidden_act (`str` or `function`, *optional*, defaults to `"silu"`):
+             The non-linear activation function (function or string) in the decoder.
+         max_position_embeddings (`int`, *optional*, defaults to 32768):
+             The maximum sequence length that this model might ever be used with.
+         initializer_range (`float`, *optional*, defaults to 0.02):
+             The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
+         rms_norm_eps (`float`, *optional*, defaults to 1e-06):
+             The epsilon used by the rms normalization layers.
+         use_cache (`bool`, *optional*, defaults to `True`):
+             Whether or not the model should return the last key/values attentions (not used by all models). Only
+             relevant if `config.is_decoder=True`.
+         tie_word_embeddings (`bool`, *optional*, defaults to `False`):
+             Whether the model's input and output word embeddings should be tied.
+         rope_theta (`float`, *optional*, defaults to 10000.0):
+             The base period of the RoPE embeddings.
+         rope_scaling (`Dict`, *optional*):
+             Dictionary containing the scaling configuration for the RoPE embeddings. NOTE: if you apply a new rope type
+             and you expect the model to work on longer `max_position_embeddings`, we recommend you update this value
+             accordingly.
+             Expected contents:
+                 `rope_type` (`str`):
+                     The sub-variant of RoPE to use. Can be one of ['default', 'linear', 'dynamic', 'yarn', 'longrope',
+                     'llama3'], with 'default' being the original RoPE implementation.
+                 `factor` (`float`, *optional*):
+                     Used with all rope types except 'default'. The scaling factor to apply to the RoPE embeddings. In
+                     most scaling types, a `factor` of x will enable the model to handle sequences of length x *
+                     original maximum pre-trained length.
+                 `original_max_position_embeddings` (`int`, *optional*):
+                     Used with 'dynamic', 'longrope' and 'llama3'. The original max position embeddings used during
+                     pretraining.
+                 `attention_factor` (`float`, *optional*):
+                     Used with 'yarn' and 'longrope'. The scaling factor to be applied on the attention
+                     computation. If unspecified, it defaults to the value recommended by the implementation, using the
+                     `factor` field to infer the suggested value.
+                 `beta_fast` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for extrapolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 32.
+                 `beta_slow` (`float`, *optional*):
+                     Only used with 'yarn'. Parameter to set the boundary for interpolation (only) in the linear
+                     ramp function. If unspecified, it defaults to 1.
+                 `short_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to short contexts (<
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `long_factor` (`List[float]`, *optional*):
+                     Only used with 'longrope'. The scaling factor to be applied to long contexts (>
+                     `original_max_position_embeddings`). Must be a list of numbers with the same length as the hidden
+                     size divided by the number of attention heads divided by 2.
+                 `low_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to low frequency components of the RoPE.
+                 `high_freq_factor` (`float`, *optional*):
+                     Only used with 'llama3'. Scaling factor applied to high frequency components of the RoPE.
+         attention_bias (`bool`, *optional*, defaults to `False`):
+             Whether to use a bias in the query, key, value and output projection layers during self-attention.
+         use_sliding_window (`bool`, *optional*, defaults to `False`):
+             Whether to use sliding window attention.
+         sliding_window (`int`, *optional*, defaults to 4096):
+             Sliding window attention (SWA) window size. If not specified, will default to `4096`.
+         max_window_layers (`int`, *optional*, defaults to 28):
+             The number of layers that use SWA (Sliding Window Attention). The bottom layers use SWA while the top use full attention.
+         attention_dropout (`float`, *optional*, defaults to 0.0):
+             The dropout ratio for the attention probabilities.
+
+     ```python
+     >>> from transformers import SDARModel, SDARConfig
+
+     >>> # Initializing a SDAR style configuration
+     >>> configuration = SDARConfig()
+
+     >>> # Initializing a model from the SDAR-8B style configuration
+     >>> model = SDARModel(configuration)
+
+     >>> # Accessing the model configuration
+     >>> configuration = model.config
+     ```"""
+
+     model_type = "sdar"
+     keys_to_ignore_at_inference = ["past_key_values"]
+
+     # Default tensor parallel plan for base model `SDAR`
+     base_model_tp_plan = {
+         "layers.*.self_attn.q_proj": "colwise",
+         "layers.*.self_attn.k_proj": "colwise",
+         "layers.*.self_attn.v_proj": "colwise",
+         "layers.*.self_attn.o_proj": "rowwise",
+         "layers.*.mlp.gate_proj": "colwise",
+         "layers.*.mlp.up_proj": "colwise",
+         "layers.*.mlp.down_proj": "rowwise",
+     }
+     base_model_pp_plan = {
+         "embed_tokens": (["input_ids"], ["inputs_embeds"]),
+         "layers": (["hidden_states", "attention_mask"], ["hidden_states"]),
+         "norm": (["hidden_states"], ["hidden_states"]),
+     }
+
+     def __init__(
+         self,
+         vocab_size=151936,
+         hidden_size=4096,
+         intermediate_size=22016,
+         num_hidden_layers=32,
+         num_attention_heads=32,
+         num_key_value_heads=32,
+         head_dim=128,
+         hidden_act="silu",
+         max_position_embeddings=32768,
+         initializer_range=0.02,
+         rms_norm_eps=1e-6,
+         use_cache=True,
+         tie_word_embeddings=False,
+         rope_theta=10000.0,
+         rope_scaling=None,
+         attention_bias=False,
+         use_sliding_window=False,
+         sliding_window=4096,
+         max_window_layers=28,
+         attention_dropout=0.0,
+         **kwargs,
+     ):
+         self.vocab_size = vocab_size
+         self.max_position_embeddings = max_position_embeddings
+         self.hidden_size = hidden_size
+         self.intermediate_size = intermediate_size
+         self.num_hidden_layers = num_hidden_layers
+         self.num_attention_heads = num_attention_heads
+         self.use_sliding_window = use_sliding_window
+         self.sliding_window = sliding_window  # we check `use_sliding_window` in the modeling code
+         self.max_window_layers = max_window_layers
+
+         # for backward compatibility
+         if num_key_value_heads is None:
+             num_key_value_heads = num_attention_heads
+
+         self.num_key_value_heads = num_key_value_heads
+         self.head_dim = head_dim
+         self.hidden_act = hidden_act
+         self.initializer_range = initializer_range
+         self.rms_norm_eps = rms_norm_eps
+         self.use_cache = use_cache
+         self.rope_theta = rope_theta
+         self.rope_scaling = rope_scaling
+         self.attention_bias = attention_bias
+         self.attention_dropout = attention_dropout
+         # Validate the correctness of rotary position embeddings parameters
+         # BC: if there is a 'type' field, move it to 'rope_type'.
+         if self.rope_scaling is not None and "type" in self.rope_scaling:
+             self.rope_scaling["rope_type"] = self.rope_scaling["type"]
+         rope_config_validation(self)
+
+         super().__init__(
+             tie_word_embeddings=tie_word_embeddings,
+             **kwargs,
+         )
+
+
+ __all__ = ["SDARConfig"]
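
The constructor defaults above describe a larger reference configuration; this checkpoint's config.json overrides them. A sketch of building the config with this repo's sizes directly:

# Sketch: SDARConfig with the values this repo ships in config.json.
from configuration_sdar import SDARConfig

cfg = SDARConfig(
    hidden_size=2560,
    intermediate_size=9728,
    num_hidden_layers=36,
    num_attention_heads=32,
    num_key_value_heads=8,
    head_dim=128,
    max_position_embeddings=32768,
    rope_theta=1000000,
    rms_norm_eps=1e-6,
)
# Attention width is num_attention_heads * head_dim = 4096, decoupled from
# hidden_size (2560); o_proj maps it back down to the hidden dimension.
assert cfg.num_attention_heads * cfg.head_dim == 4096
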
generation_config.json ADDED
@@ -0,0 +1,13 @@
+ {
+   "bos_token_id": 151643,
+   "do_sample": true,
+   "eos_token_id": [
+     151645,
+     151643
+   ],
+   "pad_token_id": 151643,
+   "temperature": 0.6,
+   "top_k": 20,
+   "top_p": 0.95,
+   "transformers_version": "4.52.4"
+ }
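
These sampling defaults are picked up automatically by generate() when the model is loaded from this repo. Spelled out explicitly for clarity, assuming the custom SDARForCausalLM supports the standard generate() path; model, tok, and prompt as in the earlier sketches:

inputs = tok(prompt, return_tensors="pt")
outputs = model.generate(
    **inputs,
    do_sample=True,
    temperature=0.6,
    top_k=20,
    top_p=0.95,
    eos_token_id=[151645, 151643],  # <|im_end|> or <|endoftext|> stops generation
    pad_token_id=151643,
    max_new_tokens=256,
)
print(tok.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
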
merges.txt ADDED
The diff for this file is too large to render. See raw diff
model-00001-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1fdd17010d04a512e7bfa08edcfde4af0240e51caec70aa86df28b49e78b49f
+ size 4967215360
model-00002-of-00002.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f96b1437115b4d4b39053488183acbe28cc804c571426ec3dd8684910abf1f64
+ size 3855679144
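
The two shard entries above are Git LFS pointers: the repository stores only a SHA-256 and byte size, with the ~8.8 GB of weights in LFS storage. A sketch for verifying a downloaded shard against its pointer:

import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a large file through SHA-256 without loading it into memory."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        while chunk := f.read(chunk_size):
            h.update(chunk)
    return h.hexdigest()

expected = "e1fdd17010d04a512e7bfa08edcfde4af0240e51caec70aa86df28b49e78b49f"
assert sha256_of("model-00001-of-00002.safetensors") == expected
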
model.safetensors.index.json ADDED
@@ -0,0 +1,406 @@
+ {
+   "metadata": {
+     "total_size": 8822848512
+   },
+   "weight_map": {
+     "lm_head.weight": "model-00002-of-00002.safetensors",
+     "model.embed_tokens.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.0.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.1.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.10.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.11.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.12.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.13.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.14.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.15.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.16.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.17.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.18.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.19.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.2.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.20.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.20.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.21.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.21.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.22.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.23.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.24.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.25.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.26.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.27.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.28.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.29.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.3.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.3.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.30.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.30.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.31.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.32.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.33.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.34.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.input_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.down_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.gate_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.mlp.up_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.post_attention_layernorm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.k_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.k_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.o_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.q_norm.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.q_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.35.self_attn.v_proj.weight": "model-00002-of-00002.safetensors",
+     "model.layers.4.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.4.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.5.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.6.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.7.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.8.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.input_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.down_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.gate_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.mlp.up_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.post_attention_layernorm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.k_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.k_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.o_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.q_norm.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.q_proj.weight": "model-00001-of-00002.safetensors",
+     "model.layers.9.self_attn.v_proj.weight": "model-00001-of-00002.safetensors",
+     "model.norm.weight": "model-00002-of-00002.safetensors"
+   }
+ }
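
The index maps each tensor name to its shard. Note that layer 20 straddles the shard boundary: its gate_proj/up_proj and attention weights live in shard 1, while down_proj and the layer norms live in shard 2. A sketch for loading a single tensor via the index:

import json
from safetensors import safe_open

with open("model.safetensors.index.json") as f:
    index = json.load(f)

name = "model.layers.20.mlp.gate_proj.weight"
shard = index["weight_map"][name]  # -> "model-00001-of-00002.safetensors"
with safe_open(shard, framework="pt", device="cpu") as f:
    tensor = f.get_tensor(name)    # loads just this tensor, not the whole shard
print(tensor.shape)
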
modeling_sdar.py ADDED
@@ -0,0 +1,1542 @@
1
+ # This file is modified based on https://github.com/huggingface/transformers/blob/v4.52.4/src/transformers/models/qwen3/modeling_qwen3.py.
2
+ #
3
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
4
+ # This file was automatically generated from src/transformers/models/qwen3/modular_qwen3.py.
5
+ # Do NOT edit this file manually as any edits will be overwritten by the generation of
6
+ # the file from the modular. If any change should be done, please apply the change to the
7
+ # modular_qwen3.py file directly. One of our CI enforces this.
8
+ # 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
9
+ # coding=utf-8
10
+ # Copyright 2025 The Qwen team, Alibaba Group and the HuggingFace Inc. team. All rights reserved.
11
+ #
12
+ # Licensed under the Apache License, Version 2.0 (the "License");
13
+ # you may not use this file except in compliance with the License.
14
+ # You may obtain a copy of the License at
15
+ #
16
+ # http://www.apache.org/licenses/LICENSE-2.0
17
+ #
18
+ # Unless required by applicable law or agreed to in writing, software
19
+ # distributed under the License is distributed on an "AS IS" BASIS,
20
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
21
+ # See the License for the specific language governing permissions and
22
+ # limitations under the License.
23
+
24
+ from typing import Callable, Optional, Tuple, Union, List
25
+
26
+ import torch
27
+ from torch import nn
28
+ from einops import rearrange
29
+
30
+ from transformers.activations import ACT2FN
31
+ from transformers.cache_utils import Cache, DynamicCache, SlidingWindowCache, StaticCache
32
+ from transformers.generation import GenerationMixin
33
+ from transformers.integrations import use_kernel_forward_from_hub
34
+ from transformers.modeling_attn_mask_utils import AttentionMaskConverter
35
+ from transformers.modeling_flash_attention_utils import FlashAttentionKwargs
36
+ from transformers.modeling_layers import GradientCheckpointingLayer
37
+ from transformers.modeling_outputs import (
38
+ BaseModelOutputWithPast,
39
+ CausalLMOutputWithPast,
40
+ QuestionAnsweringModelOutput,
41
+ SequenceClassifierOutputWithPast,
42
+ TokenClassifierOutput,
43
+ )
44
+ from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS, dynamic_rope_update
45
+ from transformers.modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
46
+ from transformers.processing_utils import Unpack
47
+ from transformers.utils import LossKwargs, auto_docstring, can_return_tuple, is_torch_flex_attn_available, logging
48
+ from .configuration_sdar import SDARConfig
49
+ from .fused_linear_diffusion_cross_entropy import FusedLinearDiffusionCrossEntropyLoss
50
+
51
+ from flash_attn.ops.triton.layer_norm import rms_norm_fn as flash_rms_norm
52
+
53
+ import torch.nn.functional as F
54
+ try:
55
+ from flash_attn import flash_attn_func, flash_attn_varlen_func
56
+ from flash_attn.bert_padding import index_first_axis, pad_input, unpad_input
57
+ except ImportError:
58
+ pass
59
+
60
+ try:
61
+ from liger_kernel.ops.swiglu import LigerSiLUMulFunction # noqa: F401
62
+ liger_kernel_is_available = True
63
+ except ImportError:
64
+ liger_kernel_is_available = False
65
+
66
+
67
+ if is_torch_flex_attn_available():
68
+ from torch.nn.attention.flex_attention import BlockMask, create_block_mask, flex_attention
69
+ from transformers.integrations.flex_attention import make_flex_block_causal_mask
70
+
71
+
72
+ logger = logging.get_logger(__name__)
73
+
74
+
75
+ def modify_padded_position_ids_2d(position_ids: torch.LongTensor) -> torch.LongTensor:
76
+ """
77
+ Rewrite the padded tail of a batch of packed position_ids using fully vectorized PyTorch ops.
78
+ This function assumes the input is a 2D tensor of shape (batch_size, sequence_length).
79
+ Each row of the batch is processed independently.
80
+
81
+ Args:
82
+ position_ids: 2D PyTorch tensor, shape (batch_size, sequence_length).
83
+
84
+ Returns:
85
+ The modified position_ids tensor, shape (batch_size, sequence_length).
86
+ """
87
+ if position_ids.dim() != 2:
88
+ raise ValueError(f"Input tensor must be 2D, but got {position_ids.dim()} dimensions.")
89
+
90
+ batch_size, seq_len = position_ids.shape
91
+ device = position_ids.device
92
+
93
+ col_indices = torch.arange(seq_len, device=device, dtype=position_ids.dtype).expand(batch_size, -1)
94
+ mask = (position_ids != 0)
95
+
96
+ masked_indices = col_indices * mask
97
+ last_nonzero_idx = torch.max(masked_indices, dim=1).values
98
+ has_nonzero = torch.any(mask, dim=1)
99
+ pad_start_idx = torch.where(has_nonzero, last_nonzero_idx + 1, torch.tensor(0, device=device, dtype=position_ids.dtype))
100
+
101
+ padding_mask = col_indices >= pad_start_idx.unsqueeze(1)
102
+ new_pad_values = col_indices - pad_start_idx.unsqueeze(1)
103
+ position_ids = torch.where(padding_mask, new_pad_values, position_ids)
104
+
105
+ return position_ids
106
+
107
+
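+ # Worked example (illustrative only): everything after the last non-zero position is
+ # treated as padding and renumbered into a fresh 0,1,2,... ramp:
+ # >>> pids = torch.tensor([[0, 1, 2, 0, 1, 0, 0, 0]])
+ # >>> modify_padded_position_ids_2d(pids)
+ # tensor([[0, 1, 2, 0, 1, 0, 1, 2]])
+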
108
+ def calculate_token_nums(position_ids: torch.Tensor):
109
+ """
110
+ Efficiently compute, with PyTorch, the length of every packed sequence in a batch.
111
+
112
+ Args:
113
+ position_ids (torch.Tensor): a 2D tensor of shape (batch_size, sequence_length).
114
+ e.g. tensor([[0,1,2,3,4,0,1,2,3,4,5,0,1,2,3,0,0,0]])
115
+ Returns:
116
+ List[torch.Tensor]: one 1D tensor per batch item holding the per-sequence lengths.
117
+ e.g. [tensor([5, 6, 4, 1, 1, 1])]
118
+ """
119
+ # Check that the input is a 2D tensor
120
+ if position_ids.dim() != 2:
121
+ raise ValueError(f"Input must be a 2D tensor, but got {position_ids.dim()}D")
122
+
123
+ all_lengths = []
124
+
125
+ # We process the batch row by row. The number of sequences per row differs (ragged),
126
+ # so a Python loop over the batch dimension is the most efficient and clearest choice here.
127
+ # The operations inside the loop are fully vectorized.
128
+ for pids_row in position_ids:
129
+ # Total length of the current row
130
+ seq_len = pids_row.shape[0]
131
+
132
+ # 1. Find the indices of all elements equal to 0
133
+ # pids_row == 0 yields a boolean tensor: [True, False, ..., True, ...]
134
+ # torch.nonzero returns the indices of those True values
135
+ # .flatten() turns the (N, 1) result into shape (N,)
136
+ zero_indices = torch.nonzero(pids_row == 0).flatten()
137
+
138
+ # 2. Append the total sequence length as an extra split point at the end;
139
+ # this is essential for computing the length of the last sequence.
140
+ # Note: the newly created tensor must live on the same device (cpu/cuda) as the original one.
141
+ split_points = torch.cat([
142
+ zero_indices,
143
+ torch.tensor([seq_len], device=pids_row.device, dtype=zero_indices.dtype)
144
+ ])
145
+
146
+ # 3. The differences between adjacent split points are exactly the lengths we want;
147
+ # torch.diff([a, b, c, d]) returns [b-a, c-b, d-c]
148
+ lengths = torch.diff(split_points)
149
+
150
+ all_lengths.append(lengths)
151
+
152
+ return all_lengths
153
+
154
+
155
+ def forward_add_noise_packed(
156
+ inputs_ids: torch.Tensor,
157
+ num_tokens_list: List[torch.Tensor],
158
+ prompt_mask: torch.Tensor,
159
+ mask_id: int,
160
+ eps: float = 1e-3,
161
+ max_tries: int = 10,
162
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
163
+ """
164
+ Add noise to the token IDs of a batch of packed sequences.
165
+
166
+ This function keeps the logic of drawing an independent random noise rate for each
167
+ logical sample (concatenated within each batch item). It randomly replaces a portion
168
+ of token IDs with mask_id, skipping every position flagged by prompt_mask.
169
+
170
+ Args:
171
+ inputs_ids (torch.Tensor):
172
+ Input token ID tensor of shape (bsz, total_tokens).
173
+ num_tokens_list (List[torch.Tensor]):
174
+ A list of tensors of length bsz. Each tensor records the length of every
175
+ logical sample in the corresponding batch item, e.g. [tensor([len1, len2]), tensor([len3, len4, len5])].
176
+ prompt_mask (torch.Tensor):
177
+ Boolean tensor of shape (bsz, total_tokens); positions where it is True are prompt
178
+ tokens and must not receive noise.
179
+ mask_id (int):
180
+ ID of the mask token used for replacement.
181
+ eps (float):
182
+ Small value preventing the noise rate t from being exactly 0, ensuring p_mask > 0.
183
+ max_tries (int):
184
+ Maximum number of attempts per batch item to ensure at least one non-prompt token is masked.
185
+
186
+ Returns:
187
+ Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
188
+ - noisy_input_ids (torch.Tensor):
189
+ Token IDs after noise is added, shape (bsz, total_tokens).
190
+ - final_masked_indices (torch.Tensor):
191
+ Boolean tensor marking which positions were actually masked, shape (bsz, total_tokens).
192
+ - p_masks (torch.Tensor):
193
+ A 1D tensor containing the actual noise rate of each masked token.
194
+ """
195
+ # 1. Validate inputs and read shapes
196
+ bsz, total_tokens = inputs_ids.shape
197
+ device = inputs_ids.device
198
+
199
+ # Consistency checks on the inputs
200
+ assert len(num_tokens_list) == bsz, f"len(num_tokens_list) ({len(num_tokens_list)}) must equal bsz ({bsz})"
201
+ assert prompt_mask.shape == (bsz, total_tokens), f"prompt_mask shape mismatch: expected {(bsz, total_tokens)}, got {prompt_mask.shape}"
202
+
203
+ # Containers for the results
204
+ noisy_ids_list = []
205
+ final_masked_indices_list = []
206
+ p_masks_per_token_list = []
207
+
208
+ # 2. Iterate over the batch dimension;
209
+ # this is the most direct and effective way to handle differing packing structures
210
+ for i in range(bsz):
211
+ # Slice out the data of the current batch item
212
+ current_ids = inputs_ids[i:i+1] # shape: (1, total_tokens)
213
+ current_num_tokens = num_tokens_list[i]
214
+ current_prompt_mask = prompt_mask[i:i+1] # shape: (1, total_tokens)
215
+
216
+ num_samples_in_item = len(current_num_tokens)
217
+ # Verify that the token count of this batch item matches
218
+ assert total_tokens == torch.sum(current_num_tokens), \
219
+ f"sum(num_tokens) of batch item {i} ({torch.sum(current_num_tokens)}) does not match total_tokens ({total_tokens})"
220
+
221
+ eligible_for_masking = ~current_prompt_mask
222
+
223
+ # If no token can be masked, keep the original input and set p_mask to eps
224
+ if not eligible_for_masking.any():
225
+ noisy_ids_list.append(current_ids)
226
+ final_masked_indices_list.append(torch.zeros_like(current_prompt_mask, dtype=torch.bool))
227
+ # p_mask_per_token must have shape (1, total_tokens) so it can be concatenated later
228
+ p_masks_per_token_list.append(torch.full((1, total_tokens), eps, device=device, dtype=torch.float))
229
+ continue
230
+
231
+ # --- Try to generate a mask, ensuring at least one token gets masked ---
232
+ final_masked_indices_item = torch.zeros_like(current_prompt_mask, dtype=torch.bool)
233
+ p_mask_per_token = None
234
+
235
+ for _ in range(max_tries):
236
+ # Draw an independent noise rate t for each logical sample
237
+ t = torch.rand(num_samples_in_item, device=device)
238
+ p_mask_per_sample = (1 - eps) * t + eps
239
+
240
+ # Broadcast each sample's noise rate to all of its tokens
241
+ p_mask_per_token_1d = torch.repeat_interleave(p_mask_per_sample, current_num_tokens)
242
+ p_mask_per_token = p_mask_per_token_1d.unsqueeze(0) # shape: (1, total_tokens)
243
+
244
+ # Sample a random mask according to the noise rate
245
+ masked_indices = torch.rand_like(p_mask_per_token) < p_mask_per_token
246
+ # Apply the prompt mask so prompt tokens are never masked
247
+ final_masked_indices_item = masked_indices & eligible_for_masking
248
+
249
+ # Stop retrying once at least one token has been masked
250
+ if final_masked_indices_item.any():
251
+ break
252
+
253
+ # If no token was masked after max_tries (extremely unlikely), force-mask one eligible token
254
+ if not final_masked_indices_item.any():
255
+ eligible_indices = torch.nonzero(eligible_for_masking.squeeze(0), as_tuple=True)[0]
256
+ if len(eligible_indices) > 0:
257
+ # Pick one maskable position at random
258
+ random_choice = torch.randint(0, len(eligible_indices), (1,)).item()
259
+ force_mask_idx = eligible_indices[random_choice]
260
+ final_masked_indices_item[0, force_mask_idx] = True
261
+
262
+
263
+ # --- Build the noisy IDs from the final mask ---
264
+ noisy_ids_item = torch.where(
265
+ final_masked_indices_item,
266
+ mask_id,
267
+ current_ids
268
+ )
269
+
270
+ # Store the results for this batch item
271
+ noisy_ids_list.append(noisy_ids_item)
272
+ final_masked_indices_list.append(final_masked_indices_item)
273
+ p_masks_per_token_list.append(p_mask_per_token)
274
+
275
+ # 3. Concatenate the per-item results into the final batched tensors
276
+ noisy_input_ids = torch.cat(noisy_ids_list, dim=0)
277
+ final_masked_indices = torch.cat(final_masked_indices_list, dim=0)
278
+ p_mask_full = torch.cat(p_masks_per_token_list, dim=0)
279
+
280
+ # 4. Extract the noise rates at the masked positions
281
+ p_masks = p_mask_full[final_masked_indices]
282
+
283
+ return noisy_input_ids, final_masked_indices, p_masks
284
+
285
+
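+ # Minimal usage sketch (illustrative values; in training, mask_id comes from
+ # config.mask_token_id and num_tokens_list from calculate_token_nums):
+ # >>> ids = torch.tensor([[5, 6, 7, 8, 9, 10]])
+ # >>> lens = [torch.tensor([3, 3])]  # two packed samples of length 3
+ # >>> prompt = torch.tensor([[True, False, False, True, False, False]])
+ # >>> noisy, masked, p = forward_add_noise_packed(ids, lens, prompt, mask_id=999)
+ # `noisy` equals `ids` except at `masked` positions, which hold mask_id; `p` lists the
+ # per-token noise rate for exactly those masked positions.
+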
286
+ def block_diff_mask(b, h, q_idx, kv_idx, block_size=None, n=None):
287
+ """
288
+ Constructs the specialized block diffusion attention mask for training
289
+ composed of three masks:
290
+ - **Block Diagonal Mask (M_BD)**: Self-attention within noised blocks
291
+ - **Offset Block Causal Mask (M_OBC)**: Cross-attention for conditional context
292
+ - **Block Causal Mask (M_BC)**: Attention to update x0
293
+
294
+ Args:
295
+ b, h: Batch and head indices (ignored for mask logic).
296
+ q_idx, kv_idx: Query and Key indices.
297
+ n: Length of the original sequence; indices [0, n) belong to xt and [n, 2n) to x0.
298
+ block_size: Defines the block structure.
299
+
300
+ Returns:
301
+ A boolean attention mask.
302
+ """
303
+
304
+ # Indicate whether token belongs to xt or x0
305
+ x0_flag_q = q_idx >= n
306
+ x0_flag_kv = kv_idx >= n
307
+
308
+ # Compute block indices
309
+ block_q = torch.where(
310
+ x0_flag_q == 1, (q_idx - n) // block_size, q_idx // block_size
311
+ )
312
+ block_kv = torch.where(
313
+ x0_flag_kv == 1, (kv_idx - n) // block_size, kv_idx // block_size
314
+ )
315
+
316
+ # **1. Block Diagonal Mask (M_BD) **
317
+ block_diagonal = (block_q == block_kv) & (x0_flag_q == x0_flag_kv)
318
+
319
+ # **2. Offset Block-Causal Mask (M_OBC) **
320
+ offset_block_causal = (block_q > block_kv) & (
321
+ x0_flag_kv == 1) & (x0_flag_q == 0)
322
+
323
+ # **3. Block-Causal Mask (M_BC) **
324
+ block_causal = (block_q >= block_kv) & (x0_flag_kv == 1) & (x0_flag_q == 1)
325
+
326
+ # **4. Combine Masks **
327
+ return block_diagonal | offset_block_causal | block_causal
328
+
329
+
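+ # Shape sanity check (illustrative): broadcasting integer index grids yields the full
+ # boolean mask; with n=4 and block_size=2, rows/cols [0, 4) are the noisy copy xt and
+ # rows/cols [4, 8) are the clean copy x0:
+ # >>> q = torch.arange(8)[:, None]
+ # >>> kv = torch.arange(8)[None, :]
+ # >>> block_diff_mask(None, None, q, kv, block_size=2, n=4).shape
+ # torch.Size([8, 8])
+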
330
+ def block_attn_mask(num_tokens, block_size, device):
331
+ masks = []
332
+ for i in range(len(num_tokens)):
333
+ cur_masks = []
334
+ for num in num_tokens[i]:
335
+ # Each per-sample mask spans both the noisy and clean copies: shape (2*num, 2*num)
336
+ single_mask = block_diff_mask(
337
+ b=None,
338
+ h=None,
339
+ q_idx=torch.arange(num * 2, device=device)[:, None],
340
+ kv_idx=torch.arange(num * 2, device=device)[None, :],
341
+ block_size=block_size,
342
+ n=num,
343
+ )
344
+ cur_masks.append(single_mask)
345
+ masks.append(torch.block_diag(*cur_masks))
346
+ masks = torch.stack(masks, dim=0)
347
+ return masks
348
+
349
+
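+ # Note: each per-sample mask above is (2*num, 2*num), so torch.block_diag produces a
+ # (2*total_tokens, 2*total_tokens) mask per batch item. This matches the interleaved
+ # [noisy(sample), clean(sample), ...] layout assembled in `prepare_for_bd_training`.
+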
350
+ def create_causal_mask_from_labels(token_labels: torch.LongTensor, block_size: int) -> torch.Tensor:
351
+ """
352
+ Build a causal mask from token_labels for token-label SFT.
353
+
354
+ token_labels shape: (batch_size, seq_len)
355
+ - 0: prompt
356
+ - 1..block_size: clean block labels (generation steps)
357
+ - block_size + 1: mask block labels
358
+ - -1: padding
359
+ """
360
+ if token_labels.dim() != 2:
361
+ raise ValueError(f"`token_labels` must be 2D, got shape {tuple(token_labels.shape)}.")
362
+
363
+ bsz, _ = token_labels.shape
364
+ device = token_labels.device
365
+
366
+ is_prompt = token_labels == 0
367
+ is_data = (token_labels > 0) & (token_labels <= block_size)
368
+ is_mask = token_labels == (block_size + 1)
369
+ is_pad = token_labels == -1
370
+
371
+ time_steps = token_labels.clone().float()
372
+ for b in range(bsz):
373
+ data_vals = time_steps[b, is_data[b]]
374
+ mask_indices = torch.nonzero(is_mask[b], as_tuple=True)[0]
375
+ if mask_indices.numel() == 0:
376
+ continue
377
+ if mask_indices.numel() == data_vals.numel():
378
+ time_steps[b, mask_indices] = data_vals
379
+ else:
380
+ min_len = min(mask_indices.numel(), data_vals.numel())
381
+ time_steps[b, mask_indices[:min_len]] = data_vals[:min_len]
382
+
383
+ time_steps[is_pad] = float("inf")
384
+
385
+ type_i = torch.zeros_like(token_labels) # 1=data, 2=mask
386
+ type_i[is_data] = 1
387
+ type_i[is_mask] = 2
388
+ type_i = type_i.unsqueeze(1).unsqueeze(2) # (B, 1, L, 1)
389
+ type_j = type_i.view(bsz, 1, 1, -1) # (B, 1, 1, L)
390
+
391
+ time_i = time_steps.unsqueeze(1).unsqueeze(2)
392
+ time_j = time_steps.unsqueeze(1).unsqueeze(1)
393
+
394
+ is_prompt_j = is_prompt.view(bsz, 1, 1, -1)
395
+ is_pad_i = is_pad.view(bsz, 1, -1, 1)
396
+ is_pad_j = is_pad.view(bsz, 1, 1, -1)
397
+
398
+ mask_prompt = is_prompt_j
399
+ mask_data_data = (type_i == 1) & (type_j == 1) & (time_j <= time_i)
400
+ mask_data_mask = (type_i == 1) & (type_j == 2) & (time_j > time_i)
401
+ mask_mask_data = (type_i == 2) & (type_j == 1) & (time_j < time_i)
402
+ mask_mask_mask = (type_i == 2) & (type_j == 2) & (time_j >= time_i)
403
+ mask_prompt_internal = (token_labels.unsqueeze(1).unsqueeze(2) == 0) & is_prompt_j
404
+
405
+ final_mask = (
406
+ mask_prompt
407
+ | mask_data_data
408
+ | mask_data_mask
409
+ | mask_mask_data
410
+ | mask_mask_mask
411
+ | mask_prompt_internal
412
+ )
413
+ final_mask = final_mask & (~is_pad_i) & (~is_pad_j)
414
+ return final_mask.squeeze(1).to(dtype=torch.bool, device=device)
415
+
416
+
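+ # Illustrative token_labels row (block_size=4): a 3-token prompt, one clean block
+ # labelled by generation step 1..4, its 4-token mask block (label block_size + 1 = 5),
+ # and one padding slot:
+ # >>> tl = torch.tensor([[0, 0, 0, 1, 2, 3, 4, 5, 5, 5, 5, -1]])
+ # >>> create_causal_mask_from_labels(tl, block_size=4).shape
+ # torch.Size([1, 12, 12])
+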
417
+ def create_multi_block_causal_mask(
418
+ token_labels: torch.LongTensor,
419
+ block_ids: torch.LongTensor,
420
+ block_size: int,
421
+ block_causal_prompt: bool = True,
422
+ ) -> torch.Tensor:
423
+ """
424
+ Generate attention mask for multi-block causal mask training.
425
+
426
+ Args:
427
+ token_labels: (B, L) — 0=prompt, 1..block_size=data step, block_size+1=mask, -1=pad
428
+ block_ids: (B, L) — -1=prompt/pad, 0,1,2,...=block index
429
+ block_size: denoising steps per block
430
+ block_causal_prompt:
431
+ True: block-level causal attention over the prompt (SDAR-style); prompt tokens do not attend to data/mask tokens
432
+ False: legacy behavior; prompt tokens attend to all prompt + mask tokens, and every token attends to all prompt tokens
433
+
434
+ Returns:
435
+ attn_mask: (B, L, L) bool tensor (squeezed from (B,1,L,L)), True = visible
436
+ """
437
+ B, L = token_labels.shape
438
+ device = token_labels.device
439
+
440
+ is_prompt = (token_labels == 0)
441
+ is_data = (token_labels > 0) & (token_labels <= block_size)
442
+ is_mask = (token_labels == (block_size + 1))
443
+ is_pad = (token_labels == -1)
444
+
445
+ time_steps = token_labels.clone().float()
446
+ time_steps[is_pad] = float("inf")
447
+ time_steps[is_prompt] = 0
448
+
449
+ for b in range(B):
450
+ blk_vals = block_ids[b][block_ids[b] >= 0].unique()
451
+ for blk in blk_vals:
452
+ blk_mask = (block_ids[b] == blk)
453
+ data_in_blk = blk_mask & is_data[b]
454
+ mask_in_blk = blk_mask & is_mask[b]
455
+ data_steps = time_steps[b, data_in_blk]
456
+ mask_indices = torch.nonzero(mask_in_blk, as_tuple=True)[0]
457
+ n_data = data_steps.shape[0]
458
+ n_mask = mask_indices.shape[0]
459
+ if n_mask > 0 and n_data > 0:
460
+ min_len = min(n_data, n_mask)
461
+ time_steps[b, mask_indices[:min_len]] = data_steps[:min_len]
462
+
463
+ type_vals = torch.zeros_like(token_labels)
464
+ type_vals[is_data] = 1
465
+ type_vals[is_mask] = 2
466
+
467
+ type_i = type_vals[:, None, :, None]
468
+ type_j = type_vals[:, None, None, :]
469
+ time_i = time_steps[:, None, :, None]
470
+ time_j = time_steps[:, None, None, :]
471
+ blkid_i = block_ids[:, None, :, None].float()
472
+ blkid_j = block_ids[:, None, None, :].float()
473
+
474
+ is_prompt_i = is_prompt.view(B, 1, L, 1)
475
+ is_prompt_j = is_prompt.view(B, 1, 1, L)
476
+ is_pad_i = is_pad.view(B, 1, L, 1)
477
+ is_pad_j = is_pad.view(B, 1, 1, L)
478
+
479
+ if block_causal_prompt:
480
+ # New behavior: block-level causal prompt (SDAR-style).
482
+ # The prompt is chunked by block_size; each chunk attends only to itself and earlier chunks, never to data/mask tokens.
482
+ prompt_cumpos = is_prompt.long().cumsum(dim=1) - 1
483
+ prompt_blk = prompt_cumpos // block_size
484
+ prompt_blk_i = prompt_blk[:, None, :, None]
485
+ prompt_blk_j = prompt_blk[:, None, None, :]
486
+ rule_prompt = is_prompt_i & is_prompt_j & (prompt_blk_j <= prompt_blk_i)
487
+ rule_see_prompt = (~is_prompt_i) & (~is_pad_i) & is_prompt_j
488
+ else:
489
+ # Legacy behavior: prompt attends to all prompt + mask tokens; every token attends to all prompt tokens
490
+ rule_prompt = is_prompt_i & is_prompt_j
491
+ is_mask_j = is_mask.view(B, 1, 1, L)
492
+ rule_prompt = rule_prompt | (is_prompt_i & is_mask_j)
493
+ rule_see_prompt = is_prompt_j.expand(B, 1, L, L)
494
+
495
+ same_block = (blkid_i == blkid_j) & (blkid_i >= 0)
496
+ intra_dd = same_block & (type_i == 1) & (type_j == 1) & (time_j <= time_i)
497
+ intra_dm = same_block & (type_i == 1) & (type_j == 2) & (time_j > time_i)
498
+ intra_md = same_block & (type_i == 2) & (type_j == 1) & (time_j < time_i)
499
+ intra_mm = same_block & (type_i == 2) & (type_j == 2) & (time_j >= time_i)
500
+
501
+ cross_block_data = (blkid_i > blkid_j) & (blkid_j >= 0) & (type_j == 1)
502
+
503
+ final_mask = (
504
+ rule_prompt
505
+ | rule_see_prompt
506
+ | intra_dd | intra_dm | intra_md | intra_mm
507
+ | cross_block_data
508
+ )
509
+ final_mask = final_mask & (~is_pad_i) & (~is_pad_j)
510
+ return final_mask.squeeze(1).to(dtype=torch.bool, device=device)
511
+
512
+
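+ # The multi-block variant generalizes `create_causal_mask_from_labels`: `block_ids`
+ # groups each (data, mask) pair into its own block, so several noised blocks can share
+ # one packed row while the `cross_block_data` rule still lets later blocks attend to
+ # the clean data tokens of all earlier blocks.
+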
513
+ @torch.compile(fullgraph=True, mode="max-autotune-no-cudagraphs")
514
+ def fused_flex_attention(query, key, value, attention_mask, **kwargs):
515
+ # Force small backward-kernel tiles so the Triton kernel fits in the
516
+ # 99KB / 101376 B opt-in shared-memory budget on sm_120 (RTX PRO 6000
517
+ # Blackwell). Default autotuner picks tiles that each require >100KB
518
+ # SRAM, so every backward config gets rejected.
519
+ opts = kwargs.pop("kernel_options", None) or {}
520
+ # Forward kernel: the surviving autotune winner uses these tile sizes; set them
521
+ # explicitly to skip the noisy ~24 rejected configs on every first run.
522
+ opts.setdefault("fwd_BLOCK_M", 128)
523
+ opts.setdefault("fwd_BLOCK_N", 64)
524
+ opts.setdefault("fwd_num_warps", 8)
525
+ opts.setdefault("fwd_num_stages", 1)
526
+ # Backward kernel: smaller tiles so dK/dV and dQ passes fit in 99KB SMEM.
527
+ opts.setdefault("bwd_BLOCK_M1", 32)
528
+ opts.setdefault("bwd_BLOCK_N1", 64)
529
+ opts.setdefault("bwd_BLOCK_M2", 64)
530
+ opts.setdefault("bwd_BLOCK_N2", 32)
531
+ opts.setdefault("bwd_num_warps", 4)
532
+ opts.setdefault("bwd_num_stages", 1)
533
+ return flex_attention(
534
+ query, key, value,
535
+ block_mask=attention_mask,
536
+ kernel_options=opts,
537
+ **kwargs,
538
+ )
539
+
540
+
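+ # Usage sketch (assumed shapes, CUDA + bf16): `attention_mask` must already be a
+ # flex-attention BlockMask (see the `create_block_mask` calls below), not a dense
+ # boolean tensor:
+ # >>> q = k = v = torch.randn(1, 8, 128, 64, device="cuda", dtype=torch.bfloat16)
+ # >>> bm = create_block_mask(lambda b, h, i, j: i >= j, B=None, H=None, Q_LEN=128, KV_LEN=128)
+ # >>> out, lse = fused_flex_attention(q, k, v, bm, enable_gqa=True, scale=64 ** -0.5, return_lse=True)
+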
541
+ @use_kernel_forward_from_hub("RMSNorm")
542
+ class SDARRMSNorm(nn.Module):
543
+ def __init__(self, hidden_size, eps=1e-6):
544
+ """
545
+ SDARRMSNorm is equivalent to T5LayerNorm
546
+ """
547
+ super().__init__()
548
+ self.weight = nn.Parameter(torch.ones(hidden_size))
549
+ self.variance_epsilon = eps
550
+
551
+ def forward(self, hidden_states):
552
+ return flash_rms_norm(
553
+ hidden_states, weight=self.weight, bias=None, eps=self.variance_epsilon)
554
+ # Reference eager implementation, kept for documentation (unreachable after the return above):
555
+ # input_dtype = hidden_states.dtype
556
+ # hidden_states = hidden_states.to(torch.float32)
557
+ # variance = hidden_states.pow(2).mean(-1, keepdim=True)
558
+ # hidden_states = hidden_states * \
559
+ #     torch.rsqrt(variance + self.variance_epsilon)
560
+ # return self.weight * hidden_states.to(input_dtype)
561
+
562
+
563
+ def extra_repr(self):
564
+ return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
565
+
566
+
567
+ class SDARMLP(nn.Module):
568
+ def __init__(self, config):
569
+ super().__init__()
570
+ self.config = config
571
+ self.hidden_size = config.hidden_size
572
+ self.intermediate_size = config.intermediate_size
573
+ self.gate_proj = nn.Linear(
574
+ self.hidden_size, self.intermediate_size, bias=False)
575
+ self.up_proj = nn.Linear(
576
+ self.hidden_size, self.intermediate_size, bias=False)
577
+ self.down_proj = nn.Linear(
578
+ self.intermediate_size, self.hidden_size, bias=False)
579
+ self.act_fn = ACT2FN[config.hidden_act]
580
+
581
+ def forward(self, x):
582
+ if liger_kernel_is_available:
583
+ return self.down_proj(LigerSiLUMulFunction.apply(self.gate_proj(x), self.up_proj(x)))
584
+ else:
585
+ down_proj = self.down_proj(self.act_fn(
586
+ self.gate_proj(x)) * self.up_proj(x))
587
+ return down_proj
588
+
589
+
590
+ def rotate_half(x):
591
+ """Rotates half the hidden dims of the input."""
592
+ x1 = x[..., : x.shape[-1] // 2]
593
+ x2 = x[..., x.shape[-1] // 2:]
594
+ return torch.cat((-x2, x1), dim=-1)
595
+
596
+
597
+ def apply_rotary_pos_emb(q, k, cos, sin, position_ids=None, unsqueeze_dim=1):
598
+ """Applies Rotary Position Embedding to the query and key tensors.
599
+
600
+ Args:
601
+ q (`torch.Tensor`): The query tensor.
602
+ k (`torch.Tensor`): The key tensor.
603
+ cos (`torch.Tensor`): The cosine part of the rotary embedding.
604
+ sin (`torch.Tensor`): The sine part of the rotary embedding.
605
+ position_ids (`torch.Tensor`, *optional*):
606
+ Deprecated and unused.
607
+ unsqueeze_dim (`int`, *optional*, defaults to 1):
608
+ The 'unsqueeze_dim' argument specifies the dimension along which to unsqueeze cos[position_ids] and
609
+ sin[position_ids] so that they can be properly broadcasted to the dimensions of q and k. For example, note
610
+ that cos[position_ids] and sin[position_ids] have the shape [batch_size, seq_len, head_dim]. Then, if q and
611
+ k have the shape [batch_size, heads, seq_len, head_dim], then setting unsqueeze_dim=1 makes
612
+ cos[position_ids] and sin[position_ids] broadcastable to the shapes of q and k. Similarly, if q and k have
613
+ the shape [batch_size, seq_len, heads, head_dim], then set unsqueeze_dim=2.
614
+ Returns:
615
+ `tuple(torch.Tensor)` comprising the query and key tensors rotated using the Rotary Position Embedding.
616
+ """
617
+ cos = cos.unsqueeze(unsqueeze_dim)
618
+ sin = sin.unsqueeze(unsqueeze_dim)
619
+ q_embed = (q * cos) + (rotate_half(q) * sin)
620
+ k_embed = (k * cos) + (rotate_half(k) * sin)
621
+ return q_embed, k_embed
622
+
623
+
624
+ def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
625
+ """
626
+ This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
627
+ num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
628
+ """
629
+ batch, num_key_value_heads, slen, head_dim = hidden_states.shape
630
+ if n_rep == 1:
631
+ return hidden_states
632
+ hidden_states = hidden_states[:, :, None, :, :].expand(
633
+ batch, num_key_value_heads, n_rep, slen, head_dim)
634
+ return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
635
+
636
+
637
+ def eager_attention_forward(
638
+ module: nn.Module,
639
+ query: torch.Tensor,
640
+ key: torch.Tensor,
641
+ value: torch.Tensor,
642
+ attention_mask: Optional[torch.Tensor],
643
+ scaling: float,
644
+ dropout: float = 0.0,
645
+ **kwargs,
646
+ ):
647
+ key_states = repeat_kv(key, module.num_key_value_groups)
648
+ value_states = repeat_kv(value, module.num_key_value_groups)
649
+
650
+ attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
651
+ if attention_mask is not None:
652
+ causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
653
+ attn_weights = attn_weights + causal_mask
654
+
655
+ attn_weights = nn.functional.softmax(
656
+ attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
657
+ attn_weights = nn.functional.dropout(
658
+ attn_weights, p=dropout, training=module.training)
659
+ attn_output = torch.matmul(attn_weights, value_states)
660
+ attn_output = attn_output.transpose(1, 2).contiguous()
661
+
662
+ return attn_output, attn_weights
663
+
664
+
665
+ class SDARAttention(nn.Module):
666
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
667
+
668
+ def __init__(self, config: SDARConfig, layer_idx: int):
669
+ super().__init__()
670
+ self.config = config
671
+ self.layer_idx = layer_idx
672
+ self.head_dim = getattr(
673
+ config, "head_dim", config.hidden_size // config.num_attention_heads)
674
+ self.num_key_value_groups = config.num_attention_heads // config.num_key_value_heads
675
+ self.scaling = self.head_dim**-0.5
676
+ self.attention_dropout = config.attention_dropout
677
+ self.is_causal = True
678
+
679
+ self.hidden_size = config.hidden_size
680
+ self.num_attention_heads = config.num_attention_heads
681
+ self.num_key_value_heads = config.num_key_value_heads
682
+
683
+ self.q_proj = nn.Linear(
684
+ config.hidden_size, config.num_attention_heads * self.head_dim, bias=config.attention_bias
685
+ )
686
+ self.k_proj = nn.Linear(
687
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
688
+ )
689
+ self.v_proj = nn.Linear(
690
+ config.hidden_size, config.num_key_value_heads * self.head_dim, bias=config.attention_bias
691
+ )
692
+ self.o_proj = nn.Linear(
693
+ config.num_attention_heads * self.head_dim, config.hidden_size, bias=config.attention_bias
694
+ )
695
+ # unlike olmo, only on the head dim!
696
+ self.q_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
697
+ # thus post q_norm does not need reshape
698
+ self.k_norm = SDARRMSNorm(self.head_dim, eps=config.rms_norm_eps)
699
+ self.sliding_window = config.sliding_window
700
+ if not (
701
+ self.config.use_sliding_window
702
+ and getattr(self.config, "sliding_window", None) is not None
703
+ and self.layer_idx >= self.config.max_window_layers
704
+ ):
705
+ self.sliding_window = None
706
+
707
+ def forward(
708
+ self,
709
+ hidden_states: torch.Tensor,
710
+ position_embeddings: Tuple[torch.Tensor, torch.Tensor],
711
+ attention_mask: Optional[torch.Tensor],
712
+ past_key_value: Optional[Cache] = None,
713
+ cache_position: Optional[torch.LongTensor] = None,
714
+ **kwargs: Unpack[FlashAttentionKwargs],
715
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
716
+ input_shape = hidden_states.shape[:-1]
717
+ bsz, q_len = input_shape
718
+ hidden_shape = (*input_shape, -1, self.head_dim)
719
+
720
+ query_states = self.q_norm(self.q_proj(
721
+ hidden_states).view(hidden_shape)).transpose(1, 2)
722
+ key_states = self.k_norm(self.k_proj(
723
+ hidden_states).view(hidden_shape)).transpose(1, 2)
724
+ value_states = self.v_proj(hidden_states).view(
725
+ hidden_shape).transpose(1, 2)
726
+
727
+ cos, sin = position_embeddings
728
+ query_states, key_states = apply_rotary_pos_emb(
729
+ query_states, key_states, cos, sin)
730
+
731
+ if past_key_value is not None and kwargs.get("store_kv", False):
732
+ # sin and cos are specific to RoPE models; cache_position needed for the static cache
733
+ key_states, value_states = past_key_value.update(
734
+ key_states, value_states, self.layer_idx)
735
+ elif past_key_value is not None and not kwargs.get("store_kv", False) and len(past_key_value) > self.layer_idx:
736
+ # only retrieve the cached kv, do not store new entries
737
+ past_key_states, past_value_states = past_key_value[self.layer_idx]
738
+ key_states = torch.cat(
739
+ [past_key_states, key_states], dim=-2)
740
+ value_states = torch.cat(
741
+ [past_value_states, value_states], dim=-2)
742
+
743
+ if self.training:
744
+ attn_output, attn_weights = fused_flex_attention(
745
+ query=query_states,
746
+ key=key_states,
747
+ value=value_states,
748
+ attention_mask=attention_mask,
749
+ enable_gqa=True,
750
+ scale=self.scaling,
751
+ return_lse=True
752
+ )
753
+ attn_weights = attn_weights.to(
754
+ value_states.dtype) if attn_weights is not None else None
755
+ attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
756
+ else:
757
+ attention_mask = attention_mask.bool() if attention_mask is not None else None
758
+ attn_weights = None
759
+ if torch.all(attention_mask): # decoding
760
+ query_states = query_states.transpose(1, 2)
761
+ key_states = key_states.transpose(1, 2)
762
+ value_states = value_states.transpose(1, 2)
763
+ attn_output = flash_attn_func(
764
+ query_states,
765
+ key_states,
766
+ value_states,
767
+ causal=False,
768
+ softmax_scale=self.scaling
769
+ )
770
+ attn_output = rearrange(attn_output, 'b l h d -> b l (h d)')
771
+ else: # prefilling
772
+ attn_output = F.scaled_dot_product_attention(
773
+ query=query_states,
774
+ key=key_states,
775
+ value=value_states,
776
+ attn_mask=attention_mask,
777
+ is_causal=False,
778
+ scale=self.scaling,
779
+ enable_gqa=True
780
+ )
781
+ attn_output = rearrange(attn_output, 'b h l d -> b l (h d)')
782
+ attn_output = self.o_proj(attn_output)
783
+ return attn_output, attn_weights
784
+
785
+
786
+ class SDARDecoderLayer(GradientCheckpointingLayer):
787
+ def __init__(self, config: SDARConfig, layer_idx: int):
788
+ super().__init__()
789
+ self.hidden_size = config.hidden_size
790
+ self.self_attn = SDARAttention(config=config, layer_idx=layer_idx)
791
+ self.mlp = SDARMLP(config)
792
+ self.input_layernorm = SDARRMSNorm(
793
+ config.hidden_size, eps=config.rms_norm_eps)
794
+ self.post_attention_layernorm = SDARRMSNorm(
795
+ config.hidden_size, eps=config.rms_norm_eps)
796
+ if (
797
+ config.sliding_window and config._attn_implementation != "flash_attention_2"
798
+ ): # diff with Llama is this warning
799
+ logger.warning_once(
800
+ f"Sliding Window Attention is enabled but not implemented for `{config._attn_implementation}`; "
801
+ "unexpected results may be encountered."
802
+ )
803
+
804
+ def forward(
805
+ self,
806
+ hidden_states: torch.Tensor,
807
+ attention_mask: Optional[torch.Tensor] = None,
808
+ position_ids: Optional[torch.LongTensor] = None,
809
+ past_key_value: Optional[Cache] = None,
810
+ output_attentions: Optional[bool] = False,
811
+ use_cache: Optional[bool] = False,
812
+ store_kv: Optional[bool] = False,
813
+ cache_position: Optional[torch.LongTensor] = None,
814
+ # necessary, but kept here for BC
815
+ position_embeddings: Optional[Tuple[torch.Tensor,
816
+ torch.Tensor]] = None,
817
+ **kwargs: Unpack[FlashAttentionKwargs],
818
+ ) -> Tuple[torch.FloatTensor, Optional[Tuple[torch.FloatTensor, torch.FloatTensor]]]:
819
+ residual = hidden_states
820
+ hidden_states = self.input_layernorm(hidden_states)
821
+
822
+ # Self Attention
823
+ hidden_states, self_attn_weights = self.self_attn(
824
+ hidden_states=hidden_states,
825
+ attention_mask=attention_mask,
826
+ position_ids=position_ids,
827
+ past_key_value=past_key_value,
828
+ output_attentions=output_attentions,
829
+ use_cache=use_cache,
830
+ store_kv=store_kv,
831
+ cache_position=cache_position,
832
+ position_embeddings=position_embeddings,
833
+ **kwargs,
834
+ )
835
+ hidden_states = residual + hidden_states
836
+
837
+ # Fully Connected
838
+ residual = hidden_states
839
+ hidden_states = self.post_attention_layernorm(hidden_states)
840
+ hidden_states = self.mlp(hidden_states)
841
+ hidden_states = residual + hidden_states
842
+
843
+ outputs = (hidden_states,)
844
+ if output_attentions:
845
+ outputs += (self_attn_weights,)
846
+
847
+ return outputs
848
+
849
+
850
+ @auto_docstring
851
+ class SDARPreTrainedModel(PreTrainedModel):
852
+ config_class = SDARConfig
853
+ base_model_prefix = "model"
854
+ supports_gradient_checkpointing = True
855
+ _no_split_modules = ["SDARDecoderLayer"]
856
+ _skip_keys_device_placement = ["past_key_values"]
857
+ _supports_flash_attn_2 = True
858
+ _supports_sdpa = True
859
+ _supports_flex_attn = True
860
+ _supports_cache_class = True
861
+ _supports_quantized_cache = True
862
+ _supports_static_cache = True
863
+ _supports_attention_backend = True
864
+
865
+ def _init_weights(self, module):
866
+ std = self.config.initializer_range
867
+ if isinstance(module, nn.Linear):
868
+ module.weight.data.normal_(mean=0.0, std=std)
869
+ if module.bias is not None:
870
+ module.bias.data.zero_()
871
+ elif isinstance(module, nn.Embedding):
872
+ module.weight.data.normal_(mean=0.0, std=std)
873
+ if module.padding_idx is not None:
874
+ module.weight.data[module.padding_idx].zero_()
875
+ elif isinstance(module, SDARRMSNorm):
876
+ module.weight.data.fill_(1.0)
877
+
878
+
879
+ class SDARRotaryEmbedding(nn.Module):
880
+ def __init__(self, config: SDARConfig, device=None):
881
+ super().__init__()
882
+ # BC: "rope_type" was originally "type"
883
+ if hasattr(config, "rope_scaling") and config.rope_scaling is not None:
884
+ self.rope_type = config.rope_scaling.get(
885
+ "rope_type", config.rope_scaling.get("type"))
886
+ else:
887
+ self.rope_type = "default"
888
+ self.max_seq_len_cached = config.max_position_embeddings
889
+ self.original_max_seq_len = config.max_position_embeddings
890
+
891
+ self.config = config
892
+ self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
893
+
894
+ inv_freq, self.attention_scaling = self.rope_init_fn(
895
+ self.config, device)
896
+ self.register_buffer("inv_freq", inv_freq, persistent=False)
897
+ self.original_inv_freq = self.inv_freq
898
+
899
+ @torch.no_grad()
900
+ # power user: used with advanced RoPE types (e.g. dynamic rope)
901
+ @dynamic_rope_update
902
+ def forward(self, x, position_ids, token_labels: Optional[torch.LongTensor] = None):
903
+ inv_freq_expanded = self.inv_freq[None, :, None].float().expand(
904
+ position_ids.shape[0], -1, 1).to(x.device)
905
+ position_ids_expanded = position_ids[:, None, :].float()
906
+
907
+ device_type = x.device.type if isinstance(
908
+ x.device.type, str) and x.device.type != "mps" else "cpu"
909
+ with torch.autocast(device_type=device_type, enabled=False): # Force float32
910
+ freqs = (inv_freq_expanded.float() @
911
+ position_ids_expanded.float()).transpose(1, 2)
912
+ emb = torch.cat((freqs, freqs), dim=-1)
913
+ cos = emb.cos() * self.attention_scaling
914
+ sin = emb.sin() * self.attention_scaling
915
+
916
+ cos = cos.to(dtype=x.dtype)
917
+ sin = sin.to(dtype=x.dtype)
918
+
919
+ if token_labels is not None:
920
+ if token_labels.shape != position_ids.shape:
921
+ raise ValueError(
922
+ f"`token_labels` shape {tuple(token_labels.shape)} must match `position_ids` shape {tuple(position_ids.shape)}."
923
+ )
924
+
925
+ clean_min_label = 1
926
+ clean_max_label = self.config.block_size
927
+ mask_label = self.config.block_size + 1
928
+ token_labels = token_labels.to(position_ids.device)
929
+ for batch_idx in range(token_labels.size(0)):
930
+ clean_indices = torch.nonzero(
931
+ (token_labels[batch_idx] >= clean_min_label) & (token_labels[batch_idx] <= clean_max_label),
932
+ as_tuple=True,
933
+ )[0]
934
+ mask_indices = torch.nonzero(token_labels[batch_idx] == mask_label, as_tuple=True)[0]
935
+ if mask_indices.numel() == 0:
936
+ continue
937
+ if clean_indices.numel() != mask_indices.numel():
938
+ raise ValueError(
939
+ "The clean block and mask block must have equal lengths for RoPE frequency copy."
940
+ )
941
+ cos[batch_idx, mask_indices] = cos[batch_idx, clean_indices]
942
+ sin[batch_idx, mask_indices] = sin[batch_idx, clean_indices]
943
+
944
+ return cos, sin
945
+
946
+
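+ # RoPE frequency copy, illustrated (block_size=4): for a row with token_labels
+ # [0, 0, 1, 2, 3, 4, 5, 5, 5, 5], the four mask tokens (label block_size + 1 = 5) reuse
+ # the cos/sin rows of the four clean tokens (labels 1..4), so both copies of a block
+ # are rotated with identical positions.
+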
947
+ @auto_docstring
948
+ class SDARModel(SDARPreTrainedModel):
949
+ def __init__(self, config: SDARConfig):
950
+ super().__init__(config)
951
+ self.padding_idx = config.pad_token_id
952
+ self.vocab_size = config.vocab_size
953
+
954
+ self.embed_tokens = nn.Embedding(
955
+ config.vocab_size, config.hidden_size, self.padding_idx)
956
+ self.layers = nn.ModuleList(
957
+ [SDARDecoderLayer(config, layer_idx)
958
+ for layer_idx in range(config.num_hidden_layers)]
959
+ )
960
+ self.norm = SDARRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
961
+ self.rotary_emb = SDARRotaryEmbedding(config=config)
962
+ self.gradient_checkpointing = False
963
+
964
+ # Initialize weights and apply final processing
965
+ self.post_init()
966
+
967
+ def get_input_embeddings(self):
968
+ return self.embed_tokens
969
+
970
+ def set_input_embeddings(self, value):
971
+ self.embed_tokens = value
972
+
973
+ @can_return_tuple
974
+ @auto_docstring
975
+ def forward(
976
+ self,
977
+ input_ids: Optional[torch.LongTensor] = None,
978
+ attention_mask: Optional[torch.Tensor] = None,
979
+ position_ids: Optional[torch.LongTensor] = None,
980
+ token_labels: Optional[torch.LongTensor] = None,
981
+ past_key_values: Optional[Cache] = None,
982
+ inputs_embeds: Optional[torch.FloatTensor] = None,
983
+ use_cache: Optional[bool] = None,
984
+ store_kv: Optional[bool] = None,
985
+ output_attentions: Optional[bool] = None,
986
+ output_hidden_states: Optional[bool] = None,
987
+ cache_position: Optional[torch.LongTensor] = None,
988
+ **flash_attn_kwargs: Unpack[FlashAttentionKwargs],
989
+ ) -> BaseModelOutputWithPast:
990
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
991
+ output_hidden_states = (
992
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
993
+ )
994
+ use_cache = use_cache if use_cache is not None else self.config.use_cache
995
+
996
+ if (input_ids is None) ^ (inputs_embeds is not None):
997
+ raise ValueError(
998
+ "You must specify exactly one of input_ids or inputs_embeds")
999
+
1000
+ if self.gradient_checkpointing and self.training and use_cache:
1001
+ logger.warning_once(
1002
+ "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`."
1003
+ )
1004
+ use_cache = False
1005
+
1006
+ # TODO (joao): remove this exception in v4.56 -- it exists for users that try to pass a legacy cache
1007
+ if not isinstance(past_key_values, (type(None), Cache)):
1008
+ raise ValueError(
1009
+ "The `past_key_values` should be either a `Cache` object or `None`.")
1010
+
1011
+ if inputs_embeds is None:
1012
+ inputs_embeds = self.embed_tokens(input_ids)
1013
+
1014
+ if use_cache and past_key_values is None:
1015
+ past_key_values = DynamicCache()
1016
+
1017
+ if cache_position is None:
1018
+ past_seen_tokens = past_key_values.get_seq_length(
1019
+ ) if past_key_values is not None else 0
1020
+ cache_position = torch.arange(
1021
+ past_seen_tokens, past_seen_tokens + inputs_embeds.shape[1], device=inputs_embeds.device
1022
+ )
1023
+
1024
+ if position_ids is None:
1025
+ position_ids = cache_position.unsqueeze(0).expand(inputs_embeds.shape[0], -1)
1026
+
1027
+ # causal_mask = self._update_causal_mask(
1028
+ # attention_mask, inputs_embeds, cache_position, past_key_values, output_attentions
1029
+ # )
1030
+
1031
+ hidden_states = inputs_embeds
1032
+
1033
+ # create position embeddings to be shared across the decoder layers
1034
+ position_embeddings = self.rotary_emb(hidden_states, position_ids)
1035
+
1036
+ # RoPE frequency copy: for single-block training (without block_ids),
1037
+ # data and mask have different position_ids, so copy data's RoPE to mask.
1038
+ # For multi-block training (with block_ids), position_ids are already shared
1039
+ # between data and mask, so no copy is needed.
1040
+ if token_labels is not None and not hasattr(self, '_skip_rope_copy'):
1041
+ cos, sin = position_embeddings
1042
+ block_size = self.config.block_size
1043
+ clean_min_label, clean_max_label = 1, block_size
1044
+ mask_label = block_size + 1
1045
+ tl = token_labels.to(position_ids.device)
1046
+ for b_idx in range(tl.size(0)):
1047
+ clean_idx = torch.nonzero(
1048
+ (tl[b_idx] >= clean_min_label) & (tl[b_idx] <= clean_max_label), as_tuple=True
1049
+ )[0]
1050
+ mask_idx = torch.nonzero(tl[b_idx] == mask_label, as_tuple=True)[0]
1051
+ if mask_idx.numel() > 0 and clean_idx.numel() == mask_idx.numel():
1052
+ cos[b_idx, mask_idx] = cos[b_idx, clean_idx]
1053
+ sin[b_idx, mask_idx] = sin[b_idx, clean_idx]
1054
+ position_embeddings = (cos, sin)
1055
+
1056
+ # decoder layers
1057
+ all_hidden_states = () if output_hidden_states else None
1058
+ all_self_attns = () if output_attentions else None
1059
+
1060
+ for decoder_layer in self.layers[: self.config.num_hidden_layers]:
1061
+ if output_hidden_states:
1062
+ all_hidden_states += (hidden_states,)
1063
+
1064
+ layer_outputs = decoder_layer(
1065
+ hidden_states,
1066
+ attention_mask=attention_mask,
1067
+ position_ids=position_ids,
1068
+ past_key_value=past_key_values,
1069
+ output_attentions=output_attentions,
1070
+ use_cache=use_cache,
1071
+ store_kv=store_kv,
1072
+ cache_position=cache_position,
1073
+ position_embeddings=position_embeddings,
1074
+ **flash_attn_kwargs,
1075
+ )
1076
+
1077
+ hidden_states = layer_outputs[0]
1078
+
1079
+ if output_attentions:
1080
+ all_self_attns += (layer_outputs[1],)
1081
+
1082
+ hidden_states = self.norm(hidden_states)
1083
+
1084
+ # add hidden states from the last decoder layer
1085
+ if output_hidden_states:
1086
+ all_hidden_states += (hidden_states,)
1087
+
1088
+ return BaseModelOutputWithPast(
1089
+ last_hidden_state=hidden_states,
1090
+ past_key_values=past_key_values if use_cache else None,
1091
+ hidden_states=all_hidden_states,
1092
+ attentions=all_self_attns,
1093
+ )
1094
+
1095
+ def _update_causal_mask(
1096
+ self,
1097
+ attention_mask: Union[torch.Tensor, "BlockMask"],
1098
+ input_tensor: torch.Tensor,
1099
+ cache_position: torch.Tensor,
1100
+ past_key_values: Cache,
1101
+ output_attentions: bool = False,
1102
+ ):
1103
+ if self.config._attn_implementation == "flash_attention_2":
1104
+ if attention_mask is not None and past_key_values is not None:
1105
+ is_padding_right = attention_mask[:, -
1106
+ 1].sum().item() != input_tensor.size()[0]
1107
+ if is_padding_right:
1108
+ raise ValueError(
1109
+ "You are attempting to perform batched generation with padding_side='right'"
1110
+ " this may lead to unexpected behaviour for Flash Attention version of Qwen3. Make sure to "
1111
+ " call `tokenizer.padding_side = 'left'` before tokenizing the input. "
1112
+ )
1113
+ if attention_mask is not None and 0.0 in attention_mask:
1114
+ return attention_mask
1115
+ return None
1116
+ if self.config._attn_implementation == "flex_attention":
1117
+ if isinstance(attention_mask, torch.Tensor):
1118
+ seq_len_q, seq_len_kv = attention_mask.shape
1119
+ assert seq_len_q == seq_len_kv, f"got {attention_mask.shape=}"
1120
+ attention_mask = create_block_mask(
1121
+ # 2d bool tensor, shape: [2*seqlen, 2*seqlen]
1122
+ lambda b, h, q_idx, kv_idx: attention_mask[q_idx, kv_idx],
1123
+ B=None, H=None, Q_LEN=seq_len_q, KV_LEN=seq_len_kv,
1124
+ )
1125
+ else:
1126
+ # Here we pass in flex mask computed externally
1127
+ assert isinstance(attention_mask, BlockMask)
1128
+ return attention_mask
1129
+
1130
+ # For SDPA, when possible, we will rely on its `is_causal` argument instead of its `attn_mask` argument, in
1131
+ # order to dispatch on Flash Attention 2. This feature is not compatible with static cache, as SDPA will fail
1132
+ # to infer the attention mask.
1133
+ past_seen_tokens = past_key_values.get_seq_length(
1134
+ ) if past_key_values is not None else 0
1135
+ using_static_cache = isinstance(past_key_values, StaticCache)
1136
+ using_sliding_window_cache = isinstance(
1137
+ past_key_values, SlidingWindowCache)
1138
+
1139
+ # When output attentions is True, sdpa implementation's forward method calls the eager implementation's forward
1140
+ if (
1141
+ self.config._attn_implementation == "sdpa"
1142
+ and not (using_static_cache or using_sliding_window_cache)
1143
+ and not output_attentions
1144
+ ):
1145
+ if AttentionMaskConverter._ignore_causal_mask_sdpa(
1146
+ attention_mask,
1147
+ inputs_embeds=input_tensor,
1148
+ past_key_values_length=past_seen_tokens,
1149
+ sliding_window=self.config.sliding_window,
1150
+ is_training=self.training,
1151
+ ):
1152
+ return None
1153
+
1154
+ dtype = input_tensor.dtype
1155
+ min_dtype = torch.finfo(dtype).min
1156
+ sequence_length = input_tensor.shape[1]
1157
+ # SlidingWindowCache or StaticCache
1158
+ if using_sliding_window_cache or using_static_cache:
1159
+ target_length = past_key_values.get_max_cache_shape()
1160
+ # DynamicCache or no cache
1161
+ else:
1162
+ target_length = (
1163
+ attention_mask.shape[-1]
1164
+ if isinstance(attention_mask, torch.Tensor)
1165
+ else past_seen_tokens + sequence_length + 1
1166
+ )
1167
+
1168
+ # In case the provided `attention` mask is 2D, we generate a causal mask here (4D).
1169
+ causal_mask = self._prepare_4d_causal_attention_mask_with_cache_position(
1170
+ attention_mask,
1171
+ sequence_length=sequence_length,
1172
+ target_length=target_length,
1173
+ dtype=dtype,
1174
+ cache_position=cache_position,
1175
+ batch_size=input_tensor.shape[0],
1176
+ config=self.config,
1177
+ past_key_values=past_key_values,
1178
+ )
1179
+
1180
+ if (
1181
+ self.config._attn_implementation == "sdpa"
1182
+ and attention_mask is not None
1183
+ and attention_mask.device.type in ["cuda", "xpu", "npu"]
1184
+ and not output_attentions
1185
+ ):
1186
+ # Attend to all tokens in fully masked rows in the causal_mask, for example the relevant first rows when
1187
+ # using left padding. This is required by F.scaled_dot_product_attention memory-efficient attention path.
1188
+ # Details: https://github.com/pytorch/pytorch/issues/110213
1189
+ causal_mask = AttentionMaskConverter._unmask_unattended(
1190
+ causal_mask, min_dtype)
1191
+
1192
+ return causal_mask
1193
+
1194
+ @staticmethod
1195
+ def _prepare_4d_causal_attention_mask_with_cache_position(
1196
+ attention_mask: torch.Tensor,
1197
+ sequence_length: int,
1198
+ target_length: int,
1199
+ dtype: torch.dtype,
1200
+ cache_position: torch.Tensor,
1201
+ batch_size: int,
1202
+ config: SDARConfig,
1203
+ past_key_values: Cache,
1204
+ ):
1205
+ """
1206
+ Creates a causal 4D mask of shape `(batch_size, 1, query_length, key_value_length)` from a 2D mask of shape
1207
+ `(batch_size, key_value_length)`, or if the input `attention_mask` is already 4D, do nothing.
1208
+
1209
+ Args:
1210
+ attention_mask (`torch.Tensor`):
1211
+ A 2D attention mask of shape `(batch_size, key_value_length)` or a 4D attention mask of shape `(batch_size, 1, query_length, key_value_length)`.
1212
+ sequence_length (`int`):
1213
+ The sequence length being processed.
1214
+ target_length (`int`):
1215
+ The target length: when generating with static cache, the mask should be as long as the static cache, to account for the 0 padding, the part of the cache that is not filled yet.
1216
+ dtype (`torch.dtype`):
1217
+ The dtype to use for the 4D attention mask.
1218
+ cache_position (`torch.Tensor`):
1219
+ Indices depicting the position of the input sequence tokens in the sequence.
1220
+ batch_size (`int`):
1221
+ Batch size.
1222
+ config (`SDARConfig`):
1223
+ The model's configuration class
1224
+ past_key_values (`Cache`):
1225
+ The cache class that is being used currently to generate
1226
+ """
1227
+ if attention_mask is not None and attention_mask.dim() == 4:
1228
+ # In this case we assume that the mask comes already in inverted form and requires no inversion or slicing.
1229
+ causal_mask = attention_mask
1230
+ else:
1231
+ min_dtype = torch.finfo(dtype).min
1232
+ causal_mask = torch.full(
1233
+ (sequence_length, target_length), fill_value=min_dtype, dtype=dtype, device=cache_position.device
1234
+ )
1235
+ diagonal_attend_mask = torch.arange(target_length, device=cache_position.device) > cache_position.reshape(
1236
+ -1, 1
1237
+ )
1238
+ text_config = config.get_text_config()
1239
+ if getattr(text_config, "use_sliding_window", True) and text_config.sliding_window is not None:
1240
+ # if we have sliding window, we should not attend to tokens beyond sliding window length, so we mask them out also
1241
+ # this check is needed to verify whether the current checkpoint was trained with sliding window or not
1242
+ if not isinstance(past_key_values, SlidingWindowCache) or sequence_length > target_length:
1243
+ sliding_attend_mask = torch.arange(target_length, device=cache_position.device) <= (
1244
+ cache_position.reshape(-1, 1) -
1245
+ text_config.sliding_window
1246
+ )
1247
+ diagonal_attend_mask.bitwise_or_(sliding_attend_mask)
1248
+ causal_mask *= diagonal_attend_mask
1249
+ causal_mask = causal_mask[None, None,
1250
+ :, :].expand(batch_size, 1, -1, -1)
1251
+ if attention_mask is not None:
1252
+ causal_mask = causal_mask.clone() # copy to contiguous memory for in-place edit
1253
+ if attention_mask.shape[-1] > target_length:
1254
+ attention_mask = attention_mask[:, :target_length]
1255
+ mask_length = attention_mask.shape[-1]
1256
+ padding_mask = causal_mask[:, :, :, :mask_length] + attention_mask[:, None, None, :].to(
1257
+ causal_mask.device
1258
+ )
1259
+ padding_mask = padding_mask == 0
1260
+ causal_mask[:, :, :, :mask_length] = causal_mask[:, :, :, :mask_length].masked_fill(
1261
+ padding_mask, min_dtype
1262
+ )
1263
+ return causal_mask
1264
+
1265
+
1266
+ class KwargsForCausalLM(FlashAttentionKwargs, LossKwargs):
1267
+ ...
1268
+
1269
+
1270
+ @auto_docstring
1271
+ class SDARForCausalLM(SDARPreTrainedModel, GenerationMixin):
1272
+ _tied_weights_keys = ["lm_head.weight"]
1273
+ _tp_plan = {"lm_head": "colwise_rep"}
1274
+ _pp_plan = {"lm_head": (["hidden_states"], ["logits"])}
1275
+
1276
+ def __init__(self, config):
1277
+ super().__init__(config)
1278
+ self.model = SDARModel(config)
1279
+ self.vocab_size = config.vocab_size
1280
+ self.lm_head = nn.Linear(
1281
+ config.hidden_size, config.vocab_size, bias=False)
1282
+
1283
+ # Initialize weights and apply final processing
1284
+ self.post_init()
1285
+
1286
+ def get_input_embeddings(self):
1287
+ return self.model.embed_tokens
1288
+
1289
+ def set_input_embeddings(self, value):
1290
+ self.model.embed_tokens = value
1291
+
1292
+ def get_output_embeddings(self):
1293
+ return self.lm_head
1294
+
1295
+ def set_output_embeddings(self, new_embeddings):
1296
+ self.lm_head = new_embeddings
1297
+
1298
+ def set_decoder(self, decoder):
1299
+ self.model = decoder
1300
+
1301
+ def get_decoder(self):
1302
+ return self.model
1303
+
1304
+ def prepare_for_bd_training(self, inputs_ids, position_ids, prompt_mask):
1305
+ bsz, seq_len = inputs_ids.shape
1306
+ num_tokens = calculate_token_nums(position_ids) # List[torch.Tensor]
1307
+ noisy_inputs_ids, logits_to_keep_half, p_mask = forward_add_noise_packed(
1308
+ inputs_ids=inputs_ids,
1309
+ num_tokens_list=num_tokens,
1310
+ prompt_mask=prompt_mask,
1311
+ mask_id=self.config.mask_token_id,
1312
+ )
1313
+ router_noisy_part_list = []
1314
+ for i in range(bsz):
1315
+ cur_router_noisy_part = (torch.arange(num_tokens[i].shape[0] * 2) % 2 == 0).to(inputs_ids.device)
1316
+ cur_router_noisy_part = cur_router_noisy_part.repeat_interleave(num_tokens[i].repeat_interleave(2))
1317
+ router_noisy_part_list.append(cur_router_noisy_part)
1318
+ router_noisy_part = torch.stack(router_noisy_part_list, dim=0)
1319
+
1320
+ # concatenated inputs_ids: (bsz, seq_len * 2)
1321
+ concat_inputs_ids = inputs_ids.repeat(1, 2)
1322
+ # concatenated logits_to_keep: (bsz, seq_len * 2)
1323
+ logits_to_keep = torch.zeros(
1324
+ bsz, 2 * seq_len, dtype=torch.bool, device=inputs_ids.device)
1325
+ # concatenated position_ids: (bsz, seq_len * 2)
1326
+ concat_position_ids = torch.zeros(
1327
+ bsz, 2 * seq_len, dtype=position_ids.dtype, device=position_ids.device)
1328
+ for i in range(bsz):
1329
+ concat_inputs_ids[i][router_noisy_part[i]] = noisy_inputs_ids[i]
1330
+ concat_inputs_ids[i][~router_noisy_part[i]] = inputs_ids[i]
1331
+
1332
+ logits_to_keep[i][router_noisy_part[i]] = logits_to_keep_half[i]
1333
+
1334
+ concat_position_ids[i][router_noisy_part[i]] = position_ids[i]
1335
+ concat_position_ids[i][~router_noisy_part[i]] = position_ids[i]
1336
+
1337
+ # create flex_attention mask
1338
+ attention_mask = block_attn_mask(num_tokens, self.config.block_size, inputs_ids.device)
1339
+ flex_attention_mask_3d = create_block_mask(
1340
+ lambda b, h, q_idx, kv_idx: attention_mask[b, q_idx, kv_idx],
1341
+ B=attention_mask.size(0), H=None,
1342
+ Q_LEN=attention_mask.size(1), KV_LEN=attention_mask.size(2),
1343
+ )
1344
+
1345
+ return concat_inputs_ids, concat_position_ids, flex_attention_mask_3d, logits_to_keep_half, logits_to_keep, p_mask
1346
+
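+ # Layout produced by `prepare_for_bd_training`, per packed sample s of length n_s:
+ # the concatenated stream is [..., noisy(s), clean(s), ...], both copies sharing the
+ # same position_ids, and the loss is taken only at the noisy copy's masked positions
+ # (`logits_to_keep`).
+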
1347
+ @can_return_tuple
1348
+ @auto_docstring
1349
+ def forward(
1350
+ self,
1351
+ input_ids: Optional[torch.LongTensor] = None,
1352
+ attention_mask: Optional[torch.Tensor] = None,
1353
+ position_ids: Optional[torch.LongTensor] = None,
1354
+ token_labels: Optional[torch.LongTensor] = None,
1355
+ block_ids: Optional[torch.LongTensor] = None,
1356
+ past_key_values: Optional[Cache] = None,
1357
+ inputs_embeds: Optional[torch.FloatTensor] = None,
1358
+ labels: Optional[torch.LongTensor] = None,
1359
+ use_cache: Optional[bool] = None,
1360
+ output_attentions: Optional[bool] = None,
1361
+ output_hidden_states: Optional[bool] = None,
1362
+ cache_position: Optional[torch.LongTensor] = None,
1363
+ logits_to_keep: Union[int, torch.Tensor] = 0,
1364
+ **kwargs: Unpack[KwargsForCausalLM],
1365
+ ) -> CausalLMOutputWithPast:
1366
+ r"""
1367
+ labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
1368
+ Labels for computing the masked language modeling loss. Indices should either be in `[0, ...,
1369
+ config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored
1370
+ (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`.
1371
+
1372
+ Example:
1373
+
1374
+ ```python
1375
+ >>> from transformers import AutoTokenizer, SDARForCausalLM
1376
+
1377
+ >>> model = SDARForCausalLM.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
1378
+ >>> tokenizer = AutoTokenizer.from_pretrained("DiffuOpen/SDAR-1.7B-Chat")
1379
+
1380
+ >>> prompt = "Hey, are you conscious? Can you talk to me?"
1381
+ >>> inputs = tokenizer(prompt, return_tensors="pt")
1382
+
1383
+ >>> # Generate
1384
+ >>> generate_ids = model.generate(inputs.input_ids, max_length=30)
1385
+ >>> tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
1386
+ "Hey, are you conscious? Can you talk to me?\nI'm not conscious, but I can talk to you."
1387
+ ```"""
1388
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1389
+ output_hidden_states = (
1390
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1391
+ )
1392
+ if self.training:
1393
+ assert inputs_embeds is None, "Only input_ids is supported during training"
1394
+ assert labels is not None, "Labels must be provided for training."
1395
+ # Trace SFT path: pre-computed block attention mask provided via kwargs
1396
+ block_attention_mask = kwargs.pop("block_attention_mask", None)
1397
+ if block_attention_mask is not None:
1398
+ # block_attention_mask: (B, L, L) boolean tensor
1399
+ flex_attention_mask_3d = create_block_mask(
1400
+ lambda b, h, q_idx, kv_idx: block_attention_mask[b, q_idx, kv_idx],
1401
+ B=block_attention_mask.size(0),
1402
+ H=None,
1403
+ Q_LEN=block_attention_mask.size(1),
1404
+ KV_LEN=block_attention_mask.size(2),
1405
+ )
1406
+ outputs = self.model(
1407
+ input_ids=input_ids,
1408
+ attention_mask=flex_attention_mask_3d,
1409
+ position_ids=position_ids,
1410
+ output_attentions=output_attentions,
1411
+ output_hidden_states=output_hidden_states,
1412
+ return_dict=True,
1413
+ cache_position=cache_position,
1414
+ )
1415
+ hidden_states = outputs.last_hidden_state
1416
+ logits = self.lm_head(hidden_states)
1417
+ # Unshifted cross-entropy loss (diffusion-style)
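+ # (Unlike autoregressive training there is no one-position shift here: each
+ # mask-position logit is scored against the label at the same index.)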
1418
+ loss = nn.CrossEntropyLoss(ignore_index=-100)(
1419
+ logits.view(-1, self.config.vocab_size), labels.view(-1)
1420
+ )
1421
+ logits = None
1422
+ elif token_labels is not None:
1423
+ if input_ids is None:
1424
+ raise ValueError("`input_ids` is required in token-label SFT training.")
1425
+ if token_labels.shape != input_ids.shape:
1426
+ raise ValueError(
1427
+ f"`token_labels` shape {tuple(token_labels.shape)} must match `input_ids` shape {tuple(input_ids.shape)}."
1428
+ )
1429
+ # Multi-block mask when block_ids provided, else single-block
1430
+ if block_ids is not None:
1431
+ bcp = getattr(self.config, "block_causal_prompt", True)
1432
+ token_label_mask = create_multi_block_causal_mask(
1433
+ token_labels, block_ids, self.config.block_size,
1434
+ block_causal_prompt=bcp,
1435
+ )
1436
+ else:
1437
+ token_label_mask = create_causal_mask_from_labels(token_labels, self.config.block_size)
1438
+ flex_attention_mask_3d = create_block_mask(
1439
+ lambda b, h, q_idx, kv_idx: token_label_mask[b, q_idx, kv_idx],
1440
+ B=token_label_mask.size(0),
1441
+ H=None,
1442
+ Q_LEN=token_label_mask.size(1),
1443
+ KV_LEN=token_label_mask.size(2),
1444
+ )
1445
+ outputs = self.model(
1446
+ input_ids=input_ids,
1447
+ attention_mask=flex_attention_mask_3d,
1448
+ position_ids=position_ids,
1449
+ token_labels=token_labels,
1450
+ output_attentions=output_attentions,
1451
+ output_hidden_states=output_hidden_states,
1452
+ return_dict=True,
1453
+ cache_position=cache_position,
1454
+ **kwargs,
1455
+ )
1456
+ hidden_states = outputs.last_hidden_state
1457
+ logits = self.lm_head(hidden_states)
1458
+ masked_labels = labels.masked_fill(token_labels != (self.config.block_size + 1), -100)
1459
+ if not torch.any(masked_labels != -100):
1460
+ raise ValueError("No valid supervision token found for token-label SFT loss.")
1461
+ loss = nn.CrossEntropyLoss(ignore_index=-100)(
1462
+ logits.view(-1, self.config.vocab_size), masked_labels.view(-1)
1463
+ )
1464
+ logits = None
1465
+ else:
1466
+ prompt_mask = labels == -100
1467
+ position_ids = modify_padded_position_ids_2d(position_ids)
1468
+ concat_inputs_ids, concat_position_ids, flex_attention_mask_3d, logits_to_keep_half, logits_to_keep, p_mask = self.prepare_for_bd_training(input_ids, position_ids, prompt_mask)
1469
+ outputs = self.model(
1470
+ input_ids=concat_inputs_ids,
1471
+ attention_mask=flex_attention_mask_3d,
1472
+ position_ids=concat_position_ids,
1473
+ output_attentions=output_attentions,
1474
+ output_hidden_states=output_hidden_states,
1475
+ return_dict=True,
1476
+ cache_position=cache_position,
1477
+ **kwargs,
1478
+ )
1479
+ hidden_states = outputs.last_hidden_state
1480
+ hidden_states = hidden_states[logits_to_keep].contiguous()
1481
+ answer_len = (labels != -100).sum()
1482
+ loss_fct = FusedLinearDiffusionCrossEntropyLoss(reduction='sum')
1483
+ loss = loss_fct(  # with reduction='sum' this returns the summed scalar loss
1484
+ # the `view(-1, V)` reshape is performed inside the function
1485
+ x=hidden_states,
1486
+ target=labels[logits_to_keep_half].contiguous(),
1487
+ weight=self.lm_head.weight,
1488
+ bias=self.lm_head.bias,
1489
+ p_mask=p_mask,
1490
+ )
1491
+ loss = loss / answer_len
1492
+ logits = None
1493
+ else:
1494
+ # decoder outputs consist of (dec_features, layer_state, dec_hidden, dec_attn)
1495
+ outputs: BaseModelOutputWithPast = self.model(
1496
+ input_ids=input_ids,
1497
+ attention_mask=attention_mask,
1498
+ position_ids=position_ids,
1499
+ token_labels=token_labels,
1500
+ past_key_values=past_key_values,
1501
+ inputs_embeds=inputs_embeds,
1502
+ use_cache=use_cache,
1503
+ output_attentions=output_attentions,
1504
+ output_hidden_states=output_hidden_states,
1505
+ cache_position=cache_position,
1506
+ **kwargs,
1507
+ )
1508
+
1509
+ hidden_states = outputs.last_hidden_state
1510
+ # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
1511
+ slice_indices = slice(-logits_to_keep,
1512
+ None) if isinstance(logits_to_keep, int) else logits_to_keep
1513
+ hidden_states = hidden_states[:, slice_indices, :].contiguous()
1514
+ fuse_linear_and_cross_entropy = self.config.fuse_cross_entropy and self.training
1515
+ if fuse_linear_and_cross_entropy:
1516
+ # When using fused_linear_ce_loss, we do not materialize the full logits in HBM
1517
+ logits = None
1518
+ else:
1519
+ logits = self.lm_head(hidden_states)
1520
+
1521
+ loss = None
1522
+ if labels is not None:
1523
+ # FusedLinearCrossEntropyLoss is monkey-patched in during training;
1524
+ # we do not use it during inference.
1525
+ loss_fct = nn.CrossEntropyLoss()
1526
+ loss = loss_fct(
1527
+ logits.view(-1, self.config.vocab_size), labels.view(-1))
1528
+
1529
+ return CausalLMOutputWithPast(
1530
+ loss=loss,
1531
+ logits=logits,
1532
+ past_key_values=outputs.past_key_values,
1533
+ hidden_states=outputs.hidden_states,
1534
+ attentions=outputs.attentions,
1535
+ )
1536
+
1537
+
1538
+ __all__ = [
1539
+ "SDARForCausalLM",
1540
+ "SDARModel",
1541
+ "SDARPreTrainedModel",
1542
+ ]
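
The training `forward` above dispatches on which inputs are present: a precomputed `block_attention_mask` kwarg selects the trace-SFT path, `token_labels` selects the token-label SFT path, and otherwise the block-diffusion path built by `prepare_for_bd_training` runs. Below is a minimal sketch of driving the trace-SFT branch, assuming a local checkout of this repo; the path, sequence, and the plain causal mask are illustrative stand-ins (flex-attention block masks also prefer lengths padded to the kernel block size, which this sketch glosses over), not part of the commit.

```python
# Sketch only: exercises the trace-SFT branch of SDARForCausalLM.forward,
# which expects labels plus a (B, L, L) boolean `block_attention_mask` kwarg.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "."  # hypothetical local checkout of this repo
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(repo, trust_remote_code=True)
model.train()  # the trace-SFT branch only runs in training mode

batch = tokenizer(["an illustrative training sequence"], return_tensors="pt")
input_ids = batch.input_ids
B, L = input_ids.shape

labels = input_ids.clone()
labels[:, :2] = -100  # ignore the leading positions in the loss, as with prompt tokens

# True where query position q may attend to key position k; plain causal here,
# standing in for a real trace-derived block mask.
block_attention_mask = torch.tril(torch.ones(L, L, dtype=torch.bool)).expand(B, L, L)

out = model(input_ids=input_ids, labels=labels, block_attention_mask=block_attention_mask)
out.loss.backward()
```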
special_tokens_map.json ADDED
@@ -0,0 +1,39 @@
1
+ {
2
+ "additional_special_tokens": [
3
+ "<|im_start|>",
4
+ "<|im_end|>",
5
+ "<|object_ref_start|>",
6
+ "<|object_ref_end|>",
7
+ "<|box_start|>",
8
+ "<|box_end|>",
9
+ "<|quad_start|>",
10
+ "<|quad_end|>",
11
+ "<|vision_start|>",
12
+ "<|vision_end|>",
13
+ "<|vision_pad|>",
14
+ "<|image_pad|>",
15
+ "<|video_pad|>",
16
+ "<|MASK|>"
17
+ ],
18
+ "eos_token": {
19
+ "content": "<|endoftext|>",
20
+ "lstrip": false,
21
+ "normalized": false,
22
+ "rstrip": false,
23
+ "single_word": false
24
+ },
25
+ "mask_token": {
26
+ "content": "<|MASK|>",
27
+ "lstrip": false,
28
+ "normalized": false,
29
+ "rstrip": false,
30
+ "single_word": false
31
+ },
32
+ "pad_token": {
33
+ "content": "<|endoftext|>",
34
+ "lstrip": false,
35
+ "normalized": false,
36
+ "rstrip": false,
37
+ "single_word": false
38
+ }
39
+ }
tokenization_qwen2.py ADDED
@@ -0,0 +1,342 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from functools import lru_cache
21
+ from typing import Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
26
+ from transformers.utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+
37
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
38
+
39
+ PRETOKENIZE_REGEX = r"""(?i:'s|'t|'re|'ve|'m|'ll|'d)|[^\r\n\p{L}\p{N}]?\p{L}+|\p{N}| ?[^\s\p{L}\p{N}]+[\r\n]*|\s*[\r\n]+|\s+(?!\S)|\s+"""
40
+
41
+
42
+ @lru_cache()
43
+ # Copied from transformers.models.gpt2.tokenization_gpt2.bytes_to_unicode
44
+ def bytes_to_unicode():
45
+ """
46
+ Returns a mapping from utf-8 bytes to unicode strings. We specifically avoid mapping to whitespace/control
47
+ characters the bpe code barfs on.
48
+
49
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
50
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
51
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
52
+ tables between utf-8 bytes and unicode strings.
53
+ """
54
+ bs = (
55
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
56
+ )
57
+ cs = bs[:]
58
+ n = 0
59
+ for b in range(2**8):
60
+ if b not in bs:
61
+ bs.append(b)
62
+ cs.append(2**8 + n)
63
+ n += 1
64
+ cs = [chr(n) for n in cs]
65
+ return dict(zip(bs, cs))
66
+
67
+
68
+ # Copied from transformers.models.gpt2.tokenization_gpt2.get_pairs
69
+ def get_pairs(word):
70
+ """
71
+ Return set of symbol pairs in a word.
72
+
73
+ Word is represented as tuple of symbols (symbols being variable-length strings).
74
+ """
75
+ pairs = set()
76
+ prev_char = word[0]
77
+ for char in word[1:]:
78
+ pairs.add((prev_char, char))
79
+ prev_char = char
80
+ return pairs
81
+
82
+
83
+ class Qwen2Tokenizer(PreTrainedTokenizer):
84
+ """
85
+ Construct a Qwen2 tokenizer. Based on byte-level Byte-Pair-Encoding.
86
+
87
+ As with GPT2Tokenizer, this tokenizer has been trained to treat spaces as parts of the tokens, so a word will
88
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
89
+
90
+ ```python
91
+ >>> from transformers import Qwen2Tokenizer
92
+
93
+ >>> tokenizer = Qwen2Tokenizer.from_pretrained("Qwen/Qwen-tokenizer")
94
+ >>> tokenizer("Hello world")["input_ids"]
95
+ [9707, 1879]
96
+
97
+ >>> tokenizer(" Hello world")["input_ids"]
98
+ [21927, 1879]
99
+ ```
100
+ This is expected.
101
+
102
+ You should not use GPT2Tokenizer instead, because of the different pretokenization rules.
103
+
104
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
105
+ this superclass for more information regarding those methods.
106
+
107
+ Args:
108
+ vocab_file (`str`):
109
+ Path to the vocabulary file.
110
+ merges_file (`str`):
111
+ Path to the merges file.
112
+ errors (`str`, *optional*, defaults to `"replace"`):
113
+ Paradigm to follow when decoding bytes to UTF-8. See
114
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
115
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
116
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
117
+ token instead.
118
+ bos_token (`str`, *optional*):
119
+ The beginning of sequence token. Not applicable for this tokenizer.
120
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
121
+ The end of sequence token.
122
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
123
+ The token used for padding, for example when batching sequences of different lengths.
124
+ clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
125
+ Whether or not the model should cleanup the spaces that were added when splitting the input text during the
126
+ tokenization process. Not applicable to this tokenizer, since tokenization does not add spaces.
127
+ split_special_tokens (`bool`, *optional*, defaults to `False`):
128
+ Whether or not the special tokens should be split during the tokenization process. The default behavior is
129
+ to not split special tokens. This means that if `<|endoftext|>` is the `eos_token`, then `tokenizer.tokenize("<|endoftext|>") =
130
+ ['<|endoftext|>`]. Otherwise, if `split_special_tokens=True`, then `tokenizer.tokenize("<|endoftext|>")` will be give `['<',
131
+ '|', 'endo', 'ft', 'ext', '|', '>']`. This argument is only supported for `slow` tokenizers for the moment.
132
+ """
133
+
134
+ vocab_files_names = VOCAB_FILES_NAMES
135
+ model_input_names = ["input_ids", "attention_mask"]
136
+
137
+ def __init__(
138
+ self,
139
+ vocab_file,
140
+ merges_file,
141
+ errors="replace",
142
+ unk_token="<|endoftext|>",
143
+ bos_token=None,
144
+ eos_token="<|endoftext|>",
145
+ pad_token="<|endoftext|>",
146
+ clean_up_tokenization_spaces=False,
147
+ split_special_tokens=False,
148
+ **kwargs,
149
+ ):
150
+ # Qwen vocab does not contain control tokens; added tokens need to be special
151
+ bos_token = (
152
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
153
+ if isinstance(bos_token, str)
154
+ else bos_token
155
+ )
156
+ eos_token = (
157
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
158
+ if isinstance(eos_token, str)
159
+ else eos_token
160
+ )
161
+ unk_token = (
162
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
163
+ if isinstance(unk_token, str)
164
+ else unk_token
165
+ )
166
+ pad_token = (
167
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
168
+ if isinstance(pad_token, str)
169
+ else pad_token
170
+ )
171
+
172
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
173
+ self.encoder = json.load(vocab_handle)
174
+ self.decoder = {v: k for k, v in self.encoder.items()}
175
+ self.errors = errors # how to handle errors in decoding
176
+ self.byte_encoder = bytes_to_unicode()
177
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
178
+ bpe_merges = []
179
+ with open(merges_file, encoding="utf-8") as merges_handle:
180
+ for i, line in enumerate(merges_handle):
181
+ line = line.strip()
182
+ if (i == 0 and line.startswith("#version:")) or not line:
183
+ continue
184
+ bpe_merges.append(tuple(line.split()))
185
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
186
+ # NOTE: the cache can grow without bound and will get really large for long running processes
187
+ # (esp. for texts in languages that do not use spaces between words, e.g. Chinese); technically
188
+ # not a memory leak but appears as one.
189
+ # GPT2Tokenizer has the same problem, so let's be consistent.
190
+ self.cache = {}
191
+
192
+ self.pat = re.compile(PRETOKENIZE_REGEX)
193
+
194
+ if kwargs.get("add_prefix_space", False):
195
+ logger.warning_once(
196
+ f"{self.__class__.__name} does not support `add_prefix_space`, setting it to True has no effect."
197
+ )
198
+
199
+ super().__init__(
200
+ errors=errors,
201
+ bos_token=bos_token,
202
+ eos_token=eos_token,
203
+ pad_token=pad_token,
204
+ unk_token=unk_token,
205
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
206
+ split_special_tokens=split_special_tokens,
207
+ **kwargs,
208
+ )
209
+
210
+ @property
211
+ def vocab_size(self) -> int:
212
+ return len(self.encoder)
213
+
214
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.get_vocab
215
+ def get_vocab(self):
216
+ return dict(self.encoder, **self.added_tokens_encoder)
217
+
218
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.bpe
219
+ def bpe(self, token):
220
+ if token in self.cache:
221
+ return self.cache[token]
222
+ word = tuple(token)
223
+ pairs = get_pairs(word)
224
+
225
+ if not pairs:
226
+ return token
227
+
228
+ while True:
229
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
230
+ if bigram not in self.bpe_ranks:
231
+ break
232
+ first, second = bigram
233
+ new_word = []
234
+ i = 0
235
+ while i < len(word):
236
+ try:
237
+ j = word.index(first, i)
238
+ except ValueError:
239
+ new_word.extend(word[i:])
240
+ break
241
+ else:
242
+ new_word.extend(word[i:j])
243
+ i = j
244
+
245
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
246
+ new_word.append(first + second)
247
+ i += 2
248
+ else:
249
+ new_word.append(word[i])
250
+ i += 1
251
+ new_word = tuple(new_word)
252
+ word = new_word
253
+ if len(word) == 1:
254
+ break
255
+ else:
256
+ pairs = get_pairs(word)
257
+ word = " ".join(word)
258
+ self.cache[token] = word
259
+ return word
260
+
261
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._tokenize
262
+ def _tokenize(self, text):
263
+ """Tokenize a string."""
264
+ bpe_tokens = []
265
+ for token in re.findall(self.pat, text):
266
+ token = "".join(
267
+ self.byte_encoder[b] for b in token.encode("utf-8")
268
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
269
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
270
+ return bpe_tokens
271
+
272
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_token_to_id
273
+ def _convert_token_to_id(self, token):
274
+ """Converts a token (str) in an id using the vocab."""
275
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
276
+
277
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer._convert_id_to_token
278
+ def _convert_id_to_token(self, index):
279
+ """Converts an index (integer) in a token (str) using the vocab."""
280
+ return self.decoder.get(index)
281
+
282
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.convert_tokens_to_string
283
+ def convert_tokens_to_string(self, tokens):
284
+ """Converts a sequence of tokens (string) in a single string."""
285
+ text = "".join(tokens)
286
+ text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
287
+ return text
288
+
289
+ def decode(
290
+ self,
291
+ token_ids,
292
+ skip_special_tokens: bool = False,
293
+ clean_up_tokenization_spaces: Optional[bool] = False,
294
+ spaces_between_special_tokens: bool = False,
295
+ **kwargs,
296
+ ) -> str:
297
+ # `spaces_between_special_tokens` defaults to True for _decode in slow tokenizers
298
+ # and cannot be configured elsewhere, but it should default to False for Qwen2Tokenizer
299
+ return super().decode(
300
+ token_ids,
301
+ skip_special_tokens=skip_special_tokens,
302
+ clean_up_tokenization_spaces=clean_up_tokenization_spaces,
303
+ spaces_between_special_tokens=spaces_between_special_tokens,
304
+ **kwargs,
305
+ )
306
+
307
+ # Copied from transformers.models.gpt2.tokenization_gpt2.GPT2Tokenizer.save_vocabulary
308
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
309
+ if not os.path.isdir(save_directory):
310
+ logger.error(f"Vocabulary path ({save_directory}) should be a directory")
311
+ return
312
+ vocab_file = os.path.join(
313
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
314
+ )
315
+ merge_file = os.path.join(
316
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
317
+ )
318
+
319
+ with open(vocab_file, "w", encoding="utf-8") as f:
320
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
321
+
322
+ index = 0
323
+ with open(merge_file, "w", encoding="utf-8") as writer:
324
+ writer.write("#version: 0.2\n")
325
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
326
+ if index != token_index:
327
+ logger.warning(
328
+ f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
329
+ " Please check that the tokenizer is not corrupted!"
330
+ )
331
+ index = token_index
332
+ writer.write(" ".join(bpe_tokens) + "\n")
333
+ index += 1
334
+
335
+ return vocab_file, merge_file
336
+
337
+ def prepare_for_tokenization(self, text, **kwargs):
338
+ text = unicodedata.normalize("NFC", text)
339
+ return (text, kwargs)
340
+
341
+
342
+ __all__ = ["Qwen2Tokenizer"]
tokenization_qwen2_fast.py ADDED
@@ -0,0 +1,131 @@
1
+ # coding=utf-8
2
+ # Copyright 2024 The Qwen team, Alibaba Group and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for Qwen2."""
16
+
17
+ from typing import Optional, Tuple
18
+
19
+ from transformers.tokenization_utils import AddedToken
20
+ from transformers.tokenization_utils_fast import PreTrainedTokenizerFast
21
+ from transformers.utils import logging
22
+ from .tokenization_qwen2 import Qwen2Tokenizer
23
+
24
+
25
+ logger = logging.get_logger(__name__)
26
+
27
+ VOCAB_FILES_NAMES = {
28
+ "vocab_file": "vocab.json",
29
+ "merges_file": "merges.txt",
30
+ "tokenizer_file": "tokenizer.json",
31
+ }
32
+
33
+
34
+ MAX_MODEL_INPUT_SIZES = {"qwen/qwen-tokenizer": 32768}
35
+
36
+
37
+ class Qwen2TokenizerFast(PreTrainedTokenizerFast):
38
+ """
39
+ Construct a "fast" Qwen2 tokenizer (backed by HuggingFace's *tokenizers* library). Based on byte-level
40
+ Byte-Pair-Encoding.
41
+ As with GPT2Tokenizer, this tokenizer has been trained to treat spaces as parts of the tokens, so a word will
42
+ be encoded differently depending on whether it is at the beginning of the sentence (without a space) or not:
43
+ ```python
44
+ >>> from transformers import Qwen2TokenizerFast
45
+ >>> tokenizer = Qwen2TokenizerFast.from_pretrained("Qwen/Qwen-tokenizer")
46
+ >>> tokenizer("Hello world")["input_ids"]
47
+ [9707, 1879]
48
+ >>> tokenizer(" Hello world")["input_ids"]
49
+ [21927, 1879]
50
+ ```
51
+ This is expected.
52
+ This tokenizer inherits from [`PreTrainedTokenizerFast`] which contains most of the main methods. Users should
53
+ refer to this superclass for more information regarding those methods.
54
+ Args:
55
+ vocab_file (`str`, *optional*):
56
+ Path to the vocabulary file.
57
+ merges_file (`str`, *optional*):
58
+ Path to the merges file.
59
+ tokenizer_file (`str`, *optional*):
60
+ Path to [tokenizers](https://github.com/huggingface/tokenizers) file (generally has a .json extension) that
61
+ contains everything needed to load the tokenizer.
62
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
63
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
64
+ token instead. Not applicable to this tokenizer.
65
+ bos_token (`str`, *optional*):
66
+ The beginning of sequence token. Not applicable for this tokenizer.
67
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
68
+ The end of sequence token.
69
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
70
+ The token used for padding, for example when batching sequences of different lengths.
71
+ """
72
+
73
+ vocab_files_names = VOCAB_FILES_NAMES
74
+ model_input_names = ["input_ids", "attention_mask"]
75
+ slow_tokenizer_class = Qwen2Tokenizer
76
+
77
+ def __init__(
78
+ self,
79
+ vocab_file=None,
80
+ merges_file=None,
81
+ tokenizer_file=None,
82
+ unk_token="<|endoftext|>",
83
+ bos_token=None,
84
+ eos_token="<|endoftext|>",
85
+ pad_token="<|endoftext|>",
86
+ **kwargs,
87
+ ):
88
+ # We need to at least pass vocab_file and merges_file to base class
89
+ # in case a slow tokenizer needs to be initialized; the rest can be
90
+ # configured through files.
91
+ # following GPT2TokenizerFast, also adding unk_token, bos_token, and eos_token
92
+
93
+ bos_token = (
94
+ AddedToken(bos_token, lstrip=False, rstrip=False, special=True, normalized=False)
95
+ if isinstance(bos_token, str)
96
+ else bos_token
97
+ )
98
+ eos_token = (
99
+ AddedToken(eos_token, lstrip=False, rstrip=False, special=True, normalized=False)
100
+ if isinstance(eos_token, str)
101
+ else eos_token
102
+ )
103
+ unk_token = (
104
+ AddedToken(unk_token, lstrip=False, rstrip=False, special=True, normalized=False)
105
+ if isinstance(unk_token, str)
106
+ else unk_token
107
+ )
108
+ pad_token = (
109
+ AddedToken(pad_token, lstrip=False, rstrip=False, special=True, normalized=False)
110
+ if isinstance(pad_token, str)
111
+ else pad_token
112
+ )
113
+
114
+ super().__init__(
115
+ vocab_file=vocab_file,
116
+ merges_file=merges_file,
117
+ tokenizer_file=tokenizer_file,
118
+ unk_token=unk_token,
119
+ bos_token=bos_token,
120
+ eos_token=eos_token,
121
+ pad_token=pad_token,
122
+ **kwargs,
123
+ )
124
+
125
+ # Copied from transformers.models.gpt2.tokenization_gpt2_fast.GPT2TokenizerFast.save_vocabulary
126
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
127
+ files = self._tokenizer.model.save(save_directory, name=filename_prefix)
128
+ return tuple(files)
129
+
130
+
131
+ __all__ = ["Qwen2TokenizerFast"]
tokenizer_config.json ADDED
@@ -0,0 +1,255 @@
1
+ {
2
+ "add_bos_token": false,
3
+ "add_prefix_space": false,
4
+ "added_tokens_decoder": {
5
+ "151643": {
6
+ "content": "<|endoftext|>",
7
+ "lstrip": false,
8
+ "normalized": false,
9
+ "rstrip": false,
10
+ "single_word": false,
11
+ "special": true
12
+ },
13
+ "151644": {
14
+ "content": "<|im_start|>",
15
+ "lstrip": false,
16
+ "normalized": false,
17
+ "rstrip": false,
18
+ "single_word": false,
19
+ "special": true
20
+ },
21
+ "151645": {
22
+ "content": "<|im_end|>",
23
+ "lstrip": false,
24
+ "normalized": false,
25
+ "rstrip": false,
26
+ "single_word": false,
27
+ "special": true
28
+ },
29
+ "151646": {
30
+ "content": "<|object_ref_start|>",
31
+ "lstrip": false,
32
+ "normalized": false,
33
+ "rstrip": false,
34
+ "single_word": false,
35
+ "special": true
36
+ },
37
+ "151647": {
38
+ "content": "<|object_ref_end|>",
39
+ "lstrip": false,
40
+ "normalized": false,
41
+ "rstrip": false,
42
+ "single_word": false,
43
+ "special": true
44
+ },
45
+ "151648": {
46
+ "content": "<|box_start|>",
47
+ "lstrip": false,
48
+ "normalized": false,
49
+ "rstrip": false,
50
+ "single_word": false,
51
+ "special": true
52
+ },
53
+ "151649": {
54
+ "content": "<|box_end|>",
55
+ "lstrip": false,
56
+ "normalized": false,
57
+ "rstrip": false,
58
+ "single_word": false,
59
+ "special": true
60
+ },
61
+ "151650": {
62
+ "content": "<|quad_start|>",
63
+ "lstrip": false,
64
+ "normalized": false,
65
+ "rstrip": false,
66
+ "single_word": false,
67
+ "special": true
68
+ },
69
+ "151651": {
70
+ "content": "<|quad_end|>",
71
+ "lstrip": false,
72
+ "normalized": false,
73
+ "rstrip": false,
74
+ "single_word": false,
75
+ "special": true
76
+ },
77
+ "151652": {
78
+ "content": "<|vision_start|>",
79
+ "lstrip": false,
80
+ "normalized": false,
81
+ "rstrip": false,
82
+ "single_word": false,
83
+ "special": true
84
+ },
85
+ "151653": {
86
+ "content": "<|vision_end|>",
87
+ "lstrip": false,
88
+ "normalized": false,
89
+ "rstrip": false,
90
+ "single_word": false,
91
+ "special": true
92
+ },
93
+ "151654": {
94
+ "content": "<|vision_pad|>",
95
+ "lstrip": false,
96
+ "normalized": false,
97
+ "rstrip": false,
98
+ "single_word": false,
99
+ "special": true
100
+ },
101
+ "151655": {
102
+ "content": "<|image_pad|>",
103
+ "lstrip": false,
104
+ "normalized": false,
105
+ "rstrip": false,
106
+ "single_word": false,
107
+ "special": true
108
+ },
109
+ "151656": {
110
+ "content": "<|video_pad|>",
111
+ "lstrip": false,
112
+ "normalized": false,
113
+ "rstrip": false,
114
+ "single_word": false,
115
+ "special": true
116
+ },
117
+ "151657": {
118
+ "content": "<tool_call>",
119
+ "lstrip": false,
120
+ "normalized": false,
121
+ "rstrip": false,
122
+ "single_word": false,
123
+ "special": false
124
+ },
125
+ "151658": {
126
+ "content": "</tool_call>",
127
+ "lstrip": false,
128
+ "normalized": false,
129
+ "rstrip": false,
130
+ "single_word": false,
131
+ "special": false
132
+ },
133
+ "151659": {
134
+ "content": "<|fim_prefix|>",
135
+ "lstrip": false,
136
+ "normalized": false,
137
+ "rstrip": false,
138
+ "single_word": false,
139
+ "special": false
140
+ },
141
+ "151660": {
142
+ "content": "<|fim_middle|>",
143
+ "lstrip": false,
144
+ "normalized": false,
145
+ "rstrip": false,
146
+ "single_word": false,
147
+ "special": false
148
+ },
149
+ "151661": {
150
+ "content": "<|fim_suffix|>",
151
+ "lstrip": false,
152
+ "normalized": false,
153
+ "rstrip": false,
154
+ "single_word": false,
155
+ "special": false
156
+ },
157
+ "151662": {
158
+ "content": "<|fim_pad|>",
159
+ "lstrip": false,
160
+ "normalized": false,
161
+ "rstrip": false,
162
+ "single_word": false,
163
+ "special": false
164
+ },
165
+ "151663": {
166
+ "content": "<|repo_name|>",
167
+ "lstrip": false,
168
+ "normalized": false,
169
+ "rstrip": false,
170
+ "single_word": false,
171
+ "special": false
172
+ },
173
+ "151664": {
174
+ "content": "<|file_sep|>",
175
+ "lstrip": false,
176
+ "normalized": false,
177
+ "rstrip": false,
178
+ "single_word": false,
179
+ "special": false
180
+ },
181
+ "151665": {
182
+ "content": "<tool_response>",
183
+ "lstrip": false,
184
+ "normalized": false,
185
+ "rstrip": false,
186
+ "single_word": false,
187
+ "special": false
188
+ },
189
+ "151666": {
190
+ "content": "</tool_response>",
191
+ "lstrip": false,
192
+ "normalized": false,
193
+ "rstrip": false,
194
+ "single_word": false,
195
+ "special": false
196
+ },
197
+ "151667": {
198
+ "content": "<think>",
199
+ "lstrip": false,
200
+ "normalized": false,
201
+ "rstrip": false,
202
+ "single_word": false,
203
+ "special": false
204
+ },
205
+ "151668": {
206
+ "content": "</think>",
207
+ "lstrip": false,
208
+ "normalized": false,
209
+ "rstrip": false,
210
+ "single_word": false,
211
+ "special": false
212
+ },
213
+ "151669": {
214
+ "content": "<|MASK|>",
215
+ "lstrip": false,
216
+ "normalized": false,
217
+ "rstrip": false,
218
+ "single_word": false,
219
+ "special": true
220
+ }
221
+ },
222
+ "additional_special_tokens": [
223
+ "<|im_start|>",
224
+ "<|im_end|>",
225
+ "<|object_ref_start|>",
226
+ "<|object_ref_end|>",
227
+ "<|box_start|>",
228
+ "<|box_end|>",
229
+ "<|quad_start|>",
230
+ "<|quad_end|>",
231
+ "<|vision_start|>",
232
+ "<|vision_end|>",
233
+ "<|vision_pad|>",
234
+ "<|image_pad|>",
235
+ "<|video_pad|>",
236
+ "<|MASK|>"
237
+ ],
238
+ "auto_map": {
239
+ "AutoTokenizer": [
240
+ "tokenization_qwen2.Qwen2Tokenizer",
241
+ null
242
+ ]
243
+ },
244
+ "bos_token": null,
245
+ "clean_up_tokenization_spaces": false,
246
+ "eos_token": "<|endoftext|>",
247
+ "errors": "replace",
248
+ "extra_special_tokens": {},
249
+ "mask_token": "<|MASK|>",
250
+ "model_max_length": 131072,
251
+ "pad_token": "<|endoftext|>",
252
+ "split_special_tokens": false,
253
+ "tokenizer_class": "Qwen2Tokenizer",
254
+ "unk_token": null
255
+ }
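
Note the split in `added_tokens_decoder` above: chat and vision markers carry `"special": true`, while `<think>`, `</think>`, and the tool-call tags carry `"special": false`. Only the former are stripped by `skip_special_tokens=True`, so reasoning traces survive decoding. A sketch under the same local-checkout assumption as earlier:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(".", trust_remote_code=True)  # hypothetical local path
ids = tok.encode("<think>hi</think><|im_end|>")
print(tok.decode(ids, skip_special_tokens=True))
# '<think>hi</think>': <think> and </think> are registered with "special": false,
# so they survive skip_special_tokens, while <|im_end|> ("special": true) is dropped.
```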
vocab.json ADDED
The diff for this file is too large to render. See raw diff