---
library_name: transformers
base_model:
- zai-org/GLM-5
---

This tiny model is intended for debugging. It is randomly initialized, using a configuration adapted from [zai-org/GLM-5](https://huggingface.co/zai-org/GLM-5).

| File path | Size |
|------|------|
| model.safetensors | 9.0MB |
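
To double-check what the repo contains (a single small `model.safetensors`), the file listing above can be reproduced with `huggingface_hub`. This is a minimal sketch; it assumes network access and the repo id `tiny-random/glm-5` used in the examples below.

```python
from huggingface_hub import HfApi

# Fetch repo metadata, including per-file sizes.
info = HfApi().model_info("tiny-random/glm-5", files_metadata=True)
for f in info.siblings:
    size_mb = (f.size or 0) / 1e6
    print(f"{f.rfilename}: {size_mb:.1f} MB")
```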
### Example usage:

- vLLM

```bash
# Multi-token prediction is supported
model_id=tiny-random/glm-5
vllm serve $model_id \
    --tensor-parallel-size 2 \
    --speculative-config.method mtp \
    --speculative-config.num_speculative_tokens 1 \
    --tool-call-parser glm47 \
    --reasoning-parser glm45 \
    --enable-auto-tool-choice
```
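
Once the server is up, it can be queried through vLLM's OpenAI-compatible API. A minimal sketch, assuming the default port 8000 and the `openai` Python client (any placeholder API key works):

```python
from openai import OpenAI

# vLLM exposes an OpenAI-compatible endpoint on port 8000 by default.
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")
response = client.chat.completions.create(
    model="tiny-random/glm-5",
    messages=[{"role": "user", "content": "hello"}],
    max_tokens=32,
)
print(response.choices[0].message.content)  # random weights, so expect gibberish
```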
- SGLang

```bash
# Multi-token prediction is supported
model_id=tiny-random/glm-5
python3 -m sglang.launch_server --model-path $model_id --tp-size 2 \
    --tool-call-parser glm47 \
    --reasoning-parser glm45 \
    --speculative-algorithm EAGLE \
    --speculative-num-steps 3 \
    --speculative-eagle-topk 1 \
    --speculative-num-draft-tokens 4
```
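
The SGLang server can be smoke-tested the same way; besides the OpenAI-compatible routes it also serves a native `/generate` endpoint. A minimal sketch, assuming the default port 30000:

```python
import requests

# SGLang listens on port 30000 by default; /generate is its native endpoint.
response = requests.post(
    "http://localhost:30000/generate",
    json={
        "text": "hello",
        "sampling_params": {"max_new_tokens": 32, "temperature": 0.8},
    },
)
print(response.json()["text"])  # random weights, so expect gibberish
```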
- Transformers

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "tiny-random/glm-5"
tokenizer = AutoTokenizer.from_pretrained(model_id)
input_ids = torch.randint(1000, 2000, size=(1, 2333), dtype=torch.long).cuda()  # long prompt to trigger DSA
# messages = [{"role": "user", "content": "hello"}]
# input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").cuda()
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    dtype=torch.bfloat16,
    device_map="cuda",
)
generated_ids = model.generate(input_ids, max_new_tokens=32)
output_text = tokenizer.decode(generated_ids[0][input_ids.shape[1]:])
print(output_text)
```
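
Since the weights are random, the decoded text is meaningless; for debugging, tensor shapes are usually what matters. A small follow-up sketch that reuses `model` and `input_ids` from the snippet above:

```python
# Check that the logits line up with the prompt length and the 154880-token vocabulary.
with torch.no_grad():
    logits = model(input_ids).logits
print(logits.shape)  # expected: (1, 2333, 154880) for the random prompt above
```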
### Code to create this repo:

<details>
<summary>Click to expand</summary>

```python
import json
from copy import deepcopy
from pathlib import Path

import accelerate
import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoProcessor,
    GenerationConfig,
    set_seed,
)

source_model_id = "zai-org/GLM-5"
save_folder = "/tmp/tiny-random/glm-5"

processor = AutoProcessor.from_pretrained(
    source_model_id, trust_remote_code=True)
processor.save_pretrained(save_folder)

with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json: dict = json.load(f)

head_dim = 64
kv_lora_rank = 512
qk_nope_head_dim = 192
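# Shrink every dimension to a tiny footprint while keeping the GLM-5 layout:
# one dense layer followed by one MoE layer, MLA-style low-rank attention
# (q_lora_rank / kv_lora_rank), and the small DSA indexer heads.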
config_json.update({
    "first_k_dense_replace": 1,
    "mlp_layer_types": ['dense'] + ['sparse'],
    "head_dim": head_dim,
    "hidden_size": 8,
    "index_head_dim": 32,
    "index_n_heads": 4,
    "intermediate_size": 32,
    "moe_intermediate_size": 32,
    "num_hidden_layers": 2,
    'kv_lora_rank': kv_lora_rank,
    "num_attention_heads": 4,
    'num_key_value_heads': 4,
    'q_lora_rank': 32,
    "qk_head_dim": qk_nope_head_dim + head_dim,
    'qk_nope_head_dim': qk_nope_head_dim,
    'qk_rope_head_dim': head_dim,
    'v_head_dim': qk_nope_head_dim + head_dim,
    "tie_word_embeddings": True,
})
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config)
torch.set_default_dtype(torch.float32)

if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
    model.generation_config.do_sample = True
    print(model.generation_config)

model = model.cpu()
set_seed(42)
n_params = sum(p.numel() for p in model.parameters())
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.numel() / n_params * 100, '%')
# MTP: append a multi-token-prediction block as an extra pseudo-layer so the
# MTP / speculative-decoding paths have weights to load.
set_seed(42)
model.model.layers.append(nn.ModuleDict(dict(
    shared_head=nn.ModuleDict(dict(
        norm=nn.RMSNorm(config.hidden_size),
        # head=deepcopy(model.model.embed_tokens),
    )),
    # embed_tokens=deepcopy(model.model.embed_tokens),
    eh_proj=nn.Linear(config.hidden_size * 2,
                      config.hidden_size, bias=False),
    enorm=nn.RMSNorm(config.hidden_size),
    hnorm=nn.RMSNorm(config.hidden_size),
    input_layernorm=nn.RMSNorm(config.hidden_size),
    post_attention_layernorm=nn.RMSNorm(config.hidden_size),
    self_attn=deepcopy(model.model.layers[1].self_attn),
    mlp=deepcopy(model.model.layers[1].mlp),
)))
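# Randomize the MoE routers' expert-score correction bias (kept in float32),
# including the copy inside the appended MTP block.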
for i in range(1, len(model.model.layers)):
    model.model.layers[i].mlp.gate.e_score_correction_bias = torch.rand_like(
        model.model.layers[i].mlp.gate.e_score_correction_bias).float()
model.save_pretrained(save_folder)
print(model)
```

</details>
### Printing the model:

<details><summary>Click to expand</summary>

```text
GlmMoeDsaForCausalLM(
  (model): GlmMoeDsaModel(
    (embed_tokens): Embedding(154880, 8, padding_idx=154820)
    (layers): ModuleList(
      (0): GlmMoeDsaDecoderLayer(
        (self_attn): GlmMoeDsaAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): GlmMoeDsaRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): GlmMoeDsaRMSNorm((512,), eps=1e-06)
          (kv_b_proj): Linear(in_features=512, out_features=1792, bias=False)
          (o_proj): Linear(in_features=1024, out_features=8, bias=False)
          (wq_b): Linear(in_features=32, out_features=1024, bias=False)
          (wk): Linear(in_features=8, out_features=256, bias=False)
          (k_norm): GlmMoeDsaRMSNorm((256,), eps=1e-06)
          (weights_proj): Linear(in_features=8, out_features=4, bias=False)
        )
        (mlp): GlmMoeDsaMLP(
          (gate_proj): Linear(in_features=8, out_features=32, bias=False)
          (up_proj): Linear(in_features=8, out_features=32, bias=False)
          (down_proj): Linear(in_features=32, out_features=8, bias=False)
          (act_fn): SiLUActivation()
        )
        (input_layernorm): GlmMoeDsaRMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): GlmMoeDsaRMSNorm((8,), eps=1e-05)
      )
      (1): GlmMoeDsaDecoderLayer(
        (self_attn): GlmMoeDsaAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): GlmMoeDsaRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): GlmMoeDsaRMSNorm((512,), eps=1e-06)
          (kv_b_proj): Linear(in_features=512, out_features=1792, bias=False)
          (o_proj): Linear(in_features=1024, out_features=8, bias=False)
          (wq_b): Linear(in_features=32, out_features=1024, bias=False)
          (wk): Linear(in_features=8, out_features=256, bias=False)
          (k_norm): GlmMoeDsaRMSNorm((256,), eps=1e-06)
          (weights_proj): Linear(in_features=8, out_features=4, bias=False)
        )
        (mlp): GlmMoeDsaMoE(
          (experts): GlmMoeDsaNaiveMoe(
            (act_fn): SiLUActivation()
          )
          (gate): GlmMoeDsaTopkRouter()
          (shared_experts): GlmMoeDsaMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): GlmMoeDsaRMSNorm((8,), eps=1e-05)
        (post_attention_layernorm): GlmMoeDsaRMSNorm((8,), eps=1e-05)
      )
      (2): ModuleDict(
        (shared_head): ModuleDict(
          (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
        )
        (eh_proj): Linear(in_features=16, out_features=8, bias=False)
        (enorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (hnorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
        (self_attn): GlmMoeDsaAttention(
          (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
          (q_a_layernorm): GlmMoeDsaRMSNorm((32,), eps=1e-06)
          (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
          (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
          (kv_a_layernorm): GlmMoeDsaRMSNorm((512,), eps=1e-06)
          (kv_b_proj): Linear(in_features=512, out_features=1792, bias=False)
          (o_proj): Linear(in_features=1024, out_features=8, bias=False)
          (wq_b): Linear(in_features=32, out_features=1024, bias=False)
          (wk): Linear(in_features=8, out_features=256, bias=False)
          (k_norm): GlmMoeDsaRMSNorm((256,), eps=1e-06)
          (weights_proj): Linear(in_features=8, out_features=4, bias=False)
        )
        (mlp): GlmMoeDsaMoE(
          (experts): GlmMoeDsaNaiveMoe(
            (act_fn): SiLUActivation()
          )
          (gate): GlmMoeDsaTopkRouter()
          (shared_experts): GlmMoeDsaMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
      )
    )
    (norm): GlmMoeDsaRMSNorm((8,), eps=1e-05)
    (rotary_emb): GlmMoeDsaRotaryEmbedding()
  )
  (lm_head): Linear(in_features=8, out_features=154880, bias=False)
)
```

</details>