---
library_name: transformers
base_model:
- meituan-longcat/LongCat-Flash-Lite
---

This tiny model is intended for debugging. It is randomly initialized, with a configuration adapted from [meituan-longcat/LongCat-Flash-Lite](https://huggingface.co/meituan-longcat/LongCat-Flash-Lite).

| File path | Size |
|------|------|
| model.safetensors | 8.4MB |
### Example usage:

```python
import torch
import transformers

model_id = "tiny-random/longcat-flash-ngram"
pipe = transformers.pipeline(
    'text-generation',
    model=model_id,
    trust_remote_code=True,
    device_map='cuda',
    torch_dtype=torch.bfloat16,
)
past_key_values = transformers.DynamicCache(config=None)  # set config to None
r = pipe('Hello, world!', past_key_values=past_key_values, max_new_tokens=32)
print(r)
```
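Because the weights are random, the generated text is meaningless; for debugging it is often more useful to inspect tensor shapes. A minimal sketch, reusing the `pipe` object from the example above:

```python
# Run a single forward pass and check the logits shape.
inputs = pipe.tokenizer("Hello, world!", return_tensors="pt").to(pipe.model.device)
with torch.no_grad():
    logits = pipe.model(**inputs, use_cache=False).logits
print(logits.shape)  # (batch_size, sequence_length, vocab_size)
```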
### Code to create this repo:

<details>
<summary>Python code</summary>

```python
import json
from copy import deepcopy
from pathlib import Path

import torch
import torch.nn as nn
from huggingface_hub import file_exists, hf_hub_download
from transformers import (
    AutoConfig,
    AutoModelForCausalLM,
    AutoTokenizer,
    GenerationConfig,
    set_seed,
)

source_model_id = "meituan-longcat/LongCat-Flash-Lite"
save_folder = "/tmp/tiny-random/longcat-flash-ngram"

Path(save_folder).mkdir(parents=True, exist_ok=True)
tokenizer = AutoTokenizer.from_pretrained(source_model_id, trust_remote_code=True)
tokenizer.save_pretrained(save_folder)

# Shrink the source config to a tiny test-sized variant.
with open(hf_hub_download(source_model_id, filename='config.json', repo_type='model'), 'r', encoding='utf-8') as f:
    config_json = json.load(f)
for k, v in config_json['auto_map'].items():
    config_json['auto_map'][k] = f'{source_model_id}--{v}'
config_json.update({
    'num_layers': 2,
    'hidden_size': 8,
    'ffn_hidden_size': 32,
    'expert_ffn_hidden_size': 32,
    'num_attention_heads': 4,
    'kv_lora_rank': 384,
    'n_routed_experts': 32,
    'q_lora_rank': 32,
    'qk_nope_head_dim': 64,
    'qk_rope_head_dim': 192,
    'head_dim': 192,
    'qk_head_dim': 256,
    'v_head_dim': 64,
    'moe_topk': 12,
    'zero_expert_num': 16,
    'emb_split_num': 2,
    'emb_neighbor_num': 2,
    'ngram_vocab_size_ratio': 4,
})
# del config_json['quantization_config']
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)

config = AutoConfig.from_pretrained(
    save_folder,
    trust_remote_code=True,
)
print(config)
torch.set_default_dtype(torch.bfloat16)
model = AutoModelForCausalLM.from_config(config, trust_remote_code=True)
if file_exists(filename="generation_config.json", repo_id=source_model_id, repo_type='model'):
    model.generation_config = GenerationConfig.from_pretrained(
        source_model_id, trust_remote_code=True,
    )
model = model.cpu()
# MTP (multi-token prediction) modules
model.model.mtp = nn.ModuleDict({
    "layers": nn.ModuleList([nn.ModuleDict(dict(
        eh_proj=nn.Linear(config.hidden_size * 2, config.hidden_size, bias=False),
        enorm=nn.ModuleDict({"m": nn.RMSNorm(config.hidden_size)}),
        hnorm=nn.ModuleDict({"m": nn.RMSNorm(config.hidden_size)}),
        input_layernorm=nn.RMSNorm(config.hidden_size),
        post_attention_layernorm=nn.RMSNorm(config.hidden_size),
        self_attn=deepcopy(model.model.layers[0].self_attn[0]),
        transformer_layer=nn.ModuleDict({"mlp": deepcopy(model.model.layers[0].mlps[0])}),
    ))]),
    "norm": nn.RMSNorm(config.hidden_size),
})
for i in range(config.num_layers):
    model.model.layers[i].mlp.router = model.model.layers[i].mlp.router.float()
    # model.model.layers[i].mlp.router.e_score_correction_bias = torch.zeros((config.n_routed_experts + config.zero_expert_num)).float()
set_seed(42)
with torch.no_grad():
    for name, p in sorted(model.named_parameters()):
        torch.nn.init.normal_(p, 0, 0.1)
        print(name, p.shape, p.dtype)
model.model.mtp.embed_tokens = deepcopy(model.model.embed_tokens)
model.model.ngram_embeddings = None  # avoid saving shared params

model.save_pretrained(save_folder)
torch.set_default_dtype(torch.float32)

# Normalize auto_map to point at the source repo and remove any local *.py files from the export.
with open(f"{save_folder}/config.json", "r", encoding='utf-8') as f:
    config_json = json.load(f)
config_json['auto_map'] = {k: source_model_id + '--' +
                           v.split('--')[-1] for k, v in config_json['auto_map'].items()}
with open(f"{save_folder}/config.json", "w", encoding='utf-8') as f:
    json.dump(config_json, f, indent=2)
for f in Path(save_folder).glob('*.py'):
    f.unlink()
```
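Continuing from the script above, a quick reload can confirm that the shrunken config and the random weights round-trip. This is a sketch only; it assumes the remote modeling code referenced by `auto_map` is reachable from the Hub.

```python
# Reload the tiny checkpoint that was just saved and run one forward pass.
check_model = AutoModelForCausalLM.from_pretrained(
    save_folder, trust_remote_code=True, torch_dtype=torch.bfloat16
)
check_inputs = tokenizer("Hello, world!", return_tensors="pt")
with torch.no_grad():
    out = check_model(**check_inputs, use_cache=False)
print(out.logits.shape)
```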
</details>

### Printing the model:

<details><summary>Click to expand</summary>

```text
LongcatFlashNgramForCausalLM(
  (model): LongcatFlashNgramModel(
    (embed_tokens): Embedding(131072, 8)
    (layers): ModuleList(
      (0-1): 2 x LongcatFlashDecoderLayer(
        (mlp): LongcatFlashMoE(
          (experts): ModuleList(
            (0-31): 32 x LongcatFlashMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLUActivation()
            )
            (32-47): 16 x Identity()
          )
          (router): LongcatFlashTopkRouter(
            (classifier): Linear(in_features=8, out_features=48, bias=False)
          )
        )
        (self_attn): ModuleList(
          (0-1): 2 x LongcatFlashMLA(
            (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
            (q_a_layernorm): LongcatFlashRMSNorm((32,), eps=1e-06)
            (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
            (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
            (kv_a_layernorm): LongcatFlashRMSNorm((384,), eps=1e-06)
            (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
        )
        (mlps): ModuleList(
          (0-1): 2 x LongcatFlashMLP(
            (gate_proj): Linear(in_features=8, out_features=32, bias=False)
            (up_proj): Linear(in_features=8, out_features=32, bias=False)
            (down_proj): Linear(in_features=32, out_features=8, bias=False)
            (act_fn): SiLUActivation()
          )
        )
        (input_layernorm): ModuleList(
          (0-1): 2 x LongcatFlashRMSNorm((8,), eps=1e-05)
        )
        (post_attention_layernorm): ModuleList(
          (0-1): 2 x LongcatFlashRMSNorm((8,), eps=1e-05)
        )
      )
    )
    (norm): LongcatFlashRMSNorm((8,), eps=1e-05)
    (rotary_emb): LongcatFlashRotaryEmbedding()
    (ngram_embeddings): None
    (mtp): ModuleDict(
      (layers): ModuleList(
        (0): ModuleDict(
          (eh_proj): Linear(in_features=16, out_features=8, bias=False)
          (enorm): ModuleDict(
            (m): RMSNorm((8,), eps=None, elementwise_affine=True)
          )
          (hnorm): ModuleDict(
            (m): RMSNorm((8,), eps=None, elementwise_affine=True)
          )
          (input_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (post_attention_layernorm): RMSNorm((8,), eps=None, elementwise_affine=True)
          (self_attn): LongcatFlashMLA(
            (q_a_proj): Linear(in_features=8, out_features=32, bias=False)
            (q_a_layernorm): LongcatFlashRMSNorm((32,), eps=1e-06)
            (q_b_proj): Linear(in_features=32, out_features=1024, bias=False)
            (kv_a_proj_with_mqa): Linear(in_features=8, out_features=576, bias=False)
            (kv_a_layernorm): LongcatFlashRMSNorm((384,), eps=1e-06)
            (kv_b_proj): Linear(in_features=384, out_features=512, bias=False)
            (o_proj): Linear(in_features=256, out_features=8, bias=False)
          )
          (transformer_layer): ModuleDict(
            (mlp): LongcatFlashMLP(
              (gate_proj): Linear(in_features=8, out_features=32, bias=False)
              (up_proj): Linear(in_features=8, out_features=32, bias=False)
              (down_proj): Linear(in_features=32, out_features=8, bias=False)
              (act_fn): SiLUActivation()
            )
          )
        )
      )
      (norm): RMSNorm((8,), eps=None, elementwise_affine=True)
      (embed_tokens): Embedding(131072, 8)
    )
  )
  (lm_head): Linear(in_features=8, out_features=131072, bias=False)
)
```
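The listing above is a standard PyTorch module printout (`print(model)`); a parameter count can be added the same way. A sketch, using the repo id from the usage example:

```python
import torch
from transformers import AutoModelForCausalLM

model = AutoModelForCausalLM.from_pretrained(
    "tiny-random/longcat-flash-ngram",
    trust_remote_code=True,
    torch_dtype=torch.bfloat16,
)
print(model)  # module structure
print(f"total parameters: {sum(p.numel() for p in model.parameters()):,}")
```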
</details>