{
  "architectures": [
    "CloverLMForCausalLM"
  ],
  "attn_backend": "flash2",
  "auto_map": {
    "AutoConfig": "configuration_cloverlm.CloverLMConfig",
    "AutoModelForCausalLM": "modeling_cloverlm.CloverLMForCausalLM",
    "AutoTokenizer": [
      "tokenization_cloverlm.CloverLMTokenizer",
      null
    ]
  },
  "d_head": 128,
  "heads": 28,
  "max_context": 1024,
  "model_type": "cloverlm",
  "num_blocks": 29,
  "num_hidden_layers": 29,
  "quartet_2_impl": "pseudoquant",
  "ratio": 4,
  "scale_type": "1/sqrt(d)",
  "transformers_version": "5.3.0",
  "vocab_size": 32000,
  "weight_tying": true
}