{
  "added": [],
  "missing": [],
  "shapeChanges": [],
  "loraDetected": true,
  "recurrentDetected": false,
  "tensorCount": 24,
  "shardCount": 0,
  "missingShardFiles": [],
  "missingTensorKeys": [],
  "detectedLoRAKeys": [
    "base_model.model.transformer.h.0.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.0.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.1.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.1.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.10.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.10.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.11.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.11.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.2.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.2.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.3.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.3.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.4.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.4.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.5.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.5.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.6.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.6.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.7.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.7.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.8.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.8.attn.c_attn.lora_B.weight",
    "base_model.model.transformer.h.9.attn.c_attn.lora_A.weight",
    "base_model.model.transformer.h.9.attn.c_attn.lora_B.weight"
  ],
  "detectedRecurrentKeys": [],
  "tensorShapeSummary": {
    "base_model.model.transformer.h.0.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.0.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.1.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.1.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.10.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.10.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.11.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.11.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.2.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.2.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.3.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.3.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.4.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.4.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.5.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.5.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.6.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.6.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.7.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.7.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.8.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.8.attn.c_attn.lora_B.weight": [2304, 16],
    "base_model.model.transformer.h.9.attn.c_attn.lora_A.weight": [16, 768],
    "base_model.model.transformer.h.9.attn.c_attn.lora_B.weight": [2304, 16]
  },
  "shardSummary": {
    "count": 0,
    "shards": [],
    "missing": []
  },
  "hfCompatibility": {
    "config": false,
    "tokenizer": true,
    "safetensors": true,
    "adapterFiles": true
  },
  "tensorChecksCompleted": true,
  "validationStrictness": "normal",
  "validatedModelPath": "C:\\Users\\jdwall\\AppData\\Local\\Temp\\my-lora-adapter-h43em74k"
}
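
For a quick sanity check, the `tensorShapeSummary` above can be verified mechanically against the standard PEFT LoRA layout. A minimal sketch, assuming the report has been saved to `validation-report.json` (a hypothetical filename):

```python
import json

# Load the validation report shown above (the filename is an assumption).
with open("validation-report.json", encoding="utf-8") as f:
    report = json.load(f)

shapes = report["tensorShapeSummary"]

# PEFT stores lora_A as [rank, in_features] and lora_B as [out_features, rank],
# so each A/B pair must share one rank and together describe an
# [out_features, in_features] update to the frozen layer.
ranks = set()
for key, shape in shapes.items():
    if not key.endswith("lora_A.weight"):
        continue
    rank, in_features = shape
    out_features, rank_b = shapes[key.replace("lora_A", "lora_B")]
    assert rank == rank_b, f"rank mismatch for {key}"
    ranks.add(rank)
    layer = key.split(".h.")[1].split(".")[0]
    print(f"layer {layer:>2}: delta-W is {out_features}x{in_features}, rank {rank}")

# 12 layers x (A + B) = 24 tensors, all at rank 16.
assert ranks == {16}, f"unexpected ranks: {ranks}"
assert len(shapes) == report["tensorCount"] == 24
```

Every `lora_B @ lora_A` product here is 2304x768, matching GPT-2's fused QKV `c_attn` projection (hidden size 768, output 3 x 768 = 2304), so the report is consistent with a rank-16 adapter applied to all 12 attention blocks.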