alexmarques committed on
Commit
7c716cd
·
verified ·
1 Parent(s): 5dbfe8b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -2
README.md CHANGED
@@ -88,7 +88,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer, AutoProcessor
88
  from llmcompressor import oneshot
89
  from llmcompressor.modifiers.quantization import GPTQModifier
90
 
91
- MODEL_ID = "inference-optimization/MiniMax-M2.5-BF16"
92
 
93
  # Load model.
94
  model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto", trust_remote_code=True)
@@ -209,7 +209,7 @@ The model was evaluated on the ifeval, mmlu_pro and gsm8k_platinum using [lm-evaluation-harness]
209
 
210
  ### Accuracy
211
 
212
- | Benchmark | inference-optimization/MiniMax-M2.5-BF16 | inference-optimization/MiniMax-M2.5.w4a16 | Recovery (%) |
213
  |-----------|------------------------------------------|-------------------------------------------|--------------|
214
  | GSM8k Platinum (0-shot) | 95.15 | 96.36 | 101.27 |
215
  | IfEval (0-shot) | 92.05 | 90.45 | 98.26 |
 
88
  from llmcompressor import oneshot
89
  from llmcompressor.modifiers.quantization import GPTQModifier
90
 
91
+ MODEL_ID = "RedHatAI/MiniMax-M2.5-BF16"
92
 
93
  # Load model.
94
  model = AutoModelForCausalLM.from_pretrained(MODEL_ID, torch_dtype="auto", trust_remote_code=True)
 
209
 
210
  ### Accuracy
211
 
212
+ | Benchmark | RedHatAI/MiniMax-M2.5-BF16 | RedHatAI/MiniMax-M2.5.w4a16 | Recovery (%) |
213
  |-----------|------------------------------------------|-------------------------------------------|--------------|
214
  | GSM8k Platinum (0-shot) | 95.15 | 96.36 | 101.27 |
215
  | IfEval (0-shot) | 92.05 | 90.45 | 98.26 |