from mmengine.config import read_base

from opencompass.models import HuggingFaceBaseModel, TurboMindModel

with read_base():
    from opencompass.configs.datasets.ARC_c.ARC_c_few_shot_ppl import \
        ARC_c_datasets
    from opencompass.configs.datasets.bbh.bbh_gen_98fba6 import \
        bbh_datasets
    from opencompass.configs.datasets.cmmlu.cmmlu_ppl_041cbf import \
        cmmlu_datasets
    from opencompass.configs.datasets.drop.drop_gen_a2697c import \
        drop_datasets
    from opencompass.configs.datasets.GaokaoBench.GaokaoBench_no_subjective_gen_d21e37 import \
        GaokaoBench_datasets
    from opencompass.configs.datasets.gpqa.gpqa_few_shot_ppl_4b5a83 import \
        gpqa_datasets
    from opencompass.configs.datasets.gsm8k.gsm8k_gen_17d0dc import \
        gsm8k_datasets
    from opencompass.configs.datasets.hellaswag.hellaswag_10shot_ppl_59c85e import \
        hellaswag_datasets
    from opencompass.configs.datasets.math.math_4shot_base_gen_43d5b6 import \
        math_datasets
    from opencompass.configs.datasets.MathBench.mathbench_2024_few_shot_mixed_4a3fd4 import \
        mathbench_datasets
    from opencompass.configs.datasets.mbpp.sanitized_mbpp_gen_742f0c import \
        sanitized_mbpp_datasets
    from opencompass.configs.datasets.mmlu.mmlu_ppl_ac766d import \
        mmlu_datasets
    from opencompass.configs.datasets.mmlu_pro.mmlu_pro_few_shot_gen_bfaf90 import \
        mmlu_pro_datasets
    from opencompass.configs.datasets.nq.nq_open_1shot_gen_20a989 import \
        nq_datasets
    from opencompass.configs.datasets.race.race_few_shot_ppl import \
        race_datasets
    from opencompass.configs.datasets.SuperGLUE_BoolQ.SuperGLUE_BoolQ_few_shot_ppl import \
        BoolQ_datasets
    from opencompass.configs.datasets.TheoremQA.TheoremQA_5shot_gen_6f0af8 import \
        TheoremQA_datasets
    from opencompass.configs.datasets.triviaqa.triviaqa_wiki_1shot_gen_20a989 import \
        triviaqa_datasets
    from opencompass.configs.datasets.wikibench.wikibench_few_shot_ppl_c23d79 import \
        wikibench_datasets
    from opencompass.configs.datasets.winogrande.winogrande_5shot_ll_252f01 import \
        winogrande_datasets
    from opencompass.configs.summarizers.groups.bbh import \
        bbh_summary_groups
    from opencompass.configs.summarizers.groups.cmmlu import \
        cmmlu_summary_groups
    from opencompass.configs.summarizers.groups.GaokaoBench import \
        GaokaoBench_summary_groups
    from opencompass.configs.summarizers.groups.mathbench_v1_2024 import \
        mathbench_2024_summary_groups
    from opencompass.configs.summarizers.groups.mmlu import \
        mmlu_summary_groups
    from opencompass.configs.summarizers.groups.mmlu_pro import \
        mmlu_pro_summary_groups

    from ...rjob import eval, infer
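
# Keep only the subsets exercised by this fullbench run; every other entry in
# the imported *_datasets lists is dropped.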
race_datasets = [race_datasets[1]]
bbh_datasets = [
    x for x in bbh_datasets if 'logical_deduction_seven_objects' in x['abbr']
    or 'multistep_arithmetic_two' in x['abbr']
]
cmmlu_datasets = [
    x for x in cmmlu_datasets if x['abbr'].replace('cmmlu-', '') in [
        'ancient_chinese', 'chinese_civil_service_exam',
        'chinese_driving_rule', 'chinese_food_culture',
        'chinese_foreign_policy', 'chinese_history', 'chinese_literature',
        'chinese_teacher_qualification', 'construction_project_management',
        'elementary_chinese', 'elementary_commonsense', 'ethnology',
        'high_school_politics', 'modern_chinese',
        'traditional_chinese_medicine'
    ]
]
mmlu_datasets = [
    x for x in mmlu_datasets if x['abbr'].replace('lukaemon_mmlu_', '') in [
        'business_ethics', 'clinical_knowledge', 'college_medicine',
        'global_facts', 'human_aging', 'management', 'marketing',
        'medical_genetics', 'miscellaneous', 'nutrition',
        'professional_accounting', 'professional_medicine', 'virology'
    ]
]
mmlu_pro_datasets = [mmlu_pro_datasets[0]]
mathbench_datasets = [x for x in mathbench_datasets if 'college' in x['abbr']]
GaokaoBench_datasets = [
    x for x in GaokaoBench_datasets if '2010-2022_Math_II_MCQs' in x['abbr']
    or '2010-2022_Math_II_Fill-in-the-Blank' in x['abbr']
]

datasets = sum((v for k, v in locals().items()
                if k.endswith('_datasets') and 'dingo' not in k.lower()), [])
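
# Merge the per-benchmark summary groups and add an aggregate MathBench entry
# that averages the application (mathbench-a) and theory (mathbench-t) parts.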
summary_groups = sum(
    [v for k, v in locals().items() if k.endswith('_summary_groups')], [])
summary_groups.append(
    {
        'name': 'Mathbench',
        'subsets': ['mathbench-a (average)', 'mathbench-t (average)'],
    }, )
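
# Layout of the final report: dataset/metric pairs grouped by capability,
# followed by per-subject breakdowns for mmlu, cmmlu, mmlu_pro and MathBench.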
summarizer = dict(
    dataset_abbrs=[
        'Language',
        ['race-high', 'accuracy'],
        ['ARC-c', 'accuracy'],
        ['BoolQ', 'accuracy'],
        ['triviaqa_wiki_1shot', 'score'],
        ['nq_open_1shot', 'score'],
        '',
        'General Reasoning',
        ['drop', 'accuracy'],
        ['bbh', 'naive_average'],
        ['GPQA_diamond', 'accuracy'],
        ['hellaswag', 'accuracy'],
        ['TheoremQA', 'score'],
        ['winogrande', 'accuracy'],
        '',
        'Math Calculation',
        ['gsm8k', 'accuracy'],
        ['GaokaoBench', 'weighted_average'],
        'GaokaoBench_2010-2022_Math_II_MCQs',
        'GaokaoBench_2010-2022_Math_II_Fill-in-the-Blank',
        ['math', 'accuracy'],
        ['Mathbench', 'naive_average'],
        '',
        'Knowledge',
        ['wikibench-wiki-single_choice_cncircular', 'perf_4'],
        ['cmmlu', 'naive_average'],
        ['mmlu', 'naive_average'],
        ['mmlu_pro', 'naive_average'],
        '',
        'Code',
        ['openai_humaneval', 'humaneval_pass@1'],
        ['openai_humaneval_v2', 'humaneval_pass@1'],
        ['sanitized_mbpp', 'score'],
        '',
        ['dingo_en_192', 'score'],
        ['dingo_zh_170', 'score'],
        '',
        'mmlu',
        'mmlu-stem',
        'mmlu-social-science',
        'mmlu-humanities',
        ['mmlu-other', 'accuracy'],
        '',
        'cmmlu',
        'cmmlu-stem',
        'cmmlu-social-science',
        'cmmlu-humanities',
        'cmmlu-other',
        ['cmmlu-china-specific', 'accuracy'],
        '',
        'mmlu_pro',
        'mmlu_pro_biology',
        'mmlu_pro_business',
        'mmlu_pro_chemistry',
        'mmlu_pro_computer_science',
        'mmlu_pro_economics',
        'mmlu_pro_engineering',
        'mmlu_pro_health',
        'mmlu_pro_history',
        'mmlu_pro_law',
        'mmlu_pro_math',
        'mmlu_pro_philosophy',
        'mmlu_pro_physics',
        'mmlu_pro_psychology',
        'mmlu_pro_other',
        '',
        'bbh-logical_deduction_seven_objects',
        'bbh-multistep_arithmetic_two',
        '###### MathBench-A: Application Part ######',
        'college',
        'high',
        'middle',
        'primary',
        'arithmetic',
        'mathbench-a (average)',
        '###### MathBench-T: Theory Part ######',
        'college_knowledge',
        'high_knowledge',
        'middle_knowledge',
        'primary_knowledge',
        'mathbench-t (average)',
    ],
    summary_groups=summary_groups,
)
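
# Baseline backend: the checkpoint evaluated through plain HuggingFace
# transformers generation.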
hf_model = dict(
    type=HuggingFaceBaseModel,
    abbr='qwen-3-8b-base-hf-fullbench',
    path='Qwen/Qwen3-8B-Base',
    max_out_len=8192,
    batch_size=8,
    run_cfg=dict(num_gpus=1),
)
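
# Same checkpoint served through LMDeploy's TurboMind engine; top_k=1 with a
# near-zero temperature makes decoding effectively greedy, so the two backends
# should produce closely comparable results.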
tm_model = dict(
    type=TurboMindModel,
    abbr='qwen-3-8b-base-fullbench',
    path='Qwen/Qwen3-8B-Base',
    engine_config=dict(session_len=32768, max_batch_size=1, tp=1),
    gen_config=dict(top_k=1, temperature=1e-6, top_p=0.9, max_new_tokens=1024),
    max_seq_len=32768,
    max_out_len=1024,
    batch_size=1,
    run_cfg=dict(num_gpus=1),
)

models = [hf_model, tm_model]
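
# Re-collect every *_datasets list (equivalent to the earlier filtered sum,
# since no dingo datasets are imported here) and truncate each dataset to its
# first 16 samples so the whole suite runs as a quick sanity check.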
datasets = sum([v for k, v in locals().items() if k.endswith('_datasets')], [])

for d in datasets:
    d['reader_cfg']['test_range'] = '[0:16]'
|
|
|