import os

from vlmeval.dataset import build_dataset
from vlmeval.smp import *

load_env()

# dataset_name = "MMMU_DEV_VAL"
# dataset_name = "MathVista_MINI"
dataset_name = "DynaMath"
dataset = build_dataset(dataset_name)

judge_kwargs = {
    'nproc': 16,
    'verbose': True,
    'retry': 10,
}

# Choose the judge model based on the dataset type / name.
if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(['moviechat1k'], dataset_name.lower()):
    if listinstr(['WeMath'], dataset_name):
        judge_kwargs['model'] = 'gpt-4o-mini'
    else:
        judge_kwargs['model'] = 'chatgpt-0125'
elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name):
    judge_kwargs['model'] = 'gpt-4-turbo'
elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT'], dataset_name):  # noqa: E501
    judge_kwargs['model'] = 'gpt-4o-mini'
elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench'], dataset_name):  # noqa: E501
    judge_kwargs['model'] = 'gpt-4o'

judge_model = judge_kwargs['model']

# Prediction files to (re-)evaluate.
fs = [
    "/user/konglingyu/VLMEvalKit/public_eval/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/DynaMath_train_prompt_greedy/20250524/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/T20250524_G/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200_DynaMath.xlsx",  # noqa: E501
    # "/user/konglingyu/VLMEvalKit/outputs/Qwen2.5-VL-7B-Instruct-original/T20250412_G/Qwen2.5-VL-7B-Instruct-original_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/outputs/Qwen2.5-VL-7B-RL-greedy/T20250414_G/Qwen2.5-VL-7B-RL-greedy_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/bbox_step_300/DynaMath/20250418/bbox_step_300/T20250418_G/bbox_step_300_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/clip_high_step_600/DynaMath/20250419/clip_high_step_600/T20250419_G/clip_high_step_600_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/dr_grpo_step_600/DynaMath/20250419/dr_grpo_step_600/T20250419_G/dr_grpo_step_600_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/dr_grpo_step_800/DynaMath/20250418/dr_grpo_step_800/T20250418_G/dr_grpo_step_800_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo_500/DynaMath/20250417/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo/T20250417_G/grpo_v7_exp9_qwen25vl_grpo_opensource_math_doc_dr_grpo_DynaMath.xlsx",
    # "/user/konglingyu/VLMEvalKit/public_eval/naive_grpo_step_400/DynaMath/20250418/naive_grpo_step_400/T20250418_G/naive_grpo_step_400_DynaMath.xlsx",
]

for file in fs:
    # Remove cached judge outputs so the evaluation is re-run from scratch.
    # The cache / score files are named after the prediction file plus the judge
    # model (here gpt-4o-mini for DynaMath).
    for suffix in (f"_{judge_model}_score.csv", f"_{judge_model}.pkl", f"_{judge_model}.xlsx"):
        stale = file.replace(".xlsx", suffix)
        if os.path.exists(stale):
            os.remove(stale)
            print(f"Removed old file: {stale}")

    dataset.evaluate(file, **judge_kwargs)

    # Print the score table produced by the judge.
    score_csv = file.replace(".xlsx", f"_{judge_model}_score.csv")
    with open(score_csv) as f:
        lines = f.readlines()
    print(f"File: {file.split('/')[-1]}")
    for line in lines:
        print(line.strip())