import argparse
import json
import os
from datetime import datetime
import subprocess
import logging

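# Benchmark -> list of evaluation presets (keys of `settings`) to run on it.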
full_datasets = {
    "MathVista_MINI": ["train_prompt_sampling"],
    "MathVision": ["train_prompt_greedy"],
    "MathVerse_MINI": ["train_prompt_greedy"],
    "MMMU_DEV_VAL": ["origin_prompt_greedy"],
    "MMStar": ["train_prompt_greedy"],
    "DynaMath": ["train_prompt_greedy"],
    "WeMath": ["train_prompt_greedy"],
    "TextVQA_VAL": ["origin_prompt_greedy"],
    "MMVet": ["origin_prompt_greedy"],
    "MMDocBench": ["origin_prompt_greedy"],
    "AI2D_TEST": ["origin_prompt_greedy"],
    "HallusionBench": ["origin_prompt_greedy"],
    "MMBench_DEV_EN_V11": ["origin_prompt_greedy"],
    "OCRBench": ["origin_prompt_greedy"],
    "DocVQA_VAL": ["origin_prompt_greedy"],
    "EMMA-mini": ["train_prompt_sampling"],
}

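# Decoding presets. The "greedy" presets keep do_sample=True but pin top_k=1 and
# temperature=0.01, which is effectively deterministic decoding. The meaning of
# use_reasoning_prompt is inferred from the preset names: 2 selects the
# reasoning ("train") prompt, 0 the original prompt.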
settings = {
    "train_prompt_sampling": {
        "use_reasoning_prompt": 2,
        "do_sample": True,
        "top_p": 1,
        "top_k": -1,
        "temperature": 1,
    },
    "train_prompt_greedy": {
        "use_reasoning_prompt": 2,
        "do_sample": True,
        "top_p": 0.001,
        "top_k": 1,
        "temperature": 0.01,
    },
    "origin_prompt_greedy": {
        "use_reasoning_prompt": 0,
        "do_sample": True,
        "top_p": 0.001,
        "top_k": 1,
        "temperature": 0.01,
    },
}


def main():
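    """For each requested dataset, build a per-setting eval config and run it:
    EMMA(-mini) in-process, every other benchmark via a torchrun subprocess."""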
    parser = argparse.ArgumentParser()

    parser.add_argument("--run_name", type=str, required=True, help="Name of the run")
    parser.add_argument("--gpus", type=int, default=8, help="Number of GPUs to use")
    parser.add_argument("--path", type=str, required=True, help="Path to the model")
    parser.add_argument(
        "--dataset", type=str, nargs="+", required=True, help="List of datasets to use"
    )

    parser.add_argument(
        "--min_pixels", type=int, default=3136, help="Minimum number of pixels"
    )
    parser.add_argument(
        "--max_pixels", type=int, default=12845056, help="Maximum number of pixels"
    )
    parser.add_argument(
        "--max_new_tokens", type=int, default=2048, help="Maximum number of new tokens"
    )

    args = parser.parse_args()
    assert len(args.dataset), "--dataset should be a list of datasets"

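    # A single "full" argument expands to every benchmark in full_datasets.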
    datasets = args.dataset
    if len(args.dataset) == 1 and args.dataset[0] == "full":
        datasets = list(full_datasets.keys())

    for dataset in datasets:
        assert (
            dataset in full_datasets
        ), f"Dataset {dataset} is not in the list of available datasets: {list(full_datasets.keys())}"

    print("Datasets to be used:", datasets)
    print("Run name:", args.run_name)
    print("Number of GPUs:", args.gpus)
    print("Model path:", args.path)
    print("Minimum pixels:", args.min_pixels)
    print("Maximum pixels:", args.max_pixels)
    print("Maximum new tokens:", args.max_new_tokens, flush=True)

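    # One evaluation run per (dataset, setting) pair.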
    for dataset in datasets:
        assert isinstance(full_datasets[dataset], list)
        for setting in full_datasets[dataset]:
            config = {
                "model": {
                    args.run_name: {
                        "class": "Qwen2VLChat",
                        "model_path": args.path,
                        "min_pixels": args.min_pixels,
                        "max_pixels": args.max_pixels,
                        "use_vllm": True,
                        "max_new_tokens": args.max_new_tokens,
                        **settings[setting],
                    },
                },
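                # The config lists every requested dataset; the torchrun
                # invocation below narrows the actual run with --data.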
                "datasets": datasets,
            }

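            # Outputs land in public_eval/<run_name>/<dataset>_<setting>/<YYYYMMDD>.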
            current_datetime = datetime.now().strftime("%Y%m%d")
            save_dir = f"public_eval/{args.run_name}/{dataset}_{setting}/{current_datetime}"
            os.makedirs(save_dir, exist_ok=True)

            config_name = "config.json"
            config_path = os.path.join(save_dir, config_name)
            with open(config_path, "w") as json_file:
                json.dump(config, json_file, indent=4)

            print(f"Start evaluating on {dataset}.")
            print(f"Eval config {setting}", flush=True)

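            # Run with vLLM's V1 engine disabled (i.e. use the legacy V0 engine).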
            env_vars = os.environ.copy()
            env_vars["VLLM_USE_V1"] = "0"

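            # EMMA / EMMA-mini run through their own generation + scoring
            # pipeline in-process; every other benchmark goes through torchrun.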
            if dataset == "EMMA" or dataset == "EMMA-mini":
                logger = logging.getLogger("EMMA-logger")
                logger.setLevel(logging.DEBUG)

                formatter = logging.Formatter(
                    "%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s: %(message)s"
                )

                file_handler = logging.FileHandler(os.path.join(save_dir, "out.log"))
                file_handler.setLevel(logging.DEBUG)
                file_handler.setFormatter(formatter)

                stream_handler = logging.StreamHandler()
                stream_handler.setLevel(logging.DEBUG)
                stream_handler.setFormatter(formatter)

                logger.addHandler(file_handler)
                logger.addHandler(stream_handler)

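                # Imported lazily so non-EMMA runs don't require the EMMA package.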
                from EMMA.generate_response import do_generate
                from EMMA.evaluation.evaluate import gen_true_false
                from EMMA.evaluation.calculate_acc import gen_score

                dataset_name = f"/root/LMUData/{dataset}"
                os.environ["VLLM_USE_V1"] = "0"
                os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
                do_generate(dataset_name, args.path, f"{save_dir}/results.json", logger=logger, seed=114413)
                gen_true_false(f"{save_dir}/results.json", logger=logger)
                gen_score(f"{save_dir}/results.json", f"{save_dir}/results_acc.json", logger=logger)
            else:
                command = [
                    "torchrun",
                    f"--nproc_per_node={args.gpus}",
                    "run_for_bash.py",
                    "--config",
                    config_path,
                    "--data",
                    dataset,
                    "--verbose",
                    "--work-dir",
                    save_dir,
                ]

                stdout_file = os.path.join(save_dir, "out.log")
                stderr_file = os.path.join(save_dir, "err.log")

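                # stderr is piped so it can be tee'd to both the console and
                # err.log; stdout goes straight to out.log.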
                with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr:
                    try:
                        print(f"Output redirected to {stdout_file}")
                        print(f"Errors redirected to {stderr_file}", flush=True)

                        process = subprocess.Popen(
                            command, env=env_vars, stdout=stdout, stderr=subprocess.PIPE, text=True
                        )

                        for line in process.stderr:
                            print(line, end="")
                            stderr.write(line)

                        process.wait()

                        if process.returncode != 0:
                            print(f"Command failed with return code {process.returncode}. Check {stderr_file} for error details.", flush=True)
                    except OSError:
                        print(f"torchrun failed to launch. Check {stderr_file} for error details.", flush=True)


if __name__ == "__main__":
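    # Evaluation data is expected under /root/LMUData; symlink it from shared
    # storage if it is not already present.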
    if not os.path.exists("/root/LMUData"):
        os.symlink("/user/konglingyu/LMUData", "/root/LMUData")
    main()