From f871e80887ea3611bb1ffe622abd2bcc498186ea Mon Sep 17 00:00:00 2001 From: Alexander Lam Date: Fri, 3 Jan 2025 16:33:43 +0800 Subject: [PATCH] [Feature] Add Bradley-Terry Subjective Evaluation method to Arena Hard dataset (#1802) * added base_models_abbrs to references (passed from LMEvaluator); added bradleyterry subjective evaluation method for wildbench, alpacaeval, and compassarena datasets; added all_scores output files for reference in CompassArenaBradleyTerrySummarizer; * added bradleyterry subjective evaluation method to arena_hard dataset --- .../arena_hard_compare_bradleyterry.py | 79 +++++++++++++++++++ configs/eval_subjective_bradleyterry.py | 6 ++ .../arena_hard_compare_bradleyterry.py | 79 +++++++++++++++++++ opencompass/datasets/subjective/__init__.py | 1 + opencompass/datasets/subjective/arena_hard.py | 79 +++++++++++++++++-- 5 files changed, 237 insertions(+), 7 deletions(-) create mode 100644 configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py create mode 100644 opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py diff --git a/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py b/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py new file mode 100644 index 000000000..7a0e9ae8a --- /dev/null +++ b/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py @@ -0,0 +1,79 @@ +from mmengine.config import read_base + +from opencompass.datasets import ArenaHardDataset, arenahard_bradleyterry_postprocess +from opencompass.openicl.icl_evaluator import LMEvaluator +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever + +subjective_reader_cfg = dict( + input_columns=['question'], + output_column='judge', +) + +subjective_all_sets = [ + 'arenahard', +] + + +arenahard_datasets = [] + +system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. 
Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"." + +judge_prompt = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{prediction}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{prediction2}\n<|The End of Assistant B's Answer|>" + +gpt4 = [ + dict( + abbr='gpt4-0314', + ) +] + +for _name in subjective_all_sets: + subjective_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{question}'), + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=4096), + ) + + subjective_eval_cfg = dict( + evaluator=dict( + type=LMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict(role='SYSTEM', fallback_role='HUMAN', prompt=system_prompt) + ], + round=[ + dict(role='HUMAN', prompt=judge_prompt), + ], + ), + ), + dict_postprocessor=dict(type=arenahard_bradleyterry_postprocess), + keep_predictions=True, # Must be turned on to save predictions from model pairs to calculate style features in postprocessor + ), + pred_role='BOT', + ) + + arenahard_datasets.append( + dict( + abbr='arenahard', + type=ArenaHardDataset, + path='./data/subjective/arena_hard', + name=_name, + reader_cfg=subjective_reader_cfg, + infer_cfg=subjective_infer_cfg, + eval_cfg=subjective_eval_cfg, + mode='m2n', + infer_order='double', + base_models=gpt4, + given_pred=[{'abbr': 'gpt4-0314', 'path': './data/subjective/arena_hard'}], + ) + ) diff --git a/configs/eval_subjective_bradleyterry.py b/configs/eval_subjective_bradleyterry.py index ebd6d417d..0b8b8c6c2 100644 --- a/configs/eval_subjective_bradleyterry.py +++ b/configs/eval_subjective_bradleyterry.py @@ -4,6 +4,11 @@ from opencompass.configs.datasets.subjective.alpaca_eval.alpacav2_judgeby_gpt4_bradleyterry import ( alpacav2_datasets, ) + + from opencompass.configs.datasets.subjective.arena_hard.arena_hard_compare_bradleyterry import ( + arenahard_datasets, + ) + from opencompass.configs.datasets.subjective.compassarena.compassarena_compare_bradleyterry import ( compassarena_datasets, ) @@ -71,6 +76,7 @@ datasets = [ *alpacav2_datasets, + *arenahard_datasets, *compassarena_datasets, *wildbench_datasets, ] diff --git a/opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py b/opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py new file mode 100644 index 000000000..7a0e9ae8a --- /dev/null +++ b/opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py @@ -0,0 +1,79 @@ +from mmengine.config import read_base + +from opencompass.datasets import ArenaHardDataset, arenahard_bradleyterry_postprocess +from opencompass.openicl.icl_evaluator import LMEvaluator +from opencompass.openicl.icl_inferencer import GenInferencer +from opencompass.openicl.icl_prompt_template import PromptTemplate +from opencompass.openicl.icl_retriever import ZeroRetriever + +subjective_reader_cfg = dict( + input_columns=['question'], + output_column='judge', +) + +subjective_all_sets = [ + 'arenahard', +] + + +arenahard_datasets = [] + +system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. 
Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"." + +judge_prompt = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{prediction}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{prediction2}\n<|The End of Assistant B's Answer|>" + +gpt4 = [ + dict( + abbr='gpt4-0314', + ) +] + +for _name in subjective_all_sets: + subjective_infer_cfg = dict( + prompt_template=dict( + type=PromptTemplate, + template=dict( + round=[ + dict(role='HUMAN', prompt='{question}'), + ] + ), + ), + retriever=dict(type=ZeroRetriever), + inferencer=dict(type=GenInferencer, max_out_len=4096), + ) + + subjective_eval_cfg = dict( + evaluator=dict( + type=LMEvaluator, + prompt_template=dict( + type=PromptTemplate, + template=dict( + begin=[ + dict(role='SYSTEM', fallback_role='HUMAN', prompt=system_prompt) + ], + round=[ + dict(role='HUMAN', prompt=judge_prompt), + ], + ), + ), + dict_postprocessor=dict(type=arenahard_bradleyterry_postprocess), + keep_predictions=True, # Must be turned on to save predictions from model pairs to calculate style features in postprocessor + ), + pred_role='BOT', + ) + + arenahard_datasets.append( + dict( + abbr='arenahard', + type=ArenaHardDataset, + path='./data/subjective/arena_hard', + name=_name, + reader_cfg=subjective_reader_cfg, + infer_cfg=subjective_infer_cfg, + eval_cfg=subjective_eval_cfg, + mode='m2n', + infer_order='double', + base_models=gpt4, + given_pred=[{'abbr': 'gpt4-0314', 'path': './data/subjective/arena_hard'}], + ) + ) diff --git a/opencompass/datasets/subjective/__init__.py b/opencompass/datasets/subjective/__init__.py index ac9371326..09a0a8edb 100644 --- a/opencompass/datasets/subjective/__init__.py +++ b/opencompass/datasets/subjective/__init__.py @@ -5,6 +5,7 @@ from .alpacaeval import alpacaeval_bradleyterry_postprocess # noqa: F401, F403 from .alpacaeval import alpacaeval_postprocess # noqa: F401, F403 from .arena_hard import ArenaHardDataset # noqa: F401, F403 +from .arena_hard import arenahard_bradleyterry_postprocess # noqa: F401, F403 from 
.arena_hard import arenahard_postprocess # noqa: F401, F403 from .compass_arena import CompassArenaDataset # noqa: F401, F403 from .compass_arena import \ diff --git a/opencompass/datasets/subjective/arena_hard.py b/opencompass/datasets/subjective/arena_hard.py index f415410a3..b146f3aca 100644 --- a/opencompass/datasets/subjective/arena_hard.py +++ b/opencompass/datasets/subjective/arena_hard.py @@ -10,6 +10,8 @@ from datasets import Dataset, DatasetDict from sklearn.linear_model import LogisticRegression +from opencompass.datasets.subjective.compass_arena_subjective_bench import \ + get_element_counts from opencompass.registry import DICT_POSTPROCESSORS, LOAD_DATASET from opencompass.utils import get_data_path @@ -38,8 +40,8 @@ def load(self, path: str, name: str, *args, **kwargs): 'judge': { 'capability': cluster, 'question': question, - 'question_id': question_id - } + 'question_id': question_id, + }, }) dataset = Dataset.from_list(raw_data) return dataset @@ -133,11 +135,10 @@ def get_bootstrap_result(battles, func_compute_elo, num_round): def preety_print_two_ratings(ratings_1, ratings_2, column_names): - df = pd.DataFrame( + df = (pd.DataFrame( [[n, ratings_1[n], ratings_2[n]] for n in ratings_1.keys()], - columns=['Model', column_names[0], column_names[1] - ]).sort_values(column_names[0], - ascending=False).reset_index(drop=True) + columns=['Model', column_names[0], column_names[1]], + ).sort_values(column_names[0], ascending=False).reset_index(drop=True)) df[column_names[0]] = (df[column_names[0]] + 0.5).astype(int) df[column_names[1]] = (df[column_names[1]] + 0.5).astype(int) df.index = df.index + 1 @@ -172,7 +173,10 @@ def get_win_rate_column(df, column, baseline='gpt4-0314'): @DICT_POSTPROCESSORS.register_module('arenahard') -def arenahard_postprocess(output: dict, output_path: str) -> dict: +def arenahard_postprocess( + output: dict, + output_path: str, +) -> dict: judged_answers, references = get_judgeanswer_and_reference( output, output_path, post_process_arenahard) @@ -211,3 +215,64 @@ def arenahard_postprocess(output: dict, output_path: str) -> dict: results = {'score': score} results['details'] = output return results + + +@DICT_POSTPROCESSORS.register_module('arenahard_bradleyterry') +def arenahard_bradleyterry_postprocess( + output: dict, + output_path: str, +) -> dict: + judged_answers, references = get_judgeanswer_and_reference( + result=output, + filename=output_path, + post_process=post_process_arenahard, + ) + + if 'prediction1' not in references[0]: + raise ValueError( + 'prediction1 not in references. Set `keep_predictions=True` for LMEvaluator in dataset config and retry.' + ) + + if 'prediction2' not in references[0]: + raise ValueError( + 'prediction2 not in references. Set `keep_predictions=True` for LMEvaluator in dataset config and retry.' 
+        )
+
+    results = {}
+    matches = []
+    for judged_answer, reference in zip(judged_answers, references):
+        cur_dict = {}
+
+        if judged_answer in ['A>>B', 'B<<A']:
+            cur_dict['winner'] = 'model_a'
+        elif judged_answer in ['A>B', 'B<A']:
+            cur_dict['winner'] = 'model_a'
+        elif judged_answer in ['A=B', 'B=A']:
+            cur_dict['winner'] = 'tie'
+        elif judged_answer in ['B>A', 'A<B']:
+            cur_dict['winner'] = 'model_b'
+        elif judged_answer in ['B>>A', 'A<<B']:
+            cur_dict['winner'] = 'model_b'
+        else:
+            continue
+
+        cur_dict['capability'] = reference['capability']
+        cur_dict['model_a'] = reference['answer1']
+        cur_dict['model_b'] = reference['answer2']
+        cur_dict['prediction1'] = reference['prediction1']
+        cur_dict['prediction2'] = reference['prediction2']
+
+        matches.append(cur_dict)
+
+    ### ---------- Add Style Metadata ---------- ###
+    matches = get_element_counts(
+        data=matches,
+        column='prediction1',
+        suffix='_a',
+    )
+    matches = get_element_counts(
+        data=matches,
+        column='prediction2',
+        suffix='_b',
+    )
+
+    results['matches'] = matches
+    # results["details"] = output
+
+    return results
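
For reference, below is a minimal sketch of how Bradley-Terry ratings can be fit from pairwise match records shaped like the `matches` list this postprocessor emits (keys `model_a`, `model_b`, `winner`). It is illustrative only and not part of the patch: in OpenCompass the actual aggregation is handled downstream by CompassArenaBradleyTerrySummarizer, and the function name, parameters, and Elo-style scaling used here are assumptions chosen for the example.

import numpy as np
from sklearn.linear_model import LogisticRegression


def fit_bradley_terry(matches, base=10.0, scale=400.0, init_rating=1000.0):
    """Illustrative Bradley-Terry fit over match records (not OpenCompass code)."""
    models = sorted({m['model_a'] for m in matches} | {m['model_b'] for m in matches})
    index = {name: i for i, name in enumerate(models)}

    rows, labels, weights = [], [], []
    for match in matches:
        # One row per outcome: +1 for the A-position model, -1 for the B-position model.
        x = np.zeros(len(models))
        x[index[match['model_a']]] = 1.0
        x[index[match['model_b']]] = -1.0

        if match['winner'] == 'model_a':
            outcomes = [(1, 1.0)]
        elif match['winner'] == 'model_b':
            outcomes = [(0, 1.0)]
        else:  # tie: count as half a win for each side
            outcomes = [(1, 0.5), (0, 0.5)]

        for label, weight in outcomes:
            rows.append(x)
            labels.append(label)
            weights.append(weight)

    # Unpenalized logistic regression recovers Bradley-Terry strengths
    # (penalty=None requires scikit-learn >= 1.2; use penalty='none' on older versions).
    clf = LogisticRegression(fit_intercept=False, penalty=None)
    clf.fit(np.vstack(rows), np.array(labels), sample_weight=np.array(weights))

    # Convert log-odds coefficients to an Elo-like scale anchored at init_rating.
    ratings = scale / np.log(base) * clf.coef_[0] + init_rating
    return dict(zip(models, ratings))


if __name__ == '__main__':
    # Tiny hypothetical demo; model names are placeholders.
    demo = [
        {'model_a': 'model-x', 'model_b': 'gpt4-0314', 'winner': 'model_a'},
        {'model_a': 'gpt4-0314', 'model_b': 'model-x', 'winner': 'tie'},
        {'model_a': 'model-y', 'model_b': 'gpt4-0314', 'winner': 'model_b'},
    ]
    print(fit_bradley_terry(demo))

Because the config above uses infer_order='double', each question is judged with the two answers in both orders, so both orderings of a model pair contribute rows to the fit; the style metadata added by get_element_counts is intended for the summarizer's style-controlled variant of this computation.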