[Feature] Add Bradley-Terry Subjective Evaluation method to Arena Hard dataset (#1802)

* Added base_models_abbrs to references (passed from LMEvaluator); added the Bradley-Terry subjective evaluation method for the WildBench, AlpacaEval, and CompassArena datasets; added all_scores output files for reference in CompassArenaBradleyTerrySummarizer.

* Added the Bradley-Terry subjective evaluation method to the Arena Hard dataset.
acylam authored Jan 3, 2025
1 parent 117dc50 commit f871e80
Showing 5 changed files with 237 additions and 7 deletions.
79 changes: 79 additions & 0 deletions configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py
@@ -0,0 +1,79 @@
from mmengine.config import read_base

from opencompass.datasets import ArenaHardDataset, arenahard_bradleyterry_postprocess
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

subjective_reader_cfg = dict(
    input_columns=['question'],
    output_column='judge',
)

subjective_all_sets = [
    'arenahard',
]


arenahard_datasets = []

system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."

judge_prompt = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{prediction}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{prediction2}\n<|The End of Assistant B's Answer|>"

gpt4 = [
    dict(
        abbr='gpt4-0314',
    )
]

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{question}'),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=4096),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(role='SYSTEM', fallback_role='HUMAN', prompt=system_prompt)
                    ],
                    round=[
                        dict(role='HUMAN', prompt=judge_prompt),
                    ],
                ),
            ),
            dict_postprocessor=dict(type=arenahard_bradleyterry_postprocess),
            # Must be turned on to save predictions from model pairs to
            # calculate style features in postprocessor
            keep_predictions=True,
        ),
        pred_role='BOT',
    )

    arenahard_datasets.append(
        dict(
            abbr='arenahard',
            type=ArenaHardDataset,
            path='./data/subjective/arena_hard',
            name=_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg,
            mode='m2n',
            infer_order='double',
            base_models=gpt4,
            given_pred=[{'abbr': 'gpt4-0314', 'path': './data/subjective/arena_hard'}],
        )
    )
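The five verdict labels defined in system_prompt above ([[A>>B]], [[A>B]], [[A=B]], [[B>A]], [[B>>A]]) are what the dataset's postprocessor later pulls out of each judge response before mapping them to match outcomes (see arenahard_bradleyterry_postprocess in opencompass/datasets/subjective/arena_hard.py further down). The sketch below is a rough illustration of that extraction step only; the helper name extract_verdict is hypothetical and is not part of this commit, which relies on the existing post_process_arenahard instead.

import re


def extract_verdict(judgment: str):
    """Return the last [[...]] verdict label found in a judge response.

    Hypothetical sketch; in this commit the existing post_process_arenahard
    plays this role.
    """
    # The judge is instructed to emit exactly one of:
    # [[A>>B]], [[A>B]], [[A=B]], [[B>A]], [[B>>A]]
    found = re.findall(r'\[\[([AB][<>=]{1,2}[AB])\]\]', judgment)
    return found[-1] if found else None


# extract_verdict("My final verdict is tie: [[A=B]]")  ->  'A=B'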
6 changes: 6 additions & 0 deletions configs/eval_subjective_bradleyterry.py
@@ -4,6 +4,11 @@
 from opencompass.configs.datasets.subjective.alpaca_eval.alpacav2_judgeby_gpt4_bradleyterry import (
     alpacav2_datasets,
 )
+
+from opencompass.configs.datasets.subjective.arena_hard.arena_hard_compare_bradleyterry import (
+    arenahard_datasets,
+)
+
 from opencompass.configs.datasets.subjective.compassarena.compassarena_compare_bradleyterry import (
     compassarena_datasets,
 )
@@ -71,6 +76,7 @@
 
 datasets = [
     *alpacav2_datasets,
+    *arenahard_datasets,
     *compassarena_datasets,
     *wildbench_datasets,
 ]
79 changes: 79 additions & 0 deletions opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py
@@ -0,0 +1,79 @@
from mmengine.config import read_base

from opencompass.datasets import ArenaHardDataset, arenahard_bradleyterry_postprocess
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

subjective_reader_cfg = dict(
    input_columns=['question'],
    output_column='judge',
)

subjective_all_sets = [
    'arenahard',
]


arenahard_datasets = []

system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."

judge_prompt = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{prediction}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{prediction2}\n<|The End of Assistant B's Answer|>"

gpt4 = [
    dict(
        abbr='gpt4-0314',
    )
]

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{question}'),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=4096),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(role='SYSTEM', fallback_role='HUMAN', prompt=system_prompt)
                    ],
                    round=[
                        dict(role='HUMAN', prompt=judge_prompt),
                    ],
                ),
            ),
            dict_postprocessor=dict(type=arenahard_bradleyterry_postprocess),
            # Must be turned on to save predictions from model pairs to
            # calculate style features in postprocessor
            keep_predictions=True,
        ),
        pred_role='BOT',
    )

    arenahard_datasets.append(
        dict(
            abbr='arenahard',
            type=ArenaHardDataset,
            path='./data/subjective/arena_hard',
            name=_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg,
            mode='m2n',
            infer_order='double',
            base_models=gpt4,
            given_pred=[{'abbr': 'gpt4-0314', 'path': './data/subjective/arena_hard'}],
        )
    )
1 change: 1 addition & 0 deletions opencompass/datasets/subjective/__init__.py
@@ -5,6 +5,7 @@
 from .alpacaeval import alpacaeval_bradleyterry_postprocess # noqa: F401, F403
 from .alpacaeval import alpacaeval_postprocess # noqa: F401, F403
 from .arena_hard import ArenaHardDataset # noqa: F401, F403
+from .arena_hard import arenahard_bradleyterry_postprocess # noqa: F401, F403
 from .arena_hard import arenahard_postprocess # noqa: F401, F403
 from .compass_arena import CompassArenaDataset # noqa: F401, F403
 from .compass_arena import \
79 changes: 72 additions & 7 deletions opencompass/datasets/subjective/arena_hard.py
@@ -10,6 +10,8 @@
 from datasets import Dataset, DatasetDict
 from sklearn.linear_model import LogisticRegression
 
+from opencompass.datasets.subjective.compass_arena_subjective_bench import \
+    get_element_counts
 from opencompass.registry import DICT_POSTPROCESSORS, LOAD_DATASET
 from opencompass.utils import get_data_path
 
@@ -38,8 +40,8 @@ def load(self, path: str, name: str, *args, **kwargs):
                 'judge': {
                     'capability': cluster,
                     'question': question,
-                    'question_id': question_id
-                }
+                    'question_id': question_id,
+                },
             })
         dataset = Dataset.from_list(raw_data)
         return dataset
@@ -133,11 +135,10 @@ def get_bootstrap_result(battles, func_compute_elo, num_round):
 
 
 def preety_print_two_ratings(ratings_1, ratings_2, column_names):
-    df = pd.DataFrame(
+    df = (pd.DataFrame(
         [[n, ratings_1[n], ratings_2[n]] for n in ratings_1.keys()],
-        columns=['Model', column_names[0], column_names[1]
-        ]).sort_values(column_names[0],
-                       ascending=False).reset_index(drop=True)
+        columns=['Model', column_names[0], column_names[1]],
+    ).sort_values(column_names[0], ascending=False).reset_index(drop=True))
     df[column_names[0]] = (df[column_names[0]] + 0.5).astype(int)
     df[column_names[1]] = (df[column_names[1]] + 0.5).astype(int)
     df.index = df.index + 1
@@ -172,7 +173,10 @@ def get_win_rate_column(df, column, baseline='gpt4-0314'):
 
 
 @DICT_POSTPROCESSORS.register_module('arenahard')
-def arenahard_postprocess(output: dict, output_path: str) -> dict:
+def arenahard_postprocess(
+    output: dict,
+    output_path: str,
+) -> dict:
     judged_answers, references = get_judgeanswer_and_reference(
         output, output_path, post_process_arenahard)
 
@@ -211,3 +215,64 @@ def arenahard_postprocess(output: dict, output_path: str) -> dict:
     results = {'score': score}
     results['details'] = output
     return results
+
+
+@DICT_POSTPROCESSORS.register_module('arenahard_bradleyterry')
+def arenahard_bradleyterry_postprocess(
+    output: dict,
+    output_path: str,
+) -> dict:
+    judged_answers, references = get_judgeanswer_and_reference(
+        result=output,
+        filename=output_path,
+        post_process=post_process_arenahard,
+    )
+
+    if 'prediction1' not in references[0]:
+        raise ValueError(
+            'prediction1 not in references. Set `keep_predictions=True` for LMEvaluator in dataset config and retry.'
+        )
+
+    if 'prediction2' not in references[0]:
+        raise ValueError(
+            'prediction2 not in references. Set `keep_predictions=True` for LMEvaluator in dataset config and retry.'
+        )
+
+    results = {}
+    matches = []
+    for judged_answer, reference in zip(judged_answers, references):
+        cur_dict = {}
+
+        if judged_answer in ['A>>B', 'B<<A', 'A>B', 'B<A']:
+            cur_dict['winner'] = 'model_a'
+        elif judged_answer in ['A=B', 'B=A']:
+            cur_dict['winner'] = 'tie'
+        elif judged_answer in ['A<B', 'B>A', 'A<<B', 'B>>A']:
+            cur_dict['winner'] = 'model_b'
+        else:
+            continue
+
+        cur_dict['capability'] = reference['capability']
+        cur_dict['model_a'] = reference['answer1']
+        cur_dict['model_b'] = reference['answer2']
+        cur_dict['prediction1'] = reference['prediction1']
+        cur_dict['prediction2'] = reference['prediction2']
+
+        matches.append(cur_dict)
+
+    ### ---------- Add Style Metadata ---------- ###
+    matches = get_element_counts(
+        data=matches,
+        column='prediction1',
+        suffix='_a',
+    )
+    matches = get_element_counts(
+        data=matches,
+        column='prediction2',
+        suffix='_b',
+    )
+
+    results['matches'] = matches
+    # results["details"] = output
+
+    return results
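For context, the matches list built here (one record per pairwise comparison, carrying the winner, capability, both model names, and both predictions plus their style counts) is the input that a downstream summarizer such as CompassArenaBradleyTerrySummarizer (named in the commit message) fits Bradley-Terry ratings on. The sketch below shows one common way to fit such ratings with the LogisticRegression import already present in this module; the function name fit_bradley_terry and the scale, base, and init_rating constants are illustrative assumptions, not the summarizer's actual implementation.

import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression


def fit_bradley_terry(matches, scale=400, base=10, init_rating=1000):
    """Fit Bradley-Terry ratings from match records (illustrative sketch).

    Each record needs 'model_a', 'model_b', and a 'winner' field equal to
    'model_a', 'model_b', or 'tie'.
    """
    df = pd.DataFrame(matches)
    models = pd.unique(df[['model_a', 'model_b']].values.ravel())
    idx = {m: i for i, m in enumerate(models)}

    # Design matrix: +1 in model_a's column, -1 in model_b's column.
    X = np.zeros((len(df), len(models)))
    X[np.arange(len(df)), df['model_a'].map(idx).to_numpy()] = 1.0
    X[np.arange(len(df)), df['model_b'].map(idx).to_numpy()] = -1.0

    # Label is 1 when model_a wins and 0 when model_b wins; ties are split
    # by duplicating the row once with label 1 and once with label 0.
    y = (df['winner'] == 'model_a').astype(float).to_numpy()
    ties = (df['winner'] == 'tie').to_numpy()
    X = np.vstack([X, X[ties]])
    y = np.concatenate([np.where(ties, 1.0, y), np.zeros(int(ties.sum()))])

    lr = LogisticRegression(fit_intercept=False)
    lr.fit(X, y)

    # Map the logistic coefficients onto an Elo-like, base-10 rating scale.
    ratings = scale / np.log(base) * lr.coef_[0] + init_rating
    return dict(zip(models, ratings))

As with the Elo helpers above, such a fit can be repeated over bootstrap resamples of the matches (compare get_bootstrap_result) to attach confidence intervals to the ratings; sklearn's default L2 penalty also acts as a mild regularizer here.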
