[Feature] Add Bradley-Terry Subjective Evaluation method to Arena Hard dataset (#1802)

* Added base_models_abbrs to references (passed from LMEvaluator); added the Bradley-Terry subjective evaluation method for the wildbench, alpacaeval, and compassarena datasets; added all_scores output files for reference in CompassArenaBradleyTerrySummarizer.
* Added the Bradley-Terry subjective evaluation method to the arena_hard dataset.
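For context only (not part of this commit): a Bradley-Terry model converts the pairwise judge verdicts produced by the config below into per-model strength estimates. The sketch that follows is a minimal illustration of the standard minorization-maximization fit; the function name, the wins-matrix input, and the Elo-style rescaling are assumptions for illustration, not the repository's implementation.

import numpy as np

def fit_bradley_terry(wins: np.ndarray, n_iter: int = 500, tol: float = 1e-8) -> np.ndarray:
    """Illustrative Bradley-Terry fit (not the repo's implementation).

    wins[i, j] = number of times model i beat model j; ties can be counted
    as 0.5 for each side. Assumes the comparison graph is connected and
    every model has at least one win.
    """
    n = wins.shape[0]
    games = wins + wins.T          # total comparisons per pair
    p = np.ones(n)                 # current strength estimates
    for _ in range(n_iter):
        # MM update: p_i <- W_i / sum_{j != i} n_ij / (p_i + p_j)
        denom = games / (p[:, None] + p[None, :])
        np.fill_diagonal(denom, 0.0)
        new_p = wins.sum(axis=1) / denom.sum(axis=1)
        new_p /= new_p.sum()       # fix the scale; only strength ratios are identified
        if np.max(np.abs(new_p - p)) < tol:
            p = new_p
            break
        p = new_p
    # Elo-like presentation; the 400 / 1000 anchoring is a common convention,
    # not something taken from this commit.
    return 400.0 * np.log10(p / p.mean()) + 1000.0

Because only strength ratios are identified, the update renormalizes each iteration and the final anchoring of the scale is a free choice.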
Showing 5 changed files with 237 additions and 7 deletions.
configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py (79 additions, 0 deletions)
from mmengine.config import read_base

from opencompass.datasets import ArenaHardDataset, arenahard_bradleyterry_postprocess
from opencompass.openicl.icl_evaluator import LMEvaluator
from opencompass.openicl.icl_inferencer import GenInferencer
from opencompass.openicl.icl_prompt_template import PromptTemplate
from opencompass.openicl.icl_retriever import ZeroRetriever

subjective_reader_cfg = dict(
    input_columns=['question'],
    output_column='judge',
)

subjective_all_sets = [
    'arenahard',
]


arenahard_datasets = []

system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."

judge_prompt = "<|User Prompt|>\n{question}\n\n<|The Start of Assistant A's Answer|>\n{prediction}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{prediction2}\n<|The End of Assistant B's Answer|>"

gpt4 = [
    dict(
        abbr='gpt4-0314',
    )
]

for _name in subjective_all_sets:
    subjective_infer_cfg = dict(
        prompt_template=dict(
            type=PromptTemplate,
            template=dict(
                round=[
                    dict(role='HUMAN', prompt='{question}'),
                ]
            ),
        ),
        retriever=dict(type=ZeroRetriever),
        inferencer=dict(type=GenInferencer, max_out_len=4096),
    )

    subjective_eval_cfg = dict(
        evaluator=dict(
            type=LMEvaluator,
            prompt_template=dict(
                type=PromptTemplate,
                template=dict(
                    begin=[
                        dict(role='SYSTEM', fallback_role='HUMAN', prompt=system_prompt)
                    ],
                    round=[
                        dict(role='HUMAN', prompt=judge_prompt),
                    ],
                ),
            ),
            dict_postprocessor=dict(type=arenahard_bradleyterry_postprocess),
            keep_predictions=True,  # Must be turned on to save predictions from model pairs to calculate style features in postprocessor
        ),
        pred_role='BOT',
    )

    arenahard_datasets.append(
        dict(
            abbr='arenahard',
            type=ArenaHardDataset,
            path='./data/subjective/arena_hard',
            name=_name,
            reader_cfg=subjective_reader_cfg,
            infer_cfg=subjective_infer_cfg,
            eval_cfg=subjective_eval_cfg,
            mode='m2n',
            infer_order='double',
            base_models=gpt4,
            given_pred=[{'abbr': 'gpt4-0314', 'path': './data/subjective/arena_hard'}],
        )
    )
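As a usage note (an assumption based on the read_base import at the top of the file, not something shown in this commit): an experiment config would typically pull arenahard_datasets in roughly as follows, with the models under comparison and the judge model left as placeholders.

from mmengine.config import read_base

with read_base():
    # Import path assumes the package layout matches the file path shown in this diff.
    from opencompass.configs.datasets.subjective.arena_hard.arena_hard_compare_bradleyterry import (
        arenahard_datasets,
    )

datasets = [*arenahard_datasets]
# models = [...]        # models to be compared (placeholders, not part of this commit)
# judge_models = [...]  # judge model used by LMEvaluator (placeholder)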
opencompass/configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py (79 additions, 0 deletions)
(File contents are identical to configs/datasets/subjective/arena_hard/arena_hard_compare_bradleyterry.py shown above.)
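The commit message also mentions CompassArenaBradleyTerrySummarizer, which aggregates the postprocessed pairwise results (and now additionally writes all_scores output files). A hypothetical wiring sketch follows; the import location and the lack of constructor arguments are assumptions, not taken from this diff.

# Hypothetical summarizer wiring; import path and options are assumptions.
from opencompass.summarizers import CompassArenaBradleyTerrySummarizer  # assumed module location

summarizer = dict(
    type=CompassArenaBradleyTerrySummarizer,
    # Options such as rating scale or grouping are omitted here; consult the
    # summarizer's definition in the repository for the supported arguments.
)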