Merge pull request #2 from yuliana-shakhvalieva/arena-hard-ru
Errors: results and regeneration + Initial benchmark automation
zemerov authored Aug 2, 2024
2 parents a7fa155 + 2e6bc14 commit b07523e
Showing 12 changed files with 434 additions and 1 deletion.
7 changes: 7 additions & 0 deletions automation/.gen_answer_ascii.txt
@@ -0,0 +1,7 @@

[ASCII-art banner: "gen answer"]

8 changes: 8 additions & 0 deletions automation/.gen_judgment_ascii.txt
@@ -0,0 +1,8 @@

[ASCII-art banner: "gen judgment"]

8 changes: 8 additions & 0 deletions automation/.get_token_ascii.txt
@@ -0,0 +1,8 @@

[ASCII-art banner: "get token"]

7 changes: 7 additions & 0 deletions automation/.results_ascii.txt
@@ -0,0 +1,7 @@

[ASCII-art banner: "results"]

7 changes: 7 additions & 0 deletions automation/.run_vllm_ascii.txt
@@ -0,0 +1,7 @@

[ASCII-art banner: "run vllm"]

18 changes: 18 additions & 0 deletions automation/config_constants.py
@@ -0,0 +1,18 @@
# api_config.yaml
gpt_4o_parallel = 250
gpt_4_0613_parallel = 50
yandex_parallel = 1
gigachat_parallel = 1
hf_parallel = 1

question_file_name = 'question_ru.jsonl'

# judge_config.yaml
prompt_template = ["<|User Prompt|>\n{question_1}\n\n<|The Start of Assistant A's Answer|>\n{answer_1}\n<|The End of Assistant A's Answer|>\n\n<|The Start of Assistant B's Answer|>\n{answer_2}\n<|The End of Assistant B's Answer|>"]
system_prompt = "Please act as an impartial judge and evaluate the quality of the responses provided by two AI assistants to the user prompt displayed below. You will be given assistant A's answer and assistant B's answer. Your job is to evaluate which assistant's answer is better.\n\nBegin your evaluation by generating your own answer to the prompt. You must provide your answers before judging any answers.\n\nWhen evaluating the assistants' answers, compare both assistants' answers with your answer. You must identify and correct any mistakes or inaccurate information.\n\nThen consider if the assistant's answers are helpful, relevant, and concise. Helpful means the answer correctly responds to the prompt or follows the instructions. Note when user prompt has any ambiguity or more than one interpretation, it is more helpful and appropriate to ask for clarifications or more information from the user than providing an answer based on assumptions. Relevant means all parts of the response closely connect or are appropriate to what is being asked. Concise means the response is clear and not verbose or excessive.\n\nThen consider the creativity and novelty of the assistant's answers when needed. Finally, identify any missing important information in the assistants' answers that would be beneficial to include when responding to the user prompt.\n\nAfter providing your explanation, you must output only one of the following choices as your final verdict with a label:\n\n1. Assistant A is significantly better: [[A>>B]]\n2. Assistant A is slightly better: [[A>B]]\n3. Tie, relatively the same: [[A=B]]\n4. Assistant B is slightly better: [[B>A]]\n5. Assistant B is significantly better: [[B>>A]]\n\nExample output: \"My final verdict is tie: [[A=B]]\"."
# Matches verdict labels such as [[A>B]] or [[A=B]] in the judge's output.
regex_pattern = r"\[\[([AB<>=]+)\]\]"

reference = False
ref_model = None
baseline = True
pairwise = True
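
As a quick sanity check, the verdict pattern above can be exercised against the example output given in the system prompt. A minimal sketch, not part of the commit, assuming config_constants.py is importable from the working directory:

import re

import config_constants

# The judge's example output, taken verbatim from the system prompt above.
judgment = 'My final verdict is tie: [[A=B]]'

match = re.search(config_constants.regex_pattern, judgment)
if match:
    print(match.group(1))  # -> A=B
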
14 changes: 14 additions & 0 deletions automation/default_hyperparameters.txt
@@ -0,0 +1,14 @@
vllm_port = 8880
hf_parallel = 1

bench_name = arena-hard-v0.1
judge_model = gpt-4o
baseline_model = gpt-4-0613

gen_answer_temperature = 0.0
gen_answer_max_tokens = 4096
gen_answer_num_choices = 1

judge_config_temperature = 0
judge_config_max_tokens = 4096
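
These defaults use the same "name = value" token format that make_yaml_config.py (below) accepts on its command line, so they can be forwarded to it directly. A hypothetical glue sketch, assuming the defaults file sits next to the script, that a config/ directory exists for the generated YAML, and that run-specific settings (the values in `extra` below are purely illustrative) are supplied separately:

import make_yaml_config

# Tokenize "name = value" lines; main() drops the bare "=" tokens itself.
with open('default_hyperparameters.txt', encoding='utf-8') as f:
    tokens = [tok for line in f for tok in line.split()]

# Run-specific settings not covered by the defaults (illustrative values).
extra = [
    'model_type', 'hf',
    'model_alias', 'my-model',
    'original_model_name', 'org/my-model',
    'hostname', 'localhost',
]

# main() skips argv[0], filters "=" tokens, then pairs name/value.
make_yaml_config.main(['make_yaml_config.py'] + tokens + extra)
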

143 changes: 143 additions & 0 deletions automation/make_yaml_config.py
@@ -0,0 +1,143 @@
# -*- coding: utf-8 -*-
import sys

import yaml

import config_constants


class QuotedString(str):
    """Marker type for strings that must be emitted as double-quoted YAML scalars."""
    pass


class CustomDumper(yaml.Dumper):
    def represent_data(self, data):
        if isinstance(data, QuotedString):
            return self.represent_scalar('tag:yaml.org,2002:str', data, style='"')

        return super().represent_data(data)


def save_yaml(data_name, data):
    file_name = 'config/' + data_name + '.yaml'

    # The file is opened with encoding='utf-8', so yaml.dump must not also
    # receive an encoding argument (that would make it emit bytes).
    with open(file_name, 'w', encoding='utf-8') as outfile:
        yaml.dump(data, outfile,
                  default_flow_style=False,
                  width=2000,
                  allow_unicode=True,
                  Dumper=CustomDumper)


def save_api_config(hyperparameters):
    api_config = {
        'gpt-4o': {
            'model_name': 'gpt-4o',
            'endpoints': None,
            'api_type': 'openai',
            'parallel': config_constants.gpt_4o_parallel,
        },
        'gpt-4-0613': {
            'model_name': 'gpt-4-0613',
            'endpoints': None,
            'api_type': 'openai',
            'parallel': config_constants.gpt_4_0613_parallel,
        },
    }

    # 'Ты полезный AI-ассистент.' is Russian for "You are a helpful AI assistant."
    if hyperparameters['model_type'] == 'yandex':
        model_config = {
            'model_name': hyperparameters['original_model_name'],
            'system_prompt': QuotedString('Ты полезный AI-ассистент.'),
            'endpoints': None,
            'api_type': 'yandex',
            'parallel': config_constants.yandex_parallel,
        }
    elif hyperparameters['model_type'] == 'gigachat':
        model_config = {
            'model_name': hyperparameters['original_model_name'],
            'system_prompt': QuotedString('Ты полезный AI-ассистент.'),
            'endpoints': None,
            'api_type': 'sber',
            'parallel': config_constants.gigachat_parallel,
        }
    elif hyperparameters['model_type'] == 'hf':
        model_config = {
            'model_name': hyperparameters['original_model_name'],
            'endpoints': [
                {
                    'api_base': f"http://{hyperparameters['hostname']}:{hyperparameters['vllm_port']}/v1",
                    'api_key': 'default-token',
                },
            ],
            'api_type': 'openai',
            'parallel': hyperparameters['hf_parallel'],
        }
    else:
        raise ValueError('Incorrect model type')

    api_config[hyperparameters['model_alias']] = model_config
    save_yaml('api_config', api_config)


def save_gen_answer_config(hyperparameters):
    gen_answer_config = {
        'name': f'config of answer generation for {hyperparameters["bench_name"]}',
        'bench_name': hyperparameters['bench_name'],
        'temperature': hyperparameters['gen_answer_temperature'],
        'max_tokens': hyperparameters['gen_answer_max_tokens'],
        'num_choices': hyperparameters['gen_answer_num_choices'],
        'question_file': QuotedString(config_constants.question_file_name),
        'model_list': [hyperparameters['baseline_model'], hyperparameters['model_alias']],
    }

    save_yaml('gen_answer_config', gen_answer_config)


def save_judge_config(hyperparameters):
    judge_config = {
        'name': f'judgment config file for {hyperparameters["bench_name"]}',
        'bench_name': hyperparameters['bench_name'],
        'judge_model': hyperparameters['judge_model'],
        'reference': config_constants.reference,
        'ref_model': config_constants.ref_model,
        'baseline': config_constants.baseline,
        'baseline_model': hyperparameters['baseline_model'],
        'pairwise': config_constants.pairwise,
        'temperature': hyperparameters['judge_config_temperature'],
        'max_tokens': hyperparameters['judge_config_max_tokens'],
        'regex_pattern': config_constants.regex_pattern,
        'system_prompt': QuotedString(config_constants.system_prompt),
        'prompt_template': config_constants.prompt_template,
        'question_file': QuotedString(config_constants.question_file_name),
        'model_list': [hyperparameters['model_alias']],
    }

    save_yaml('judge_config', judge_config)


def correct_type(arg):
    """Coerce a CLI token to int, then float, falling back to str."""
    try:
        return int(arg)
    except ValueError:
        try:
            return float(arg)
        except ValueError:
            return arg


def main(args):
    # Arguments arrive as name/value pairs; bare "=" separators are dropped,
    # so both "name value" and "name = value" forms are accepted.
    data = [correct_type(arg) for arg in args[1:] if arg != "="]
    hyperparameters = dict()

    for i in range(0, len(data), 2):
        var_name = data[i]
        var_value = data[i + 1]
        hyperparameters[var_name] = var_value

    save_api_config(hyperparameters)
    save_gen_answer_config(hyperparameters)
    save_judge_config(hyperparameters)


if __name__ == '__main__':
    main(sys.argv)
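
To see what the QuotedString/CustomDumper pair above actually buys: PyYAML would otherwise emit the Russian system prompt as a plain scalar, while the dumper forces it into an explicitly double-quoted one. A minimal demo sketch, not part of the commit, assuming make_yaml_config.py is importable from the working directory:

import yaml

from make_yaml_config import CustomDumper, QuotedString

data = {
    'model_name': 'gpt-4o',                                      # plain str
    'system_prompt': QuotedString('Ты полезный AI-ассистент.'),  # forced quotes
}

print(yaml.dump(data, Dumper=CustomDumper, allow_unicode=True,
                default_flow_style=False))
# model_name: gpt-4o
# system_prompt: "Ты полезный AI-ассистент."
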
