From 741b3e7f17033ccec7ba6d7462ad31fbe09743e6 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Fri, 15 Nov 2024 16:28:27 +0000 Subject: [PATCH 01/18] Add LongBench validation --- tests/python_tests/test_kv_cache_eviction.py | 85 ++++++ tests/python_tests/utils_longbench.py | 259 +++++++++++++++++++ 2 files changed, 344 insertions(+) create mode 100644 tests/python_tests/utils_longbench.py diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 84baa79e02..76ee2cc944 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -16,6 +16,7 @@ from transformers import AutoTokenizer from common import TESTS_ROOT, run_cb_pipeline_with_ref, get_default_properties +from utils_longbench import dataset2maxlen, evaluate, preprocess_prompt, post_process_pred def load_prompts_dataset(file_name : str) -> Dict[str, List[str]]: @@ -69,6 +70,7 @@ class CacheOptTestStruct: SHORT_CACHE_EVICTION_CONFIG = CacheEvictionConfig(start_size=32, recent_size=32, max_cache_size=96, aggregation_mode=AggregationMode.NORM_SUM) +LONGBENCH_CACHE_EVICTION_CONFIG = CacheEvictionConfig(start_size=32, recent_size=64, max_cache_size=256, aggregation_mode=AggregationMode.NORM_SUM) @pytest.mark.precommit @@ -190,3 +192,86 @@ def get_beam_search_seq_len_300() -> GenerationConfig: def test_dynamic_memory_allocation(tmp_path, params): run_cb_pipeline_with_ref(tmp_path, "facebook/opt-125m", scheduler_params=params[0], generation_config=params[1]) + +@pytest.fixture(scope='module') +def phi3_converted_model(tmp_path_factory): + model_id = "microsoft/Phi-3-mini-4k-instruct" + model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True, load_in_8bit=False) + tokenizer = AutoTokenizer.from_pretrained(model_id) + models_path = tmp_path_factory.mktemp("cacheopt_test_models") / model_id + model.save_pretrained(models_path) + ov_tokenizer, ov_detokenizer = convert_tokenizer(tokenizer, with_detokenizer=True, skip_special_tokens=True) + serialize(ov_tokenizer, models_path / "openvino_tokenizer.xml") + serialize(ov_detokenizer, models_path / "openvino_detokenizer.xml") + phi3_converted_model = ConvertedModel(model, tokenizer, models_path) + yield phi3_converted_model + del phi3_converted_model + del model + + +@pytest.mark.precommit +@pytest.mark.parametrize("subset", ["samsum", "qmsum", "trec", "qasper", "hotpotqa", "repobench-p"]) +def test_unoptimized_generation_longbench(phi3_converted_model, subset): + seqs_per_request = 2 + num_kv_blocks = 1000 + scheduler_config = get_scheduler_config(num_kv_blocks) + models_path = phi3_converted_model.models_path + model_name = "/".join(models_path.parts[-2:]) + max_new_tokens = dataset2maxlen[subset] + tokenizer = phi3_converted_model.tokenizer + + generation_config = GenerationConfig() # expecting default greedy sampling + generation_config.num_return_sequences = 1 + generation_config.max_new_tokens = max_new_tokens + generation_config.eos_token_id = tokenizer.eos_token_id + + data = datasets.load_dataset('THUDM/LongBench', subset, split='test') + + # model_id = "microsoft/Phi-3-mini-4k-instruct" + # model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True, load_in_8bit=False) + + model_cb_noopt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}) + with tqdm(total=len(data)) as progress_bar: + batch = [] + answers = [] + for p_idx, data_sample in enumerate(data): + prompt, context_len = 
preprocess_prompt(tokenizer, data_sample, subset, model_name) + progress_bar.update(1) + batch.append(prompt) + answers.append({"context_len": context_len, "answers": data_sample["answers"], "all_classes": data_sample["all_classes"]}) + + # input = tokenizer(prompt, truncation=False, return_tensors="pt") + # output = model.generate( + # **input, + # max_new_tokens=128, + # num_beams=1, + # do_sample=False, + # temperature=1.0, + # min_length=context_len+1, + # pad_token_id=tokenizer.eos_token_id, + # eos_token_id=[tokenizer.eos_token_id, tokenizer.encode("\n", add_special_tokens=False)[-1]], + # )[0] + # pred = tokenizer.decode(output[context_len:], skip_special_tokens=True) + # pred = post_process_pred(output.m_generation_ids, subset, model_name) + # answers[-1]["pred"] = pred + + if ( + len(batch) == seqs_per_request + or p_idx == len(data) - 1 + ): + ans_batch = model_cb_noopt.generate( + batch, [generation_config] * len(batch) + ) + for i, output in enumerate(ans_batch, start=p_idx-len(batch)+1): + context_len = answers[i]["context_len"] + pred = post_process_pred(output.m_generation_ids, subset, model_name) + answers[i]["pred"] = pred + + batch.clear() + + score = evaluate(answers, subset) + print(f"Score: {score}") + + pipeline_noopt_metrics = model_cb_noopt.get_metrics() + print(f"No-opt cache usage: max {pipeline_noopt_metrics.max_cache_usage:.3f}, avg {pipeline_noopt_metrics.avg_cache_usage:.3f}") + del model_cb_noopt diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py new file mode 100644 index 0000000000..2f0bcb4010 --- /dev/null +++ b/tests/python_tests/utils_longbench.py @@ -0,0 +1,259 @@ +import re +import string + +from collections import Counter +from rouge import Rouge + +def normalize_answer(s): + """Lower text and remove punctuation, articles and extra whitespace.""" + + def remove_articles(text): + return re.sub(r"\b(a|an|the)\b", " ", text) + + def white_space_fix(text): + return " ".join(text.split()) + + def remove_punc(text): + exclude = set(string.punctuation) + return "".join(ch for ch in text if ch not in exclude) + + def lower(text): + return text.lower() + + return white_space_fix(remove_articles(remove_punc(lower(s)))) + +def normalize_zh_answer(s): + """Lower text and remove punctuation, extra whitespace.""" + + def white_space_fix(text): + return "".join(text.split()) + + def remove_punc(text): + cn_punctuation = "!?。。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏." 
+ all_punctuation = set(string.punctuation + cn_punctuation) + return "".join(ch for ch in text if ch not in all_punctuation) + + def lower(text): + return text.lower() + + return white_space_fix(remove_punc(lower(s))) + +def count_score(prediction, ground_truth, **kwargs): + numbers = re.findall(r"\d+", prediction) + right_num = 0 + for number in numbers: + if str(number) == str(ground_truth): + right_num += 1 + final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) + return float(final_score) + +def retrieval_score(prediction, ground_truth, **kwargs): + pattern = r'Paragraph (\d+)' + matches = re.findall(pattern, ground_truth) + ground_truth_id = matches[0] + numbers = re.findall(r"\d+", prediction) + right_num = 0 + for number in numbers: + if str(number) == str(ground_truth_id): + right_num += 1 + final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) + return float(final_score) + +def retrieval_zh_score(prediction, ground_truth, **kwargs): + pattern = r'段落(\d+)' + matches = re.findall(pattern, ground_truth) + ground_truth_id = matches[0] + numbers = re.findall(r"\d+", prediction) + right_num = 0 + for number in numbers: + if str(number) == str(ground_truth_id): + right_num += 1 + final_score = 0.0 if len(numbers) == 0 else right_num / len(numbers) + return float(final_score) + +def code_sim_score(prediction, ground_truth, **kwargs): + from fuzzywuzzy import fuzz + all_lines = prediction.lstrip('\n').split('\n') + prediction = "" + for line in all_lines: + if ('`' not in line) and ('#' not in line) and ('//' not in line): + prediction = line + break + return (fuzz.ratio(prediction, ground_truth) / 100) + +def classification_score(prediction, ground_truth, **kwargs): + em_match_list = [] + all_classes = kwargs["all_classes"] + for class_name in all_classes: + if class_name in prediction: + em_match_list.append(class_name) + for match_term in em_match_list: + if match_term in ground_truth and match_term != ground_truth: + em_match_list.remove(match_term) + if ground_truth in em_match_list: + score = (1.0 / len(em_match_list)) + else: + score = 0.0 + return score + +def rouge_score(prediction, ground_truth, **kwargs): + rouge = Rouge() + try: + scores = rouge.get_scores([prediction], [ground_truth], avg=True) + except: + return 0.0 + return scores["rouge-l"]["f"] + +def f1_score(prediction, ground_truth, **kwargs): + common = Counter(prediction) & Counter(ground_truth) + num_same = sum(common.values()) + if num_same == 0: + return 0 + precision = 1.0 * num_same / len(prediction) + recall = 1.0 * num_same / len(ground_truth) + f1 = (2 * precision * recall) / (precision + recall) + return f1 + +def qa_f1_score(prediction, ground_truth, **kwargs): + normalized_prediction = normalize_answer(prediction) + normalized_ground_truth = normalize_answer(ground_truth) + + prediction_tokens = normalized_prediction.split() + ground_truth_tokens = normalized_ground_truth.split() + return f1_score(prediction_tokens, ground_truth_tokens) + + +dataset2metric = { + "narrativeqa": qa_f1_score, + "qasper": qa_f1_score, + "multifieldqa_en": qa_f1_score, + "hotpotqa": qa_f1_score, + "2wikimqa": qa_f1_score, + "musique": qa_f1_score, + "gov_report": rouge_score, + "qmsum": rouge_score, + "multi_news": rouge_score, + "trec": classification_score, + "triviaqa": qa_f1_score, + "samsum": rouge_score, + "lsht": classification_score, + "passage_retrieval_en": retrieval_score, + "passage_count": count_score, + "passage_retrieval_zh": retrieval_zh_score, + "lcc": code_sim_score, + 
"repobench-p": code_sim_score, +} + +# Max length for NVIDIA GeForce RTX 3090 (24 GB) +model2maxlen = { + "meta-llama/Llama-2-7b-chat-hf": 3500, + "meta-llama/Meta-Llama-3-8B-Instruct": 5000, + "meta-llama/Llama-3.1-8B-Instruct": 5000, + "microsoft/Phi-3-mini-4k-instruct": 5000, +} + +dataset2maxlen = { + "narrativeqa": 128, + "qasper": 128, + "multifieldqa_en": 64, + "multifieldqa_zh": 64, + "hotpotqa": 32, + "2wikimqa": 32, + "musique": 32, + "dureader": 128, + "gov_report": 512, + "qmsum": 512, + "multi_news": 512, + "vcsum": 512, + "trec": 64, + "triviaqa": 32, + "samsum": 128, + "lsht": 64, + "passage_count": 32, + "passage_retrieval_en": 32, + "passage_retrieval_zh": 32, + "lcc": 64, + "repobench-p": 64 +} + +dataset2prompt = { + "narrativeqa": "You are given a story, which can be either a novel or a movie script, and a question. Answer the question asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nStory: {context}\n\nNow, answer the question based on the story asconcisely as you can, using a single phrase if possible. Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:", + "qasper": "You are given a scientific article and a question. Answer the question as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write \"unanswerable\". If the question is a yes/no question, answer \"yes\", \"no\", or \"unanswerable\". Do not provide any explanation.\n\nArticle: {context}\n\n Answer the question based on the above article as concisely as you can, using a single phrase or sentence if possible. If the question cannot be answered based on the information in the article, write \"unanswerable\". If the question is a yes/no question, answer \"yes\", \"no\", or \"unanswerable\". Do not provide any explanation.\n\nQuestion: {input}\n\nAnswer:", + "multifieldqa_en": "Read the following text and answer briefly.\n\n{context}\n\nNow, answer the following question based on the above text, only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", + "multifieldqa_zh": "阅读以下文字并用中文简短回答:\n\n{context}\n\n现在请基于上面的文章回答下面的问题,只告诉我答案,不要输出任何其他字词。\n\n问题:{input}\n回答:", + "hotpotqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", + "2wikimqa": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", + "musique": "Answer the question based on the given passages. Only give me the answer and do not output any other words.\n\nThe following are given passages.\n{context}\n\nAnswer the question based on the given passages. Only give me the answer and do not output any other words.\n\nQuestion: {input}\nAnswer:", + "dureader": "请基于给定的文章回答下述问题。\n\n文章:{context}\n\n请基于上述文章回答下面的问题。\n\n问题:{input}\n回答:", + "gov_report": "You are given a report by a government agency. 
Write a one-page summary of the report.\n\nReport:\n{context}\n\nNow, write a one-page summary of the report.\n\nSummary:", + "qmsum": "You are given a meeting transcript and a query containing a question or instruction. Answer the query in one or more sentences.\n\nTranscript:\n{context}\n\nNow, answer the query based on the above meeting transcript in one or more sentences.\n\nQuery: {input}\nAnswer:", + "multi_news": "You are given several news passages. Write a one-page summary of all news. \n\nNews:\n{context}\n\nNow, write a one-page summary of all the news.\n\nSummary:", + "vcsum": "下面有一段会议记录,请你阅读后,写一段总结,总结会议的内容。\n会议记录:\n{context}\n\n会议总结:", + "trec": "Please determine the type of the question below. Here are some examples of questions.\n\n{context}\n{input}", + "triviaqa": "Answer the question based on the given passage. Only give me the answer and do not output any other words. The following are some examples.\n\n{context}\n\n{input}", + "samsum": "Summarize the dialogue into a few short sentences. The following are some examples.\n\n{context}\n\n{input}", + "lsht": "请判断给定新闻的类别,下面是一些例子。\n\n{context}\n{input}", + "passage_count": "There are some paragraphs below sourced from Wikipedia. Some of them may be duplicates. Please carefully read these paragraphs and determine how many unique paragraphs there are after removing duplicates. In other words, how many non-repeating paragraphs are there in total?\n\n{context}\n\nPlease enter the final count of unique paragraphs after removing duplicates. The output format should only contain the number, such as 1, 2, 3, and so on.\n\nThe final answer is: ", + "passage_retrieval_en": "Here are 30 paragraphs from Wikipedia, along with an abstract. Please determine which paragraph the abstract is from.\n\n{context}\n\nThe following is an abstract.\n\n{input}\n\nPlease enter the number of the paragraph that the abstract is from. The answer format must be like \"Paragraph 1\", \"Paragraph 2\", etc.\n\nThe answer is: ", + "passage_retrieval_zh": "以下是若干段落文字,以及其中一个段落的摘要。请确定给定的摘要出自哪一段。\n\n{context}\n\n下面是一个摘要\n\n{input}\n\n请输入摘要所属段落的编号。答案格式必须是\"段落1\",\"段落2\"等格式\n\n答案是:", + "lcc": "Please complete the code given below. \n{context}Next line of code:\n", + "repobench-p": "Please complete the code given below. \n{context}{input}Next line of code:\n" +} + + +def scorer(dataset, predictions, answers, all_classes): + total_score = 0. + for (prediction, ground_truths) in zip(predictions, answers): + score = 0. 
+ if dataset in ["trec", "triviaqa", "samsum", "lsht"]: + prediction = prediction.lstrip('\n').split('\n')[0] + for ground_truth in ground_truths: + score = max(score, dataset2metric[dataset](prediction, ground_truth, all_classes=all_classes)) + total_score += score + return round(100 * total_score / len(predictions), 2) + + +def evaluate(model_output, task): + predictions, answers = [], [] + for data in model_output: + predictions.append(data["pred"]) + answers.append(data["answers"]) + all_classes = data["all_classes"] + score = scorer(task, predictions, answers, all_classes) + return score + + +def build_chat(prompt, model_name): + if "Llama-2" in model_name: + prompt = f"[INST]{prompt}[/INST]" + elif "Llama" in model_name: + prompt = f"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n{prompt}<|eot_id|>\n<|start_header_id|>assistant<|end_header_id|>" + elif "Phi-3" in model_name: + prompt = f"<|user|>\n{prompt} <|end|>\n<|assistant|>" + return prompt + + +def preprocess_prompt(tokenizer, data_sample, subset, model_name): + prompt_format = dataset2prompt[subset] + max_length = model2maxlen[model_name] + + prompt = prompt_format.format(**data_sample) + tokenized_prompt = tokenizer(prompt, truncation=False, return_tensors="pt").input_ids[0] + context_len = tokenized_prompt.shape[-1] + if len(tokenized_prompt) > max_length: + context_len = max_length + half = int(max_length/2) + prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True) + tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True) + if subset not in ["trec", "triviaqa", "samsum", "lsht", "lcc", "repobench-p"]: + prompt = build_chat(prompt, model_name) + return prompt, context_len + + +def post_process_pred(subset, model_name): + if subset in ["samsum", "qsum", "hotpotqa", "qasper"] and "Llama-3-8B" in model_name: + pred = pred[:pred.find("assistant")] + elif subset == "samsum": + pred = pred[:pred.find("\nDialogue")] + elif "Phi-3" in model_name and subset == "hotpotqa": + pred = pred.lstrip('\n').split('\n')[0] + return pred From f11fdaf23b9e1fa6113eb94cb9ae576a76c8d098 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Fri, 15 Nov 2024 18:14:56 +0000 Subject: [PATCH 02/18] fix --- tests/python_tests/test_kv_cache_eviction.py | 2 +- tests/python_tests/utils_longbench.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 76ee2cc944..1c71654670 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -264,7 +264,7 @@ def test_unoptimized_generation_longbench(phi3_converted_model, subset): ) for i, output in enumerate(ans_batch, start=p_idx-len(batch)+1): context_len = answers[i]["context_len"] - pred = post_process_pred(output.m_generation_ids, subset, model_name) + pred = post_process_pred(output.m_generation_ids[0], subset, model_name) answers[i]["pred"] = pred batch.clear() diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 2f0bcb4010..32f063bfaa 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -249,7 +249,7 @@ def preprocess_prompt(tokenizer, data_sample, subset, model_name): return prompt, context_len -def post_process_pred(subset, model_name): +def post_process_pred(pred, subset, model_name): if subset in ["samsum", "qsum", "hotpotqa", "qasper"] and "Llama-3-8B" in model_name: pred = pred[:pred.find("assistant")] 
elif subset == "samsum": From 7810f4f34c3f7f979e4a29fdec54783b01c8c69c Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 18 Nov 2024 11:00:00 +0000 Subject: [PATCH 03/18] set context len as max len to phi3 --- tests/python_tests/test_kv_cache_eviction.py | 2 +- tests/python_tests/utils_longbench.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 1c71654670..058d41ca47 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -196,7 +196,7 @@ def test_dynamic_memory_allocation(tmp_path, params): @pytest.fixture(scope='module') def phi3_converted_model(tmp_path_factory): model_id = "microsoft/Phi-3-mini-4k-instruct" - model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True, load_in_8bit=False) + model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained(model_id) models_path = tmp_path_factory.mktemp("cacheopt_test_models") / model_id model.save_pretrained(models_path) diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 32f063bfaa..77cf2efc4f 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -149,7 +149,7 @@ def qa_f1_score(prediction, ground_truth, **kwargs): "meta-llama/Llama-2-7b-chat-hf": 3500, "meta-llama/Meta-Llama-3-8B-Instruct": 5000, "meta-llama/Llama-3.1-8B-Instruct": 5000, - "microsoft/Phi-3-mini-4k-instruct": 5000, + "microsoft/Phi-3-mini-4k-instruct": 4096, } dataset2maxlen = { From be73e30e624d50ef594ae5326d840575227821d4 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 18 Nov 2024 18:53:04 +0000 Subject: [PATCH 04/18] update model --- tests/python_tests/test_kv_cache_eviction.py | 4 ++-- tests/python_tests/utils_longbench.py | 8 +++++--- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 058d41ca47..45dd2a677c 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -195,7 +195,7 @@ def test_dynamic_memory_allocation(tmp_path, params): @pytest.fixture(scope='module') def phi3_converted_model(tmp_path_factory): - model_id = "microsoft/Phi-3-mini-4k-instruct" + model_id = "meta-llama/Llama-3.2-3B-Instruct" model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained(model_id) models_path = tmp_path_factory.mktemp("cacheopt_test_models") / model_id @@ -212,7 +212,7 @@ def phi3_converted_model(tmp_path_factory): @pytest.mark.precommit @pytest.mark.parametrize("subset", ["samsum", "qmsum", "trec", "qasper", "hotpotqa", "repobench-p"]) def test_unoptimized_generation_longbench(phi3_converted_model, subset): - seqs_per_request = 2 + seqs_per_request = 32 num_kv_blocks = 1000 scheduler_config = get_scheduler_config(num_kv_blocks) models_path = phi3_converted_model.models_path diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 77cf2efc4f..152095e1db 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -146,10 +146,12 @@ def qa_f1_score(prediction, ground_truth, **kwargs): # Max length for NVIDIA GeForce RTX 3090 (24 GB) model2maxlen = { - "meta-llama/Llama-2-7b-chat-hf": 3500, + 
"meta-llama/Llama-2-7b-chat-hf": 4096, "meta-llama/Meta-Llama-3-8B-Instruct": 5000, - "meta-llama/Llama-3.1-8B-Instruct": 5000, + "meta-llama/Llama-3.1-8B-Instruct": 10000, "microsoft/Phi-3-mini-4k-instruct": 4096, + 'meta-llama/Llama-3.2-1B-Instruct': 10000, + 'meta-llama/Llama-3.2-3B-Instruct': 10000, } dataset2maxlen = { @@ -250,7 +252,7 @@ def preprocess_prompt(tokenizer, data_sample, subset, model_name): def post_process_pred(pred, subset, model_name): - if subset in ["samsum", "qsum", "hotpotqa", "qasper"] and "Llama-3-8B" in model_name: + if subset in ["samsum", "qsum", "hotpotqa", "qasper"] and "Llama-3" in model_name: pred = pred[:pred.find("assistant")] elif subset == "samsum": pred = pred[:pred.find("\nDialogue")] From a4040264a2192c7934fc56b82ed379b393b9807e Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Wed, 18 Dec 2024 20:31:35 +0000 Subject: [PATCH 05/18] change model to Qwen2 --- tests/python_tests/test_kv_cache_eviction.py | 83 ++++++++++---------- tests/python_tests/utils_longbench.py | 24 +----- 2 files changed, 44 insertions(+), 63 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 45dd2a677c..9374616d64 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -70,7 +70,7 @@ class CacheOptTestStruct: SHORT_CACHE_EVICTION_CONFIG = CacheEvictionConfig(start_size=32, recent_size=32, max_cache_size=96, aggregation_mode=AggregationMode.NORM_SUM) -LONGBENCH_CACHE_EVICTION_CONFIG = CacheEvictionConfig(start_size=32, recent_size=64, max_cache_size=256, aggregation_mode=AggregationMode.NORM_SUM) +LONGBENCH_CACHE_EVICTION_CONFIG = CacheEvictionConfig(start_size=32, recent_size=128, max_cache_size=672, aggregation_mode=AggregationMode.NORM_SUM) @pytest.mark.precommit @@ -194,8 +194,8 @@ def test_dynamic_memory_allocation(tmp_path, params): @pytest.fixture(scope='module') -def phi3_converted_model(tmp_path_factory): - model_id = "meta-llama/Llama-3.2-3B-Instruct" +def qwen2_converted_model(tmp_path_factory): + model_id = "Qwen/Qwen2-0.5B-Instruct" model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True) tokenizer = AutoTokenizer.from_pretrained(model_id) models_path = tmp_path_factory.mktemp("cacheopt_test_models") / model_id @@ -203,75 +203,72 @@ def phi3_converted_model(tmp_path_factory): ov_tokenizer, ov_detokenizer = convert_tokenizer(tokenizer, with_detokenizer=True, skip_special_tokens=True) serialize(ov_tokenizer, models_path / "openvino_tokenizer.xml") serialize(ov_detokenizer, models_path / "openvino_detokenizer.xml") - phi3_converted_model = ConvertedModel(model, tokenizer, models_path) - yield phi3_converted_model - del phi3_converted_model + qwen2_converted_model = ConvertedModel(model, tokenizer, models_path) + yield qwen2_converted_model + del qwen2_converted_model del model -@pytest.mark.precommit -@pytest.mark.parametrize("subset", ["samsum", "qmsum", "trec", "qasper", "hotpotqa", "repobench-p"]) -def test_unoptimized_generation_longbench(phi3_converted_model, subset): +@dataclass +class LongBenchTestData: + subset: str + ref_score: float + max_cache_usage: float + avg_cache_usage: float + + +@pytest.mark.parametrize("test_struct", [ + LongBenchTestData("samsum", 34.96, 16.2, 8.145), + LongBenchTestData("trec", 35, 14, 7.284), + LongBenchTestData("qasper", 14.67, 22.8, 13.182), +]) +def test_unoptimized_generation_longbench(qwen2_converted_model, test_struct): seqs_per_request = 32 num_kv_blocks = 
1000 scheduler_config = get_scheduler_config(num_kv_blocks) - models_path = phi3_converted_model.models_path + models_path = qwen2_converted_model.models_path model_name = "/".join(models_path.parts[-2:]) + subset = test_struct.subset max_new_tokens = dataset2maxlen[subset] - tokenizer = phi3_converted_model.tokenizer + tokenizer = qwen2_converted_model.tokenizer generation_config = GenerationConfig() # expecting default greedy sampling generation_config.num_return_sequences = 1 generation_config.max_new_tokens = max_new_tokens generation_config.eos_token_id = tokenizer.eos_token_id - data = datasets.load_dataset('THUDM/LongBench', subset, split='test') + scheduler_config.use_cache_eviction = True + if scheduler_config.use_cache_eviction: + scheduler_config.cache_eviction_config = LONGBENCH_CACHE_EVICTION_CONFIG - # model_id = "microsoft/Phi-3-mini-4k-instruct" - # model = OVModelForCausalLM.from_pretrained(model_id, export=True, trust_remote_code=True, load_in_8bit=False) + model_cb_opt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}) + data = datasets.load_dataset('THUDM/LongBench', subset, split='test') - model_cb_noopt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}) with tqdm(total=len(data)) as progress_bar: batch = [] answers = [] for p_idx, data_sample in enumerate(data): - prompt, context_len = preprocess_prompt(tokenizer, data_sample, subset, model_name) + prompt = preprocess_prompt(data_sample, subset, model_name) progress_bar.update(1) batch.append(prompt) - answers.append({"context_len": context_len, "answers": data_sample["answers"], "all_classes": data_sample["all_classes"]}) - - # input = tokenizer(prompt, truncation=False, return_tensors="pt") - # output = model.generate( - # **input, - # max_new_tokens=128, - # num_beams=1, - # do_sample=False, - # temperature=1.0, - # min_length=context_len+1, - # pad_token_id=tokenizer.eos_token_id, - # eos_token_id=[tokenizer.eos_token_id, tokenizer.encode("\n", add_special_tokens=False)[-1]], - # )[0] - # pred = tokenizer.decode(output[context_len:], skip_special_tokens=True) - # pred = post_process_pred(output.m_generation_ids, subset, model_name) - # answers[-1]["pred"] = pred - - if ( - len(batch) == seqs_per_request - or p_idx == len(data) - 1 - ): - ans_batch = model_cb_noopt.generate( + answers.append({"answers": data_sample["answers"], "all_classes": data_sample["all_classes"]}) + + if len(batch) == seqs_per_request or p_idx == len(data) - 1: + ans_batch = model_cb_opt.generate( batch, [generation_config] * len(batch) ) for i, output in enumerate(ans_batch, start=p_idx-len(batch)+1): - context_len = answers[i]["context_len"] pred = post_process_pred(output.m_generation_ids[0], subset, model_name) answers[i]["pred"] = pred - batch.clear() score = evaluate(answers, subset) print(f"Score: {score}") - pipeline_noopt_metrics = model_cb_noopt.get_metrics() - print(f"No-opt cache usage: max {pipeline_noopt_metrics.max_cache_usage:.3f}, avg {pipeline_noopt_metrics.avg_cache_usage:.3f}") - del model_cb_noopt + pipeline_noopt_metrics = model_cb_opt.get_metrics() + print(f"Opt cache usage: max {pipeline_noopt_metrics.max_cache_usage:.3f}, avg {pipeline_noopt_metrics.avg_cache_usage:.3f}") + + assert abs(test_struct.ref_score - score) < 1 + assert abs(test_struct.max_cache_usage - pipeline_noopt_metrics.max_cache_usage) < 1 + assert abs(test_struct.avg_cache_usage - pipeline_noopt_metrics.avg_cache_usage) < 1 + del model_cb_opt diff --git 
a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 152095e1db..be349f6602 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -144,16 +144,6 @@ def qa_f1_score(prediction, ground_truth, **kwargs): "repobench-p": code_sim_score, } -# Max length for NVIDIA GeForce RTX 3090 (24 GB) -model2maxlen = { - "meta-llama/Llama-2-7b-chat-hf": 4096, - "meta-llama/Meta-Llama-3-8B-Instruct": 5000, - "meta-llama/Llama-3.1-8B-Instruct": 10000, - "microsoft/Phi-3-mini-4k-instruct": 4096, - 'meta-llama/Llama-3.2-1B-Instruct': 10000, - 'meta-llama/Llama-3.2-3B-Instruct': 10000, -} - dataset2maxlen = { "narrativeqa": 128, "qasper": 128, @@ -235,20 +225,12 @@ def build_chat(prompt, model_name): return prompt -def preprocess_prompt(tokenizer, data_sample, subset, model_name): +def preprocess_prompt(data_sample, subset, model_name): prompt_format = dataset2prompt[subset] - max_length = model2maxlen[model_name] - prompt = prompt_format.format(**data_sample) - tokenized_prompt = tokenizer(prompt, truncation=False, return_tensors="pt").input_ids[0] - context_len = tokenized_prompt.shape[-1] - if len(tokenized_prompt) > max_length: - context_len = max_length - half = int(max_length/2) - prompt = tokenizer.decode(tokenized_prompt[:half], skip_special_tokens=True) + tokenizer.decode(tokenized_prompt[-half:], skip_special_tokens=True) if subset not in ["trec", "triviaqa", "samsum", "lsht", "lcc", "repobench-p"]: prompt = build_chat(prompt, model_name) - return prompt, context_len + return prompt def post_process_pred(pred, subset, model_name): @@ -258,4 +240,6 @@ def post_process_pred(pred, subset, model_name): pred = pred[:pred.find("\nDialogue")] elif "Phi-3" in model_name and subset == "hotpotqa": pred = pred.lstrip('\n').split('\n')[0] + elif "Qwen" in model_name and subset == "qasper": + pred = pred.lstrip('\n').split('\n')[0] return pred From 147ae2c9d46e08efa49b8b6545b8efee957f2cc4 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Thu, 19 Dec 2024 15:11:00 +0000 Subject: [PATCH 06/18] add test to precommit --- tests/python_tests/test_kv_cache_eviction.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 9374616d64..9a9fcd9ab8 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -5,7 +5,9 @@ import sys from typing import Dict, List, Optional +import datasets import pytest +from tqdm import tqdm from optimum.intel.openvino import OVModelForCausalLM @@ -217,10 +219,11 @@ class LongBenchTestData: avg_cache_usage: float +@pytest.mark.precommit @pytest.mark.parametrize("test_struct", [ - LongBenchTestData("samsum", 34.96, 16.2, 8.145), - LongBenchTestData("trec", 35, 14, 7.284), - LongBenchTestData("qasper", 14.67, 22.8, 13.182), + LongBenchTestData("samsum", 36.78, 14, 9.596), + LongBenchTestData("trec", 28.12, 11.8, 7.721), + LongBenchTestData("qasper", 21.68, 18.4, 12.706), ]) def test_unoptimized_generation_longbench(qwen2_converted_model, test_struct): seqs_per_request = 32 @@ -242,7 +245,7 @@ def test_unoptimized_generation_longbench(qwen2_converted_model, test_struct): scheduler_config.cache_eviction_config = LONGBENCH_CACHE_EVICTION_CONFIG model_cb_opt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}) - data = datasets.load_dataset('THUDM/LongBench', subset, split='test') + data = 
datasets.load_dataset('THUDM/LongBench', subset, split=f'test[:{seqs_per_request}]')

     with tqdm(total=len(data)) as progress_bar:
         batch = []

From 270e3c20c09af16ff4f6a4d9edbe391ceaaed610 Mon Sep 17 00:00:00 2001
From: Liubov Talamanova
Date: Thu, 19 Dec 2024 17:21:10 +0000
Subject: [PATCH 07/18] add missing requirement

---
 tests/python_tests/requirements.txt | 1 +
 tests/python_tests/test_kv_cache_eviction.py | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/tests/python_tests/requirements.txt b/tests/python_tests/requirements.txt
index 8d0f6d4e30..0db8f6fa97 100644
--- a/tests/python_tests/requirements.txt
+++ b/tests/python_tests/requirements.txt
@@ -35,3 +35,4 @@ sacremoses
 librosa
 soundfile
 datasets
+rouge
diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py
index 9a9fcd9ab8..9311ce4e6b 100644
--- a/tests/python_tests/test_kv_cache_eviction.py
+++ b/tests/python_tests/test_kv_cache_eviction.py
@@ -225,7 +225,7 @@ class LongBenchTestData:
     LongBenchTestData("trec", 28.12, 11.8, 7.721),
     LongBenchTestData("qasper", 21.68, 18.4, 12.706),
 ])
-def test_unoptimized_generation_longbench(qwen2_converted_model, test_struct):
+def test_optimized_generation_longbench(qwen2_converted_model, test_struct):
     seqs_per_request = 32
     num_kv_blocks = 1000
     scheduler_config = get_scheduler_config(num_kv_blocks)

From 2330f713b7b942ecdba446170c5e232ed829655e Mon Sep 17 00:00:00 2001
From: Liubov Talamanova
Date: Tue, 7 Jan 2025 15:24:06 +0000
Subject: [PATCH 08/18] Remove eos_token_id

---
 tests/python_tests/test_kv_cache_eviction.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py
index 9311ce4e6b..e9f87565a5 100644
--- a/tests/python_tests/test_kv_cache_eviction.py
+++ b/tests/python_tests/test_kv_cache_eviction.py
@@ -221,9 +221,9 @@ class LongBenchTestData:

 @pytest.mark.precommit
 @pytest.mark.parametrize("test_struct", [
-    LongBenchTestData("samsum", 36.78, 14, 9.596),
+    LongBenchTestData("samsum", 37.84, 11.8, 7.68),
     LongBenchTestData("trec", 28.12, 11.8, 7.721),
-    LongBenchTestData("qasper", 21.68, 18.4, 12.706),
+    LongBenchTestData("qasper", 15.88, 11.8, 6.483),
 ])
 def test_optimized_generation_longbench(qwen2_converted_model, test_struct):
     seqs_per_request = 32
@@ -233,12 +233,10 @@ def test_optimized_generation_longbench(qwen2_converted_model, test_struct):
     model_name = "/".join(models_path.parts[-2:])
     subset = test_struct.subset
     max_new_tokens = dataset2maxlen[subset]
-    tokenizer = qwen2_converted_model.tokenizer

     generation_config = GenerationConfig() # expecting default greedy sampling
     generation_config.num_return_sequences = 1
     generation_config.max_new_tokens = max_new_tokens
-    generation_config.eos_token_id = tokenizer.eos_token_id

     scheduler_config.use_cache_eviction = True
     if scheduler_config.use_cache_eviction:

From de978bb502f8374a5783ad9661f212017dec79e7 Mon Sep 17 00:00:00 2001
From: Liubov Talamanova
Date: Fri, 31 Jan 2025 17:08:58 +0000
Subject: [PATCH 09/18] Update test logic to relative diff

---
 tests/python_tests/test_kv_cache_eviction.py | 64 +++++++++++++-------
 tests/python_tests/utils_longbench.py | 4 +-
 2 files changed, 44 insertions(+), 24 deletions(-)

diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py
index e9f87565a5..7bb86f7ad3 100644
--- a/tests/python_tests/test_kv_cache_eviction.py
+++ b/tests/python_tests/test_kv_cache_eviction.py
@@ -214,22 
+214,31 @@ def qwen2_converted_model(tmp_path_factory): @dataclass class LongBenchTestData: subset: str - ref_score: float - max_cache_usage: float - avg_cache_usage: float + threshold: float + max_cache_usage_optimization_ratio: float + avg_cache_usage_optimization_ratio: float @pytest.mark.precommit @pytest.mark.parametrize("test_struct", [ - LongBenchTestData("samsum", 37.84, 11.8, 7.68), - LongBenchTestData("trec", 28.12, 11.8, 7.721), - LongBenchTestData("qasper", 15.88, 11.8, 6.483), + LongBenchTestData("samsum", 4, 1.6, 3.3), + LongBenchTestData("trec", 3.2, 2.0, 3.3), + LongBenchTestData("qasper", 5.8, 1.7, 3.6), ]) def test_optimized_generation_longbench(qwen2_converted_model, test_struct): seqs_per_request = 32 num_kv_blocks = 1000 - scheduler_config = get_scheduler_config(num_kv_blocks) models_path = qwen2_converted_model.models_path + scheduler_config = get_scheduler_config(num_kv_blocks) + + scheduler_config_opt = get_scheduler_config(num_kv_blocks) + scheduler_config_opt.use_cache_eviction = True + if scheduler_config_opt.use_cache_eviction: + scheduler_config_opt.cache_eviction_config = LONGBENCH_CACHE_EVICTION_CONFIG + + model_cb_noopt = ContinuousBatchingPipeline(models_path, scheduler_config, "CPU", {}, get_default_properties()) + model_cb_opt = ContinuousBatchingPipeline(models_path, scheduler_config_opt, "CPU", {}, get_default_properties()) + model_name = "/".join(models_path.parts[-2:]) subset = test_struct.subset max_new_tokens = dataset2maxlen[subset] @@ -238,38 +247,49 @@ def test_optimized_generation_longbench(qwen2_converted_model, test_struct): generation_config.num_return_sequences = 1 generation_config.max_new_tokens = max_new_tokens - scheduler_config.use_cache_eviction = True - if scheduler_config.use_cache_eviction: - scheduler_config.cache_eviction_config = LONGBENCH_CACHE_EVICTION_CONFIG - - model_cb_opt = ContinuousBatchingPipeline(models_path.absolute().as_posix(), scheduler_config, "CPU", {}) - data = datasets.load_dataset('THUDM/LongBench', subset, split=f'test[:{seqs_per_request}]') - + data = datasets.load_dataset('THUDM/LongBench', subset, split='test[:32]') with tqdm(total=len(data)) as progress_bar: batch = [] answers = [] + ref_answers = [] for p_idx, data_sample in enumerate(data): prompt = preprocess_prompt(data_sample, subset, model_name) progress_bar.update(1) batch.append(prompt) answers.append({"answers": data_sample["answers"], "all_classes": data_sample["all_classes"]}) + ref_answers.append({"answers": data_sample["answers"], "all_classes": data_sample["all_classes"]}) if len(batch) == seqs_per_request or p_idx == len(data) - 1: ans_batch = model_cb_opt.generate( batch, [generation_config] * len(batch) ) - for i, output in enumerate(ans_batch, start=p_idx-len(batch)+1): - pred = post_process_pred(output.m_generation_ids[0], subset, model_name) - answers[i]["pred"] = pred + ref_ans_batch = model_cb_noopt.generate( + batch, [generation_config] * len(batch) + ) + for i, (opt_output, ref_output) in enumerate(zip(ans_batch, ref_ans_batch), start=p_idx-len(batch)+1): + answers[i]["pred"] = post_process_pred(opt_output.m_generation_ids[0], subset, model_name) + ref_answers[i]["pred"] = post_process_pred(ref_output.m_generation_ids[0], subset, model_name) batch.clear() score = evaluate(answers, subset) print(f"Score: {score}") - pipeline_noopt_metrics = model_cb_opt.get_metrics() - print(f"Opt cache usage: max {pipeline_noopt_metrics.max_cache_usage:.3f}, avg {pipeline_noopt_metrics.avg_cache_usage:.3f}") + ref_score = evaluate(ref_answers, 
subset) + print(f"Reference score: {ref_score}") + pipeline_opt_metrics = model_cb_opt.get_metrics() + pipeline_noopt_metrics = model_cb_noopt.get_metrics() + + print(f"No-opt cache usage: max {pipeline_noopt_metrics.max_cache_usage:.3f}, avg {pipeline_noopt_metrics.avg_cache_usage:.3f}") + print(f"Opt cache usage: max {pipeline_opt_metrics.max_cache_usage:.3f}, avg {pipeline_opt_metrics.avg_cache_usage:.3f}") + max_optimization_ratio = (pipeline_noopt_metrics.max_cache_usage / pipeline_opt_metrics.max_cache_usage) + avg_optimization_ratio = (pipeline_noopt_metrics.avg_cache_usage / pipeline_opt_metrics.avg_cache_usage) + print(f"Optimization ratios: max {max_optimization_ratio:.3f}x, avg {avg_optimization_ratio:.3f}x") - assert abs(test_struct.ref_score - score) < 1 - assert abs(test_struct.max_cache_usage - pipeline_noopt_metrics.max_cache_usage) < 1 - assert abs(test_struct.avg_cache_usage - pipeline_noopt_metrics.avg_cache_usage) < 1 del model_cb_opt + del model_cb_noopt + import gc + gc.collect() + + assert ref_score - score <= test_struct.threshold + assert max_optimization_ratio >= test_struct.max_cache_usage_optimization_ratio + assert avg_optimization_ratio >= test_struct.avg_cache_usage_optimization_ratio diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index be349f6602..1e0bdcc44c 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -240,6 +240,6 @@ def post_process_pred(pred, subset, model_name): pred = pred[:pred.find("\nDialogue")] elif "Phi-3" in model_name and subset == "hotpotqa": pred = pred.lstrip('\n').split('\n')[0] - elif "Qwen" in model_name and subset == "qasper": - pred = pred.lstrip('\n').split('\n')[0] + elif subset in ["trec", "hotpotqa", "qasper"] and "Qwen" in model_name: + pred = pred[:pred.find("\nQuestion")] return pred From cd3dd04c6379cbd19142e5d93f0114f4a9249b85 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 3 Feb 2025 16:45:20 +0000 Subject: [PATCH 10/18] Exclude longbench from precommit --- tests/python_tests/test_kv_cache_eviction.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 7bb86f7ad3..af3b195bcd 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -219,7 +219,6 @@ class LongBenchTestData: avg_cache_usage_optimization_ratio: float -@pytest.mark.precommit @pytest.mark.parametrize("test_struct", [ LongBenchTestData("samsum", 4, 1.6, 3.3), LongBenchTestData("trec", 3.2, 2.0, 3.3), From 0e104832da76914e79a24ee4a414cd67719672ac Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 4 Feb 2025 11:10:03 +0000 Subject: [PATCH 11/18] Add license --- tests/python_tests/utils_longbench.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 1e0bdcc44c..d538ef0086 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -1,9 +1,18 @@ +# Copyright (C) 2023-2025 Intel Corporation +# SPDX-License-Identifier: Apache-2.0 +# +# This file includes utility functions copied from the LongBench repository: +# https://github.com/THUDM/LongBench +# +# Copyright (c) 2023 THU-KEG & Zhipu AI +# Licensed under the MIT License import re import string from collections import Counter from rouge import Rouge + def normalize_answer(s): """Lower text and remove punctuation, articles and extra 
whitespace.""" From 8a06845c224fd47c73da5430ee45944a4c3ff735 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 4 Feb 2025 15:46:37 +0000 Subject: [PATCH 12/18] Add test to nightly scope --- tests/python_tests/test_kv_cache_eviction.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index af3b195bcd..576dd37e82 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -219,6 +219,7 @@ class LongBenchTestData: avg_cache_usage_optimization_ratio: float +@pytest.mark.nightly @pytest.mark.parametrize("test_struct", [ LongBenchTestData("samsum", 4, 1.6, 3.3), LongBenchTestData("trec", 3.2, 2.0, 3.3), From 71a1df31e0a6623f18e3530ec8ba78176436e1f5 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 4 Feb 2025 18:25:23 +0000 Subject: [PATCH 13/18] Extend test to GPU device --- tests/python_tests/test_kv_cache_eviction.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 576dd37e82..6dd6c57511 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -220,14 +220,15 @@ class LongBenchTestData: @pytest.mark.nightly +@pytest.mark.parametrize("device", ["CPU", "GPU"]) @pytest.mark.parametrize("test_struct", [ LongBenchTestData("samsum", 4, 1.6, 3.3), LongBenchTestData("trec", 3.2, 2.0, 3.3), LongBenchTestData("qasper", 5.8, 1.7, 3.6), ]) -def test_optimized_generation_longbench(qwen2_converted_model, test_struct): +def test_optimized_generation_longbench(qwen2_converted_model, device, test_struct): seqs_per_request = 32 - num_kv_blocks = 1000 + num_kv_blocks = 1000 if device == "CPU" else 500 models_path = qwen2_converted_model.models_path scheduler_config = get_scheduler_config(num_kv_blocks) @@ -236,8 +237,8 @@ def test_optimized_generation_longbench(qwen2_converted_model, test_struct): if scheduler_config_opt.use_cache_eviction: scheduler_config_opt.cache_eviction_config = LONGBENCH_CACHE_EVICTION_CONFIG - model_cb_noopt = ContinuousBatchingPipeline(models_path, scheduler_config, "CPU", {}, get_default_properties()) - model_cb_opt = ContinuousBatchingPipeline(models_path, scheduler_config_opt, "CPU", {}, get_default_properties()) + model_cb_noopt = ContinuousBatchingPipeline(models_path, scheduler_config, device, {}, get_default_properties()) + model_cb_opt = ContinuousBatchingPipeline(models_path, scheduler_config_opt, device, {}, get_default_properties()) model_name = "/".join(models_path.parts[-2:]) subset = test_struct.subset From 506e03388abd409fc620d42d80a8529289bbb44a Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Fri, 15 Nov 2024 16:28:27 +0000 Subject: [PATCH 14/18] Add LongBench validation --- tests/python_tests/utils_longbench.py | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index d538ef0086..8947312384 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -12,7 +12,6 @@ from collections import Counter from rouge import Rouge - def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" From c8aea68bc2e35c7350038ddfebb8b9b744287ce8 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Mon, 18 Nov 2024 18:53:04 +0000 Subject: [PATCH 15/18] update model --- 
tests/python_tests/test_kv_cache_eviction.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index 6dd6c57511..f21a03775e 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -211,6 +211,7 @@ def qwen2_converted_model(tmp_path_factory): del model +<<<<<<< HEAD @dataclass class LongBenchTestData: subset: str @@ -230,6 +231,13 @@ def test_optimized_generation_longbench(qwen2_converted_model, device, test_stru seqs_per_request = 32 num_kv_blocks = 1000 if device == "CPU" else 500 models_path = qwen2_converted_model.models_path +======= +@pytest.mark.precommit +@pytest.mark.parametrize("subset", ["samsum", "qmsum", "trec", "qasper", "hotpotqa", "repobench-p"]) +def test_unoptimized_generation_longbench(phi3_converted_model, subset): + seqs_per_request = 32 + num_kv_blocks = 1000 +>>>>>>> 1cbbbf42... update model scheduler_config = get_scheduler_config(num_kv_blocks) scheduler_config_opt = get_scheduler_config(num_kv_blocks) From 6e163443f8d334427acde3e310b7967b292d58da Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Wed, 18 Dec 2024 20:31:35 +0000 Subject: [PATCH 16/18] change model to Qwen2 --- tests/python_tests/test_kv_cache_eviction.py | 8 -------- 1 file changed, 8 deletions(-) diff --git a/tests/python_tests/test_kv_cache_eviction.py b/tests/python_tests/test_kv_cache_eviction.py index f21a03775e..6dd6c57511 100644 --- a/tests/python_tests/test_kv_cache_eviction.py +++ b/tests/python_tests/test_kv_cache_eviction.py @@ -211,7 +211,6 @@ def qwen2_converted_model(tmp_path_factory): del model -<<<<<<< HEAD @dataclass class LongBenchTestData: subset: str @@ -231,13 +230,6 @@ def test_optimized_generation_longbench(qwen2_converted_model, device, test_stru seqs_per_request = 32 num_kv_blocks = 1000 if device == "CPU" else 500 models_path = qwen2_converted_model.models_path -======= -@pytest.mark.precommit -@pytest.mark.parametrize("subset", ["samsum", "qmsum", "trec", "qasper", "hotpotqa", "repobench-p"]) -def test_unoptimized_generation_longbench(phi3_converted_model, subset): - seqs_per_request = 32 - num_kv_blocks = 1000 ->>>>>>> 1cbbbf42... update model scheduler_config = get_scheduler_config(num_kv_blocks) scheduler_config_opt = get_scheduler_config(num_kv_blocks) From 3a013c4ba4b736e016421c9b3c4aca1d404a6eb0 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 4 Feb 2025 11:10:03 +0000 Subject: [PATCH 17/18] Add license --- tests/python_tests/utils_longbench.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/python_tests/utils_longbench.py b/tests/python_tests/utils_longbench.py index 8947312384..d538ef0086 100644 --- a/tests/python_tests/utils_longbench.py +++ b/tests/python_tests/utils_longbench.py @@ -12,6 +12,7 @@ from collections import Counter from rouge import Rouge + def normalize_answer(s): """Lower text and remove punctuation, articles and extra whitespace.""" From 85b5e32952b86167796f481f074d5b9043c654a6 Mon Sep 17 00:00:00 2001 From: Liubov Talamanova Date: Tue, 4 Feb 2025 18:25:23 +0000 Subject: [PATCH 18/18] Extend test to GPU device
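
Note, for review convenience rather than as part of any patch above: the pieces these commits converge on (preprocess_prompt -> batched ContinuousBatchingPipeline.generate -> post_process_pred -> evaluate) compose into a short standalone loop. The sketch below mirrors the final state of the test under a few assumptions: the model has already been exported next to its openvino tokenizer/detokenizer XMLs (the models_path below is hypothetical), openvino_genai exports the names used here, and the scheduler is configured directly instead of through the test's get_scheduler_config() helper; the unoptimized reference pipeline and the per-request batching bookkeeping of the real test are omitted.

import datasets

from openvino_genai import (AggregationMode, CacheEvictionConfig,
                            ContinuousBatchingPipeline, GenerationConfig,
                            SchedulerConfig)

from utils_longbench import dataset2maxlen, evaluate, post_process_pred, preprocess_prompt

model_name = "Qwen/Qwen2-0.5B-Instruct"
models_path = "./cacheopt_test_models/Qwen/Qwen2-0.5B-Instruct"  # hypothetical export location
subset = "samsum"

# Scheduler with cache eviction enabled, mirroring LONGBENCH_CACHE_EVICTION_CONFIG.
scheduler_config = SchedulerConfig()
scheduler_config.num_kv_blocks = 1000
scheduler_config.use_cache_eviction = True
scheduler_config.cache_eviction_config = CacheEvictionConfig(
    start_size=32, recent_size=128, max_cache_size=672,
    aggregation_mode=AggregationMode.NORM_SUM)

# Default greedy sampling; the per-subset output budget comes from dataset2maxlen.
generation_config = GenerationConfig()
generation_config.num_return_sequences = 1
generation_config.max_new_tokens = dataset2maxlen[subset]

pipe = ContinuousBatchingPipeline(models_path, scheduler_config, "CPU", {})

# Same 32-sample slice the test settles on; prompts are built per subset and model.
data = datasets.load_dataset("THUDM/LongBench", subset, split="test[:32]")
prompts = [preprocess_prompt(sample, subset, model_name) for sample in data]
outputs = pipe.generate(prompts, [generation_config] * len(prompts))

answers = []
for sample, output in zip(data, outputs):
    answers.append({
        "answers": sample["answers"],
        "all_classes": sample["all_classes"],
        "pred": post_process_pred(output.m_generation_ids[0], subset, model_name),
    })

print(f"{subset} score: {evaluate(answers, subset)}")
metrics = pipe.get_metrics()
print(f"Cache usage: max {metrics.max_cache_usage:.3f}, avg {metrics.avg_cache_usage:.3f}")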