Fix finite lorax generation in cb mode
Signed-off-by: Jou-An Chen <[email protected]>
quic-jouachen committed Jan 9, 2025
1 parent 41cf878 · commit c66e9aa
Showing 2 changed files with 32 additions and 3 deletions.
QEfficient/generation/text_generation_inference.py — 3 additions, 1 deletion

@@ -341,7 +341,9 @@ def cloud_ai_100_exec_kv(
             perf_metrics=PerfMetrics(prefill_time, decode_perf, total_perf, total_time),
         )
     else:
-        exec_info = generate_text.generate(prompt=prompt, generation_len=generation_len)
+        exec_info = generate_text.generate(
+            prompt=prompt, generation_len=generation_len, prompt_to_lora_id_mapping=prompt_to_lora_id_mapping
+        )
 
     print_latency_stats_kv(prompt, exec_info=exec_info, automation=automation)
     return exec_info
tests/peft/lora/test_lora_model.py — 29 additions, 2 deletions

@@ -195,10 +195,12 @@ def test_auto_lora_model_for_causal_lm_load_unload_adapter(base_model_name, adap
     assert qeff_model.unload_adapter("adapter_0")  # valid unload
 
 
-# test the export, export caching, compile, generate workflow
+# test the export, export caching, compile, generate workflow in noncb mode
 @pytest.mark.on_qaic
 @pytest.mark.parametrize("base_model_name,adapter_id_0,adapter_id_1", model_samples[:1])
-def test_auto_lora_model_for_causal_lm_export_compile_generate(base_model_name, adapter_id_0, adapter_id_1, tmp_path):
+def test_auto_lora_model_for_causal_lm_noncb_export_compile_generate(
+    base_model_name, adapter_id_0, adapter_id_1, tmp_path
+):
     qeff_model = QEffAutoLoraModelForCausalLM.from_pretrained(base_model_name, num_hidden_layers=1)
 
     qeff_model.load_adapter(adapter_id_0, "adapter_0")
@@ -232,3 +232,28 @@ def test_auto_lora_model_for_causal_lm_export_compile_generate(base_model_name,
         device_id=[0],
         prompt_to_adapter_mapping=["adapter_0", "adapter_1", "adapter_0", "base"],
     )
+
+
+# test the compile and generate workflow in cb mode
+@pytest.mark.on_qaic
+@pytest.mark.parametrize("base_model_name,adapter_id_0,adapter_id_1", model_samples[:1])
+def test_auto_lora_model_for_causal_lm_cb_compile_generate(base_model_name, adapter_id_0, adapter_id_1, tmp_path):
+    qeff_model = QEffAutoLoraModelForCausalLM.from_pretrained(
+        base_model_name, continuous_batching=True, num_hidden_layers=1
+    )
+
+    qeff_model.load_adapter(adapter_id_0, "adapter_0")
+    qeff_model.load_adapter(adapter_id_1, "adapter_1")
+
+    # test compile
+    qeff_model.compile(prefill_seq_len=32, ctx_len=64, full_batch_size=2)
+    assert Path(qeff_model.qpc_path).is_dir()
+
+    # test generate
+    prompts = ["hello!", "hi", "hello, my name is", "hey"]
+    qeff_model.generate(
+        tokenizer=load_hf_tokenizer(pretrained_model_name_or_path=base_model_name),
+        prompts=prompts,
+        device_id=[0],
+        prompt_to_adapter_mapping=["adapter_0", "adapter_1", "adapter_0", "base"],
+    )
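The new test compiles a two-slot continuous-batching QPC and routes four prompts across two adapters plus the base model. For reference, here is a condensed usage sketch built only from calls that appear in this diff; the import paths are guessed from the repository layout, and "base_model" / "adapter_repo" are hypothetical placeholders, not values from the commit:

# Condensed sketch of LoRA generation in continuous-batching mode.
# Import paths are assumed from the test's location; model and adapter
# identifiers are hypothetical placeholders.
from QEfficient.peft.lora import QEffAutoLoraModelForCausalLM
from QEfficient.utils import load_hf_tokenizer

qeff_model = QEffAutoLoraModelForCausalLM.from_pretrained("base_model", continuous_batching=True)
qeff_model.load_adapter("adapter_repo", "adapter_0")  # register one LoRA adapter

# Compile for two concurrent sequences (full_batch_size=2).
qeff_model.compile(prefill_seq_len=32, ctx_len=64, full_batch_size=2)

# "base" routes a prompt to the unadapted base weights.
qeff_model.generate(
    tokenizer=load_hf_tokenizer(pretrained_model_name_or_path="base_model"),
    prompts=["hello!", "hi"],
    device_id=[0],
    prompt_to_adapter_mapping=["adapter_0", "base"],
)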
