vLLM ITL fix (#667)
* vLLM ITL fix

* Drop old comment
IzzyPutterman authored May 20, 2024
1 parent cc53c72 commit 76ac69a
Showing 1 changed file with 8 additions and 12 deletions.
20 changes: 8 additions & 12 deletions src/c++/perf_analyzer/genai-perf/genai_perf/llm_metrics.py
@@ -627,18 +627,14 @@ def _preprocess_response(
                     res_outputs[i] = {"response": json.dumps(merged_response)}
 
             # Remove responses without any content
-            # These are only observed to happen at the start or end
-            while res_outputs and self._is_openai_empty_response(
-                res_outputs[0]["response"]
-            ):
-                res_timestamps.pop(0)
-                res_outputs.pop(0)
-
-            while res_outputs and self._is_openai_empty_response(
-                res_outputs[-1]["response"]
-            ):
-                res_timestamps.pop()
-                res_outputs.pop()
+            indices_to_remove = []
+            for idx, out in enumerate(res_outputs):
+                if self._is_openai_empty_response(out["response"]):
+                    indices_to_remove.append(idx)
+            indices_to_remove.sort(reverse=True)
+            for index in indices_to_remove:
+                res_timestamps.pop(index)
+                res_outputs.pop(index)
 
     def _tokenize_request_inputs(self, req_inputs: dict) -> List[int]:
         """Deserialize the request input and return tokenized inputs."""
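As the dropped comment notes, empty chunks had previously only been observed at the start or end of a stream; the new code removes them wherever they appear. The approach is the usual way to delete items from parallel lists by index: collect the offending indices first, then pop them in descending order so positions not yet removed stay valid. A minimal standalone sketch of the same collect-then-pop-in-reverse pattern (the helper names and payload shape below are illustrative assumptions, not code from this repository):

import json
from typing import Dict, List


def is_empty_response(raw_response: str) -> bool:
    # Illustrative stand-in for the parser's _is_openai_empty_response:
    # treat a streamed chunk with no delta content as empty.
    try:
        payload = json.loads(raw_response)
        return not payload["choices"][0]["delta"].get("content")
    except (json.JSONDecodeError, KeyError, IndexError):
        return True


def drop_empty_responses(
    res_timestamps: List[int], res_outputs: List[Dict[str, str]]
) -> None:
    # Collect the indices of empty responses wherever they occur, then pop
    # them in descending order so earlier indices remain valid and the two
    # parallel lists stay aligned element for element.
    indices_to_remove = [
        idx
        for idx, out in enumerate(res_outputs)
        if is_empty_response(out["response"])
    ]
    for index in sorted(indices_to_remove, reverse=True):
        res_timestamps.pop(index)
        res_outputs.pop(index)


# Usage: an empty chunk in the middle of the stream is dropped too,
# not only chunks at the start or end.
timestamps = [100, 110, 120, 130]
outputs = [
    {"response": json.dumps({"choices": [{"delta": {"content": "Hello"}}]})},
    {"response": json.dumps({"choices": [{"delta": {}}]})},
    {"response": json.dumps({"choices": [{"delta": {"content": " world"}}]})},
    {"response": json.dumps({"choices": [{"delta": {"content": ""}}]})},
]
drop_empty_responses(timestamps, outputs)
assert timestamps == [100, 120]
assert len(outputs) == 2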
