Commit

fixed linting errors
babu-namburi committed Aug 2, 2024
1 parent ad7ac8e commit 718b8f2
Showing 1 changed file with 9 additions and 8 deletions.
@@ -293,6 +293,7 @@ def process_request(idx: str, enable_cot: bool, data: dict, url: str, endpoint_k
             "text": None,
             "exception": e,
         }
+
 def process_conversational_request(idx: str, data: dict, url: str, endpoint_key: str):
     """Process a single conversational request.
@@ -307,9 +308,9 @@ def process_conversational_request(idx: str, data: dict, url: str, endpoint_key:
     """
     try:
         logger.info(f"request_data: {repr(data)}")
-        # Basic validation for the input data
+        # Basic validation for the input data
         messages = data.pop("messages", [])
-        if not messages: # empty messages
+        if not messages:  # empty messages
             return {
                 "idx": idx,
                 "status_code": None,
@@ -319,20 +320,21 @@ def process_conversational_request(idx: str, data: dict, url: str, endpoint_key:
         first_message = messages[0]
         if first_message['role'] != 'system':
             logger.warning(f"First message should be system, but got {first_message['role']}")
-            #TODO: handle this case
+            # TODO: handle this case
         for message in messages[1:]:
             role = message['role']
             if role not in ('system', 'user'):
                 logger.warning(f"role should be system or user, but got {role}")
-                #TODO: handle this case
+                # TODO: handle this case
         synthetic_responses = []
         for message in messages:
             role = message['role']
             if role in ('system', 'user'):
                 synthetic_responses.append(message)
             else:
                 # replace the assistant content from the model
-                response: Response = _invoke_endpoint(url=url, key=endpoint_key, data={"messages": synthetic_responses} | data)
+                response: Response = _invoke_endpoint(url=url, key=endpoint_key,
+                                                      data={"messages": synthetic_responses} | data)
                 if response.status_code != 200:
                     break
                 logger.info(f"response_text: {response.text}")
@@ -344,7 +346,7 @@ def process_conversational_request(idx: str, data: dict, url: str, endpoint_key:
                     # response content should be structured as below for a successful vllm response
                     else response_data['choices'][0]["message"]["content"].strip()
                 )
-                synthetic_responses.append({'role': 'assistant', 'content': prediction_result })
+                synthetic_responses.append({'role': 'assistant', 'content': prediction_result})
         return {
             "idx": idx,
             "status_code": response.status_code,
@@ -530,9 +532,8 @@ def batch_process_data(input_file_path: Path, output_file_path: Path, batch_size
     if success_ratio < min_endpoint_success_ratio:
         msg = f"Success ratio for dataset {input_file_path}: {success_ratio} < {min_endpoint_success_ratio}."
         raise Exception(msg)
-
     logger.info("Processing train file")
-    #TODO: conditionally the batch_process_conversation_data based on the data_generation_task_type
+    # TODO: conditionally the batch_process_conversation_data based on the data_generation_task_type
     batch_process_data(train_file_path, generated_train_file_path, request_batch_size)
     # batch_process_conversation_data(train_file_path, generated_train_file_path, request_batch_size)
     logger.info("Data generated and saved for train file")
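A note on the split _invoke_endpoint call in the third hunk: {"messages": synthetic_responses} | data uses the dict union operator (PEP 584, Python 3.9+), where keys from the right-hand operand win on conflict. A minimal sketch of that behavior, with illustrative values that are not part of this commit:

    # Dict union: the right-hand operand wins on key conflicts. Because the
    # script pops "messages" out of `data` beforehand, the synthetic history
    # cannot be clobbered by a leftover "messages" key in `data`.
    base = {"messages": [{"role": "user", "content": "hi"}]}
    extra = {"temperature": 0.2, "max_tokens": 128}  # illustrative params
    payload = base | extra
    print(payload["temperature"])  # 0.2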
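The "response content should be structured as below" comment in the fourth hunk refers to the OpenAI-compatible chat-completions shape that vLLM serves. An abridged sketch, reduced to just the fields the script reads; the literal values are illustrative:

    # Assumed shape of a successful (HTTP 200) vLLM response, abridged to the
    # fields accessed by the script.
    response_data = {
        "choices": [
            {"message": {"role": "assistant", "content": " generated text "}}
        ]
    }
    prediction_result = response_data['choices'][0]["message"]["content"].strip()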
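The TODO left in the last hunk points at dispatching between batch_process_data and batch_process_conversation_data by task type. One possible sketch, assuming data_generation_task_type holds a value such as "CONVERSATION" (both the variable and the value are assumptions, not defined in this commit):

    # Hypothetical dispatch for the TODO above; "CONVERSATION" is an assumed
    # task-type value, not part of this commit.
    if data_generation_task_type == "CONVERSATION":
        batch_process_conversation_data(train_file_path, generated_train_file_path, request_batch_size)
    else:
        batch_process_data(train_file_path, generated_train_file_path, request_batch_size)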
