From 5b02adeb0f895c4220216fb186feaedd4fe4ed8b Mon Sep 17 00:00:00 2001
From: Nirmal Savinda
Date: Sun, 1 Dec 2024 16:16:15 +0530
Subject: [PATCH] feat: enhance question generation API with structured
 responses for MCQ and essay questions (#41)

---
 app/data/responseHandle.py      | 48 ++++++++++++++++++++++++++++++---
 app/routers/questionGenerate.py | 37 ++++++++++++++++---------
 app/services/generate.py        | 24 ++++++++++++++---
 3 files changed, 89 insertions(+), 20 deletions(-)

diff --git a/app/data/responseHandle.py b/app/data/responseHandle.py
index 6f20a10..8efee92 100644
--- a/app/data/responseHandle.py
+++ b/app/data/responseHandle.py
@@ -45,11 +45,26 @@ def handle_mcq(response):
         cleaned_options = [re.sub(r'^[A-Za-z]\)\s*|^[A-Za-z]\.\s*', '', option).strip() for option in options]

         # Return as dictionary
-        return {
-            "question": question,
-            "options": cleaned_options,
-            "correct_answer": correct_answer_index
+        # return {
+        #     "question": question,
+        #     "options": cleaned_options,
+        #     "correct_answer": correct_answer_index
+        # }
+        print(correct_answer_index)
+        response_options = []
+        for index, option in enumerate(cleaned_options):
+            response_options.append({
+                "optionText": option,
+                "marks": 1 if index == correct_answer_index else 0,
+                "correct": index == correct_answer_index
+            })
+
+        response = {
+            "questionText": question,
+            "difficultyLevel": "EASY",  # Or determined dynamically based on your criteria
+            "options": response_options
         }
+        return response

     except Exception as e:
         print(f"Error: {e}")
@@ -69,3 +84,28 @@

 # parsed_data = handle_mcq(response)
 # print(parsed_data)
+
+
+def handle_essay(original_question):
+    # Original data format
+    question = original_question["question"]
+    answers = original_question["answers"]
+
+    # New data structure
+    new_question = {
+        "questionText": question,
+        "difficultyLevel": "MEDIUM",  # Static value, change as needed
+        "coveringPoints": []
+    }
+
+    # Convert each answer to a covering point
+    for answer in answers:
+        new_question["coveringPoints"].append({
+            "coveringPointText": answer,
+            "marks": 5  # Assuming each point is worth 5 marks
+        })
+
+    return new_question
+
+
+
diff --git a/app/routers/questionGenerate.py b/app/routers/questionGenerate.py
index 1589848..5d342ae 100644
--- a/app/routers/questionGenerate.py
+++ b/app/routers/questionGenerate.py
@@ -14,6 +14,16 @@ class EssayQuestionRequest(BaseModel):
     text: str
     examid: str

+class MCQListRequest(BaseModel):
+    text: str
+    examid: str
+    choices: int = 4
+    num_questions: int = 1
+
+class EssayListRequest(BaseModel):
+    text: str
+    examid: str
+    num_questions: int = 1

 router = APIRouter()

@@ -42,33 +52,34 @@ async def generate_essay_question(request: EssayQuestionRequest) -> dict:
         logger.error(f"An error occurred while generating the essay question: {str(e)}")
         raise HTTPException(status_code=500, detail=f"An error occurred while generating the essay question: {str(e)}")

-@router.post("/generate-questions/mcq/", response_model=list[dict])
-async def generate_mcq_questions(text: str = Query(..., description="The text to generate multiple choice questions for"),
-                                 examid: str = Query(..., description="The ID of the exam related to the text"),
-                                 choices: int = Query(4, description="The number of choices for the multiple choice questions"),
-                                 num_questions: int = Query(1, description="The number of questions to generate")
-                                 ) -> list[dict]:
+@router.post("/generate-questions/mcq/", response_model=dict)
+async def generate_mcq_questions(request: MCQListRequest) -> dict:
     """Endpoint to generate multiple choice questions for a given text using OpenAI's model."""
     try:
         # Assuming 'prompt' function is synchronous; if it's async, use 'await prompt(text, examid)'
-        question_responses = generate_list(text, examid, question_type='mcq', choices=choices, num_questions=num_questions)
+        question_responses = generate_list(request.text, request.examid, question_type='mcq', choices=request.choices, num_questions=request.num_questions)
+
+        logger.info(f"Generated multiple choice questions: {question_responses}")

         return question_responses
+        # return {
+        #     "success": True,
+        #     "questions": question_responses
+        # }
+
     except Exception as e:
         # Catching a broad exception is not best practice; adjust according to specific exceptions expected from 'prompt'
         logger.error(f"An error occurred while generating the multiple choice questions: {str(e)}")
         raise HTTPException(status_code=500, detail=f"An error occurred while generating the multiple choice questions: {str(e)}")


-@router.post("/generate-questions/essay/", response_model=list[dict])
-async def generate_essay_questions(text: str = Query(..., description="The text to generate essay questions for"),
-                                   examid: str = Query(..., description="The ID of the exam related to the text"),
-                                   num_questions: int = Query(1, description="The number of questions to generate")
-                                   ) -> list[dict]:
+@router.post("/generate-questions/essay/", response_model=dict)
+async def generate_essay_questions(request: EssayListRequest) -> dict:
     """Endpoint to generate essay questions for a given text using OpenAI's model."""
     try:
         # Assuming 'prompt' function is synchronous; if it's async, use 'await prompt(text, examid, question_type='essay')
-        question_responses = generate_list(text, examid, question_type='essay', num_questions=num_questions)
+        # question_responses = generate_list(text, examid, question_type='essay', num_questions=num_questions)
+        question_responses = generate_list(request.text, request.examid, question_type='essay', num_questions=request.num_questions)
         logger.info(f"Generated essay questions: {question_responses}")
         return question_responses
     except Exception as e:
diff --git a/app/services/generate.py b/app/services/generate.py
index f8fbc4e..fc41ba9 100644
--- a/app/services/generate.py
+++ b/app/services/generate.py
@@ -140,16 +140,34 @@ def generate_list(self,text: str, question_type: str = "mcq", choices: int = 4,
             if question_type == "mcq":
                 for i in range(len(result)):
                     formatted_result.append(responseHandle.handle_mcq(result[i]))
-                return formatted_result
+                print(formatted_result)
+                # return formatted_result
+
+                return {
+                    "success": True,
+                    "questions": formatted_result
+                }
+
             elif question_type == "essay":
-                return result
+
+                for i in range(len(result)):
+                    formatted_result.append(responseHandle.handle_essay(result[i]))
+
+                # return result
+                return {
+                    "success": True,
+                    "questions": formatted_result
+                }

         except Exception as e:
             log.logger.error(f"Error in generating list: {e}")
             print(formatted_result)
-            return []
+            return {
+                "success": False,
+                "questions": []
+            }
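
Illustrative client sketch (not part of the patch): after this change the MCQ and essay routes accept a JSON body matching MCQListRequest / EssayListRequest and return the {"success", "questions"} structure built in generate_list. The base URL, port, exam id, and sample text below are assumptions for the example only.

# Illustrative usage sketch; base URL, exam id, and sample text are assumed values.
import requests

BASE_URL = "http://localhost:8000"  # assumed; adjust to the actual deployment and router prefix

payload = {
    "text": "Photosynthesis converts light energy into chemical energy.",
    "examid": "exam-123",  # hypothetical exam id
    "choices": 4,
    "num_questions": 2,
}

resp = requests.post(f"{BASE_URL}/generate-questions/mcq/", json=payload, timeout=60)
resp.raise_for_status()
data = resp.json()

# Expected shape per generate_list: {"success": bool, "questions": [...]}
for q in data.get("questions", []):
    print(q["questionText"], q["difficultyLevel"])
    for opt in q["options"]:
        marker = "*" if opt["correct"] else " "
        print(f"  [{marker}] {opt['optionText']} ({opt['marks']} marks)")

# The essay route works the same way without "choices"; each returned question
# carries "coveringPoints" with "coveringPointText" and "marks" (see handle_essay).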