
Commit

Dev (#7)
* feat:question template added

* feat:mcq generate

* docs:readme

* ci:Create pylint.yml

* Update pylint.yml

* refactor:remove unused imports

* refactor:lint fix

* fix:response json format
nsavinda authored Jun 13, 2024
1 parent a980699 commit 7652bfb
Showing 10 changed files with 250 additions and 70 deletions.
24 changes: 24 additions & 0 deletions .github/workflows/pylint.yml
@@ -0,0 +1,24 @@
name: Pylint

on: [push]

jobs:
  build:
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ["3.12"]
    steps:
    - uses: actions/checkout@v4
    - name: Set up Python ${{ matrix.python-version }}
      uses: actions/setup-python@v3
      with:
        python-version: ${{ matrix.python-version }}
    - name: Install dependencies
      run: |
        python -m pip install --upgrade pip
        pip install -r requirements.txt
        pip install pylint
    - name: Analysing the code with pylint
      run: |
        pylint $(git ls-files '*.py')
17 changes: 17 additions & 0 deletions README.md
@@ -0,0 +1,17 @@
# Testify - AI Assistant

## Setup
1. Clone the repository
2. Install the required packages using `pip install -r requirements.txt`

```bash
pip install -r requirements.txt
```

3. Run the app using `uvicorn app.main:app --reload --port 7401`

```bash
uvicorn app.main:app --reload --port 7401
```

4. Open the API documentation at `http://localhost:7401/docs`
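
Once the server is running on port 7401, a quick smoke test against the greeting endpoint introduced in this commit might look like the sketch below (adjust host and port to your setup):

```python
# Smoke test for the renamed root endpoint (GET /api/assistant).
# Assumes the app is running locally on port 7401, as in the README example.
import requests

resp = requests.get("http://localhost:7401/api/assistant", timeout=10)
resp.raise_for_status()
print(resp.json())  # expected: {"message": "Welcome to the Testify AI Assistant!"}
```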
11 changes: 4 additions & 7 deletions app/__init__.py
@@ -1,14 +1,11 @@
# app/__init__.py
from .main import app

from fastapi import FastAPI
from .routers.upload import router as upload_router
from .routers.questionGenerate import router as questionGenerate_router


from .main import app

# Include routers with appropriate API version prefix
app.include_router(upload_router, prefix="/api/v1")
app.include_router(questionGenerate_router, prefix="/api/v1")


# app/routers/upload.py


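With both routers included under the `/api/v1` prefix, the upload and question-generation routes resolve to `/api/v1/upload-pdf/` and `/api/v1/generate-question/`. A minimal sketch to confirm the mounted paths (assuming the package imports cleanly and the required API-key environment variables are set):

```python
# List the registered routes to verify the /api/v1 prefix is applied.
from app import app  # importing the package runs app/__init__.py and registers the routers

for route in app.routes:
    print(getattr(route, "path", route))
# Expected to include /api/assistant, /api/v1/upload-pdf/ and /api/v1/generate-question/
```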
46 changes: 46 additions & 0 deletions app/data/questionPrompts.py
@@ -0,0 +1,46 @@
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.output_parsers import JsonOutputParser



# Define a Pydantic model for a standard question and answer format.
class QuestionParser(BaseModel):
    question: str = Field(description="The question generated from the text.")
    answer: str = Field(description="The answer to the generated question.")

# Define a Pydantic model for multiple-choice questions.
class Answer(BaseModel):
    char: str = Field(description="The character representing the answer, e.g., 'A', 'B', 'C', 'D'.")
    text: str = Field(description="The text of the answer.")

class MultipleChoiceQuestionParser(BaseModel):
    question: str = Field(description="The multiple choice question generated from the text.")
    options: list[Answer] = Field(description="The options for the multiple choice question, should be a list of Answer objects.")
    answer: str = Field(description="The character representing the correct answer, e.g., 'A', 'B', 'C', 'D'.")

# Function to generate a prompt and corresponding parser for creating multiple-choice questions.
def mcq_prompt(options: int) -> tuple[str, JsonOutputParser]:
    """
    Generates a prompt for creating multiple-choice questions along with a JSON output parser.
    Args:
        options (int): The number of options for the multiple-choice question.
    Returns:
        tuple[str, JsonOutputParser]: A tuple containing the prompt and the JSON output parser.
    """
    prompt_text = f"Generate a multiple choice question with {options} options and indicate the correct answer."
    parser = JsonOutputParser(pydantic_object=MultipleChoiceQuestionParser)
    return (prompt_text, parser)

# Function to generate a prompt and corresponding parser for creating essay-type questions.
def essay_prompt() -> tuple[str, JsonOutputParser]:
    """
    Generates a prompt for creating essay questions along with a JSON output parser.
    Returns:
        tuple[str, JsonOutputParser]: A tuple containing the prompt and the JSON output parser.
    """
    prompt_text = "Generate an essay question."
    parser = JsonOutputParser(pydantic_object=QuestionParser)
    return (prompt_text, parser)
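
The helpers in app/data/questionPrompts.py return a plain prompt string together with a parser that knows the target JSON schema. A small usage sketch (hypothetical, outside any request handler):

```python
# Inspect what mcq_prompt() and essay_prompt() hand back to the calling service.
from app.data.questionPrompts import mcq_prompt, essay_prompt

prompt_text, parser = mcq_prompt(4)
print(prompt_text)
# -> "Generate a multiple choice question with 4 options and indicate the correct answer."
print(parser.get_format_instructions())
# -> JSON format instructions derived from MultipleChoiceQuestionParser

essay_text, essay_parser = essay_prompt()
print(essay_text)  # -> "Generate an essay question."
```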
12 changes: 7 additions & 5 deletions app/main.py
@@ -1,12 +1,14 @@
from fastapi import FastAPI

# Initialize the FastAPI app with a custom title
# Create an instance of the FastAPI application with a custom title
app = FastAPI(title="Testify AI")

@app.get("/", response_model=dict)
@app.get("/api/assistant", response_model=dict)
async def read_root() -> dict:
"""
Root GET endpoint to return a simple greeting.
Returns a JSON object with a greeting message.
Root GET endpoint that provides a simple greeting message.
Returns:
dict: A dictionary containing a greeting message.
"""
return {"Hello": "World"}
return {"message": "Welcome to the Testify AI Assistant!"}
18 changes: 10 additions & 8 deletions app/routers/questionGenerate.py
@@ -1,14 +1,16 @@
from fastapi import APIRouter, Query, HTTPException
from typing import List

from fastapi import APIRouter, HTTPException, Query
from ..services.prompt import prompt


router = APIRouter()

@router.get("/generate-question/", response_model=str)
@router.get("/generate-question/", response_model=dict)
async def generate_question(text: str = Query(..., description="The text to generate a question for"),
examid: str = Query(..., description="The ID of the exam related to the text")) -> str:
examid: str = Query(..., description="The ID of the exam related to the text")) -> dict:
"""Endpoint to generate a question for a given text using OpenAI's model."""

return prompt(text, examid)
try:
# Assuming 'prompt' function is synchronous; if it's async, use 'await prompt(text, examid)'
question_response = prompt(text, examid)
return question_response
except Exception as e:
# Catching a broad exception is not best practice; adjust according to specific exceptions expected from 'prompt'
raise HTTPException(status_code=500, detail=f"An error occurred while generating the question: {str(e)}")
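
With the router mounted under `/api/v1`, the updated endpoint can be exercised as in the sketch below; `text` and `examid` are placeholder values, and the exam namespace is assumed to have been populated via the upload endpoint first:

```python
# Call the question-generation endpoint added in this commit.
import requests

resp = requests.get(
    "http://localhost:7401/api/v1/generate-question/",
    params={"text": "photosynthesis", "examid": "exam-123"},  # placeholder values
    timeout=60,
)
resp.raise_for_status()
print(resp.text)  # JSON payload describing the generated question
```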
14 changes: 8 additions & 6 deletions app/routers/upload.py
@@ -6,15 +6,17 @@
router = APIRouter()

@router.post("/upload-pdf/", status_code=201)
async def upload_pdf(file: UploadFile = File(...), examid:str = Query(..., description="The ID of the exam related to the uploaded PDF") ) -> dict:
async def upload_pdf(file: UploadFile = File(...), examid: str = Query(..., description="The ID of the exam related to the uploaded PDF")) -> dict:
"""Endpoint to upload a PDF and upsert its contents into a Pinecone vector store."""

if file.content_type != 'application/pdf':
raise HTTPException(status_code=415, detail="Unsupported file type. Please upload a PDF.")

# Call the upsert function from the imported service
upsert(file, examid)
# Assuming 'upsert' is an async function; if not, consider wrapping with 'await'
# or adjust the function to be a regular call if it's designed to be synchronous
success = upsert(file, examid)

if not success:
raise HTTPException(status_code=500, detail="Failed to process the PDF file.")

# return {"filename": file.filename}
Response(status_code=201)
# Directly return a message if upsert is successful; 'Response(status_code=201)' is redundant with `status_code=201` in the decorator
return {"message": "PDF uploaded successfully."}
21 changes: 9 additions & 12 deletions app/services/pinecone_upsert.py
@@ -1,19 +1,15 @@
from typing import Any, BinaryIO
from typing import BinaryIO
import os
import dotenv
import pdfplumber
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone, ServerlessSpec

dotenv.load_dotenv()

pinecone = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))

def generate_embeddings_from_pdf(pdf_file: BinaryIO) -> str:
"""Generates embeddings from a PDF file using OpenAI's model."""

print(pdf_file.filename)
def generate_text_from_pdf(pdf_file: BinaryIO) -> str:
"""Generates and returns text extracted from a PDF file."""
print(f"Processing file: {pdf_file.filename}")
full_text = ""
with pdfplumber.open(pdf_file.file) as pdf:
for page in pdf.pages:
@@ -23,13 +19,11 @@ def generate_embeddings_from_pdf(pdf_file: BinaryIO) -> str:

if not full_text.strip():
raise ValueError("No text found in the PDF.")

return full_text

def upsert(pdf_file: BinaryIO, examid: str) -> str:
"""Upserts PDF text into a Pinecone vector store and returns the extracted text."""

text = generate_embeddings_from_pdf(pdf_file)
"""Extracts text from a PDF file, generates embeddings, and upserts them into a Pinecone vector store."""
text = generate_text_from_pdf(pdf_file)

embeddings = OpenAIEmbeddings(
model="text-embedding-3-large",
@@ -44,4 +38,7 @@ def upsert(pdf_file: BinaryIO, examid: str) -> str:
index_name="abc"
)




return text
76 changes: 44 additions & 32 deletions app/services/prompt.py
@@ -1,26 +1,43 @@
from typing import Any, BinaryIO
import os
import dotenv
import pdfplumber
from langchain_openai import OpenAIEmbeddings
import json
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone, ServerlessSpec

from langchain.chat_models import ChatOpenAI
from langchain.chains import RetrievalQA

from pinecone import Pinecone
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_core.output_parsers import JsonOutputParser
from langchain.prompts import PromptTemplate

from ..data.questionPrompts import mcq_prompt, essay_prompt

dotenv.load_dotenv()

pinecone = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))
class QuestionParser(BaseModel):
question: str = Field(description="The question generated from the text.")
answer: str = Field(description="The answer to the generated question.")

class MultipleChoiceQuestionParser(BaseModel):
question: str = Field(description="The multiple choice question generated from the text.")
options: list[str] = Field(description="The options for the multiple choice question.")
answer: int = Field(description="The index of the correct answer in the options list.")

def format_docs(docs):
"""Helper function to format document content."""
return "\n\n".join([d.page_content for d in docs])

def select_prompt(question_type: str) -> tuple[str, JsonOutputParser]:
"""Selects the appropriate prompt and parser based on the question type."""
if question_type == "mcq":
return mcq_prompt(4) # This function is assumed to return a tuple (prompt, parser)
elif question_type == "essay":
return essay_prompt() # This function is assumed to return a tuple (prompt, parser)
else:
raise ValueError("Invalid question type. Please select 'mcq' or 'essay'.")

def prompt(text: str, examid: str, question_type: str = "mcq") -> dict:
"""Generates a question based on the provided text and exam ID."""
question, parser = select_prompt(question_type)

def prompt(text: str, examid: str) -> str:
"""Upserts PDF text into a Pinecone vector store and returns the extracted text."""

embed = OpenAIEmbeddings(
model="text-embedding-3-large",
api_key=os.getenv('OPENAI_API_KEY'),
@@ -31,34 +48,29 @@ def prompt(text: str, examid: str) -> str:
namespace=examid,
index_name="abc",
embedding=embed

)

vectorstore.similarity_search(
text,
# top_k=5
)
docs = vectorstore.similarity_search(text) # Assuming this method returns relevant documents

llm = ChatOpenAI(
model="gpt-3.5-turbo",
api_key=os.getenv('OPENAI_API_KEY')
model="gpt-4o",
api_key=os.getenv('OPENAI_API_KEY'),
model_kwargs={"response_format": {"type": "json_object"}}

)

qa = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever=vectorstore.as_retriever()
prompt_template = PromptTemplate(
template="Generate one question, {question} about {query} from {document}. Output is only json format.",
input_variables=["query", "document", "question"],
partial_variables={"format_instructions": parser.get_format_instructions()},

)

print(qa.invoke(text))
return "Question generated successfully."






chain = prompt_template | llm | parser

formatted_docs = format_docs(docs)
result = chain.invoke({"query": text, "document": formatted_docs, "question": question})


return result
return json.dumps(result) # Converting the result to a JSON string for consistency
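
For completeness, the reworked service can also be called directly, outside FastAPI. A sketch, assuming `OPENAI_API_KEY` and `PINECONE_API_KEY` are set and the `abc` index already contains vectors under the given exam namespace:

```python
# Direct call into the question-generation service.
from app.services.prompt import prompt

result_json = prompt("photosynthesis", examid="exam-123", question_type="mcq")  # placeholder values
print(result_json)  # JSON string with the generated question
```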

