Commit

refactor:lint fix
nsavinda committed Jun 12, 2024
1 parent 34d0a47 commit 0cabfc9
Showing 7 changed files with 77 additions and 147 deletions.
10 changes: 3 additions & 7 deletions app/__init__.py
@@ -1,15 +1,11 @@
# app/__init__.py
from .main import app

from fastapi import FastAPI
from .routers.upload import router as upload_router
from .routers.questionGenerate import router as questionGenerate_router


from .main import app

# Include routers with appropriate API version prefix
app.include_router(upload_router, prefix="/api/v1")
app.include_router(questionGenerate_router, prefix="/api/v1")


# app/routers/upload.py


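The hunk above wires both versioned routers onto the FastAPI instance at import time. A minimal launch sketch, not part of this commit, assuming `uvicorn` is installed and the package is importable as `app`:

```python
# Hypothetical launcher (not in the commit); serves the app assembled in
# app/__init__.py. Assumes `uvicorn` is installed.
import uvicorn

if __name__ == "__main__":
    uvicorn.run("app:app", host="127.0.0.1", port=8000, reload=True)
```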
30 changes: 22 additions & 8 deletions app/data/questionPrompts.py
@@ -8,23 +8,37 @@ class QuestionParser(BaseModel):

# Define a Pydantic model for multiple-choice questions.
class Answer(BaseModel):
char: str = Field(description="The character representing the answer. (e.g. 'A', 'B', 'C', 'D')")
char: str = Field(description="The character representing the answer, e.g., 'A', 'B', 'C', 'D'.")
text: str = Field(description="The text of the answer.")


class MultipleChoiceQuestionParser(BaseModel):
question: str = Field(description="The multiple choice question generated from the text.")
options: list[Answer] = Field(description="The options for the multiple choice question. Should be a list of Answer objects. Answer objects should have a 'char' field and a 'text' field.")
answer: str = Field(description="The character representing the correct answer.(e.g. 'A', 'B', 'C', 'D'")
options: list[Answer] = Field(description="The options for the multiple choice question, should be a list of Answer objects.")
answer: str = Field(description="The character representing the correct answer, e.g., 'A', 'B', 'C', 'D'.")

# Function to generate a prompt and corresponding parser for creating multiple-choice questions.
def mcq_prompt(options: int) -> tuple[str, JsonOutputParser]:
q = f"Generate a multiple choice question with {options} options and a correct answer."
"""
Generates a prompt for creating multiple-choice questions along with a JSON output parser.
Args:
options (int): The number of options for the multiple-choice question.
Returns:
tuple[str, JsonOutputParser]: A tuple containing the prompt and the JSON output parser.
"""
prompt_text = f"Generate a multiple choice question with {options} options and indicate the correct answer."
parser = JsonOutputParser(pydantic_object=MultipleChoiceQuestionParser)
return (q, parser)
return (prompt_text, parser)

# Function to generate a prompt and corresponding parser for creating essay-type questions.
def essay_prompt() -> tuple[str, JsonOutputParser]:
q = "Generate an essay question."
"""
Generates a prompt for creating essay questions along with a JSON output parser.
Returns:
tuple[str, JsonOutputParser]: A tuple containing the prompt and the JSON output parser.
"""
prompt_text = "Generate an essay question."
parser = JsonOutputParser(pydantic_object=QuestionParser)
return (q, parser)
return (prompt_text, parser)
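A sketch, not in the commit, of how the (prompt, parser) pair returned above might be consumed; it assumes this package and langchain-core are importable:

```python
from app.data.questionPrompts import mcq_prompt

prompt_text, parser = mcq_prompt(4)
print(prompt_text)
# The parser carries the JSON schema derived from MultipleChoiceQuestionParser;
# including these instructions in the LLM prompt keeps its output parseable.
print(parser.get_format_instructions())
```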
8 changes: 5 additions & 3 deletions app/main.py
@@ -1,12 +1,14 @@
from fastapi import FastAPI

# Initialize the FastAPI app with a custom title
# Create an instance of the FastAPI application with a custom title
app = FastAPI(title="Testify AI")

@app.get("/api/assistant", response_model=dict)
async def read_root() -> dict:
"""
Root GET endpoint to return a simple greeting.
Returns a JSON object with a greeting message.
Root GET endpoint that provides a simple greeting message.
Returns:
dict: A dictionary containing a greeting message.
"""
return {"message": "Welcome to the Testify AI Assistant!"}
14 changes: 9 additions & 5 deletions app/routers/questionGenerate.py
@@ -1,12 +1,16 @@
from fastapi import APIRouter, Query
from fastapi import APIRouter, HTTPException, Query
from ..services.prompt import prompt


router = APIRouter()

@router.get("/generate-question/", response_model=dict)
async def generate_question(text: str = Query(..., description="The text to generate a question for"),
examid: str = Query(..., description="The ID of the exam related to the text")) -> any:
examid: str = Query(..., description="The ID of the exam related to the text")) -> dict:
"""Endpoint to generate a question for a given text using OpenAI's model."""

return prompt(text, examid)
try:
# Assuming 'prompt' function is synchronous; if it's async, use 'await prompt(text, examid)'
question_response = prompt(text, examid)
return question_response
except Exception as e:
# Catching a broad exception is not best practice; adjust according to specific exceptions expected from 'prompt'
raise HTTPException(status_code=500, detail=f"An error occurred while generating the question: {str(e)}")
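A hypothetical client call against a locally running instance; the base URL, text, and examid values are illustrative, not taken from the repository:

```python
import httpx

response = httpx.get(
    "http://localhost:8000/api/v1/generate-question/",
    params={"text": "photosynthesis in plants", "examid": "exam-123"},
    timeout=60.0,  # generation goes through an LLM, so allow extra time
)
response.raise_for_status()
print(response.json())
```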
14 changes: 8 additions & 6 deletions app/routers/upload.py
@@ -6,15 +6,17 @@
router = APIRouter()

@router.post("/upload-pdf/", status_code=201)
async def upload_pdf(file: UploadFile = File(...), examid:str = Query(..., description="The ID of the exam related to the uploaded PDF") ) -> dict:
async def upload_pdf(file: UploadFile = File(...), examid: str = Query(..., description="The ID of the exam related to the uploaded PDF")) -> dict:
"""Endpoint to upload a PDF and upsert its contents into a Pinecone vector store."""

if file.content_type != 'application/pdf':
raise HTTPException(status_code=415, detail="Unsupported file type. Please upload a PDF.")

# Call the upsert function from the imported service
upsert(file, examid)
# Assuming 'upsert' is an async function; if not, consider wrapping with 'await'
# or adjust the function to be a regular call if it's designed to be synchronous
success = upsert(file, examid)

if not success:
raise HTTPException(status_code=500, detail="Failed to process the PDF file.")

# return {"filename": file.filename}
Response(status_code=201)
# Directly return a message if upsert is successful; 'Response(status_code=201)' is redundant with `status_code=201` in the decorator
return {"message": "PDF uploaded successfully."}
21 changes: 9 additions & 12 deletions app/services/pinecone_upsert.py
@@ -1,19 +1,15 @@
from typing import BinaryIO
from typing import BinaryIO
import os
import dotenv
import pdfplumber
from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone

dotenv.load_dotenv()

pinecone = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))

def generate_embeddings_from_pdf(pdf_file: BinaryIO) -> str:
"""Generates embeddings from a PDF file using OpenAI's model."""

print(pdf_file.filename)
def generate_text_from_pdf(pdf_file: BinaryIO) -> str:
"""Generates and returns text extracted from a PDF file."""
print(f"Processing file: {pdf_file.filename}")
full_text = ""
with pdfplumber.open(pdf_file.file) as pdf:
for page in pdf.pages:
@@ -23,13 +19,11 @@ def generate_embeddings_from_pdf(pdf_file: BinaryIO) -> str:

if not full_text.strip():
raise ValueError("No text found in the PDF.")

return full_text

def upsert(pdf_file: BinaryIO, examid: str) -> str:
"""Upserts PDF text into a Pinecone vector store and returns the extracted text."""

text = generate_embeddings_from_pdf(pdf_file)
"""Extracts text from a PDF file, generates embeddings, and upserts them into a Pinecone vector store."""
text = generate_text_from_pdf(pdf_file)

embeddings = OpenAIEmbeddings(
model="text-embedding-3-large",
@@ -44,4 +38,7 @@ def upsert(pdf_file: BinaryIO, examid: str) -> str:
index_name="abc"
)




return text
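The body of `upsert` is collapsed in the hunk above; only the index name ("abc") and the return are visible. A standalone sketch of that upsert pattern under stated assumptions (the service's actual chunking and arguments may differ):

```python
# Sketch only, not the commit's code. Assumes langchain-openai and
# langchain-pinecone are installed, OPENAI_API_KEY and PINECONE_API_KEY are
# set, and a Pinecone index named "abc" exists.
import os

from langchain_openai import OpenAIEmbeddings
from langchain_pinecone import PineconeVectorStore

def upsert_text(text: str, examid: str) -> None:
    embeddings = OpenAIEmbeddings(
        model="text-embedding-3-large",
        api_key=os.getenv("OPENAI_API_KEY"),
    )
    # Naive fixed-size chunking; the real service may split differently.
    chunks = [text[i:i + 1000] for i in range(0, len(text), 1000)]
    PineconeVectorStore.from_texts(
        texts=chunks,
        embedding=embeddings,
        index_name="abc",   # index name used elsewhere in this commit
        namespace=examid,   # one namespace per exam, mirroring prompt.py
    )
```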
127 changes: 21 additions & 106 deletions app/services/prompt.py
@@ -1,74 +1,42 @@
import os
import dotenv
from langchain_openai import OpenAIEmbeddings
import json
from langchain_openai import OpenAIEmbeddings, ChatOpenAI
from langchain_pinecone import PineconeVectorStore
from pinecone import Pinecone

from langchain_core.pydantic_v1 import BaseModel, Field

# JSONresponse


# from langchain.chat_models import ChatOpenAI
# from langchain_community.chat_models import ChatOpenAI
from langchain_openai import ChatOpenAI
from langchain.chains import RetrievalQA

from langchain_core.prompts import PromptTemplate

# json output parser

import json

from ..data.questionPrompts import mcq_prompt, essay_prompt

# jsonoutputparser
from langchain_core.output_parsers import JsonOutputParser
from langchain.prompts import PromptTemplate

LLMChain = ChatOpenAI()



from ..data.questionPrompts import mcq_prompt, essay_prompt

dotenv.load_dotenv()

pinecone = Pinecone(api_key=os.getenv('PINECONE_API_KEY'))


class QuestionParser(BaseModel):
question: str = Field(description="The question generated from the text.")
answer: str= Field(description="The answer to the generated question.")

answer: str = Field(description="The answer to the generated question.")

class MultipleChoiceQuestionParser(BaseModel):
question: str = Field(description="The multiple choice question generated from the text.")
options: list[str] = Field(description="The options for the multiple choice question.")
answer: int = Field(description="The index of the correct answer in the options list.")

def format_docs(docs):
"""Helper function to format document content."""
return "\n\n".join([d.page_content for d in docs])


def select_prompt(question_type: str) -> tuple[str, JsonOutputParser]:
"""Selects the appropriate prompt and parser based on the question type."""
if question_type == "mcq":
return mcq_prompt(4)
return mcq_prompt(4) # This function is assumed to return a tuple (prompt, parser)
elif question_type == "essay":
return essay_prompt()
return essay_prompt() # This function is assumed to return a tuple (prompt, parser)
else:
raise ValueError("Invalid question type. Please select 'mcq' or 'essay'.")



def prompt(text: str, examid: str, question_type: str = "mcq") -> any:
"""Upserts PDF text into a Pinecone vector store and returns the extracted text."""




question , parser = select_prompt(question_type)


def prompt(text: str, examid: str, question_type: str = "mcq") -> dict:
"""Generates a question based on the provided text and exam ID."""
question, parser = select_prompt(question_type)

embed = OpenAIEmbeddings(
model="text-embedding-3-large",
@@ -80,79 +48,26 @@ def prompt(text: str, examid: str, question_type: str = "mcq") -> any:
namespace=examid,
index_name="abc",
embedding=embed

)

doc = vectorstore.similarity_search(
text,
# top_k=5
)



# print(doc)
docs = vectorstore.similarity_search(text) # Assuming this method returns relevant documents

llm = ChatOpenAI(
model="gpt-3.5-turbo",
api_key=os.getenv('OPENAI_API_KEY'),

api_key=os.getenv('OPENAI_API_KEY')
)





# qa = RetrievalQA.from_chain_type(
# llm=llm,
# chain_type="stuff",

# retriever=vectorstore.as_retriever(),


# )

# parser = JsonOutputParser(pydantic_object=QuestionParser)
# parser = JsonOutputParser(pydantic_object=MultipleChoiceQuestionParser)

prompt = PromptTemplate(
template="Generate One Question, {question} about {query} from {document}.output is json",
input_variables=["query", "document", "question"],
partial_variables={"format_instructions": parser.get_format_instructions()},
prompt_template = PromptTemplate(
template="Generate one question, {question} about {query} from {document}. Output is json",
input_variables=["query", "document", "question"],
partial_variables={"format_instructions": parser.get_format_instructions()},
)

chain = prompt | llm | parser

print(text)
chain = prompt_template | llm | parser

result = chain.invoke({"query": text, "document": format_docs(doc), "question": question})

print(result)
print(type(json.dumps(result)))
formatted_docs = format_docs(docs)
result = chain.invoke({"query": text, "document": formatted_docs, "question": question})

return result

# return json.dumps(result)
# dict_result = json.loads(result)
# return dict_result


# return result






# print(qa.invoke(text))

# return "Question generated successfully."









return json.dumps(result) # Converting the result to a JSON string for consistency
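A standalone sketch of the prompt, model, parser pipeline assembled above; this is not the commit's code, and the template, model name, and schema are illustrative. Assumes langchain-openai is installed and OPENAI_API_KEY is set:

```python
import os

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.pydantic_v1 import BaseModel, Field
from langchain_openai import ChatOpenAI

class Question(BaseModel):
    question: str = Field(description="The generated question.")
    answer: str = Field(description="The answer to the generated question.")

parser = JsonOutputParser(pydantic_object=Question)
prompt_template = PromptTemplate(
    template="Generate one question about {query}.\n{format_instructions}",
    input_variables=["query"],
    partial_variables={"format_instructions": parser.get_format_instructions()},
)
llm = ChatOpenAI(model="gpt-3.5-turbo", api_key=os.getenv("OPENAI_API_KEY"))

# LCEL composition: fill the template, call the chat model, parse JSON output.
chain = prompt_template | llm | parser
print(chain.invoke({"query": "photosynthesis"}))
```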
