Skip to content

Commit

Permalink
Merge pull request #71 from amosproj/llm_function
Browse files Browse the repository at this point in the history
Added gemini and llama3 using API
  • Loading branch information
nikolas-rauscher authored May 28, 2024
2 parents a33727f + 18c3ec1 commit 1aa7a2d
Show file tree
Hide file tree
Showing 7 changed files with 263 additions and 1 deletion.
6 changes: 5 additions & 1 deletion Project/backend/.env.example
Original file line number Diff line number Diff line change
Expand Up @@ -15,5 +15,9 @@ POSTGRES_PASSWORD=password
POSTGRES_DB=amos
POSTGRES_PORT=5432
POSTGRES_HOST=amos-db
JANUS_PORT=8182

#API Keys
GROQ_API_KEY=API_KEY
GOOGLE_API_KEY=API_KEY

JANUS_PORT=8182
1 change: 1 addition & 0 deletions Project/backend/codebase/requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -82,3 +82,4 @@ tenacity==8.3.0
tomli==2.0.1
typing-inspect==0.9.0
urllib3==2.2.1
google-generativeai==0.5.4
148 changes: 148 additions & 0 deletions Project/backend/config/llm/gemini.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,148 @@
import os
import json
import google.generativeai as genai
from datetime import datetime
from dotenv import load_dotenv


def configure_genai():
    """
    Load the Google API key and configure the genai SDK with it.

    The key must be available as GOOGLE_API_KEY, either exported in the
    shell (Linux/Mac: export GOOGLE_API_KEY="Your_API_KEY") or stored in
    the .env file two directories up.

    Raises:
        ValueError: If the API key is not found in the environment variables
    """
    # Could be dropped once the .env is loaded centrally by the main entry point.
    load_dotenv("../../.env", override=True)

    api_key = os.getenv("GOOGLE_API_KEY")
    if api_key:
        genai.configure(api_key=api_key)
    else:
        raise ValueError("API key not found in environment variables")


def create_model():
    """
    Build a Gemini generative model with preset generation and safety settings.

    Returns:
        genai.GenerativeModel: A configured generative model instance.
    """
    harm_categories = (
        "HARM_CATEGORY_HARASSMENT",
        "HARM_CATEGORY_HATE_SPEECH",
        "HARM_CATEGORY_SEXUALLY_EXPLICIT",
        "HARM_CATEGORY_DANGEROUS_CONTENT",
    )
    # Apply the same threshold to every category: block anything rated
    # medium risk or higher.
    safety_settings = [
        {"category": category, "threshold": "BLOCK_MEDIUM_AND_ABOVE"}
        for category in harm_categories
    ]
    generation_config = {
        "temperature": 1,
        "top_p": 0.95,
        "top_k": 64,
        "max_output_tokens": 8192,
        "response_mime_type": "text/plain",
    }
    return genai.GenerativeModel(
        model_name="gemini-1.5-pro-latest",
        safety_settings=safety_settings,
        generation_config=generation_config,
    )


def serialize_chat_history(history):
    """
    Convert chat history entries into JSON-serializable dictionaries.

    Args:
        history (list): Chat entries; each may expose a ``role`` attribute.

    Returns:
        list: One dict per entry with ``message``, ``timestamp`` and ``role``.
    """
    # NOTE: the timestamp is the serialization time, not the time the
    # message was originally sent (the entry objects carry no timestamp).
    return [
        {
            "message": str(entry),
            "timestamp": datetime.now().isoformat(),
            "role": getattr(entry, "role", None),
        }
        for entry in history
    ]


def generate_response(text_content, prompt_template):
    """
    Ask the Gemini model to answer a prompt built from the given text.

    Args:
        text_content (str): The main text content to be used within the prompt template.
        prompt_template (str): A template string for the prompt, containing a placeholder for the text content.

    Returns:
        str: A JSON-formatted string containing the response text and the serialized chat history.

    Raises:
        ValueError: If the API key is not found in the environment variables.
        Exception: If there are issues in configuring the model or generating the response.
    """
    configure_genai()

    # Start from an empty history so no prior context leaks into this request.
    chat_session = create_model().start_chat(history=[])

    # Substitute the text into the template and send it as one message.
    response = chat_session.send_message(
        prompt_template.format(text_content=text_content)
    )

    return json.dumps(
        {
            "response_text": response.text,
            "chat_history": serialize_chat_history(chat_session.history),
        },
        indent=2,
    )


if __name__ == "__main__":
    # Demo run with a fixed paragraph; see the commented input() calls
    # below for an interactive alternative.
    sample_paragraph = (
        "When one thinks about what a holiday means for students, "
        "we notice how important it is for the kids. It is a time "
        "when they finally get the chance to take a break from studies "
        "and pursue their hobbies. They can join courses which give them "
        "special training to specialize in it. They can get expert in arts, "
        "craft, pottery, candle making and more. Furthermore, they also make "
        "new friends there who have the same interests. In addition, students "
        "get to visit new places on holiday. Like during summer or winter holidays, "
        "they go with their families to different cities and countries. Through holidays, "
        "they get new experiences and memories which they remember for a lifetime. "
        "Furthermore, it also gives them time to relax with their families. Other cousins "
        "also visit each other’s places and spend time there. They play games and go out "
        "with each other. Moreover, students also get plenty of time to complete their homework "
        "and revise the syllabus."
    )
    sample_prompt = "Give important 3 words which is: {text_content}"

    # sample_paragraph = input("Input your paragraph: ")
    # sample_prompt = input("Enter your prompt: ")

    print(generate_response(sample_paragraph, sample_prompt))
109 changes: 109 additions & 0 deletions Project/backend/config/llm/llama3.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,109 @@
# 1. Visit [console.groq.com](https://console.groq.com).
# 2. Navigate to the API Keys section and create a new key.
# 3. Important: Copy the key immediately as it will only be visible once.

# !pip install groq

import os
import json
from datetime import datetime
from dotenv import load_dotenv

# Assuming 'groq' is the correct library for the API you're using
from groq import Groq


def get_groq_client():
    """
    Build a Groq client from the GROQ_API_KEY environment variable.

    The key must be set in the .env file or exported in the shell
    (Linux/Mac: export GROQ_API_KEY="Your_API_KEY").

    Returns:
        Groq: A client authenticated with the configured API key.

    Raises:
        ValueError: If the API key is not found in the environment variables
    """
    # Could be dropped once the .env is loaded centrally by the main entry point.
    load_dotenv("../../.env", override=True)

    key = os.getenv("GROQ_API_KEY")
    if not key:
        raise ValueError("API key not found in environment variables")
    return Groq(api_key=key)


def generate_response(text_content, prompt_template):
    """
    Generate a response from the Groq-hosted llama3 model.

    Builds the prompt by substituting ``text_content`` into
    ``prompt_template``, sends it as a single user message, and packages
    the model's answer together with a minimal two-entry chat history.

    Args:
        text_content (str): The main text content to be used within the prompt template.
        prompt_template (str): A template string for the prompt, containing a placeholder for the text content.

    Returns:
        str: A JSON-formatted string containing the response text and the serialized chat history.

    Raises:
        ValueError: If the API key is not found in the environment variables.
    """
    client = get_groq_client()

    # Substitute the text into the template to build the final prompt.
    prompt = prompt_template.format(text_content=text_content)

    completion = client.chat.completions.create(
        messages=[{"role": "user", "content": prompt}],
        model="llama3-8b-8192",
    )
    answer = completion.choices[0].message.content

    # The API response carries no per-message timestamps, so record the
    # local time at serialization instead.
    chat_history = [
        {
            "message": prompt,
            "role": "user",
            "timestamp": datetime.now().isoformat(),
        },
        {
            "message": answer,
            "role": "model",
            "timestamp": datetime.now().isoformat(),
        },
    ]

    return json.dumps(
        {"response_text": answer, "chat_history": chat_history},
        indent=2,
    )


if __name__ == "__main__":
    # Demo run with a fixed paragraph; see the commented input() calls
    # below for an interactive alternative.
    text_content = (
        "When one thinks about what a holiday means for students, "
        "we notice how important it is for the kids. It is a time "
        "when they finally get the chance to take a break from studies "
        "and pursue their hobbies. They can join courses which give them "
        "special training to specialize in it. They can get expert in arts, "
        "craft, pottery, candle making and more. Furthermore, they also make "
        "new friends there who have the same interests. In addition, students "
        "get to visit new places on holiday. Like during summer or winter holidays, "
        "they go with their families to different cities and countries. Through holidays, "
        "they get new experiences and memories which they remember for a lifetime. "
        "Furthermore, it also gives them time to relax with their families. Other cousins "
        "also visit each other’s places and spend time there. They play games and go out "
        "with each other. Moreover, students also get plenty of time to complete their homework "
        "and revise the syllabus."
    )
    # Fix: the template previously had no {text_content} placeholder, so
    # str.format() silently dropped the paragraph and the model never saw
    # the text it was asked to summarize.
    prompt_template = "Give me 3 important words for paragraph: {text_content}"

    # To get the inputs
    # text_content = input("Enter the paragraph: ")
    # prompt_template = input("Enter the prompt: ")

    response_json = generate_response(text_content, prompt_template)
    print(response_json)
File renamed without changes.
File renamed without changes.
File renamed without changes.

0 comments on commit 1aa7a2d

Please sign in to comment.