diff --git a/app/core/llm_framework/openai_vanilla.py b/app/core/llm_framework/openai_vanilla.py
index dfb4c99..a969342 100644
--- a/app/core/llm_framework/openai_vanilla.py
+++ b/app/core/llm_framework/openai_vanilla.py
@@ -21,7 +21,7 @@ def get_context(source_documents):
     # ** This will need to be adjusted, based on what the returned results look like **
     for _, source_document in enumerate(source_documents):
         if (
-            len(source_document.page_content) + len(context) > 11000
+            len(source_document.page_content) + len(context) > 44000
         ):  # FIXME: use tiktoken library to count tokens
             break
         if source_document.metadata.get("source", "") is not None:
@@ -36,7 +36,8 @@ def get_pre_prompt(context):
     """Constructs a pre-prompt for the conversation, including the context"""
     chat_prefix = "The following is a conversation with an AI assistant for "
     chat_prefix += "Bible translators. The assistant is"
-    chat_prefix += " helpful, creative, clever, very friendly and follows instructions carefully.\n"
+    chat_prefix += " verbose, helpful, creative, clever, very friendly and follows instructions carefully,"
+    chat_prefix += " giving as much information as possible.\n"
     prompt = (
         chat_prefix
         + "Read the paragraph below and answer the question, using only the information"
@@ -77,7 +78,7 @@ class OpenAIVanilla(LLMFrameworkInterface):  # pylint: disable=too-few-public-me
     def __init__(
         self,  # pylint: disable=super-init-not-called
         key: str = os.getenv("OPENAI_API_KEY"),
-        model_name: str = "gpt-3.5-turbo",
+        model_name: str = "gpt-3.5-turbo-1106",
         vectordb: VectordbInterface = None,  # What should this be by default?
     ) -> None:
         """Sets the API key and initializes library objects if any"""
diff --git a/deployment/docker-compose.yml b/deployment/docker-compose.yml
index 22320c7..417de98 100644
--- a/deployment/docker-compose.yml
+++ b/deployment/docker-compose.yml
@@ -76,7 +76,7 @@ services:
       - chatbot
     environment:
       - CHAT_DOMAIN=${DOMAIN:-"localhost"}
-      - PROD_DOMAIN=${DOMAIN2:-assistant.bible}
+      - PROD_DOMAIN=${DOMAIN2:-dev.assistant.bible}
     volumes:
       - ./nginx/nginx.conf.template:/etc/nginx/templates/default.conf.template:ro
       # - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
@@ -93,7 +93,7 @@
       - ./certbot/conf/:/etc/letsencrypt/:rw
     networks:
       - chatbot-network
-
+    # command: certonly --webroot --webroot-path=/var/www/certbot --email alejandro_quintero@sil.org --agree-tos --no-eff-email --staging -d dev.assistant.bible
   ofelia-scheduler:
     image: mcuadros/ofelia:v0.3.7
     depends_on:
@@ -131,4 +131,4 @@ volumes:
   logs-vol:
   chroma-db:
   postgres-db-vol:
-  postgres-db-backup:
\ No newline at end of file
+  postgres-db-backup:
diff --git a/deployment/nginx/nginx.conf b/deployment/nginx/nginx.conf
new file mode 100644
index 0000000..0ab4fc3
--- /dev/null
+++ b/deployment/nginx/nginx.conf
@@ -0,0 +1,21 @@
+events {
+  worker_connections 1024;
+  # other events directives can be placed here
+}
+http {
+  server {
+    listen 80;
+    listen [::]:80;
+
+    server_name dev.assistant.bible www.dev.assistant.bible;
+    server_tokens off;
+
+    location /.well-known/acme-challenge/ {
+      root /var/www/certbot;
+    }
+
+    location / {
+      return 301 https://dev.assistant.bible$request_uri;
+    }
+  }
+}
\ No newline at end of file
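
Context for the two Python changes above: gpt-3.5-turbo-1106 has a 16,385-token context window (versus 4,096 for the base gpt-3.5-turbo), which is why the character budget in get_context grows from 11,000 to 44,000 (roughly 11,000 tokens at ~4 characters per token). The FIXME still applies; below is a minimal sketch of how it could be resolved with the tiktoken library. The count_tokens helper name and the cl100k_base fallback are illustrative assumptions, not part of this patch.

```python
# Sketch only: token-aware budgeting for get_context, per the FIXME above.
# tiktoken's encoding_for_model / get_encoding / encode are the library's
# real API; the helper itself is hypothetical.
import tiktoken


def count_tokens(text: str, model_name: str = "gpt-3.5-turbo-1106") -> int:
    """Count tokens the way the OpenAI API will, instead of estimating by characters."""
    try:
        encoding = tiktoken.encoding_for_model(model_name)
    except KeyError:
        # Model names tiktoken does not know yet fall back to the encoding
        # used by recent OpenAI chat models.
        encoding = tiktoken.get_encoding("cl100k_base")
    return len(encoding.encode(text))
```

With such a helper, the character comparison could become something like `count_tokens(context + source_document.page_content) > 11000`, expressing the budget directly in tokens against the model's 16,385-token window.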