Skip to content

Commit

Permalink
feat: add workflow to build + test docker container (letta-ai#1278)
Browse files Browse the repository at this point in the history
  • Loading branch information
sarahwooders authored Apr 22, 2024
1 parent 4ddd0d4 commit 048e55f
Show file tree
Hide file tree
Showing 14 changed files with 237 additions and 149 deletions.
64 changes: 64 additions & 0 deletions .github/workflows/docker-integration-tests.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
# CI workflow: build the MemGPT dev docker stack and run the client
# integration tests against it. Triggered on pushes and PRs to main.
name: Docker integration tests

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

jobs:
  test:
    runs-on: ubuntu-latest
    timeout-minutes: 15
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Python
        uses: actions/setup-python@v5
        with:
          # quoted — an unquoted 3.12 would be read as the float 3.12
          python-version: "3.12"

      # The server container writes logs under ~/.memgpt/logs; pre-create
      # the directory on the runner so the bind mount is writable.
      - name: Set permissions for log directory
        run: |
          mkdir -p /home/runner/.memgpt/logs
          sudo chown -R $USER:$USER /home/runner/.memgpt/logs
          chmod -R 755 /home/runner/.memgpt/logs

      - name: Build and run docker dev server
        env:
          MEMGPT_PG_DB: memgpt
          MEMGPT_PG_USER: memgpt
          MEMGPT_PG_PASSWORD: memgpt
          # quoted — env values are strings; keep YAML from typing it as int
          MEMGPT_PG_PORT: "8888"
          MEMGPT_SERVER_PASS: test_server_token
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
        run: docker compose -f dev-compose.yaml up --build -d

      #- name: "Setup Python, Poetry and Dependencies"
      #  uses: packetcoders/[email protected]
      #  with:
      #    python-version: "3.12"
      #    poetry-version: "1.8.2"
      #    install-args: "--all-extras"

      # Block until the REST server answers on port 8083 before testing.
      - name: Wait for service
        run: bash scripts/wait_for_service.sh http://localhost:8083 -- echo "Service is ready"

      - name: Run tests with pytest
        env:
          MEMGPT_PG_DB: memgpt
          MEMGPT_PG_USER: memgpt
          MEMGPT_PG_PASSWORD: memgpt
          MEMGPT_PG_PORT: "8888"
          MEMGPT_SERVER_PASS: test_server_token
          MEMGPT_SERVER_URL: http://localhost:8083
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          PYTHONPATH: ${{ github.workspace }}:${{ env.PYTHONPATH }}
        run: |
          pipx install poetry==1.8.2
          poetry install -E dev
          poetry run pytest -s tests/test_client.py

      # Surface container logs for debugging when any prior step failed.
      - name: Print docker logs if tests fail
        if: failure()
        run: |
          echo "Printing Docker Logs..."
          docker compose -f dev-compose.yaml logs
4 changes: 2 additions & 2 deletions .github/workflows/tests.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,8 +25,8 @@ jobs:
- name: "Setup Python, Poetry and Dependencies"
uses: packetcoders/action-setup-cache-python-poetry@main
with:
python-version: "3.11"
poetry-version: "1.7.1"
python-version: "3.12"
poetry-version: "1.8.2"
install-args: "--all-extras"

- name: Initialize credentials
Expand Down
1 change: 1 addition & 0 deletions compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -33,6 +33,7 @@ services:
- MEMGPT_PG_PASSWORD=${MEMGPT_PG_PASSWORD}
- MEMGPT_PG_HOST=pgvector_db
- MEMGPT_PG_PORT=5432
- OPENAI_API_KEY=${OPENAI_API_KEY}
volumes:
- ./configs/server_config.yaml:/root/.memgpt/config # config file
- ~/.memgpt/credentials:/root/.memgpt/credentials # credentials file
Expand Down
2 changes: 2 additions & 0 deletions configs/server_config.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,13 @@ human = basic
model = gpt-4
model_endpoint = https://api.openai.com/v1
model_endpoint_type = openai
model_wrapper = null
context_window = 8192

[embedding]
embedding_endpoint_type = openai
embedding_endpoint = https://api.openai.com/v1
embedding_model = text-embedding-ada-002
embedding_dim = 1536
embedding_chunk_size = 300

Expand Down
12 changes: 2 additions & 10 deletions dev-compose.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ services:
- POSTGRES_PASSWORD=${MEMGPT_PG_PASSWORD}
- POSTGRES_DB=${MEMGPT_PG_DB}
volumes:
- ./.persist/pgdata:/var/lib/postgresql/data
- ./.persist/pgdata-test:/var/lib/postgresql/data
- ./init.sql:/docker-entrypoint-initdb.d/init.sql
ports:
- "5432:5432"
Expand All @@ -27,22 +27,14 @@ services:
ports:
- "8083:8083"
- "8283:8283"
env_file:
- .env
environment:
- MEMGPT_SERVER_PASS=${MEMGPT_SERVER_PASS} # memgpt server password
- MEMGPT_PG_DB=${MEMGPT_PG_DB}
- MEMGPT_PG_USER=${MEMGPT_PG_USER}
- MEMGPT_PG_PASSWORD=${MEMGPT_PG_PASSWORD}
- MEMGPT_PG_HOST=pgvector_db
- MEMGPT_PG_PORT=5432
- OPENAI_API_KEY=${OPENAI_API_KEY}
volumes:
- ./configs/server_config.yaml:/root/.memgpt/config # config file
- ~/.memgpt/credentials:/root/.memgpt/credentials # credentials file
memgpt_nginx:
hostname: memgpt-nginx
image: nginx:stable-alpine3.17-slim
volumes:
- ./nginx.conf:/etc/nginx/nginx.conf
ports:
- "80:80"
2 changes: 1 addition & 1 deletion memgpt/client/client.py
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,7 @@ def create_agent(
}
response = requests.post(f"{self.base_url}/api/agents", json=payload, headers=self.headers)
if response.status_code != 200:
raise ValueError(f"Failed to create agent: {response.text}")
raise ValueError(f"Status {response.status_code} - Failed to create agent: {response.text}")
response_obj = CreateAgentResponse(**response.json())
return self.get_agent_response_to_state(response_obj)

Expand Down
2 changes: 1 addition & 1 deletion memgpt/credentials.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@ class MemGPTCredentials:

# openai config
openai_auth_type: str = "bearer_token"
openai_key: Optional[str] = None
openai_key: Optional[str] = os.getenv("OPENAI_API_KEY")

# gemini config
google_ai_key: Optional[str] = None
Expand Down
102 changes: 51 additions & 51 deletions memgpt/server/rest_api/agents/index.py
Original file line number Diff line number Diff line change
Expand Up @@ -55,56 +55,56 @@ def create_agent(
"""
interface.clear()

try:
agent_state = server.create_agent(
user_id=user_id,
# **request.config
# TODO turn into a pydantic model
name=request.config["name"],
preset=request.config["preset"] if "preset" in request.config else None,
persona_name=request.config["persona_name"] if "persona_name" in request.config else None,
human_name=request.config["human_name"] if "human_name" in request.config else None,
persona=request.config["persona"] if "persona" in request.config else None,
human=request.config["human"] if "human" in request.config else None,
# llm_config=LLMConfigModel(
# model=request.config['model'],
# )
function_names=request.config["function_names"].split(",") if "function_names" in request.config else None,
)
llm_config = LLMConfigModel(**vars(agent_state.llm_config))
embedding_config = EmbeddingConfigModel(**vars(agent_state.embedding_config))

# TODO when get_preset returns a PresetModel instead of Preset, we can remove this packing/unpacking line
preset = server.ms.get_preset(name=agent_state.preset, user_id=user_id)

return CreateAgentResponse(
agent_state=AgentStateModel(
id=agent_state.id,
name=agent_state.name,
user_id=agent_state.user_id,
preset=agent_state.preset,
persona=agent_state.persona,
human=agent_state.human,
llm_config=llm_config,
embedding_config=embedding_config,
state=agent_state.state,
created_at=int(agent_state.created_at.timestamp()),
functions_schema=agent_state.state["functions"], # TODO: this is very error prone, jsut lookup the preset instead
),
preset=PresetModel(
name=preset.name,
id=preset.id,
user_id=preset.user_id,
description=preset.description,
created_at=preset.created_at,
system=preset.system,
persona=preset.persona,
human=preset.human,
functions_schema=preset.functions_schema,
),
)
except Exception as e:
print(str(e))
raise HTTPException(status_code=500, detail=str(e))
# try:
agent_state = server.create_agent(
user_id=user_id,
# **request.config
# TODO turn into a pydantic model
name=request.config["name"],
preset=request.config["preset"] if "preset" in request.config else None,
persona_name=request.config["persona_name"] if "persona_name" in request.config else None,
human_name=request.config["human_name"] if "human_name" in request.config else None,
persona=request.config["persona"] if "persona" in request.config else None,
human=request.config["human"] if "human" in request.config else None,
# llm_config=LLMConfigModel(
# model=request.config['model'],
# )
function_names=request.config["function_names"].split(",") if "function_names" in request.config else None,
)
llm_config = LLMConfigModel(**vars(agent_state.llm_config))
embedding_config = EmbeddingConfigModel(**vars(agent_state.embedding_config))

# TODO when get_preset returns a PresetModel instead of Preset, we can remove this packing/unpacking line
preset = server.ms.get_preset(name=agent_state.preset, user_id=user_id)

return CreateAgentResponse(
agent_state=AgentStateModel(
id=agent_state.id,
name=agent_state.name,
user_id=agent_state.user_id,
preset=agent_state.preset,
persona=agent_state.persona,
human=agent_state.human,
llm_config=llm_config,
embedding_config=embedding_config,
state=agent_state.state,
created_at=int(agent_state.created_at.timestamp()),
functions_schema=agent_state.state["functions"],  # TODO: this is very error prone, just lookup the preset instead
),
preset=PresetModel(
name=preset.name,
id=preset.id,
user_id=preset.user_id,
description=preset.description,
created_at=preset.created_at,
system=preset.system,
persona=preset.persona,
human=preset.human,
functions_schema=preset.functions_schema,
),
)
# except Exception as e:
# print(str(e))
# raise HTTPException(status_code=500, detail=str(e))

return router
4 changes: 0 additions & 4 deletions memgpt/server/rest_api/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,10 +39,6 @@
cd memgpt/server/rest_api
poetry run uvicorn server:app --reload
"""
config = MemGPTConfig.load()
for memory_type in ("archival", "recall", "metadata"):
setattr(config, f"{memory_type}_storage_uri", settings.pg_uri)
config.save()

interface: QueuingInterface = QueuingInterface()
server: SyncServer = SyncServer(default_interface=interface)
Expand Down
49 changes: 31 additions & 18 deletions memgpt/server/server.py
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@

from fastapi import HTTPException

from memgpt.settings import settings
import memgpt.constants as constants
import memgpt.presets.presets as presets
import memgpt.server.utils as server_utils
Expand Down Expand Up @@ -197,9 +198,23 @@ def __init__(
assert self.config.persona is not None, "Persona must be set in the config"
assert self.config.human is not None, "Human must be set in the config"

# Update storage URI to match passed in settings
# TODO: very hack, fix in the future
for memory_type in ("archival", "recall", "metadata"):
setattr(self.config, f"{memory_type}_storage_uri", settings.pg_uri)
self.config.save()

# TODO figure out how to handle credentials for the server
self.credentials = MemGPTCredentials.load()

# check credentials
# TODO: add checks for other providers
if (
self.config.default_embedding_config.embedding_endpoint_type == "openai"
or self.config.default_llm_config.model_endpoint_type == "openai"
):
assert self.credentials.openai_key is not None, "OpenAI key must be set in the credentials file"

# Ensure valid database configuration
# TODO: add back once tests are matched
# assert (
Expand Down Expand Up @@ -665,25 +680,25 @@ def create_agent(
preset_override = True
preset_obj.human = human
# This is a check for a common bug where users were providing filenames instead of values
try:
get_human_text(human)
raise ValueError(human)
raise UserWarning(
f"It looks like there is a human file named {human} - did you mean to pass the file contents to the `human` arg?"
)
except:
pass
# try:
# get_human_text(human)
# raise ValueError(human)
# raise UserWarning(
# f"It looks like there is a human file named {human} - did you mean to pass the file contents to the `human` arg?"
# )
# except:
# pass
if persona is not None:
preset_override = True
preset_obj.persona = persona
try:
get_persona_text(persona)
raise ValueError(persona)
raise UserWarning(
f"It looks like there is a persona file named {persona} - did you mean to pass the file contents to the `persona` arg?"
)
except:
pass
# try:
# get_persona_text(persona)
# raise ValueError(persona)
# raise UserWarning(
# f"It looks like there is a persona file named {persona} - did you mean to pass the file contents to the `persona` arg?"
# )
# except:
# pass
if human_name is not None and human_name != preset_obj.human_name:
preset_override = True
preset_obj.human_name = human_name
Expand Down Expand Up @@ -721,8 +736,6 @@ def create_agent(
# gpt-3.5-turbo tends to omit inner monologue, relax this requirement for now
first_message_verify_mono=True if (llm_config.model is not None and "gpt-4" in llm_config.model) else False,
)
save_agent(agent=agent, ms=self.ms)

# FIXME: this is a hacky way to get the system prompts injected into agent into the DB
# self.ms.update_agent(agent.agent_state)
except Exception as e:
Expand Down
Loading

0 comments on commit 048e55f

Please sign in to comment.