diff --git a/.devcontainer/devcontainer.json b/.devcontainer/devcontainer.json new file mode 100644 index 00000000..3c23d879 --- /dev/null +++ b/.devcontainer/devcontainer.json @@ -0,0 +1,26 @@ +// For format details, see https://aka.ms/devcontainer.json. For config options, see the +// README at: https://github.com/devcontainers/templates/tree/main/src/python +{ + "name": "Python 3", + // Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile + "image": "mcr.microsoft.com/devcontainers/python:1-3.12-bullseye", + "runArgs": ["--name", "OAPL-DEV"], + "features": { + "ghcr.io/devcontainers/features/docker-outside-of-docker:1": {} + + }, + // Features to add to the dev container. More info: https://containers.dev/features. + // "features": {}, + + // Use 'forwardPorts' to make a list of ports inside the container available locally. + // "forwardPorts": [], + + // Use 'postCreateCommand' to run commands after the container is created. + "postCreateCommand": "/bin/bash .devcontainer/setup.sh", + + // Configure tool-specific properties. + // "customizations": {}, + + // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root. 
+ // "remoteUser": "root" +} diff --git a/.devcontainer/setup.sh b/.devcontainer/setup.sh new file mode 100755 index 00000000..d44836c8 --- /dev/null +++ b/.devcontainer/setup.sh @@ -0,0 +1,20 @@ +#!/bin/bash + +if [ -d ".venv" ]; then + echo "Virtual environment already exists" +else + python3 -m venv .venv +fi +source .venv/bin/activate + +apt-get update && apt-get install -y git + +pip install --upgrade pip +pip3 install --user -r requirements.txt + +cd tests +pip3 install --user -r requirements.txt + +pip install pre-commit +pre-commit install +pre-commit run --all-files diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 92767b79..171c838c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -126,3 +126,8 @@ jobs: git config --global user.email "<>" ./run.sh + + - name: e2e tests + working-directory: ./tests + run: | + ./run.sh diff --git a/.gitignore b/.gitignore index 06c38bfb..757499e0 100644 --- a/.gitignore +++ b/.gitignore @@ -1,4 +1,11 @@ -# Byte-compiled / optimized / DLL files +# OPAL specific +.env +*.env +opal-example-policy-repo/* + + +# Temporary and Python cache files +**/*.pyc __pycache__/ *.py[cod] *$py.class @@ -6,10 +13,17 @@ __pycache__/ # C extensions *.so +# Virtual environments +.venv/ +venv/ +env/ +ENV/ +env.bak/ +venv.bak/ + # Distribution / packaging .Python build/ -develop-eggs/ dist/ downloads/ eggs/ @@ -18,7 +32,6 @@ lib/ lib64/ parts/ sdist/ -var/ wheels/ pip-wheel-metadata/ share/python-wheels/ @@ -27,16 +40,14 @@ share/python-wheels/ *.egg MANIFEST -# PyInstaller -# Usually these files are written by a python script from a template -# before PyInstaller builds the exe, so as to inject date/other infos into it. 
-*.manifest -*.spec - # Installer logs pip-log.txt pip-delete-this-directory.txt +# PyInstaller +*.manifest +*.spec + # Unit test / coverage reports htmlcov/ .tox/ @@ -55,17 +66,17 @@ coverage.xml *.mo *.pot -# Django stuff: +# Django *.log local_settings.py db.sqlite3 db.sqlite3-journal -# Flask stuff: +# Flask instance/ .webassets-cache -# Scrapy stuff: +# Scrapy .scrapy # Sphinx documentation @@ -84,53 +95,41 @@ ipython_config.py # pyenv .python-version -# pipenv -# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. -# However, in case of collaboration, if having platform-specific dependencies or dependencies -# having no cross-platform support, pipenv may install dependencies that don't work, or not -# install all needed dependencies. +# Pipenv #Pipfile.lock -# PEP 582; used by e.g. github.com/David-OConnor/pyflow +# PEP 582 __pypackages__/ -# Celery stuff +# Celery celerybeat-schedule celerybeat.pid -# SageMath parsed files +# SageMath *.sage.py -# Environments -.env -.venv -env/ -venv/ -ENV/ -env.bak/ -venv.bak/ +# Editors +.vscode/ +.idea +*.iml -# Spyder project settings +# Spyder .spyderproject .spyproject -# Rope project settings +# Rope .ropeproject -# mkdocs documentation -/site +# mkdocs +docs/_build/ # mypy .mypy_cache/ .dmypy.json dmypy.json -# Pyre type checker +# Pyre .pyre/ -# editors -.vscode/ -.idea -*.iml - +# System files .DS_Store diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..4559acfd --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,40 @@ +{ + "version": "0.2.0", + "configurations": [ + { + "name": "Remote to local", + "type": "debugpy", + "request": "attach", + "justMyCode": false, + "subProcess": true, + "connect": { + "host": "localhost", + "port": 5678 + }, + "pathMappings": [ + { + "localRoot": "${workspaceFolder}", + "remoteRoot": "${cwd}" + } + ] + }, + { + "name": "Python Debugger: Current File", + "type": "debugpy", + "request": "launch", + 
"program": "${file}", + "console": "integratedTerminal" + }, + { + "name": "Python: Debug with Args", + "type": "debugpy", + "request": "launch", + "program": "${file}", + "args": [ + "--deploy", + "--with_broadcast", + ], + "console": "integratedTerminal" + } + ] +} diff --git a/.vscode/settings.json b/.vscode/settings.json new file mode 100644 index 00000000..f5f61e4b --- /dev/null +++ b/.vscode/settings.json @@ -0,0 +1,8 @@ +{ + "cmake.ignoreCMakeListsMissing": true, + "makefile.configureOnOpen": false, + "python.analysis.extraPaths": [ + "./packages/opal-common", + "./packages/opal-server" + ] +} diff --git a/app-tests/docker-compose-app-tests.yml b/app-tests/docker-compose-app-tests.yml index b12e5309..6c9398c9 100644 --- a/app-tests/docker-compose-app-tests.yml +++ b/app-tests/docker-compose-app-tests.yml @@ -1,4 +1,6 @@ + services: + broadcast_channel: image: postgres:alpine environment: @@ -7,15 +9,19 @@ services: - POSTGRES_PASSWORD=postgres opal_server: - image: permitio/opal-server:${OPAL_IMAGE_TAG:-latest} + #image: permitio/opal-server:${OPAL_IMAGE_TAG:-latest} + build: + context: ../ # Point to the directory containing your Dockerfile + dockerfile: ./docker/Dockerfile.server # Specify your Dockerfile if it's not named 'Dockerfile' deploy: mode: replicated - replicas: 2 + replicas: 1 endpoint_mode: vip environment: - OPAL_BROADCAST_URI=postgres://postgres:postgres@broadcast_channel:5432/postgres - - UVICORN_NUM_WORKERS=4 - - OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-tests-policy-repo.git} + - UVICORN_NUM_WORKERS=0 + #- OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-tests-policy-repo.git} + - OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-example-policy-repo.git} - OPAL_POLICY_REPO_MAIN_BRANCH=${POLICY_REPO_BRANCH} - OPAL_POLICY_REPO_SSH_KEY=${OPAL_POLICY_REPO_SSH_KEY} - 
OPAL_DATA_CONFIG_SOURCES={"config":{"entries":[{"url":"http://opal_server:7002/policy-data","config":{"headers":{"Authorization":"Bearer ${OPAL_CLIENT_TOKEN}"}},"topics":["policy_data"],"dst_path":"/static"}]}} @@ -35,6 +41,7 @@ services: opal_client: image: permitio/opal-client:${OPAL_IMAGE_TAG:-latest} + scale: 2 deploy: mode: replicated replicas: 2 diff --git a/app-tests/jwks_dir/jwks.json b/app-tests/jwks_dir/jwks.json new file mode 100644 index 00000000..4b33b871 --- /dev/null +++ b/app-tests/jwks_dir/jwks.json @@ -0,0 +1 @@ +{"keys": [{"kty": "RSA", "key_ops": ["verify"], "n": "zQSk0F8jfU3KR9w-7-aq5n7Elh34Vhi1pzQsKG7VVoJgzqHqhXP1JsbSo-4ntAZ77fCWKMd25y4gOmNpur_0sOErRtSGwdlwVRxef-wztDD0ecqksMF7c2ZNwdq3hXxJ9NrDpcg8ORmIt6q-T5ZtbvoYhVy37LKw5dr0ry-SxfTeUuadFin4wTMQAwuiYKNQMGjuW8eEGi_ZEjziXjhQhEWfIIhH1v_jcWnW-_cjx7fvJ_Jau98vs40KKLawnvueiRdi8KWNQpA4b6480b0KfC0U7qbr61-fyL8u0L7aotTxmMzCfRabBRQ53sR5zYvvhV4Y-OZM_82RAFleIFsNfjrkbN5Sq7NEStE3b_yLMYu_uW5IoewOt7X3MgICq0jiXrbvExuJ0pq7DGlax5uhBt316Gt_HY8yqWAnCaa766_0av8IVZstRlCPOjhaM4liEneNdlzGheQlxoi6SxvQFhy4jbcG4tAmbvRIWpAbYaaXyB0H_PMPN7uPzo9lQNv1N6jMHlUq9GOGMy83qy6iQ2aA_NouCwDUKh5WUDePZwpwNFd5Fs6EiBcESG0SJkTwyuFPM6iYl6H2S7Knf8CYJMmixr_Ezm0id0Ltm0_FAwoEUqbltSmaGDZeI5T29732eFr9lK0Fw5R8_2X6uIRaIljCMSkbrRQcsm8gUZ_H9ms", "e": "AQAB"}]} diff --git a/app-tests/minrun.sh b/app-tests/minrun.sh new file mode 100755 index 00000000..d4575210 --- /dev/null +++ b/app-tests/minrun.sh @@ -0,0 +1,125 @@ +#!/bin/bash +set -e + +export OPAL_AUTH_PUBLIC_KEY +export OPAL_AUTH_PRIVATE_KEY +export OPAL_AUTH_MASTER_TOKEN +export OPAL_CLIENT_TOKEN +export OPAL_DATA_SOURCE_TOKEN + +function generate_opal_keys { + echo "- Generating OPAL keys" + + ssh-keygen -q -t rsa -b 4096 -m pem -f opal_crypto_key -N "" + OPAL_AUTH_PUBLIC_KEY="$(cat opal_crypto_key.pub)" + OPAL_AUTH_PRIVATE_KEY="$(tr '\n' '_' < opal_crypto_key)" + rm opal_crypto_key.pub opal_crypto_key + + OPAL_AUTH_MASTER_TOKEN="$(openssl rand -hex 16)" + 
OPAL_AUTH_JWT_AUDIENCE=https://api.opal.ac/v1/ OPAL_AUTH_JWT_ISSUER=https://opal.ac/ OPAL_REPO_WATCHER_ENABLED=0 \ + opal-server run & + sleep 2; + + OPAL_CLIENT_TOKEN="$(opal-client obtain-token "$OPAL_AUTH_MASTER_TOKEN" --type client)" + echo "Client token: $OPAL_CLIENT_TOKEN" + OPAL_DATA_SOURCE_TOKEN="$(opal-client obtain-token "$OPAL_AUTH_MASTER_TOKEN" --type datasource)" + # shellcheck disable=SC2009ß + ps -ef | grep opal-server | grep -v grep | awk '{print $2}' | xargs kill + sleep 5; + + echo "- Create .env file" + rm -f .env + ( + echo "OPAL_AUTH_PUBLIC_KEY=\"$OPAL_AUTH_PUBLIC_KEY\""; + echo "OPAL_AUTH_PRIVATE_KEY=\"$OPAL_AUTH_PRIVATE_KEY\""; + echo "OPAL_AUTH_MASTER_TOKEN=\"$OPAL_AUTH_MASTER_TOKEN\""; + echo "OPAL_CLIENT_TOKEN=\"$OPAL_CLIENT_TOKEN\""; + echo "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE=\"$OPAL_AUTH_PRIVATE_KEY_PASSPHRASE\"" + ) > .env +} + +function prepare_policy_repo { + echo "- Clone tests policy repo to create test's branch" + export OPAL_POLICY_REPO_URL + OPAL_POLICY_REPO_URL=${OPAL_POLICY_REPO_URL:-git@github.com:permitio/opal-example-policy-repo.git} + +echo "- Forking the policy repo" +OPAL_TARGET_ACCOUNT="SomeTargetAccount" # Replace with your GitHub username +ORIGINAL_REPO_NAME=$(basename -s .git "$OPAL_POLICY_REPO_URL") +NEW_REPO_NAME="${ORIGINAL_REPO_NAME}" +FORKED_REPO_URL="git@github.com:${OPAL_TARGET_ACCOUNT}/${NEW_REPO_NAME}.git" + +# Check if the forked repository already exists +if gh repo list "$OPAL_TARGET_ACCOUNT" --json name -q '.[].name' | grep -q "^${NEW_REPO_NAME}$"; then + echo "Forked repository $NEW_REPO_NAME already exists." 
+ OPAL_POLICY_REPO_URL="$FORKED_REPO_URL" + echo "Using existing forked repository: $OPAL_POLICY_REPO_URL" +else + # Using GitHub CLI to fork the repository + # gh repo fork "$OPAL_POLICY_REPO_URL" --clone --remote=false --org="$OPAL_TARGET_ACCOUNT" + OPAL_TARGET_PAT="${pat:-}" + curl -X POST -H "Authorization: token $OPAL_TARGET_PAT" https://api.github.com/repos/permitio/opal-example-policy-repo/forks + if [ $? -eq 0 ]; then + echo "Fork created successfully!" + else + echo "Error creating fork: $?" + fi + + # Update OPAL_POLICY_REPO_URL to point to the forked repo + OPAL_POLICY_REPO_URL="$FORKED_REPO_URL" + echo "Updated OPAL_POLICY_REPO_URL to $OPAL_POLICY_REPO_URL" +fi + + + export POLICY_REPO_BRANCH + POLICY_REPO_BRANCH=test-$RANDOM$RANDOM + rm -rf ./opal-example-policy-repo + git clone "$OPAL_POLICY_REPO_URL" + cd opal-example-policy-repo + git checkout -b $POLICY_REPO_BRANCH + git push --set-upstream origin $POLICY_REPO_BRANCH + cd - + + echo "OPAL_POLICY_REPO_URL=\"$OPAL_POLICY_REPO_URL\"" >> .env + echo "POLICY_REPO_BRANCH=\"$POLICY_REPO_BRANCH\"" >> .env + + # That's for the docker-compose to use, set ssh key from "~/.ssh/id_rsa", unless another path/key data was configured + export OPAL_POLICY_REPO_SSH_KEY + OPAL_POLICY_REPO_SSH_KEY_PATH=${OPAL_POLICY_REPO_SSH_KEY_PATH:-~/.ssh/id_rsa} + OPAL_POLICY_REPO_SSH_KEY=${OPAL_POLICY_REPO_SSH_KEY:-$(cat "$OPAL_POLICY_REPO_SSH_KEY_PATH")} + echo "- OPAL_POLICY_REPO_SSH_KEY=$OPAL_POLICY_REPO_SSH_KEY" + echo "OPAL_POLICY_REPO_SSH_KEY=\"$OPAL_POLICY_REPO_SSH_KEY\"" >> .env +} + +function compose { + docker compose -f ./docker-compose-app-tests.yml --env-file .env "$@" +} + +function clean_up { + ARG=$? 
+ if [[ "$ARG" -ne 0 ]]; then + echo "*** Test Failed ***" + echo "" + compose logs + else + echo "*** Test Passed ***" + echo "" + fi + compose down + #cd opal-example-policy-repo; git push -d origin $POLICY_REPO_BRANCH; cd - # Remove remote tests branch + rm -rf ./opal-example-policy-repo + exit $ARG +} + +function main { + + generate_opal_keys + prepare_policy_repo + + trap clean_up EXIT + +} + +# This script is good if you want to just generate opal keys and initialize the policy repo on your github account +# and then run some docker compose and tests. +main diff --git a/docker/Dockerfile b/docker/Dockerfile index a1495311..9ad66f27 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -5,10 +5,10 @@ FROM python:3.10-bookworm AS build-stage # from now on, work in the /app directory WORKDIR /app/ # Layer dependency install (for caching) -COPY ./packages/requires.txt ./base_requires.txt -COPY ./packages/opal-common/requires.txt ./common_requires.txt -COPY ./packages/opal-client/requires.txt ./client_requires.txt -COPY ./packages/opal-server/requires.txt ./server_requires.txt +COPY ../packages/requires.txt ./base_requires.txt +COPY ../packages/opal-common/requires.txt ./common_requires.txt +COPY ../packages/opal-client/requires.txt ./client_requires.txt +COPY ../packages/opal-server/requires.txt ./server_requires.txt # install python deps RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt @@ -16,7 +16,7 @@ RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./ # split this stage to save time and reduce image size # --------------------------------------------------- FROM rust:1.79 AS cedar-builder -COPY ./cedar-agent /tmp/cedar-agent +COPY ../cedar-agent /tmp/cedar-agent WORKDIR /tmp/cedar-agent RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release @@ -34,7 +34,7 @@ RUN useradd -m -b / -s /bin/bash 
opal WORKDIR /opal # copy wait-for script (create link at old path to maintain backward compatibility) -COPY scripts/wait-for.sh . +COPY ../scripts/wait-for.sh . RUN chmod +x ./wait-for.sh RUN ln -s /opal/wait-for.sh /usr/wait-for.sh @@ -42,15 +42,15 @@ RUN ln -s /opal/wait-for.sh /usr/wait-for.sh RUN apt-get update && apt-get install -y netcat-traditional jq wget && apt-get clean # copy startup script (create link at old path to maintain backward compatibility) -COPY ./scripts/start.sh . +COPY ../scripts/start.sh . RUN chmod +x ./start.sh RUN ln -s /opal/start.sh /start.sh # copy gunicorn_config -COPY ./scripts/gunicorn_conf.py . +COPY ../scripts/gunicorn_conf.py . # copy app code -COPY ./README.md . -COPY ./packages ./packages/ +COPY ../README.md . +COPY ../packages ./packages/ # install the opal-common package RUN cd ./packages/opal-common && python setup.py install # Make sure scripts in .local are usable: @@ -119,6 +119,8 @@ ENV OPAL_INLINE_OPA_ENABLED=true ENV OPAL_INLINE_OPA_EXEC_PATH=/opal/opa # expose opa port EXPOSE 8181 +EXPOSE 5678 + USER opal # CEDAR CLIENT IMAGE -------------------------------- diff --git a/packages/opal-server/opal_server/data/api.py b/packages/opal-server/opal_server/data/api.py index 3ef9d573..b4c82dd0 100644 --- a/packages/opal-server/opal_server/data/api.py +++ b/packages/opal-server/opal_server/data/api.py @@ -1,5 +1,6 @@ from typing import Optional +import debugpy from fastapi import APIRouter, Depends, Header, HTTPException, status from fastapi.responses import RedirectResponse from opal_common.authentication.authz import ( diff --git a/pytest.ini b/pytest.ini index 16c88ba9..10b5e330 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,3 +1,4 @@ # Handling DeprecationWarning 'asyncio_mode' default value [pytest] asyncio_mode = strict +asyncio_default_fixture_loop_scope = function diff --git a/requirements.txt b/requirements.txt index 86e2f7ef..93930ce3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,6 +7,7 @@ 
pytest-asyncio pytest-rerunfailures wheel>=0.38.0 twine +testcontainers setuptools>=70.0.0 # not directly required, pinned by Snyk to avoid a vulnerability zipp>=3.19.1 # not directly required, pinned by Snyk to avoid a vulnerability prometheus_client diff --git a/scripts/start.sh b/scripts/start.sh index 350c836b..16aebbee 100755 --- a/scripts/start.sh +++ b/scripts/start.sh @@ -5,6 +5,8 @@ export GUNICORN_CONF=${GUNICORN_CONF:-./gunicorn_conf.py} export GUNICORN_TIMEOUT=${GUNICORN_TIMEOUT:-30} export GUNICORN_KEEP_ALIVE_TIMEOUT=${GUNICORN_KEEP_ALIVE_TIMEOUT:-5} +sleep 10 + if [[ -z "${OPAL_BROADCAST_URI}" && "${UVICORN_NUM_WORKERS}" != "1" ]]; then echo "OPAL_BROADCAST_URI must be set when having multiple workers" exit 1 @@ -15,4 +17,9 @@ prefix="" if [[ -z "${OPAL_ENABLE_DATADOG_APM}" && "${OPAL_ENABLE_DATADOG_APM}" = "true" ]]; then prefix=ddtrace-run fi -(set -x; exec $prefix gunicorn -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) + +#(set -x; exec $prefix gunicorn --reload -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT}) +(set -x; exec $prefix python -m debugpy --listen 0.0.0.0:5678 -m uvicorn ${UVICORN_ASGI_APP} --reload --host 0.0.0.0 --port ${UVICORN_PORT} ) + +# write a code that will wait for the user to press enter +read -n1 -r -p "Press any key to continue..." 
key diff --git a/tests/.env.example b/tests/.env.example new file mode 100644 index 00000000..ae9fb7b1 --- /dev/null +++ b/tests/.env.example @@ -0,0 +1,5 @@ +#!/usr/bin/env bash + +export OPAL_POLICY_REPO_URL='' +export POLICY_REPO_BRANCH='' +export OPAL_POLICY_REPO_SSH_KEY='' diff --git a/tests/README.md b/tests/README.md new file mode 100644 index 00000000..0cc451d3 --- /dev/null +++ b/tests/README.md @@ -0,0 +1,82 @@ +# Tests + +The tests folder contains integration and unit tests for OPAL. These tests ensure the proper functionality and reliability of OPAL across various components and scenarios. Below is an overview of the test structure, utilities, and execution methods. + +## Running the Tests + +To execute the tests, run the `run.sh` script from the root directory of the repository. This script sets up the environment and executes all tests: + +```bash +./run.sh +``` + +What you will see is that pytest begins to pull the images for the broadcaster(s), then gitea, then the opal_server and opal_client will be built from the local debuggable version using the source code, rather than permitio/opal-server or opal-client images. So you could test your changes to the code. + +If all infrastructure is set up well, you will then see the tests being executed by pytest as normal. + +## Test Structure + +- **`tests/containers`**: Configurations and setups for containerized environments used in testing OPAL, including Docker and Kubernetes configurations. +- **`tests/data-fetchers`**: OPAL data fetchers used in the tests to fetch data from various sources, such as PostgreSQL, MongoDB, etc. +- **`tests/docker`**: Dockerfiles and related files used to build Docker images for the tests. +- **`tests/policies`**: Policies written in REGO used to verify that OPAL functions correctly. +- **`containers`**: Configurations and setups for containerized environments used in testing OPAL, including Docker and Kubernetes configurations. 
+- **`data-fetchers`**: OPAL data fetchers used in the tests to fetch data from various sources, such as PostgreSQL, MongoDB, etc. +- **`docker`**: Dockerfiles and related files used to build Docker images for the tests. +- **`policies`**: Policies written in REGO used to verify that OPAL functions correctly. +- **`policy_repos`**: Providers managing policy repositories on platforms such as Gitea, GitHub, GitLab, and others. Additional platforms should implement a class derived from `PolicyRepoBase` (e.g., Bitbucket). +- **`app-tests`**: Integration tests running OPAL with a sample service to verify correct configuration. +- **`policy_stores`**: Test setups to validate support for policy decision engines such as OPA, Cedar, OpenFGA, etc. +## Infrastructure of the Testing System + +### Settings + +The `settings.py` file includes a `TestSettings` class for configuring global test settings. This class allows you to define: + +- Test data location. +- Docker network configuration. +- Other environment settings. + +### Utilities + +The `utils.py` file contains a `Utils` class for simplifying test writing. It provides methods for: + +- Creating temporary directories. +- Copying files. +- Other common tasks. + +### Using the `session_matrix` + +The `session_matrix` feature allows you to define and manage test scenarios across multiple configurations. This is particularly useful for validating OPAL's behavior under various conditions. + +#### Using the `is_final` Property + +The `is_final` property within the `session_matrix` helps identify if a particular test session represents the last stage of a given scenario. This can be used to perform cleanup tasks or additional validations at the end of a test sequence. 
+ +Example: + +```python +def test_example(session_matrix): + if session_matrix.is_final: + # Perform cleanup or final assertions + print("Final session reached") +``` + +## Writing Your Own Tests + +To write a test, include `opal_servers` and `opal_clients` as parameters in your test function. These will automatically be populated with available OPAL servers and clients. For example: + +```python +def test_custom_policy(opal_servers, opal_clients): + server = opal_servers[0] + client = opal_clients[0] + # Add your test logic here +``` + +## OPAL API Reference + +Refer to the [OPAL API Documentation](https://opal-v2.permit.io/redoc#tag/Bundle-Server/operation/get_policy_policy_get) for additional details on endpoints and functionality. + +--- + +Let me know if you'd like to include specific code examples or any other details! diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..bdded10d --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,505 @@ +import json +import os +import shutil +import tempfile +import threading +import time +from typing import List + +import debugpy +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger +from testcontainers.core.waiting_utils import wait_for_logs + +import docker +from tests import utils +from tests.containers.broadcast_container_base import BroadcastContainerBase +from tests.containers.cedar_container import CedarContainer +from tests.containers.gitea_container import GiteaContainer +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.opa_container import OpaContainer, OpaSettings +from tests.containers.opal_client_container import OpalClientContainer +from tests.containers.opal_server_container import OpalServerContainer +from tests.containers.postgres_broadcast_container import 
PostgresBroadcastContainer +from tests.containers.redis_broadcast_container import RedisBroadcastContainer +from tests.containers.settings.cedar_settings import CedarSettings +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.containers.settings.opal_client_settings import OpalClientSettings +from tests.containers.settings.opal_server_settings import OpalServerSettings +from tests.containers.settings.postgres_broadcast_settings import ( + PostgresBroadcastSettings, +) +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_factory import ( + PolicyRepoFactory, + SupportedPolicyRepo, +) +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings +from tests.settings import pytest_settings + +logger = setup_logger(__name__) + +# wait some seconds for the debugger to attach +debugger_wait_time = 5 # seconds + + +def cancel_wait_for_client_after_timeout(): + try: + time.sleep(debugger_wait_time) + debugpy.wait_for_client.cancel() + except Exception as e: + print(f"Failed to cancel wait for client: {e}") + + +try: + if pytest_settings.wait_for_debugger: + t = threading.Thread(target=cancel_wait_for_client_after_timeout) + t.start() + print(f"Waiting for debugger to attach... {debugger_wait_time} seconds timeout") + debugpy.wait_for_client() +except Exception as e: + print(f"Failed to attach debugger: {e}") + +utils.export_env("OPAL_TESTS_DEBUG", "true") +utils.install_opal_server_and_client() + + +@pytest.fixture(scope="session") +def temp_dir(): + # Setup: Create a temporary directory + """Creates a temporary directory once at the beginning of the test session, + prints the directory path to the console, and yields it to the test. + + After the test session is finished, it deletes the directory and + prints the directory removal to the console. + + This fixture is useful for tests that need a temporary directory to + exist for the duration of the test session. 
+ """ + dir_path = tempfile.mkdtemp() + print(f"Temporary directory created: {dir_path}") + yield dir_path + + # Teardown: Clean up the temporary directory + shutil.rmtree(dir_path) + print(f"Temporary directory removed: {dir_path}") + + +@pytest.fixture(scope="session") +def opal_network(): + """Creates a Docker network and yields it. + + The network is cleaned up after all tests have finished running. + """ + network = Network().create() + + yield network + + print("Removing network...") + time.sleep(5) # wait for the containers to stop + network.remove() + print("Network removed") + + +@pytest.fixture(scope="session") +def number_of_opal_servers(): + """The number of OPAL servers to start. + + This fixture is used to determine how many OPAL servers to start for + the tests. The default value is 2, but it can be overridden by setting + the environment variable OPAL_TESTS_NUMBER_OF_OPAL_SERVERS. + + Returns: + int: The number of OPAL servers to start. + """ + return 2 + + +from tests.fixtures.broadcasters import ( + broadcast_channel, + kafka_broadcast_channel, + postgres_broadcast_channel, + redis_broadcast_channel, +) +from tests.fixtures.images import opal_server_image +from tests.fixtures.policy_repos import gitea_server, gitea_settings, policy_repo + + +@pytest.fixture(scope="session") +def opal_servers( + opal_network: Network, + broadcast_channel: BroadcastContainerBase, + policy_repo: PolicyRepoBase, + number_of_opal_servers: int, + opal_server_image: str, + topics: dict[str, int], + # kafka_broadcast_channel: KafkaBroadcastContainer, + # redis_broadcast_channel: RedisBroadcastContainer, + session_matrix, +): + """Fixture that initializes and manages OPAL server containers for testing. + + This fixture sets up a specified number of OPAL server containers, each + connected to the provided Docker network and using the specified broadcast + channel. The first server container sets up and creates a webhook for the + policy repository. 
All containers are started and their logs are monitored + for successful cloning of the policy repository. The containers are stopped + after the test session is complete. + + Args: + opal_network (Network): The Docker network to which the containers are connected. + broadcast_channel (BroadcastContainerBase): The broadcast channel container. + policy_repo (PolicyRepoBase): The policy repository to be used. + number_of_opal_servers (int): The number of OPAL server containers to start. + opal_server_image (str): The Docker image used for the OPAL servers. + topics (dict[str, int]): The topics for OPAL data configuration. + kafka_broadcast_channel (KafkaBroadcastContainer): The Kafka broadcast channel container. + redis_broadcast_channel (RedisBroadcastContainer): The Redis broadcast channel container. + session_matrix: The session matrix used for the test configuration. + + Yields: + List[OpalServerContainer]: A list of running OPAL server containers. + """ + + if not broadcast_channel: + raise ValueError("Missing 'broadcast_channel' container.") + + containers = [] # List to store container instances + + for i in range(number_of_opal_servers): + container_name = f"opal_server_{i+1}" + + container = OpalServerContainer( + OpalServerSettings( + broadcast_uri=broadcast_channel.get_url(), + container_name=container_name, + container_index=i + 1, + uvicorn_workers="4", + policy_repo_url=policy_repo.get_repo_url(), + image=opal_server_image, + log_level="DEBUG", + data_topics=" ".join(topics.keys()), + polling_interval=3, + ), + network=opal_network, + ) + + container.start() + container.get_wrapped_container().reload() + + if i == 0: + # Only the first server should setup the webhook + policy_repo.setup_webhook( + container.get_container_host_ip(), container.settings.port + ) + policy_repo.create_webhook() + + print( + f"Started container: {container_name}, ID: {container.get_wrapped_container().id}" + ) + container.wait_for_log("Clone succeeded", timeout=30) + 
containers.append(container) + + yield containers + + for container in containers: + container.stop() + + +@pytest.fixture(scope="session") +def number_of_opal_clients(): + """The number of OPAL clients to start. + + This fixture is used to determine how many OPAL clients to start for + the tests. The default value is 2, but it can be overridden by + setting the environment variable OPAL_TESTS_NUMBER_OF_OPAL_CLIENTS. + """ + return 2 + + +@pytest.fixture(scope="session") +def connected_clients(opal_clients: List[OpalClientContainer]): + """A fixture that waits for all OPAL clients to connect to the PubSub + server before yielding them. + + This fixture takes a list of OPAL clients as input and waits for each of them + to connect to the PubSub server before yielding them. The fixture is used to + ensure that all OPAL clients are connected and ready to receive messages + before the tests are executed. + + Parameters + ---------- + opal_clients : List[OpalClientContainer] + A list of OPAL client containers. + + Yields + ------ + List[OpalClientContainer] + A list of connected OPAL client containers. + """ + for client in opal_clients: + assert client.wait_for_log( + log_str="Connected to PubSub server", timeout=30 + ), f"Client {client.settings.container_name} did not connect to PubSub server." + yield opal_clients + + +from tests.fixtures.images import ( + cedar_image, + opa_image, + opal_client_image, + opal_client_with_opa_image, +) +from tests.fixtures.policy_stores import cedar_server, opa_server + + +@pytest.fixture(scope="session") +def opal_clients( + opal_network: Network, + opal_servers: List[OpalServerContainer], + # opa_server: OpaContainer, + # cedar_server: CedarContainer, + request, + number_of_opal_clients: int, + opal_client_with_opa_image, +): + """A fixture that starts and manages multiple OPAL client containers. 
+ + This fixture takes a list of OPAL server containers as input and starts a + specified number of OPAL client containers, each connected to the first + OPAL server container. The fixture yields the list of started OPAL client + containers. + + Parameters + ---------- + opal_network : Network + The Docker network to which the containers are connected. + opal_servers : List[OpalServerContainer] + A list of OPAL server containers. + #opa_server : OpaContainer + # The OPA server container. + cedar_server : CedarContainer + The Cedar server container. + request + The pytest request object. + number_of_opal_clients : int + The number of OPAL clients to start. + opal_client_image + The Docker image used for the OPAL clients. + + Yields + ------ + List[OpalClientContainer] + A list of started OPAL client containers. + """ + if not opal_servers or len(opal_servers) == 0: + raise ValueError("Missing 'opal_server' container.") + + opal_server_url = f"http://{opal_servers[0].settings.container_name}:{opal_servers[0].settings.port}" + + containers = [] # List to store OpalClientContainer instances + + for i in range(number_of_opal_clients): + container_name = f"opal_client_{i+1}" # Unique name for each client + + client_token = opal_servers[0].obtain_OPAL_tokens(container_name)["client"] + callbacks = json.dumps( + { + "callbacks": [ + [ + f"{opal_server_url}/data/callback_report", + { + "method": "post", + "process_data": False, + "headers": { + "Authorization": f"Bearer {client_token}", + "content-type": "application/json", + }, + }, + ] + ] + } + ) + + container = OpalClientContainer( + OpalClientSettings( + image=opal_client_with_opa_image, + container_name=container_name, + container_index=i + 1, + opal_server_url=opal_server_url, + client_token=client_token, + default_update_callbacks=callbacks, + ), + network=opal_network, + ) + + container.start() + print( + f"Started OpalClientContainer: {container_name}, ID: {container.get_wrapped_container().id}" + ) + 
containers.append(container) + + yield containers + + try: + for container in containers: + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container}") + pass + + +@pytest.fixture(scope="session") +def topics(): + """A fixture that returns a dictionary mapping topic names to the number of + OpalClientContainer instances that should subscribe to each topic. + + Returns + ------- + dict + A dictionary mapping topic names to the number of OpalClientContainer + instances that should subscribe to each topic. + """ + topics = {"topic_1": 1, "topic_2": 1} + return topics + + +@pytest.fixture(scope="session") +def topiced_clients( + topics, opal_network: Network, opal_servers: list[OpalServerContainer] +): + """Fixture that starts and manages multiple OPAL client containers, each + subscribing to a different topic. + + The fixture takes a dictionary of topics and the number of clients to + subscribe to each topic. It starts the specified number of OPAL client + containers, each connected to the first OPAL server container, and each + subscribing to the specified topic. The fixture yields the list of started + OPAL client containers, organized by topic. + + Parameters + ---------- + topics : dict + A dictionary mapping topic names to the number of OpalClientContainer + instances that should subscribe to each topic. + opal_network : Network + The Docker network to which the containers are connected. + opal_servers : list[OpalServerContainer] + A list of OPAL server containers. + + Yields + ------ + dict + A dictionary mapping topic names to a list of OpalClientContainer + instances that are subscribed to the topic. 
+ """ + if not opal_servers or len(opal_servers) == 0: + raise ValueError("Missing 'opal_server' container.") + + opal_server_url = f"http://{opal_servers[0].settings.container_name}:{opal_servers[0].settings.port}" + containers = {} # List to store OpalClientContainer instances + + client_token = opal_servers[0].obtain_OPAL_tokens("topiced_opal_client_?x?")[ + "client" + ] + callbacks = json.dumps( + { + "callbacks": [ + [ + f"{opal_server_url}/data/callback_report", + { + "method": "post", + "process_data": False, + "headers": { + "Authorization": f"Bearer {client_token}", + "content-type": "application/json", + }, + }, + ] + ] + } + ) + + for topic, number_of_clients in topics.items(): + for i in range(number_of_clients): + container_name = f"opal_client_{topic}_{i+1}" # Unique name for each client + + container = OpalClientContainer( + OpalClientSettings( + image="permitio/opal-client:latest", + container_name=container_name, + container_index=i + 1, + opal_server_url=opal_server_url, + client_token=client_token, + default_update_callbacks=callbacks, + topics=topic, + ), + network=opal_network, + ) + + container.start() + logger.info( + f"Started OpalClientContainer: {container_name}, ID: {container.get_wrapped_container().id} - on topic: {topic}" + ) + containers[topic] = containers.get(topic, []) + + assert container.wait_for_log( + log_str="Connected to PubSub server", timeout=30 + ), f"Client {client.settings.container_name} did not connect to PubSub server." + + containers[topic].append(container) + + yield containers + + for _, clients in containers.items(): + for client in clients: + client.stop() + + +def wait_sometime(): + """Pauses execution based on the environment. + + If the code is running inside GitHub Actions, it pauses execution + for 30 seconds. Otherwise, it waits for user input to continue. + + This can be used to control the flow of execution depending on the + environment in which the code is being executed. 
+ """ + + if os.getenv("GITHUB_ACTIONS") == "true": + print("Running inside GitHub Actions. Sleeping for 30 seconds...") + time.sleep(3600) # Sleep for 30 seconds + else: + print("Running on the local machine. Press Enter to continue...") + input() # Wait for key press + + +@pytest.fixture(scope="session", autouse=True) +def setup(opal_clients, session_matrix): + """A setup fixture that is run once per test session. + + This fixture is automatically used by all tests, and is used to set up the + environment for the test session. The fixture yields, allowing the tests to + execute, and then is used to tear down the environment when the test session + is finished. + + Parameters + ---------- + opal_servers : List[OpalServerContainer] + A list of OPAL server containers. + opal_clients : List[OpalClientContainer] + A list of OPAL client containers. + session_matrix : dict + A dictionary containing information about the test session. + + Yields + ------ + None + """ + yield + + if session_matrix["is_final"]: + logger.info("Finalizing test session...") + utils.remove_env("OPAL_TESTS_DEBUG") + wait_sometime() diff --git a/tests/containers/broadcast_container_base.py b/tests/containers/broadcast_container_base.py new file mode 100644 index 00000000..15e4fd22 --- /dev/null +++ b/tests/containers/broadcast_container_base.py @@ -0,0 +1,21 @@ +from tests.containers.opal_test_container import OpalTestContainer + + +class BroadcastContainerBase(OpalTestContainer): + def __init__(self, **kwargs): + OpalTestContainer.__init__(self, **kwargs) + + def get_url(self) -> str: + url = ( + self.settings.protocol + + "://" + + self.settings.user + + ":" + + self.settings.password + + "@" + + self.settings.container_name + + ":" + + str(self.settings.port) + ) + print(url) + return url diff --git a/tests/containers/cedar_container.py b/tests/containers/cedar_container.py new file mode 100644 index 00000000..aab56ee0 --- /dev/null +++ b/tests/containers/cedar_container.py @@ -0,0 +1,46 @@ 
+from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.containers.opal_test_container import OpalTestContainer +from tests.containers.settings.cedar_settings import CedarSettings +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class CedarContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + settings: CedarSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + OpalTestContainer.__init__(self) # Initialize OpalTestContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 8180, self.settings.port + ).with_network(self.network).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + def reload_with_settings(self, settings: CedarSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/gitea_container.py b/tests/containers/gitea_container.py new file mode 100644 index 00000000..2719d483 --- /dev/null +++ b/tests/containers/gitea_container.py @@ -0,0 +1,413 @@ +import codecs +import os +import shutil +import time + +import requests +from git import GitCommandError, Repo +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +import docker +from tests.containers.opal_test_container import OpalTestContainer +from 
tests.containers.settings.gitea_settings import GiteaSettings + + +class GiteaContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + settings: GiteaSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + self.settings = settings + self.network = network + self.kwargs = kwargs + + self.logger = setup_logger(__name__) + + labels = self.kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + # Set container lifecycle properties + self.with_kwargs(auto_remove=False, restart_policy={"Name": "always"}) + + OpalTestContainer.__init__(self) + DockerContainer.__init__( + self, + image=self.settings.image, + docker_client_kw=docker_client_kw, + **self.kwargs, + ) + + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + # Set container name and ports + self.with_name(self.settings.container_name).with_bind_ports( + 3000, self.settings.port_http + ).with_bind_ports(2222, self.settings.port_ssh).with_network( + self.network + ).with_network_aliases( + self.settings.network_aliases + ) + + def is_gitea_ready(self): + """Check if Gitea is ready by inspecting logs.""" + stdout_logs, stderr_logs = self.get_logs() + logs = stdout_logs.decode("utf-8") + stderr_logs.decode("utf-8") + return "Listen: http://0.0.0.0:3000" in logs + + def wait_for_gitea(self, timeout: int = 30): + """Wait for Gitea to initialize within a timeout period.""" + for _ in range(timeout): + if self.is_gitea_ready(): + self.logger.info("Gitea is ready.") + return + time.sleep(1) + raise RuntimeError("Gitea initialization timeout.") + + def create_gitea_user(self): + """Create an admin user in the Gitea instance.""" + create_user_command = ( + f"/usr/local/bin/gitea admin user create " + f"--admin --username {self.settings.username} " + f"--email {self.settings.email} " + f"--password {self.settings.password} " + 
f"--must-change-password=false" + ) + result = self.exec(create_user_command) + if result.exit_code != 0: + raise RuntimeError( + f"Failed to create Gitea user: {result.output.decode('utf-8')}" + ) + + def create_gitea_admin_token(self): + """Generate an admin access token for the Gitea instance.""" + create_token_command = ( + f"/usr/local/bin/gitea admin user generate-access-token " + f"--username {self.settings.username} --raw --scopes all" + ) + result = self.exec(create_token_command) + token_result = result.output.decode("utf-8").strip() + if not token_result: + raise RuntimeError("Failed to create an access token.") + + return token_result + + def deploy_gitea(self): + """Deploy Gitea container and initialize configuration.""" + self.logger.info("Deploying Gitea container...") + # self.start() + self.wait_for_gitea() + self.create_gitea_user() + self.access_token = self.create_gitea_admin_token() + + def exec(self, command: str): + """Execute a command inside the container.""" + self.logger.info(f"Executing command: {command}") + exec_result = self.get_wrapped_container().exec_run(command) + if exec_result.exit_code != 0: + raise RuntimeError( + f"Command failed with exit code {exec_result.exit_code}: {exec_result.output.decode('utf-8')}" + ) + return exec_result + + def repo_exists(self): + url = f"{self.settings.gitea_base_url}/repos/{self.settings.username}/{self.settings.repo_name}" + headers = {"Authorization": f"token {self.access_token}"} + response = requests.get(url, headers=headers) + + if response.status_code == 200: + self.logger.info(f"Repository '{self.settings.repo_name}' already exists.") + return True + elif response.status_code == 404: + self.logger.info(f"Repository '{self.settings.repo_name}' does not exist.") + return False + else: + self.logger.error( + f"Failed to check repository: {response.status_code} {response.text}" + ) + response.raise_for_status() + + def create_gitea_repo( + self, description="", private=False, auto_init=True, 
default_branch="master" + ): + url = f"{self.settings.gitea_base_url}/api/v1/user/repos" + headers = { + "Authorization": f"token {self.access_token}", + "Content-Type": "application/json", + } + payload = { + "name": self.settings.repo_name, + "description": description, + "private": private, + "auto_init": auto_init, + "default_branch": default_branch, + } + response = requests.post(url, json=payload, headers=headers) + if response.status_code == 201: + self.logger.info("Repository created successfully!") + return response.json() + else: + self.logger.error( + f"Failed to create repository: {response.status_code} {response.text}" + ) + response.raise_for_status() + + def clone_repo_with_gitpython(self, clone_directory): + repo_url = f"{self.settings.gitea_base_url}/{self.settings.username}/{self.settings.repo_name}.git" + if self.access_token: + repo_url = f"http://{self.settings.username}:{self.access_token}@{self.settings.gitea_base_url.split('://')[1]}/{self.settings.username}/{self.settings.repo_name}.git" + try: + if os.path.exists(clone_directory): + self.logger.debug( + f"Directory '{clone_directory}' already exists. Deleting it..." + ) + shutil.rmtree(clone_directory) + Repo.clone_from(repo_url, clone_directory) + self.logger.debug( + f"Repository '{self.settings.repo_name}' cloned successfully into '{clone_directory}'." + ) + except Exception as e: + self.logger.error( + f"Failed to clone repository '{self.settings.repo_name}': {e}" + ) + + def reset_repo_with_rbac(self, repo_directory, source_rbac_file): + try: + if not os.path.exists(repo_directory): + raise FileNotFoundError( + f"Repository directory '{repo_directory}' does not exist." + ) + + git_dir = os.path.join(repo_directory, ".git") + if not os.path.exists(git_dir): + raise FileNotFoundError( + f"The directory '{repo_directory}' is not a valid Git repository (missing .git folder)." 
+ ) + + repo = Repo(repo_directory) + + # Get the default branch name + default_branch = self.get_default_branch(repo) + if not default_branch: + raise ValueError("Could not determine the default branch name.") + + # Ensure we are on the default branch + if repo.active_branch.name != default_branch: + repo.git.checkout(default_branch) + + # Remove other branches + branches = [ + branch.name for branch in repo.branches if branch.name != default_branch + ] + for branch in branches: + repo.git.branch("-D", branch) + + # Reset repository content + for item in os.listdir(repo_directory): + item_path = os.path.join(repo_directory, item) + if os.path.basename(item_path) == ".git": + continue + if os.path.isfile(item_path) or os.path.islink(item_path): + os.unlink(item_path) + elif os.path.isdir(item_path): + shutil.rmtree(item_path) + + # Copy RBAC file + destination_rbac_path = os.path.join(repo_directory, "rbac.rego") + shutil.copy2(source_rbac_file, destination_rbac_path) + + # Stage and commit changes + repo.git.add(all=True) + repo.index.commit("Reset repository to only include 'rbac.rego'") + + self.logger.debug( + f"Repository reset successfully. 'rbac.rego' is the only file and changes are committed." 
+ ) + except Exception as e: + self.logger.error(f"Error resetting repository: {e}") + + def get_default_branch(self, repo): + try: + return repo.git.symbolic_ref("refs/remotes/origin/HEAD").split("/")[-1] + except Exception as e: + self.logger.error(f"Error determining default branch: {e}") + return None + + def push_repo_to_remote(self, repo_directory): + try: + repo = Repo(repo_directory) + + # Get the default branch name + default_branch = self.get_default_branch(repo) + if not default_branch: + raise ValueError("Could not determine the default branch name.") + + # Ensure we are on the default branch + if repo.active_branch.name != default_branch: + repo.git.checkout(default_branch) + + # Check if remote origin exists + if "origin" not in [remote.name for remote in repo.remotes]: + raise ValueError("No remote named 'origin' found in the repository.") + + # Push changes to the default branch + repo.remotes.origin.push(refspec=f"{default_branch}:{default_branch}") + self.logger.info("Changes pushed to remote repository successfully.") + except Exception as e: + self.logger.error(f"Error pushing changes to remote: {e}") + + def cleanup_local_repo(self, repo_directory): + try: + if os.path.exists(repo_directory): + shutil.rmtree(repo_directory) + self.logger.debug( + f"Local repository '{repo_directory}' has been cleaned up." + ) + else: + self.logger.debug( + f"Local repository '{repo_directory}' does not exist. No cleanup needed." 
+ ) + except Exception as e: + self.logger.error(f"Error during cleanup: {e}") + + def init_repo(self): + try: + # Set paths for source RBAC file and clone directory + source_rbac_file = os.path.join( + self.settings.data_dir, "rbac.rego" + ) # Use self.data_dir for source RBAC file + clone_directory = os.path.join( + self.settings.temp_dir, f"{self.settings.repo_name}-clone" + ) # Use self.repo_name + + # Check if the repository exists + if not self.repo_exists(): + # Create the repository if it doesn't exist + self.create_gitea_repo( + description="This is a test repository created via API.", + private=False, + ) + + # Clone the repository + self.clone_repo_with_gitpython(clone_directory=clone_directory) + + # Reset the repository with RBAC + self.reset_repo_with_rbac( + repo_directory=clone_directory, source_rbac_file=source_rbac_file + ) + + # Push the changes to the remote repository + self.push_repo_to_remote(repo_directory=clone_directory) + + # Clean up the local repository + self.cleanup_local_repo(repo_directory=clone_directory) + + self.logger.info("Repository initialization completed successfully.") + except Exception as e: + self.logger.error(f"Error during repository initialization: {e}") + + # Prepare the directory + def prepare_directory(self, path): + """Prepare the directory by cleaning up any existing content.""" + if os.path.exists(path): + shutil.rmtree(path) # Remove existing directory + os.makedirs(path) # Create a new directory + + # Clone and push changes + def clone_and_update( + self, + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create 
or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + + # Push changes to the remote repository + print(f"Pushing changes for branch {branch}...") + try: + repo.git.push(authenticated_url, branch) + except GitCommandError as e: + print(f"Error pushing branch {branch}: {e}") + + # Cleanup function + def cleanup(self, CLONE_DIR): + """Remove the temporary clone directory.""" + if os.path.exists(CLONE_DIR): + print("Cleaning up temporary directory...") + shutil.rmtree(CLONE_DIR) + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.temp_dir + + self.logger.info( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITEA_REPO_URL = f"http://localhost:{self.settings.port_http}/{self.settings.username}/{self.settings.repo_name}.git" + username = self.settings.username + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITEA_REPO_URL.replace( + "http://", f"http://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + print("Operation completed successfully.") + finally: + # Ensure cleanup is performed regardless of success or failure + self.cleanup(CLONE_DIR) + + def reload_with_settings(self, settings: GiteaSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/kafka_broadcast_container.py b/tests/containers/kafka_broadcast_container.py new file mode 100644 index 00000000..697bd69e --- /dev/null +++ b/tests/containers/kafka_broadcast_container.py @@ -0,0 +1,33 @@ +import debugpy +from testcontainers.core.network import Network +from testcontainers.kafka import KafkaContainer + +import docker +from tests.containers.opal_test_container import OpalTestContainer +from tests.containers.zookeeper_container import ZookeeperContainer + + +class KafkaBroadcastContainer(OpalTestContainer, KafkaContainer): + def __init__( + self, + network: Network, + zookeeper_container: ZookeeperContainer, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.zookeeper_container = zookeeper_container + self.network = network + + 
OpalTestContainer.__init__(self) + KafkaContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs) + + self.with_network(self.network) + + self.with_network_aliases("broadcast_channel") + # Add a custom name for the container + self.with_name(f"kafka_broadcast_channel") diff --git a/tests/containers/kafka_ui_container.py b/tests/containers/kafka_ui_container.py new file mode 100644 index 00000000..f28cbc25 --- /dev/null +++ b/tests/containers/kafka_ui_container.py @@ -0,0 +1,36 @@ +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.opal_test_container import OpalTestContainer + + +class KafkaUIContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + network: Network, + kafka_container: KafkaBroadcastContainer, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.kafka_container = kafka_container + self.network = network + + self.image = "provectuslabs/kafka-ui:latest" + + OpalTestContainer.__init__(self) + DockerContainer.__init__( + self, image=self.image, docker_client_kw=docker_client_kw, **kwargs + ) + + self.with_name("kafka-ui") + self.with_bind_ports(8080, 8080) + self.with_env("KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS", "kafka:9092") + + self.with_network(self.network) + self.with_network_aliases("Kafka_ui") diff --git a/tests/containers/opa_container.py b/tests/containers/opa_container.py new file mode 100644 index 00000000..1e5cbe9c --- /dev/null +++ b/tests/containers/opa_container.py @@ -0,0 +1,67 @@ +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests import utils +from 
tests.containers.opal_test_container import OpalTestContainer +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class OpaSettings: + def __init__( + self, + image: str | None = None, + port: int | None = None, + container_name: str | None = None, + ) -> None: + self.image = image if image else "openpolicyagent/opa:0.29.0" + self.container_name = "opa" + + if port is None: + self.port = utils.find_available_port(8181) + else: + if utils.is_port_available(port): + self.port = port + else: + self.port = utils.find_available_port(8181) + + def getEnvVars(self): + return {} + + +class OpaContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + settings: OpaSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + OpalTestContainer.__init__(self) # Initialize OpalTestContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 8181, self.settings.port + ).with_network(self.network).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + def reload_with_settings(self, settings: OpaSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/opal_client_container.py b/tests/containers/opal_client_container.py new file mode 100644 index 00000000..282c6d15 --- /dev/null +++ b/tests/containers/opal_client_container.py @@ -0,0 +1,52 @@ +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import 
setup_logger + +from tests import utils +from tests.containers.opal_test_container import OpalTestContainer +from tests.containers.settings.opal_client_settings import OpalClientSettings + + +class OpalClientContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + settings: OpalClientSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + OpalTestContainer.__init__(self) # Initialize OpalTestContainer + DockerContainer.__init__( + self, image=settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + self.settings = settings + self.network = network + self.logger = setup_logger(__name__) + self.configure() + + def configure(self): + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + self.with_name(self.settings.container_name).with_bind_ports( + 7000, self.settings.port + ).with_bind_ports( + 8181, utils.find_available_port(self.settings.opa_port) + ).with_network( + self.network + ).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + if self.settings.debug_enabled: + self.with_bind_ports(5678, self.settings.debug_port) + + def reload_with_settings(self, settings: OpalClientSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() diff --git a/tests/containers/opal_server_container.py b/tests/containers/opal_server_container.py new file mode 100644 index 00000000..ab55bfcc --- /dev/null +++ b/tests/containers/opal_server_container.py @@ -0,0 +1,93 @@ +import requests +from testcontainers.core.generic import DockerContainer +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.opal_test_container import OpalTestContainer +from tests.containers.settings.opal_server_settings import OpalServerSettings + + +class OpalServerContainer(OpalTestContainer, 
DockerContainer): + def __init__( + self, + settings: OpalServerSettings, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + self.settings = settings + self.network = network + + self.logger = setup_logger(__name__) + + OpalTestContainer.__init__(self) + DockerContainer.__init__( + self, image=self.settings.image, docker_client_kw=docker_client_kw, **kwargs + ) + + self.configure() + + def configure(self): + # Add environment variables individually + for key, value in self.settings.getEnvVars().items(): + self.with_env(key, value) + + # Configure network and other settings + self.with_name(self.settings.container_name).with_bind_ports( + 7002, self.settings.port + ).with_network(self.network).with_kwargs( + labels={"com.docker.compose.project": "pytest"} + ).with_network_aliases( + self.settings.container_name + ) + + # Bind debug ports if enabled + if self.settings.debugEnabled: + self.with_bind_ports(5678, self.settings.debug_port) + + def reload_with_settings(self, settings: OpalServerSettings | None = None): + self.stop() + + self.settings = settings if settings else self.settings + self.configure() + + self.start() + + def obtain_OPAL_tokens(self, caller: str = "Unknown caller") -> dict: + """Fetch client and datasource tokens from the OPAL server.""" + token_url = f"http://localhost:{self.settings.port}/token" + headers = { + "Authorization": f"Bearer {self.settings.master_token}", + "Content-Type": "application/json", + } + + tokens = {} + + for token_type in ["client", "datasource"]: + try: + data = {"type": token_type} # ).replace("'", "\"") + self.logger.debug(f"Fetching OPAL {token_type} token...") + self.logger.debug(f"url: {token_url}") + self.logger.debug(f"headers: {headers}") + self.logger.debug(data) + + response = requests.post(token_url, headers=headers, json=data) + response.raise_for_status() + + token = response.json().get("token") + if token: + tokens[token_type] = token + self.logger.info( + f"{caller} | 
Successfully fetched OPAL {token_type} token." + ) + else: + self.logger.error( + f"{caller} | Failed to fetch OPAL {token_type} token: {response.json()}" + ) + + except requests.exceptions.RequestException as e: + self.logger.error( + f"{caller} | HTTP Request failed while fetching OPAL {token_type} token: {e}" + ) + + return tokens diff --git a/tests/containers/opal_test_container.py b/tests/containers/opal_test_container.py new file mode 100644 index 00000000..5c4f37f1 --- /dev/null +++ b/tests/containers/opal_test_container.py @@ -0,0 +1,88 @@ +import threading +import asyncio +import re +import time +from datetime import datetime + +from testcontainers.core.utils import setup_logger + + +class OpalTestContainer: + def __init__(self, **kwargs): + self.opalLogger = setup_logger(__name__) + + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.timestamp_with_ansi = ( + r"\x1b\[.*?(\d{4}-\d{2}-\d{2}T\d{2}:\d{2}:\d{2}\.\d{6}\+\d{4})" + ) + + + def wait_for_log( + self, log_str: str, timeout: int, reference_timestamp: datetime | None = None + ) -> bool: + """ + Wait for a specific log to appear in the container logs after the + reference timestamp. + + Args: + log_str (str): The string to search for in the logs. + timeout (int): Maximum time to wait for the log (in seconds). + reference_timestamp (datetime | None): The timestamp to start checking logs from. + + Returns: + bool: True if the log was found, False if the timeout was reached. + """ + + #timeout = 0.1 + timeout = timeout + + log_found = threading.Event() + + def process_logs(): + """ + Asynchronous sub-function to check logs with timeout handling. + """ + #input(f"Press Enter to continue... 
searching for: {log_str} | on container: {self.settings.container_name} | timeout set to: {timeout}") + logs = self._container.logs(stream=True) # Stream logs + start_time = time.time() + + for line in logs: # Synchronous iteration over logs + elapsed_time = time.time() - start_time + if elapsed_time > timeout: + self.opalLogger.warning( + f"{self.settings.container_name} | Timeout reached while waiting for the log. | {log_str}" + ) + break + + decoded_line = line.decode("utf-8").strip() + + # Extract timestamp if present + match = re.search(self.timestamp_with_ansi, decoded_line) + if match: + log_timestamp_string = match.group(1) + log_timestamp = datetime.strptime( + log_timestamp_string, "%Y-%m-%dT%H:%M:%S.%f%z" + ) + + if reference_timestamp is None or log_timestamp > reference_timestamp: + if log_str in decoded_line: + log_found.set() # Signal that the log was found + break + + log_thread = threading.Thread(target=process_logs) + log_thread.start() + + log_thread.join(timeout=float(timeout)) + + if not log_found.is_set(): + self.opalLogger.warning( + f"{self.settings.container_name} | Timeout reached while waiting for the log. 
| {log_str}" + ) + return False + + return True + \ No newline at end of file diff --git a/tests/containers/postgres_broadcast_container.py b/tests/containers/postgres_broadcast_container.py new file mode 100644 index 00000000..82fa504e --- /dev/null +++ b/tests/containers/postgres_broadcast_container.py @@ -0,0 +1,39 @@ +from testcontainers.core.network import Network +from testcontainers.postgres import PostgresContainer + +from tests.containers.broadcast_container_base import BroadcastContainerBase +from tests.containers.settings.postgres_broadcast_settings import ( + PostgresBroadcastSettings, +) + + +class PostgresBroadcastContainer(BroadcastContainerBase, PostgresContainer): + def __init__( + self, + network: Network, + settings: PostgresBroadcastSettings, + image: str = "postgres:alpine", + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + self.network = network + self.settings = settings + + BroadcastContainerBase.__init__(self, **kwargs) + PostgresContainer.__init__( + self, + image, + settings.port, + settings.user, + settings.password, + settings.database, + docker_client_kw=docker_client_kw, + **kwargs, + ) + + self.with_network(self.network) + + self.with_network_aliases("broadcast_channel") + self.with_name(f"postgres_broadcast_channel") + + self.start() diff --git a/tests/containers/pulsar_broadcast_container.py b/tests/containers/pulsar_broadcast_container.py new file mode 100644 index 00000000..a4498cb7 --- /dev/null +++ b/tests/containers/pulsar_broadcast_container.py @@ -0,0 +1,32 @@ +import debugpy +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +import docker +from tests.containers.opal_test_container import OpalTestContainer + + +class PulsarBroadcastContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = 
dict(kwargs.get("labels", {}))  # FIX: copy — don't mutate the caller's dict
+        labels.update({"com.docker.compose.project": "pytest"})
+        kwargs["labels"] = labels
+
+        self.network = network
+
+        OpalTestContainer.__init__(self)
+        # NOTE(review): "pulsar:latest" is likely not a valid Docker Hub image
+        # (the official image is apachepulsar/pulsar) — confirm before use.
+        DockerContainer.__init__(
+            self, image="pulsar:latest", docker_client_kw=docker_client_kw, **kwargs
+        )
+
+        self.with_network(self.network)
+
+        self.with_network_aliases("broadcast_channel")
+        # Add a custom name for the container (plain string; no placeholders)
+        self.with_name("pytest_opal_broadcast_channel")
diff --git a/tests/containers/redis_broadcast_container.py b/tests/containers/redis_broadcast_container.py
new file mode 100644
index 00000000..a8ddfd2f
--- /dev/null
+++ b/tests/containers/redis_broadcast_container.py
@@ -0,0 +1,28 @@
+from testcontainers.core.network import Network
+from testcontainers.redis import RedisContainer
+
+from tests.containers.opal_test_container import OpalTestContainer
+
+
+class RedisBroadcastContainer(OpalTestContainer, RedisContainer):
+    """Redis container used as the OPAL broadcast channel in tests."""
+
+    def __init__(
+        self,
+        network: Network,
+        docker_client_kw: dict | None = None,
+        **kwargs,
+    ) -> None:
+        # Tag the container so pytest-owned containers can be identified.
+        # FIX: copy the labels dict — the original updated the caller's
+        # dict in place, leaking the pytest label back to the caller.
+        labels = dict(kwargs.get("labels", {}))
+        labels.update({"com.docker.compose.project": "pytest"})
+        kwargs["labels"] = labels
+
+        self.network = network
+
+        OpalTestContainer.__init__(self)
+        RedisContainer.__init__(self, docker_client_kw=docker_client_kw, **kwargs)
+
+        self.with_network(self.network)
+
+        self.with_network_aliases("broadcast_channel")
+        # Add a custom name for the container (plain string; no placeholders)
+        self.with_name("redis_broadcast_channel")
diff --git a/tests/containers/redis_ui_container.py b/tests/containers/redis_ui_container.py
new file mode 100644
index 00000000..c467ae90
--- /dev/null
+++ b/tests/containers/redis_ui_container.py
@@ -0,0 +1,36 @@
+from testcontainers.core.container import DockerContainer
+from testcontainers.core.network import Network
+from testcontainers.redis import RedisContainer
+
+from tests.containers.opal_test_container import OpalTestContainer
+
+
+class
RedisUIContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + network: Network, + redis_container: RedisContainer, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.redis_container = redis_container + self.network = network + self.container_name = "redis-ui" + self.image = "redislabs/redisinsight:latest" + + OpalTestContainer.__init__(self) + DockerContainer.__init__( + self, image=self.image, docker_client_kw=docker_client_kw, **kwargs + ) + + self.with_name(self.container_name) + + self.with_network(self.network) + self.with_bind_ports(5540, 5540) + + self.with_network_aliases("redis_ui") diff --git a/tests/containers/settings/cedar_settings.py b/tests/containers/settings/cedar_settings.py new file mode 100644 index 00000000..d05d3e22 --- /dev/null +++ b/tests/containers/settings/cedar_settings.py @@ -0,0 +1,23 @@ +from tests import utils + + +class CedarSettings: + def __init__( + self, + image: str | None = None, + port: int | None = None, + container_name: str | None = None, + ) -> None: + self.image = image if image else "permitio/cedar:latest" + self.container_name = container_name if container_name else "cedar" + + if port is None: + self.port = utils.find_available_port(8180) + else: + if utils.is_port_available(port): + self.port = port + else: + self.port = utils.find_available_port(8180) + + def getEnvVars(self): + return {} diff --git a/tests/containers/settings/gitea_settings.py b/tests/containers/settings/gitea_settings.py new file mode 100644 index 00000000..8a61f69d --- /dev/null +++ b/tests/containers/settings/gitea_settings.py @@ -0,0 +1,121 @@ +import os + +from testcontainers.core.utils import setup_logger + + +class GiteaSettings: + def __init__( + self, + container_name: str = None, + repo_name: str = None, + temp_dir: str = None, + data_dir: str 
= None, + port_http: int = None, + port_ssh: int = None, + USER_UID: int = None, + USER_GID: int = None, + username: str = None, + email: str = None, + password: str = None, + network_aliases: str = None, + image: str = None, + **kwargs, + ): + """Initialize the Gitea Docker container and related parameters. + + :param container_name: Name of the Gitea container + :param repo_name: Name of the repository + :param temp_dir: Path to the temporary directory for files + :param data_dir: Path to the data directory for persistent files + :param port_http: Optional - Port for Gitea HTTP access + :param ssh_port: Optional - Port for Gitea SSH access + :param image: Optional - Docker image for Gitea + :param USER_UID: Optional - User UID for Gitea + :param USER_GID: Optional - User GID for Gitea + :param username: Optional - Default admin username for Gitea + :param email: Optional - Default admin email for Gitea + :param password: Optional - Default admin password for Gitea + """ + + self.logger = setup_logger(__name__) + + self.load_from_env() + + self.image = image if image else self.image + self.container_name = container_name if container_name else self.container_name + self.repo_name = repo_name if repo_name else self.repo_name + self.port_http = port_http if port_http else self.port_http + self.port_ssh = port_ssh if port_ssh else self.port_ssh + self.uid = USER_UID if USER_UID else self.uid + self.gid = USER_GID if USER_GID else self.gid + + self.username = username if username else self.username + self.email = email if email else self.email + self.password = password if password else self.password + + self.temp_dir = os.path.abspath(temp_dir) if temp_dir else self.temp_dir + self.data_dir = ( + data_dir if data_dir else self.data_dir + ) # Data directory for persistent files (e.g., RBAC file) + + self.db_type = "sqlite3" # Default to SQLite + self.install_lock = "true" + + self.network_aliases = ( + network_aliases if network_aliases else self.network_aliases + ) + 
+ self.access_token = None # Optional, can be set later + self.__dict__.update(kwargs) + + self.gitea_base_url = f"http://localhost:{self.port_http}" + + # Validate required parameters + self.validate_dependencies() + + self.gitea_internal_base_url = f"http://{self.container_name}:{self.port_http}" + + + def validate_dependencies(self): + """Validate required parameters.""" + required_params = [ + self.container_name, + self.port_http, + self.port_ssh, + self.image, + self.uid, + self.gid, + ] + if not all(required_params): + raise ValueError( + "Missing required parameters for Gitea container initialization." + ) + + self.logger.info(f"{self.container_name} | Dependencies validated successfully.") + + + def getEnvVars(self): + return { + "USER_UID": self.uid, + "USER_GID": self.gid, + "username": self.username, + "EMAIL": self.email, + "PASSWORD": self.password, + "DB_TYPE": self.db_type, + "INSTALL_LOCK": self.install_lock, + } + + def load_from_env(self): + self.image = os.getenv("GITEA_IMAGE", "gitea/gitea:latest-rootless") + self.container_name = os.getenv("GITEA_CONTAINER_NAME", "gitea") + self.repo_name = os.getenv("REPO_NAME", "permit") + self.temp_dir = os.getenv("TEMP_DIR", "/tmp/permit") + self.data_dir = os.getenv("DATA_DIR", "/tmp/data") + self.port_http = int(os.getenv("GITEA_PORT_HTTP", 3000)) + self.port_ssh = int(os.getenv("GITEA_PORT_SSH", 2222)) + self.uid = int(os.getenv("USER_UID", 1000)) + self.gid = int(os.getenv("USER_GID", 1000)) + self.username = os.getenv("username", "permitAdmin") + self.email = os.getenv("EMAIL", "admin@permit.io") + self.password = os.getenv("PASSWORD", "Aa123456") + self.network_aliases = os.getenv("NETWORK_ALIASES", "gitea") diff --git a/tests/containers/settings/kafka_broadcast_settings.py b/tests/containers/settings/kafka_broadcast_settings.py new file mode 100644 index 00000000..f388e3d2 --- /dev/null +++ b/tests/containers/settings/kafka_broadcast_settings.py @@ -0,0 +1,104 @@ +import os +from 
testcontainers.core.utils import setup_logger
+
+
+class KafkaBroadcastSettings:
+    """Connection settings for the Kafka broadcast channel used by the tests.
+
+    NOTE(review): the constructor fields and env-var names are Postgres-flavored
+    (host/user/password/database, POSTGRES_*) — presumably copied from
+    PostgresBroadcastSettings; confirm the intended contract before renaming.
+    NOTE(review): unlike the sibling settings classes, __init__ never calls
+    load_from_env(), so the kafka_*/zookeeper_* attributes only exist after a
+    caller invokes load_from_env() explicitly.
+    """
+
+    def __init__(self, host, port, user, password, database):
+        self.logger = setup_logger("KafkaBroadcastSettings")
+
+        self.host = host
+        self.port = port
+        self.user = user
+        self.password = password
+        self.database = database
+
+        self.validate_dependencies()
+
+    def validate_dependencies(self):
+        """Validate required connection parameters.
+
+        Raises:
+            ValueError: if any required parameter is falsy.
+        """
+        if not self.host:
+            raise ValueError("POSTGRES_HOST is required.")
+        if not self.port:
+            raise ValueError("POSTGRES_PORT is required.")
+        if not self.user:
+            raise ValueError("POSTGRES_USER is required.")
+        if not self.password:
+            raise ValueError("POSTGRES_PASSWORD is required.")
+        if not self.database:
+            raise ValueError("POSTGRES_DATABASE is required.")
+
+        # FIX: the original logged f"{self.kafka_container_name} | ...", but
+        # kafka_container_name is only assigned in load_from_env(), which is
+        # never called before this point — every construction raised
+        # AttributeError here.
+        self.logger.info(
+            "KafkaBroadcastSettings | Dependencies validated successfully."
+        )
+
+    def getEnvVars(self):
+        """Environment variables to inject into the broadcast container."""
+        return {
+            "POSTGRES_HOST": self.host,
+            "POSTGRES_PORT": self.port,
+            "POSTGRES_USER": self.user,
+            "POSTGRES_PASSWORD": self.password,
+            "POSTGRES_DATABASE": self.database,
+        }
+
+    def load_from_env(self):
+        """Populate connection and Kafka/Zookeeper settings from the environment.
+
+        NOTE(review): only POSTGRES_PORT is coerced to int; the remaining
+        ports/ids keep their int defaults when unset but become strings when
+        read from the environment — callers must tolerate both.
+        """
+        self.host = os.getenv("POSTGRES_HOST", "localhost")
+        self.port = int(os.getenv("POSTGRES_PORT", 5432))
+        self.user = os.getenv("POSTGRES_USER", "postgres")
+        self.password = os.getenv("POSTGRES_PASSWORD", "postgres")
+        self.database = os.getenv("POSTGRES_DATABASE", "postgres")
+
+        self.zookeeper_image_name = os.getenv(
+            "ZOOKEEPER_IMAGE_NAME", "confluentinc/cp-zookeeper:6.2.0"
+        )
+        self.zookeeper_container_name = os.getenv(
+            "ZOOKEEPER_CONTAINER_NAME", "zookeeper"
+        )
+        self.zookeeper_port = os.getenv("ZOOKEEPER_CLIENT_PORT", 2181)
+        self.zookeeper_tick_time = os.getenv("ZOOKEEPER_TICK_TIME", 2000)
+        self.zookeeper_allow_anonymous_login = os.getenv("ALLOW_ANONYMOUS_LOGIN", "yes")
+
+        self.kafka_image_name = os.getenv(
+            "KAFKA_IMAGE_NAME", "confluentinc/cp-kafka:6.2.0"
+        )
+        self.kafka_container_name = os.getenv("KAFKA_CONTAINER_NAME", "kafka")
+        self.kafka_port = os.getenv("KAFKA_CLIENT_PORT", 9092)
+        self.kafka_admin_port = os.getenv("KAFKA_ADMIN_PORT", 29092)
+
+        self.kafka_ui_image_name = os.getenv(
+            "KAFKA_UI_IMAGE_NAME", "provectuslabs/kafka-ui:latest"
+        )
+        self.kafka_ui_container_name = os.getenv("KAFKA_UI_CONTAINER_NAME", "kafka-ui")
+
+        # FIX: kafka_ui_host was referenced below to build the default
+        # kafka_ui_url but was never assigned anywhere, so load_from_env()
+        # always raised AttributeError. Default to localhost (the UI port is
+        # typically published on the host).
+        self.kafka_ui_host = os.getenv("KAFKA_UI_HOST", "localhost")
+        self.kafka_ui_port = os.getenv("KAFKA_UI_PORT", 8080)
+
+        self.kafka_ui_url = os.getenv(
+            "KAFKA_UI_URL", f"http://{self.kafka_ui_host}:{self.kafka_ui_port}"
+        )
+
+        self.broker_id = os.getenv("KAFKA_BROKER_ID", 1)
+        self.zookeeper_connect = os.getenv(
+            "KAFKA_ZOOKEEPER_CONNECT",
+            f"{self.zookeeper_container_name}:{self.zookeeper_port}",
+        )
+        self.offsets_topic_replication_factor = os.getenv(
+            "KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR", 1
+        )
+        self.listener_security_protocol_map = os.getenv(
+            "KAFKA_LISTENER_SECURITY_PROTOCOL_MAP",
+            "PLAINTEXT:PLAINTEXT,PLAINTEXT_HOST:PLAINTEXT",
+        )
+        self.advertised_listeners = os.getenv(
+            "KAFKA_ADVERTISED_LISTENERS",
+            f"PLAINTEXT_HOST://localhost:{self.kafka_admin_port},PLAINTEXT://{self.kafka_container_name}:{self.kafka_port}",
+        )
+        self.allow_plaintext_listener = os.getenv("ALLOW_PLAINTEXT_LISTENER", "yes")
+        self.kafka_topic_auto_create = os.getenv("KAFKA_TOPIC_AUTO_CREATE", "true")
+        self.kafka_transaction_state_log_min_isr = os.getenv(
+            "KAFKA_TRANSACTION_STATE_LOG_MIN_ISR", 1
+        )
+        self.kafka_transaction_state_log_replication_factor = os.getenv(
+            "KAFKA_TRANSACTION_STATE_LOG_REPLICATION_FACTOR", 1
+        )
+        self.kafka_clusters_bootstrapservers = os.getenv(
+            "KAFKA_CLUSTERS_0_BOOTSTRAPSERVERS",
+            f"{self.kafka_container_name}:{self.kafka_port}",
+        )
diff --git a/tests/containers/settings/opal_client_settings.py b/tests/containers/settings/opal_client_settings.py
new file mode 100644
index 00000000..6e8a8e5b
--- /dev/null
+++ b/tests/containers/settings/opal_client_settings.py
@@ -0,0 +1,304 @@
+import os
+
+from testcontainers.core.utils import setup_logger
+
+from tests import utils
+
+
+class
OpalClientSettings: + def __init__( + self, + client_token: str | None = None, + container_name: str | None = None, + port: int | None = None, + opal_server_url: str | None = None, + should_report_on_data_updates: str | None = None, + log_format_include_pid: str | None = None, + tests_debug: bool | None = False, + log_diagnose: str | None = None, + log_level: str | None = None, + debug_enabled: bool | None = None, + debug_port: int | None = None, + image: str | None = None, + opa_port: int | None = None, + default_update_callbacks: str | None = None, + opa_health_check_policy_enabled: str | None = None, + auth_jwt_audience: str | None = None, + auth_jwt_issuer: str | None = None, + statistics_enabled: str | None = None, + policy_store_type: str | None = None, + policy_store_url: str | None = None, + iniline_cedar_enabled: str | None = None, + inline_cedar_exec_path: str | None = None, + inline_cedar_config: str | None = None, + inline_cedar_log_format: str | None = None, + inline_opa_enabled: bool | None = None, + inline_opa_exec_path: str | None = None, + inline_opa_config: str | None = None, + inline_opa_log_format: str | None = None, + uvicorn_asgi_app: str | None = None, + container_index: int = 1, + topics: str | None = None, + public_key: str | None = None, + private_key: str | None = None, + **kwargs, + ): + """ + Args: + client_token: The client token to use for authentication. + container_name: The name of the container. + port: The port to use for the server. + opal_server_url: The URL of the server. + should_report_on_data_updates: Whether to report on data updates. + log_format_include_pid: Whether to include the process ID in the log format. + tests_debug: Whether to run the tests in debug mode. + log_diagnose: Whether to log diagnose information. + log_level: The log level to use. + debug_enabled: Whether to enable debug mode. + debug_port: The port to use for the debug server. + image: The image to use for the container. 
+ opa_port: The port to use for the OPA server. + default_update_callbacks: The default update callbacks to use. + opa_health_check_policy_enabled: Whether to enable the OPA health check policy. + auth_jwt_audience: The JWT audience to use for authentication. + auth_jwt_issuer: The JWT issuer to use for authentication. + statistics_enabled: Whether to enable statistics. + policy_store_type: The policy store type to use. + policy_store_url: The URL of the policy store. + iniline_cedar_enabled: Whether to enable inline Cedar. + inline_cedar_exec_path: The path to the Cedar executable. + inline_cedar_config: The configuration to use for Cedar. + inline_cedar_log_format: The log format to use for Cedar. + inline_opa_enabled: Whether to enable inline OPA. + inline_opa_exec_path: The path to the OPA executable. + inline_opa_config: The configuration to use for OPA. + inline_opa_log_format: The log format to use for OPA. + uvicorn_asgi_app: The ASGI app to use for the server. + container_index: The index of the container. + topics: The topics to use for the server. + public_key: The public key to use for authentication. + private_key: The private key to use for authentication. + **kwargs: Additional keyword arguments. + + Instructions: + To add a setting, add it to the constructor and update the load_from_env() method. + That will initialize the settings from environment variables, or + from a fallback value in the getenv() method. + Then assign your settings to the corresponding variables in the constructor. 
+ + If your var should be passed on to the container as an environment variable + make sure to also add it in the getEnvVars() method + """ + + self.logger = setup_logger("OpalClientSettings") + + self.load_from_env() + + self.image = image if image else self.image + self.container_name = container_name if container_name else self.container_name + self.port = port if port else self.port + self.opal_server_url = ( + opal_server_url if opal_server_url else self.opal_server_url + ) + self.opa_port = opa_port if opa_port else self.opa_port + self.should_report_on_data_updates = ( + should_report_on_data_updates + if should_report_on_data_updates + else self.should_report_on_data_updates + ) + self.log_format_include_pid = ( + log_format_include_pid + if log_format_include_pid + else self.log_format_include_pid + ) + + self.tests_debug = tests_debug if tests_debug else self.tests_debug + self.log_diagnose = log_diagnose if log_diagnose else self.log_diagnose + self.log_level = log_level if log_level else self.log_level + self.debug_enabled = debug_enabled if debug_enabled else self.debug_enabled + self.default_update_callbacks = ( + default_update_callbacks + if default_update_callbacks + else self.default_update_callbacks + ) + self.client_token = client_token if client_token else self.client_token + self.opa_health_check_policy_enabled = ( + opa_health_check_policy_enabled + if opa_health_check_policy_enabled + else self.opa_health_check_policy_enabled + ) + self.auth_jwt_audience = ( + auth_jwt_audience if auth_jwt_audience else self.auth_jwt_audience + ) + self.auth_jwt_issuer = ( + auth_jwt_issuer if auth_jwt_issuer else self.auth_jwt_issuer + ) + self.statistics_enabled = ( + statistics_enabled if statistics_enabled else self.statistics_enabled + ) + self.container_index = ( + container_index if container_index else self.container_index + ) + self.debug_port = debug_port if debug_port else self.debug_port + self.__dict__.update(kwargs) + + self.policy_store_type = 
( + policy_store_type if policy_store_type else self.policy_store_type + ) + self.policy_store_url = ( + policy_store_url if policy_store_url else self.policy_store_url + ) + + self.public_key = public_key if public_key else self.public_key + self.private_key = private_key if private_key else self.private_key + + self.uvicorn_asgi_app = ( + uvicorn_asgi_app if uvicorn_asgi_app else self.uvicorn_asgi_app + ) + + self.iniline_cedar_enabled = ( + iniline_cedar_enabled + if iniline_cedar_enabled + else self.iniline_cedar_enabled + ) + self.inline_cedar_exec_path = ( + inline_cedar_exec_path + if inline_cedar_exec_path + else self.inline_cedar_exec_path + ) + self.inline_cedar_config = ( + inline_cedar_config if inline_cedar_config else self.inline_cedar_config + ) + self.inline_cedar_log_format = ( + inline_cedar_log_format + if inline_cedar_log_format + else self.inline_cedar_log_format + ) + + self.inline_opa_enabled = ( + inline_opa_enabled if inline_opa_enabled else self.inline_opa_enabled + ) + self.inline_opa_exec_path = ( + inline_opa_exec_path if inline_opa_exec_path else self.inline_opa_exec_path + ) + self.inline_opa_config = ( + inline_opa_config if inline_opa_config else self.inline_opa_config + ) + self.inline_opa_log_format = ( + inline_opa_log_format + if inline_opa_log_format + else self.inline_opa_log_format + ) + self.topics = topics if topics else self.topics + + self.validate_dependencies() + + def validate_dependencies(self): + if not self.image: + raise ValueError("OPAL_CLIENT_IMAGE is required.") + if not self.container_name: + raise ValueError("OPAL_CLIENT_CONTAINER_NAME is required.") + if not self.opal_server_url: + raise ValueError("OPAL_SERVER_URL is required.") + + self.logger.info( + f"{self.container_name} | Dependencies validated successfully." 
+ ) + + def getEnvVars(self): + env_vars = { + "OPAL_SERVER_URL": self.opal_server_url, + "OPAL_LOG_FORMAT_INCLUDE_PID": self.log_format_include_pid, + "OPAL_SHOULD_REPORT_ON_DATA_UPDATES": self.should_report_on_data_updates, + "OPAL_DEFAULT_UPDATE_CALLBACKS": self.default_update_callbacks, + "OPAL_OPA_HEALTH_CHECK_POLICY_ENABLED": self.opa_health_check_policy_enabled, + "OPAL_CLIENT_TOKEN": self.client_token, + "OPAL_AUTH_JWT_AUDIENCE": self.auth_jwt_audience, + "OPAL_AUTH_JWT_ISSUER": self.auth_jwt_issuer, + "OPAL_STATISTICS_ENABLED": self.statistics_enabled, + # TODO: make not hardcoded + "OPAL_DATA_TOPICS": self.topics, + "UVICORN_ASGI_APP": self.uvicorn_asgi_app, + "UVICORN_NUM_WORKERS": "1", + "UVICORN_PORT": str(self.port), + "OPAL_AUTH_PUBLIC_KEY": self.public_key, + } + + if self.tests_debug: + env_vars["LOG_DIAGNOSE"] = self.log_diagnose + env_vars["OPAL_LOG_LEVEL"] = self.log_level + + if self.policy_store_type: + env_vars["OPAL_POLICY_STORE_TYPE"] = self.policy_store_type + + if self.policy_store_url: + env_vars["OPAL_POLICY_STORE_URL"] = self.policy_store_url + + if self.inline_opa_enabled: + env_vars["OPAL_INLINE_OPA_ENABLED"] = self.inline_opa_enabled + env_vars["OPAL_INLINE_OPA_EXEC_PATH"] = self.inline_opa_exec_path + env_vars["OPAL_INLINE_OPA_CONFIG"] = self.inline_opa_config + env_vars["OPAL_INLINE_OPA_LOG_FORMAT"] = self.inline_opa_log_format + + if self.iniline_cedar_enabled: + env_vars["OPAL_INILINE_CEDAR_ENABLED"] = self.iniline_cedar_enabled + env_vars["OPAL_INILINE_CEDAR_EXEC_PATH"] = self.inline_cedar_exec_path + env_vars["OPAL_INILINE_CEDAR_CONFIG"] = self.inline_cedar_config + env_vars["OPAL_INILINE_CEDAR_LOG_FORMAT"] = self.inline_cedar_log_format + + return env_vars + + def load_from_env(self): + self.image = os.getenv("OPAL_CLIENT_IMAGE", "opal_client_debug_local") + self.container_name = os.getenv("OPAL_CLIENT_CONTAINER_NAME", "opal_client") + self.port = os.getenv("OPAL_CLIENT_PORT", utils.find_available_port(7000)) + 
self.opal_server_url = os.getenv("OPAL_SERVER_URL", "http://opal_server:7002") + self.opa_port = os.getenv("OPA_PORT", utils.find_available_port(8181)) + self.tests_debug = os.getenv("OPAL_TESTS_DEBUG", "true") + self.log_diagnose = os.getenv("LOG_DIAGNOSE", "true") + self.log_level = os.getenv("OPAL_LOG_LEVEL", "DEBUG") + self.public_key = os.getenv("OPAL_AUTH_PUBLIC_KEY", None) + self.private_key = os.getenv("OPAL_AUTH_PRIVATE_KEY", None) + self.log_format_include_pid = os.getenv("OPAL_LOG_FORMAT_INCLUDE_PID", "true") + self.should_report_on_data_updates = os.getenv( + "OPAL_SHOULD_REPORT_ON_DATA_UPDATES", "true" + ) + self.default_update_callbacks = os.getenv("OPAL_DEFAULT_UPDATE_CALLBACKS", None) + self.opa_health_check_policy_enabled = os.getenv( + "OPAL_OPA_HEALTH_CHECK_POLICY_ENABLED", "true" + ) + self.client_token = os.getenv("OPAL_CLIENT_TOKEN", None) + self.auth_jwt_audience = os.getenv( + "OPAL_AUTH_JWT_AUDIENCE", "https://api.opal.ac/v1/" + ) + self.auth_jwt_issuer = os.getenv("OPAL_AUTH_JWT_ISSUER", "https://opal.ac/") + self.statistics_enabled = os.getenv("OPAL_STATISTICS_ENABLED", "true") + self.debug_enabled = os.getenv("OPAL_DEBUG_ENABLED", True) + self.debug_port = os.getenv( + "CLIENT_DEBUG_PORT", utils.find_available_port(6678) + ) + self.policy_store_url = os.getenv("OPAL_POLICY_STORE_URL", None) + + self.policy_store_type = os.getenv("OPAL_POLICY_STORE_TYPE", "OPA") + + self.uvicorn_asgi_app = os.getenv("UVICORN_ASGI_APP", "opal_client.main:app") + + self.iniline_cedar_enabled = os.getenv("OPAL_INILINE_CEDAR_ENABLED", "false") + self.inline_cedar_exec_path = os.getenv( + "OPAL_INLINE_CEDAR_EXEC_PATH", "/cedar/cedar-agent" + ) + self.inline_cedar_config = os.getenv( + "OPAL_INLINE_CEDAR_CONFIG", '{"addr": "0.0.0.0:8180"}' + ) + self.inline_cedar_log_format = os.getenv("OPAL_INLINE_CEDAR_LOG_FORMAT", "http") + + self.inline_opa_enabled = os.getenv("OPAL_INLINE_OPA_ENABLED", "true") + self.inline_opa_exec_path = 
os.getenv("OPAL_INLINE_OPA_EXEC_PATH", "/opal/opa") + self.inline_opa_config = os.getenv( + "OPAL_INLINE_OPA_CONFIG", None #'{"addr": "0.0.0.0:8181"}' + ) + self.inline_opa_log_format = os.getenv("OPAL_INLINE_OPA_LOG_FORMAT", "http") + self.topics = os.getenv("OPAL_DATA_TOPICS", "policy_data") + + if not self.private_key or not self.public_key: + self.private_key, self.public_key = utils.generate_ssh_key_pair() diff --git a/tests/containers/settings/opal_server_settings.py b/tests/containers/settings/opal_server_settings.py new file mode 100644 index 00000000..059866bd --- /dev/null +++ b/tests/containers/settings/opal_server_settings.py @@ -0,0 +1,262 @@ +import json +import os +from secrets import token_hex + +from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.settings import pytest_settings + + +class OpalServerSettings: + def __init__( + self, + container_name: str = None, + port: int = None, + uvicorn_workers: str = None, + policy_repo_url: str = None, + polling_interval: str = None, + private_key: str = None, + public_key: str = None, + master_token: str = None, + data_topics: str = None, + auth_audience: str = None, + auth_issuer: str = None, + tests_debug: bool = False, + log_diagnose: str = None, + log_level: str = None, + log_format_include_pid: bool = None, + statistics_enabled: bool = None, + debug_enabled: bool = None, + debug_port: int = None, + auth_private_key_passphrase: str = None, + policy_repo_main_branch: str = None, + image: str = None, + broadcast_uri: str = None, + webhook_secret: str = None, + webhook_params: str = None, + uvicorn_asgi_app: str = None, + uvicorn_port: int = None, + all_data_url: str = None, + policy_repo_reuse_clone_path: bool = None, + container_index: int = 1, + **kwargs, + ): + """Initialize the OPAL Server with the provided parameters. + + :param image: Docker image for the OPAL server. + :param container_name: Name of the Docker container. 
+ :param network_name: Name of the Docker network to attach. + :param port: Exposed port for the OPAL server. + :param uvicorn_workers: Number of Uvicorn workers. + :param policy_repo_url: URL of the policy repository. + :param polling_interval: Polling interval for the policy + repository. + :param private_key: SSH private key for authentication. + :param public_key: SSH public key for authentication. + :param master_token: Master token for OPAL authentication. + :param data_topics: Data topics for OPAL configuration. + :param broadcast_uri: Optional URI for the broadcast channel. + :param auth_audience: Optional audience for authentication. + :param auth_issuer: Optional issuer for authentication. + :param tests_debug: Optional flag for tests debug mode. + :param log_diagnose: Optional flag for log diagnosis. + :param log_level: Optional log level for the OPAL server. + :param log_format_include_pid: Optional flag for including PID + in log format. + :param statistics_enabled: Optional flag for enabling + statistics. + :param debug_enabled: Optional flag for enabling debug mode with + debugpy. + :param debug_port: Optional port for debugpy. + :param auth_private_key_passphrase: Optional passphrase for the + private key. + :param policy_repo_main_branch: Optional main branch for the + policy repository. + :param webhook_secret: Optional secret for the webhook. + :param webhook_params: Optional parameters for the webhook. + :param uvicorn_asgi_app: Optional ASGI app for Uvicorn. + :param uvicorn_port: Optional port for Uvicorn. + :param all_data_url: Optional URL for all data. + :param policy_repo_reuse_clone_path: Optional flag for reusing + the clone path for the policy repository. + :param container_index: Optional index for the container. + :param kwargs: Additional keyword arguments. 
+ """ + + self.logger = setup_logger(__name__) + + self.load_from_env() + + self.image = image if image else self.image + self.container_name = container_name if container_name else self.container_name + self.port = port if port else self.port + self.uvicorn_workers = ( + uvicorn_workers if uvicorn_workers else self.uvicorn_workers + ) + self.policy_repo_url = ( + policy_repo_url if policy_repo_url else self.policy_repo_url + ) + self.polling_interval = ( + polling_interval if polling_interval else self.polling_interval + ) + self.private_key = private_key if private_key else self.private_key + self.public_key = public_key if public_key else self.public_key + self.master_token = master_token if master_token else self.master_token + self.data_topics = data_topics if data_topics else self.data_topics + self.broadcast_uri = broadcast_uri if broadcast_uri else self.broadcast_uri + self.auth_audience = auth_audience if auth_audience else self.auth_audience + self.auth_issuer = auth_issuer if auth_issuer else self.auth_issuer + self.tests_debug = tests_debug if tests_debug else self.tests_debug + self.log_diagnose = log_diagnose if log_diagnose else self.log_diagnose + self.log_level = log_level if log_level else self.log_level + self.log_format_include_pid = ( + log_format_include_pid + if log_format_include_pid + else self.log_format_include_pid + ) + self.statistics_enabled = ( + statistics_enabled if statistics_enabled else self.statistics_enabled + ) + self.debugEnabled = debug_enabled if debug_enabled else self.debugEnabled + self.debug_port = debug_port if debug_port else self.debug_port + self.auth_private_key_passphrase = ( + auth_private_key_passphrase + if auth_private_key_passphrase + else self.auth_private_key_passphrase + ) + self.policy_repo_main_branch = ( + policy_repo_main_branch + if policy_repo_main_branch + else self.policy_repo_main_branch + ) + + self.uvicorn_asgi_app = ( + uvicorn_asgi_app if uvicorn_asgi_app else self.uvicorn_asgi_app + ) + 
self.uvicorn_port = uvicorn_port if uvicorn_port else self.uvicorn_port + self.all_data_url = all_data_url if all_data_url else self.all_data_url + self.policy_repo_reuse_clone_path = ( + policy_repo_reuse_clone_path + if policy_repo_reuse_clone_path + else self.policy_repo_reuse_clone_path + ) + + self.container_index = ( + container_index if container_index else self.container_index + ) + + self.webhook_secret = webhook_secret if webhook_secret else self.webhook_secret + self.webhook_params = webhook_params if webhook_params else self.webhook_params + + self.__dict__.update(kwargs) + + self.validate_dependencies() + + def validate_dependencies(self): + """Validate required dependencies before starting the server.""" + if not self.policy_repo_url: + raise ValueError("OPAL_POLICY_REPO_URL is required.") + if not self.private_key or not self.public_key: + raise ValueError("SSH private and public keys are required.") + if not self.master_token: + raise ValueError("OPAL master token is required.") + self.logger.info( + f"{self.container_name} | Dependencies validated successfully." 
+ ) + + def getEnvVars(self): + # Configure environment variables + + env_vars = { + "UVICORN_NUM_WORKERS": self.uvicorn_workers, + "OPAL_POLICY_REPO_URL": self.policy_repo_url, + "OPAL_POLICY_REPO_MAIN_BRANCH": self.policy_repo_main_branch, + "OPAL_POLICY_REPO_POLLING_INTERVAL": self.polling_interval, + "OPAL_AUTH_PRIVATE_KEY": self.private_key, + "OPAL_AUTH_PUBLIC_KEY": self.public_key, + "OPAL_AUTH_MASTER_TOKEN": self.master_token, + "OPAL_DATA_CONFIG_SOURCES": f"""{{"config":{{"entries":[{{"url":"http://{self.container_name}:7002/policy-data","topics":["{self.data_topics}"],"dst_path":"/static"}}]}}}}""", + "OPAL_LOG_FORMAT_INCLUDE_PID": self.log_format_include_pid, + "OPAL_STATISTICS_ENABLED": self.statistics_enabled, + "OPAL_AUTH_JWT_AUDIENCE": self.auth_audience, + "OPAL_AUTH_JWT_ISSUER": self.auth_issuer, + "UVICORN_ASGI_APP": self.uvicorn_asgi_app, + "UVICORN_PORT": self.uvicorn_port, + "OPAL_ALL_DATA_URL": self.all_data_url, + "OPAL_POLICY_REPO_REUSE_CLONE_PATH": self.policy_repo_reuse_clone_path, + } + + if pytest_settings.use_webhook: + env_vars["OPAL_WEBHOOK_SECRET"] = self.webhook_secret + env_vars["OPAL_WEBHOOK_PARAMS"] = self.webhook_params + + if self.tests_debug: + env_vars["LOG_DIAGNOSE"] = self.log_diagnose + env_vars["OPAL_LOG_LEVEL"] = self.log_level + + if self.auth_private_key_passphrase: + env_vars[ + "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE" + ] = self.auth_private_key_passphrase + + if self.broadcast_uri: + env_vars["OPAL_BROADCAST_URI"] = self.broadcast_uri + + return env_vars + + def load_from_env(self): + self.image = os.getenv("OPAL_SERVER_IMAGE", "opal_server_debug_local") + self.container_name = os.getenv("OPAL_SERVER_CONTAINER_NAME", None) + self.port = os.getenv("OPAL_SERVER_PORT", utils.find_available_port(7002)) + self.uvicorn_workers = os.getenv("OPAL_SERVER_UVICORN_WORKERS", "1") + self.policy_repo_url = os.getenv("OPAL_POLICY_REPO_URL", None) + self.polling_interval = os.getenv("OPAL_POLICY_REPO_POLLING_INTERVAL", "30") + 
self.private_key = os.getenv("OPAL_AUTH_PRIVATE_KEY", None) + self.public_key = os.getenv("OPAL_AUTH_PUBLIC_KEY", None) + self.master_token = os.getenv("OPAL_AUTH_MASTER_TOKEN", token_hex(16)) + self.data_topics = os.getenv("OPAL_DATA_TOPICS", "policy_data") + self.broadcast_uri = os.getenv("OPAL_BROADCAST_URI", None) + self.auth_audience = os.getenv( + "OPAL_AUTH_JWT_AUDIENCE", "https://api.opal.ac/v1/" + ) + self.auth_issuer = os.getenv("OPAL_AUTH_JWT_ISSUER", "https://opal.ac/") + self.tests_debug = os.getenv("OPAL_TESTS_DEBUG", "true") + self.log_diagnose = os.getenv("LOG_DIAGNOSE", "true") + self.log_level = os.getenv("OPAL_LOG_LEVEL", "INFO") + self.log_format_include_pid = os.getenv("OPAL_LOG_FORMAT_INCLUDE_PID", "true") + self.statistics_enabled = os.getenv("OPAL_STATISTICS_ENABLED", "true") + self.debugEnabled = os.getenv("OPAL_DEBUG_ENABLED", "true") + self.auth_private_key_passphrase = os.getenv( + "OPAL_AUTH_PRIVATE_KEY_PASSPHRASE", None + ) + self.policy_repo_main_branch = os.getenv( + "OPAL_POLICY_REPO_MAIN_BRANCH", "master" + ) + self.debug_port = os.getenv( + "SERVER_DEBUG_PORT", utils.find_available_port(5678) + ) + self.webhook_secret = os.getenv("OPAL_POLICY_REPO_WEBHOOK_SECRET", "P3rm1t10") + self.webhook_params = os.getenv( + "OPAL_POLICY_REPO_WEBHOOK_PARAMS", + json.dumps( + { + "secret_header_name": "x-webhook-token", + "secret_type": "token", + "secret_parsing_regex": "(.*)", + "event_request_key": "gitEvent", + "push_event_value": "git.push", + } + ), + ) + self.all_data_url = os.getenv("OPAL_ALL_DATA_URL", None) + self.policy_repo_reuse_clone_path = os.getenv( + "OPAL_POLICY_REPO_REUSE_CLONE_PATH", "true" + ) + self.uvicorn_asgi_app = os.getenv( + "OPAL_SERVER_UVICORN_ASGI_APP", "opal_server.main:app" + ) + self.uvicorn_port = os.getenv("OPAL_SERVER_UVICORN_PORT", "7002") + + if not self.private_key or not self.public_key: + self.private_key, self.public_key = utils.generate_ssh_key_pair() diff --git 
a/tests/containers/settings/postgres_broadcast_settings.py b/tests/containers/settings/postgres_broadcast_settings.py new file mode 100644 index 00000000..09f3ba4d --- /dev/null +++ b/tests/containers/settings/postgres_broadcast_settings.py @@ -0,0 +1,61 @@ +import os +from testcontainers.core.utils import setup_logger + + +class PostgresBroadcastSettings: + def __init__( + self, + container_name: str | None = None, + host: str | None = None, + port: int | None = None, + user: str | None = None, + password: str | None = None, + database: str | None = None, + ): + + self.logger = setup_logger("PostgresBroadcastSettings") + + self.load_from_env() + + self.container_name = container_name if container_name else self.container_name + self.host = host if host else self.host + self.port = port if port else self.port + self.user = user if user else self.user + self.password = password if password else self.password + self.database = database if database else self.database + self.protocol = "postgres" + + self.validate_dependencies() + + def validate_dependencies(self): + """Validate required dependencies before starting the server.""" + if not self.host: + raise ValueError("POSTGRES_HOST is required.") + if not self.port: + raise ValueError("POSTGRES_PORT is required.") + if not self.user: + raise ValueError("POSTGRES_USER is required.") + if not self.password: + raise ValueError("POSTGRES_PASSWORD is required.") + if not self.database: + raise ValueError("POSTGRES_DATABASE is required.") + + self.logger.info(f"{self.container_name} | Dependencies validated successfully.") + + + def getEnvVars(self): + return { + "POSTGRES_HOST": self.host, + "POSTGRES_PORT": self.port, + "POSTGRES_USER": self.user, + "POSTGRES_PASSWORD": self.password, + "POSTGRES_DATABASE": self.database, + } + + def load_from_env(self): + self.host = os.getenv("POSTGRES_HOST", "localhost") + self.port = int(os.getenv("POSTGRES_PORT", 5432)) + self.user = os.getenv("POSTGRES_USER", "postgres") + 
self.password = os.getenv("POSTGRES_PASSWORD", "postgres") + self.database = os.getenv("POSTGRES_DATABASE", "postgres") + self.container_name = os.getenv("POSTGRES_CONTAINER_NAME", "broadcast_channel") diff --git a/tests/containers/zookeeper_container.py b/tests/containers/zookeeper_container.py new file mode 100644 index 00000000..aec6327e --- /dev/null +++ b/tests/containers/zookeeper_container.py @@ -0,0 +1,40 @@ +import debugpy +from testcontainers.core.container import DockerContainer +from testcontainers.core.network import Network + +import docker +from tests.containers.opal_test_container import OpalTestContainer + + +class ZookeeperContainer(OpalTestContainer, DockerContainer): + def __init__( + self, + network: Network, + docker_client_kw: dict | None = None, + **kwargs, + ) -> None: + # Add custom labels to the kwargs + labels = kwargs.get("labels", {}) + labels.update({"com.docker.compose.project": "pytest"}) + kwargs["labels"] = labels + + self.network = network + + OpalTestContainer.__init__(self) + DockerContainer.__init__( + self, + image="confluentinc/cp-zookeeper:latest", + docker_client_kw=docker_client_kw, + **kwargs, + ) + + self.with_bind_ports(2181, 2181) + self.with_env("ZOOKEEPER_CLIENT_PORT", "2181") + self.with_env("ZOOKEEPER_TICK_TIME", "2000") + self.with_env("ALLOW_ANONYMOUS_LOGIN", "yes") + + self.with_network(self.network) + + self.with_network_aliases("zookeper") + # Add a custom name for the container + self.with_name(f"zookeeper") diff --git a/tests/docker/Dockerfile.cedar b/tests/docker/Dockerfile.cedar new file mode 100644 index 00000000..5715a820 --- /dev/null +++ b/tests/docker/Dockerfile.cedar @@ -0,0 +1,32 @@ +# CEDAR AGENT BUILD STAGE --------------------------- +# This stage compiles the Cedar agent +# --------------------------------------------------- + FROM rust:1.79 AS cedar-builder + + # Copy Cedar agent source code + COPY ./cedar-agent /tmp/cedar-agent + WORKDIR /tmp/cedar-agent + + # Build the Cedar agent in release 
mode + RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release + + # CEDAR AGENT IMAGE --------------------------------- + # The final image with the Cedar agent executable + # --------------------------------------------------- + FROM alpine:latest AS cedar-agent + + # Create a non-root user for running the agent + RUN adduser -D cedar && mkdir -p /cedar && chown cedar:cedar /cedar + USER cedar + + # Copy the Cedar agent binary from the build stage + COPY --from=cedar-builder /tmp/cedar-agent/target/*/cedar-agent /cedar/cedar-agent + + # Expose Cedar agent port + EXPOSE 8180 + + # Set default working directory + WORKDIR /cedar + + # Set the default command + CMD ["/cedar/cedar-agent"] diff --git a/tests/docker/Dockerfile.client b/tests/docker/Dockerfile.client new file mode 100644 index 00000000..810f3c1c --- /dev/null +++ b/tests/docker/Dockerfile.client @@ -0,0 +1,17 @@ +FROM permitio/opal-client:latest + +# Install debugpy +RUN pip install debugpy + +# Set up Gunicorn to include debugpy (or switch to Uvicorn for debugging) +USER root + +WORKDIR /opal + +COPY start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +USER opal + +CMD ["./start_debug.sh"] diff --git a/tests/docker/Dockerfile.client.local b/tests/docker/Dockerfile.client.local new file mode 100644 index 00000000..093c90fb --- /dev/null +++ b/tests/docker/Dockerfile.client.local @@ -0,0 +1,76 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . 
+RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . +RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- +FROM common AS client-standalone + +# install the opal-client package +RUN cd ./packages/opal-client && python setup.py install + +USER opal + +RUN mkdir -p /opal/backup +VOLUME /opal/backup diff --git a/tests/docker/Dockerfile.client_cedar.local b/tests/docker/Dockerfile.client_cedar.local new file mode 100644 index 00000000..2fcab94f --- /dev/null +++ b/tests/docker/Dockerfile.client_cedar.local @@ -0,0 +1,111 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev 
procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +FROM rust:1.79 AS cedar-builder + +# Copy Cedar agent source code +COPY ./cedar-agent /tmp/cedar-agent +WORKDIR /tmp/cedar-agent + +# Build the Cedar agent in release mode +RUN CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse cargo build --release + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . +RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- + FROM common AS client-standalone + + # install the opal-client package + RUN cd ./packages/opal-client && python setup.py install + + USER opal + + RUN mkdir -p /opal/backup + VOLUME /opal/backup + + # CEDAR CLIENT IMAGE -------------------------------- +# Using standalone image as base -------------------- +# --------------------------------------------------- +FROM client-standalone AS client-cedar + +# Temporarily move back to root for additional setup +USER root + +# Copy cedar from its build stage +COPY --from=cedar-builder /tmp/cedar-agent/target/*/cedar-agent /bin/cedar-agent + +ENV UVICORN_NUM_WORKERS=1 +ENV UVICORN_ASGI_APP=opal_client.main:app +ENV UVICORN_PORT=7000 + +# enable inline Cedar agent +ENV OPAL_POLICY_STORE_TYPE=CEDAR +ENV OPAL_INLINE_CEDAR_ENABLED=true +ENV OPAL_INLINE_CEDAR_EXEC_PATH=/bin/cedar-agent +ENV OPAL_INLINE_CEDAR_CONFIG='{"addr": "0.0.0.0:8180"}' +ENV OPAL_POLICY_STORE_URL=http://localhost:8180 + +# expose cedar port +EXPOSE 8180 +USER opal diff --git a/tests/docker/Dockerfile.client_opa.local b/tests/docker/Dockerfile.client_opa.local new file mode 100644 index 00000000..903f0d81 --- /dev/null +++ b/tests/docker/Dockerfile.client_opa.local @@ -0,0 +1,106 @@ +# Dockerfile.client + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install 
--no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . +RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . +# copy app code + +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY ./tests/start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# STANDALONE IMAGE ---------------------------------- +# --------------------------------------------------- + FROM common AS client-standalone + + # install the opal-client package + RUN cd ./packages/opal-client && python setup.py install + + USER opal + + RUN mkdir -p /opal/backup + VOLUME /opal/backup + + # IMAGE to extract OPA from official image ---------- + # --------------------------------------------------- + FROM alpine:latest AS opa-extractor + USER root + + RUN apk update && apk add skopeo tar + WORKDIR /opal + + # copy opa from official docker image + ARG opa_image=openpolicyagent/opa + ARG opa_tag=latest-static + RUN skopeo copy "docker://${opa_image}:${opa_tag}" docker-archive:./image.tar && \ + mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ + find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ + rm -r image image.tar + + + # OPA CLIENT IMAGE ---------------------------------- + # Using standalone image as base -------------------- + # --------------------------------------------------- + FROM client-standalone AS client + + # Temporarily move back to root for additional setup + USER root + + # copy opa from opa-extractor + COPY --from=opa-extractor /opal/opa ./opa + + USER opal diff --git a/tests/docker/Dockerfile.opa b/tests/docker/Dockerfile.opa new file mode 100644 index 00000000..89a6f63d --- /dev/null +++ b/tests/docker/Dockerfile.opa @@ -0,0 +1,36 @@ +# OPA EXTRACTOR STAGE -------------------------------- +# This stage extracts the OPA binary from the official OPA image +# ----------------------------------------------------- + FROM alpine:latest AS opa-extractor + + # Install necessary tools for extracting the OPA binary + RUN apk update && apk add --no-cache skopeo tar + + # Define working directory + WORKDIR /opa + + # 
Copy OPA binary from the official OPA image + ARG OPA_IMAGE=openpolicyagent/opa + ARG OPA_TAG=latest-static + RUN skopeo copy "docker://${OPA_IMAGE}:${OPA_TAG}" docker-archive:./image.tar && \ + mkdir image && tar xf image.tar -C ./image && cat image/*.tar | tar xf - -C ./image -i && \ + find image/ -name "opa*" -type f -executable -print0 | xargs -0 -I "{}" cp {} ./opa && chmod 755 ./opa && \ + rm -r image image.tar + + # STANDALONE OPA CONTAINER ---------------------------- + # This is the final image with the extracted OPA binary + # ----------------------------------------------------- + FROM alpine:latest AS opa + + # Create a non-root user for running OPA + RUN adduser -D opa && mkdir -p /opa && chown opa:opa /opa + USER opa + + # Copy the OPA binary from the extractor stage + COPY --from=opa-extractor /opa/opa /opa/opa + + # Set the working directory + WORKDIR /opa + + # Set the default command to run the OPA server + CMD ["/opa/opa", "run", "--server", "--log-level", "info"] diff --git a/tests/docker/Dockerfile.server b/tests/docker/Dockerfile.server new file mode 100644 index 00000000..9a6dd30f --- /dev/null +++ b/tests/docker/Dockerfile.server @@ -0,0 +1,17 @@ +FROM permitio/opal-server:latest + +# Install debugpy +RUN pip install debugpy + +# Set up Gunicorn to include debugpy (or switch to Uvicorn for debugging) +USER root + +WORKDIR /opal + +COPY start_debug.sh . 
+RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +USER opal + +CMD ["./start_debug.sh"] diff --git a/tests/docker/Dockerfile.server.local b/tests/docker/Dockerfile.server.local new file mode 100644 index 00000000..d4056ee2 --- /dev/null +++ b/tests/docker/Dockerfile.server.local @@ -0,0 +1,102 @@ +# Dockerfile.server + +# BUILD IMAGE +FROM python:3.10-bookworm AS build-stage +# from now on, work in the /app directory +WORKDIR /app/ +# Layer dependency install (for caching) +COPY packages/requires.txt ./base_requires.txt +COPY packages/opal-common/requires.txt ./common_requires.txt +COPY packages/opal-client/requires.txt ./client_requires.txt +COPY packages/opal-server/requires.txt ./server_requires.txt + +RUN apt-get update && apt-get install -y gcc python3-dev procps sudo && apt-get clean + +# install python deps +RUN pip install --no-cache-dir --upgrade pip && pip install --no-cache-dir -r ./base_requires.txt -r ./common_requires.txt -r ./client_requires.txt -r ./server_requires.txt + +# Install debugpy +RUN pip install debugpy + +# COMMON IMAGE +FROM python:3.10-slim-bookworm AS common + +# copy libraries from build stage (This won't copy redundant libraries we used in build-stage) +# also remove the default python site-packages that has older versions of packages that won't be overridden +RUN rm -r /usr/local/lib/python3.10/site-packages +COPY --from=build-stage /usr/local /usr/local + +# Add non-root user (with home dir at /opal) +RUN useradd -m -b / -s /bin/bash opal +WORKDIR /opal + +# copy wait-for script (create link at old path to maintain backward compatibility) +COPY scripts/wait-for.sh . 
+RUN chmod +x ./wait-for.sh +RUN ln -s /opal/wait-for.sh /usr/wait-for.sh + +# netcat (nc) is used by the wait-for.sh script +RUN apt-get update && apt-get install -y netcat-traditional jq && apt-get clean +# Install sudo for Debian/Ubuntu-based images +RUN apt-get update && apt-get install -y sudo && apt-get clean + +# copy startup script (create link at old path to maintain backward compatibility) +COPY scripts/start.sh . +RUN chmod +x ./start.sh +RUN ln -s /opal/start.sh /start.sh + +# copy gunicorn_config +COPY scripts/gunicorn_conf.py . + +# copy app code +COPY README.md . +COPY packages ./packages/ +# install the opal-common package +RUN cd ./packages/opal-common && python setup.py install +# Make sure scripts in .local are usable: +ENV PATH=/opal:/root/.local/bin:$PATH + +#add on top of the regular start script the debug one +COPY tests/start_debug.sh . +RUN chmod +x start_debug.sh +RUN ln -s /opal/start_debug.sh /start_debug.sh + +# run gunicorn +CMD ["./start_debug.sh"] + +# SERVER IMAGE -------------------------------------- +# --------------------------------------------------- +FROM common AS server + +RUN apt-get update && apt-get install -y openssh-client git && apt-get clean +RUN git config --global core.symlinks false # Mitigate CVE-2024-32002 + +USER opal + +# Potentially trust POLICY REPO HOST ssh signature -- +# opal trackes a remote (git) repository and fetches policy (e.g rego) from it. +# however, if the policy repo uses an ssh url scheme, authentication to said repo +# is done via ssh, and without adding the repo remote host (i.e: github.com) to +# the ssh known hosts file, ssh will issue output an interactive prompt that +# looks something like this: +# The authenticity of host 'github.com (192.30.252.131)' can't be established. +# RSA key fingerprint is 16:27:ac:a5:76:28:1d:52:13:1a:21:2d:bz:1d:66:a8. +# Are you sure you want to continue connecting (yes/no)? 
+# if the docker build arg `TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT` is set to `true` +# (default), the host specified by `POLICY_REPO_HOST` build arg (i.e: `github.com`) +# will be added to the known ssh hosts file at build time and prevent said prompt +# from showing. +ARG TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT="true" +ARG POLICY_REPO_HOST="github.com" + +RUN if [ "$TRUST_POLICY_REPO_HOST_SSH_FINGERPRINT" = "true" ] ; then \ + mkdir -p ~/.ssh && \ + chmod 0700 ~/.ssh && \ + ssh-keyscan -t rsa ${POLICY_REPO_HOST} >> ~/.ssh/known_hosts ; fi + +USER root + +# install the opal-server package +RUN cd ./packages/opal-server && python setup.py install + +USER opal diff --git a/tests/fixtures/__init__.py b/tests/fixtures/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/fixtures/broadcasters.py b/tests/fixtures/broadcasters.py new file mode 100644 index 00000000..7b0a1a5f --- /dev/null +++ b/tests/fixtures/broadcasters.py @@ -0,0 +1,111 @@ +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.kafka_broadcast_container import KafkaBroadcastContainer +from tests.containers.kafka_ui_container import KafkaUIContainer +from tests.containers.postgres_broadcast_container import PostgresBroadcastContainer +from tests.containers.redis_broadcast_container import RedisBroadcastContainer +from tests.containers.redis_ui_container import RedisUIContainer +from tests.containers.settings.postgres_broadcast_settings import ( + PostgresBroadcastSettings, +) +from tests.containers.zookeeper_container import ZookeeperContainer + +logger = setup_logger(__name__) + + +@pytest.fixture(scope="session") +def postgres_broadcast_channel(opal_network: Network): + """Fixture that yields a running Postgres broadcast channel container. + + The container is started once and kept running throughout the entire + test session. 
It is stopped once all tests have finished running, + unless an exception is raised during teardown. + """ + try: + container = PostgresBroadcastContainer( + network=opal_network, + settings=PostgresBroadcastSettings() + ) + yield container + + try: + if container.get_wrapped_container().status == "running": + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container.settings.container_name}") + return + + except Exception as e: + logger.error( + f"Failed on container: {container.settings.container_name} with error: {e} {e.__traceback__}" + ) + return + + +@pytest.fixture(scope="session") +def kafka_broadcast_channel(opal_network: Network): + """Fixture that sets up a Kafka broadcast channel for testing purposes. + + This fixture initializes a Zookeeper container, a Kafka container, + and a Kafka UI container, connecting them to the specified network. + It yields a list of these containers, which remain running + throughout the test session. At the end of the session, it attempts + to stop each container, logging an error if any container fails to + stop. + """ + + with ZookeeperContainer(opal_network) as zookeeper_container: + with KafkaBroadcastContainer( + opal_network, zookeeper_container + ) as kafka_container: + with KafkaUIContainer(opal_network, kafka_container) as kafka_ui_container: + containers = [zookeeper_container, kafka_container, kafka_ui_container] + yield containers + + for container in containers: + try: + container.stop() + except Exception: + logger.error(f"Failed to stop container: {container}") + return + + +@pytest.fixture(scope="session") +def redis_broadcast_channel(opal_network: Network): + """Fixture that yields a running redis broadcast channel container. + + The fixture starts a redis broadcast container and a redis ui + container. The yield value is a list of the two containers. The + fixture stops the containers after the test is done. 
+ """ + with RedisBroadcastContainer(opal_network) as redis_container: + with RedisUIContainer(opal_network, redis_container) as redis_ui_container: + containers = [redis_container, redis_ui_container] + yield containers + + for container in containers: + try: + container.stop() + except Exception: + logger.error(f"Failed to stop containers: {container}") + return + + +@pytest.fixture(scope="session") +def broadcast_channel(opal_network: Network, postgres_broadcast_channel): + """Fixture that yields a running broadcast channel container. + + The container is started once and kept running throughout the entire + test session. It is stopped once all tests have finished running, + unless an exception is raised during teardown. + """ + + yield postgres_broadcast_channel + + try: + postgres_broadcast_channel.stop() + except Exception: + logger.error(f"Failed to stop containers: {postgres_broadcast_channel}") + return diff --git a/tests/fixtures/images.py b/tests/fixtures/images.py new file mode 100644 index 00000000..fcc7964a --- /dev/null +++ b/tests/fixtures/images.py @@ -0,0 +1,91 @@ +import pytest + +import docker +from tests import utils +from tests.settings import pytest_settings, session_matrix + + +@pytest.fixture(scope="session") +def opal_server_image(session_matrix): + """Builds a Docker image for the OPAL server in debug mode. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL server image for the + tests. + """ + + if pytest_settings.do_not_build_images: + yield "permitio/opal-server:latest" + return + + image_name = "opal_server_debug_local:latest" + yield from utils.build_docker_image( + "Dockerfile.server.local", image_name, session_matrix + ) + + +@pytest.fixture(scope="session") +def opa_image(session_matrix): + """Builds a Docker image containing the Open Policy Agent (OPA) binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPA image for the tests. 
+ """ + image_name = "opa" + + yield from utils.build_docker_image("Dockerfile.opa", image_name, session_matrix) + + +@pytest.fixture(scope="session") +def cedar_image(session_matrix): + """Builds a Docker image containing the Cedar binary. + + Yields the name of the built image. + + This fixture is used to provide a working Cedar image for the tests. + """ + image_name = "cedar" + + yield from utils.build_docker_image("Dockerfile.cedar", image_name, session_matrix) + + +@pytest.fixture(scope="session") +def opal_client_image(session_matrix): + """Builds a Docker image containing the OPAL client binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL client image for the + tests. + """ + if pytest_settings.do_not_build_images: + yield "permitio/opal-client:latest" + return + + image_name = "opal_client_debug_local:latest" + + yield from utils.build_docker_image( + "Dockerfile.client.local", image_name, session_matrix + ) + + +@pytest.fixture(scope="session") +def opal_client_with_opa_image(session_matrix): + """Builds a Docker image containing the OPAL client binary. + + Yields the name of the built image. + + This fixture is used to provide a working OPAL client image for the + tests. 
+ """ + if pytest_settings.do_not_build_images: + yield "permitio/opal-client:latest" + return + + image_name = "opal_client_with_opa_debug_local:latest" + + yield from utils.build_docker_image( + "Dockerfile.client_opa.local", image_name, session_matrix + ) diff --git a/tests/fixtures/policy_repos.py b/tests/fixtures/policy_repos.py new file mode 100644 index 00000000..d6d08dc5 --- /dev/null +++ b/tests/fixtures/policy_repos.py @@ -0,0 +1,111 @@ +import os + +import pytest +from testcontainers.core.network import Network +from testcontainers.core.utils import setup_logger + +from tests.containers.gitea_container import GiteaContainer +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_factory import ( + PolicyRepoFactory, + SupportedPolicyRepo, +) +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings +from tests.settings import pytest_settings + +logger = setup_logger(__name__) + + +@pytest.fixture(scope="session") +def gitea_settings(): + """Returns a GiteaSettings object with default values for the Gitea + container name, repository name, temporary directory, and data directory. + + This fixture is used to create a Gitea container for testing and to + initialize the repository settings for the policy repository. + + :return: A GiteaSettings object with default settings. + """ + return GiteaSettings( + container_name="gitea_server", + repo_name="test_repo", + temp_dir=os.path.join(os.path.dirname(__file__), "temp"), + data_dir=os.path.join(os.path.dirname(__file__), "../policies"), + ) + + +@pytest.fixture(scope="session") +def gitea_server(opal_network: Network, gitea_settings: GiteaSettings): + """Creates a Gitea container and initializes a test repository. + + The Gitea container is created with the default settings for the + container name, repository name, temporary directory, and data + directory. 
The container is then started and the test repository is + initialized. + + The fixture yields the GiteaContainer object, which can be used to + interact with the Gitea container. + + :param opal_network: The network to create the container on. + :param gitea_settings: The settings for the Gitea container. + :return: The GiteaContainer object. + """ + with GiteaContainer( + settings=gitea_settings, + network=opal_network, + ) as gitea_container: + gitea_container.deploy_gitea() + gitea_container.init_repo() + yield gitea_container + + +@pytest.fixture(scope="session") +def policy_repo( + gitea_settings: GiteaSettings, temp_dir: str, request +) -> PolicyRepoBase: + """Creates a policy repository for testing. + + This fixture creates a policy repository based on the configuration + specified in pytest.ini. The repository is created with the default + branch name "master" and is initialized with the policies from the + source repository specified in pytest.ini. + + The fixture yields the PolicyRepoBase object, which can be used to + interact with the policy repository. + + :param gitea_settings: The settings for the Gitea container. + :param temp_dir: The temporary directory to use for the policy + repository. + :param request: The pytest request object. + :return: The PolicyRepoBase object. 
+ """ + if pytest_settings.policy_repo_provider == SupportedPolicyRepo.GITEA: + gitea_server = request.getfixturevalue("gitea_server") + + repo_settings = PolicyRepoSettings( + temp_dir, + pytest_settings.repo_owner, + pytest_settings.repo_name, + "master", + gitea_settings.container_name, + gitea_settings.port_http, + gitea_settings.port_ssh, + pytest_settings.repo_password, + None, + pytest_settings.ssh_key_path, + pytest_settings.source_repo_owner, + pytest_settings.source_repo_name, + True, + True, + pytest_settings.webhook_secret, + ) + policy_repo = PolicyRepoFactory( + pytest_settings.policy_repo_provider + ).get_policy_repo( + repo_settings, + logger, + ) + + policy_repo.setup(gitea_settings) + return policy_repo diff --git a/tests/fixtures/policy_stores.py b/tests/fixtures/policy_stores.py new file mode 100644 index 00000000..c07980e7 --- /dev/null +++ b/tests/fixtures/policy_stores.py @@ -0,0 +1,89 @@ +import pytest +from testcontainers.core.network import Network + +from tests.containers.cedar_container import CedarContainer +from tests.containers.opa_container import OpaContainer, OpaSettings +from tests.containers.settings.cedar_settings import CedarSettings +from tests.fixtures.images import cedar_image, opa_image + + +@pytest.fixture(scope="session") +def opa_server(opal_network: Network, opa_image): + """OPA server fixture. + + This fixture starts an OPA server and stops it after all tests have been + executed. The OPA server is started in a separate thread and is available + under the name "opa" in the test container network. + + The fixture yields the container object, which can be used to access the + container logs or to execute commands inside the container. + + The fixture is scoped to the session, meaning it is executed only once per + test session. + + Parameters + ---------- + opal_network : Network + The network to which the OPA server should be connected. + opa_image : str + The OPA server image to use. 
+ + Yields + ------ + container : OpaContainer + The OPA server container object. + """ + with OpaContainer( + settings=OpaSettings( + container_name="opa", + image=opa_image, + ), + network=opal_network, + ) as container: + assert container.wait_for_log( + log_str="Server started", timeout=30 + ), "OPA server did not start." + yield container + + container.stop() + + +@pytest.fixture(scope="session") +def cedar_server(opal_network: Network, cedar_image): + """CEDAR server fixture. + + This fixture starts a CEDAR server and stops it after all tests have been + executed. The CEDAR server is started in a separate thread and is available + under the name "cedar" in the test container network. + + The fixture yields the container object, which can be used to access the + container logs or to execute commands inside the container. + + The fixture is scoped to the session, meaning it is executed only once per + test session. + + Parameters + ---------- + opal_network : Network + The network to which the CEDAR server should be connected. + cedar_image : str + The CEDAR server image to use. + + Yields + ------ + container : CedarContainer + The CEDAR server container object. + """ + with CedarContainer( + settings=CedarSettings( + container_name="cedar", + image=cedar_image, + ), + network=opal_network, + ) as container: + # assert container.wait_for_log( + # log_str="Server started", timeout=30 + # ), "CEDAR server did not start." + yield container + + container.stop() diff --git a/tests/genopalkeys.sh b/tests/genopalkeys.sh new file mode 100644 index 00000000..23256cdd --- /dev/null +++ b/tests/genopalkeys.sh @@ -0,0 +1,18 @@ +# This is utility script to generate OPAL keys - Use it for your needs + +# This function generates a pair of RSA keys using ssh-keygen, extracts the public key into OPAL_AUTH_PUBLIC_KEY, +# formats the private key by replacing newlines with underscores and stores it in OPAL_AUTH_PRIVATE_KEY, +# and then removes the key files. 
It outputs messages indicating the start and completion of key generation. + +function generate_opal_keys { + echo "- Generating OPAL keys" + + ssh-keygen -q -t rsa -b 4096 -m pem -f opal_crypto_key -N "" + OPAL_AUTH_PUBLIC_KEY="$(cat opal_crypto_key.pub)" + OPAL_AUTH_PRIVATE_KEY="$(tr '\n' '_' /dev/null 2>&1 + + if ! command -v opal-server &> /dev/null || ! command -v opal-client &> /dev/null; then + echo "Installation failed: opal-server or opal-client is not available." + exit 1 + fi + + echo "- opal-server and opal-client successfully installed." +} + +install_opal_server_and_client diff --git a/tests/policies/rbac.rego b/tests/policies/rbac.rego new file mode 100644 index 00000000..fa09dc92 --- /dev/null +++ b/tests/policies/rbac.rego @@ -0,0 +1,9 @@ +package app.rbac +default allow = false + +# Allow the action if the user is granted permission to perform the action. +allow { + # unless user location is outside US + country := data.users[input.user].location.country + country == "US" +} diff --git a/tests/policy_repos/gitea_policy_repo.py b/tests/policy_repos/gitea_policy_repo.py new file mode 100644 index 00000000..e3efa161 --- /dev/null +++ b/tests/policy_repos/gitea_policy_repo.py @@ -0,0 +1,106 @@ +import codecs +import os + +from git import GitCommandError, Repo + +from tests.containers.settings.gitea_settings import GiteaSettings +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings + + +class GiteaPolicyRepo(PolicyRepoBase): + def __init__(self, settings: PolicyRepoSettings, *args): + super().__init__() + self.settings = settings + + def setup(self, settings: PolicyRepoSettings): + self.settings = settings + + def get_repo_url(self): + if self.settings is None: + raise Exception("Gitea settings not set") + + return f"http://{self.settings.container_name}:{self.settings.port_http}/{self.settings.username}/{self.settings.repo_name}.git" + + def clone_and_update( + self, + 
branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + + # Push changes to the remote repository + print(f"Pushing changes for branch {branch}...") + try: + repo.git.push(authenticated_url, branch) + except GitCommandError as e: + print(f"Error pushing branch {branch}: {e}") + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.local_clone_path + + self.logger.info( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITEA_REPO_URL = f"http://localhost:{self.settings.repo_port}/{self.settings.owner}/{self.settings.repo_name}.git" + username = self.settings.owner + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITEA_REPO_URL.replace( + "http://", f"http://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + print("Operation completed successfully.") + finally: + # Ensure cleanup is performed regardless of success or failure + self.cleanup(CLONE_DIR) + + def cleanup(self): + return super().cleanup() + + def setup_webhook(self, host, port): + return super().setup_webhook(host, port) + + def create_webhook(self): + return super().create_webhook() diff --git a/tests/policy_repos/github_policy_repo.py b/tests/policy_repos/github_policy_repo.py new file mode 100644 index 00000000..5b7f44ea --- /dev/null +++ b/tests/policy_repos/github_policy_repo.py @@ -0,0 +1,411 @@ +import codecs +import logging +import os +import random +import shutil +import subprocess + +import requests +from git import GitCommandError, Repo +from github import Auth, Github +from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings + + +class GithubPolicyRepo(PolicyRepoBase): + def __init__( + self, + settings: PolicyRepoSettings, + logger: logging.Logger = setup_logger(__name__), + ): + self.logger = logger + self.load_from_env() + + self.protocol = "git" + self.host = "github.com" + self.port = 22 + self.temp_dir = settings.local_clone_path + self.ssh_key_name = "OPAL_PYTEST" + + 
self.owner = settings.owner if settings.owner else self.owner + self.password = settings.password + self.github_pat = settings.pat if settings.pat else self.github_pat + self.repo = settings.repo_name if settings.repo_name else self.repo + + self.source_repo_owner = ( + settings.source_repo_owner + if settings.source_repo_owner + else self.source_repo_owner + ) + self.source_repo_name = ( + settings.source_repo_name + if settings.source_repo_name + else self.source_repo_name + ) + + self.local_repo_path = os.path.join(self.temp_dir, self.source_repo_name) + self.ssh_key_path = ( + settings.ssh_key_path if settings.ssh_key_path else self.ssh_key_path + ) + self.should_fork = settings.should_fork + self.webhook_secret = ( + settings.webhook_secret if settings.webhook_secret else self.webhook_secret + ) + + if not self.password and not self.github_pat and not self.ssh_key_path: + self.logger.error("No password or Github PAT or SSH key provided.") + raise Exception("No authentication method provided.") + + self.load_ssh_key() + + def load_from_env(self): + self.owner = os.getenv("OPAL_TARGET_ACCOUNT", None) + self.github_pat = os.getenv("OPAL_GITHUB_PAT", None) + self.ssh_key_path = os.getenv( + "OPAL_PYTEST_POLICY_REPO_SSH_KEY_PATH", "~/.ssh/id_rsa" + ) + self.repo = os.getenv("OPAL_TARGET_REPO_NAME", "opal-example-policy-repo") + self.source_repo_owner = os.getenv("OPAL_SOURCE_ACCOUNT", "permitio") + self.source_repo_name = os.getenv( + "OPAL_SOURCE_REPO_NAME", "opal-example-policy-repo" + ) + self.webhook_secret: str = os.getenv("OPAL_WEBHOOK_SECRET", "xxxxx") + + def load_ssh_key(self): + if self.ssh_key_path.startswith("~"): + self.ssh_key_path = os.path.expanduser("~/.ssh/id_rsa") + + if not os.path.exists(self.ssh_key_path): + self.logger.debug(f"SSH key file not found at {self.ssh_key_path}") + + self.logger.debug("Generating new SSH key...") + ssh_keys = utils.generate_ssh_key_pair() + self.ssh_key = ssh_keys["public"] + self.private_key = ssh_keys["private"] 
+ + try: + with open(self.ssh_key_path, "r") as ssh_key_file: + self.ssh_key = ssh_key_file.read().strip() + + os.environ["OPAL_POLICY_REPO_SSH_KEY"] = self.ssh_key + except Exception as e: + self.logger.error(f"Error loading SSH key: {e}") + + def setup_webhook(self, host, port): + self.webhook_host = host + self.webhook_port = port + + def set_envvars(self): + # Update .env file + with open(".env", "a") as env_file: + env_file.write(f'OPAL_POLICY_REPO_URL="{self.get_repo_url()}"\n') + env_file.write(f'OPAL_POLICY_REPO_BRANCH="{self.test_branch}"\n') + + with open(".env", "a") as env_file: + env_file.write(f'OPAL_POLICY_REPO_SSH_KEY="{self.ssh_key}"\n') + + def get_repo_url(self): + return self.build_repo_url(self.owner, self.repo) + + def build_repo_url(self, owner, repo) -> str: + if owner is None: + raise Exception("Owner not set") + + if self.protocol == "ssh" or self.protocol == "git": + return f"git@{self.host}:{owner}/{repo}.git" + + if self.protocol == "http" or self.protocol == "https": + if self.github_pat: + return f"{self.protocol}://{self.host}/{owner}/{repo}.git" + + if self.password is None and self.github_pat is None and self.ssh_key is None: + raise Exception("No authentication method set") + + return f"{self.protocol}://{self.owner}:{self.password}@{self.host}:{self.port}/{owner}/{repo}" + + def get_source_repo_url(self): + return self.build_repo_url(self.source_repo_owner, self.source_repo_name) + + def clone_initial_repo(self): + Repo.clone_from(self.get_source_repo_url(), self.local_repo_path) + + def check_repo_exists(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo_list = gh.get_user().get_repos() + for repo in repo_list: + if repo.full_name == self.repo: + self.logger.debug(f"Repository {self.repo} already exists.") + return True + + except Exception as e: + self.logger.error(f"Error checking repository existence: {e}") + + return False + + def create_target_repo(self): + if self.check_repo_exists(): + return + + try: + 
gh = Github(auth=Auth.Token(self.github_pat)) + gh.get_user().create_repo(self.repo) + self.logger.info(f"Repository {self.repo} created successfully.") + except Exception as e: + self.logger.error(f"Error creating repository: {e}") + + def fork_target_repo(self): + if self.check_repo_exists(): + return + + self.logger.debug(f"Forking repository {self.source_repo_name}...") + + if self.github_pat is None: + try: + gh = Github(auth=Auth.Token(self.github_pat)) + gh.get_user().create_fork(self.source_repo_owner, self.source_repo_name) + self.logger.info( + f"Repository {self.source_repo_name} forked successfully." + ) + except Exception as e: + self.logger.error(f"Error forking repository: {e}") + return + + # Try with PAT + try: + headers = {"Authorization": f"token {self.github_pat}"} + response = requests.post( + f"https://api.github.com/repos/{self.source_repo_owner}/{self.source_repo_name}/forks", + headers=headers, + ) + if response.status_code == 202: + self.logger.info("Fork created successfully!") + else: + self.logger.error(f"Error creating fork: {response.status_code}") + self.logger.debug(response.json()) + + except Exception as e: + self.logger.error(f"Error forking repository: {str(e)}") + + def cleanup(self): + self.delete_test_branches() + + def delete_test_branches(self): + """Deletes all branches starting with 'test-' from the specified + repository.""" + + try: + self.logger.info(f"Deleting test branches from {self.repo}...") + + # Initialize Github API + gh = Github(auth=Auth.Token(self.github_pat)) + + # Get the repository + repo = gh.get_user().get_repo(self.repo) + + # Enumerate branches and delete pytest- branches + branches = repo.get_branches() + for branch in branches: + if branch.name.startswith("test-"): + ref = f"heads/{branch.name}" + repo.get_git_ref(ref).delete() + self.logger.info(f"Deleted branch: {branch.name}") + else: + self.logger.info(f"Skipping branch: {branch.name}") + + self.logger.info("All test branches have been deleted 
successfully.") + except Exception as e: + self.logger.error(f"An error occurred: {e}") + + return + + def generate_test_branch(self): + self.test_branch = ( + f"test-{random.randint(1000, 9999)}{random.randint(1000, 9999)}" + ) + os.environ["OPAL_POLICY_REPO_BRANCH"] = self.test_branch + + def create_test_branch(self): + try: + # Initialize the repository + repo = Repo(self.local_repo_path) + + # Ensure the repository is clean + if repo.is_dirty(untracked_files=True): + raise RuntimeError( + "The repository has uncommitted changes. Commit or stash them before proceeding." + ) + + # Set the origin remote URL + remote_url = f"https://github.com/{self.owner}/{self.repo}.git" + if "origin" in repo.remotes: + origin = repo.remote(name="origin") + origin.set_url(remote_url) # Update origin URL if it exists + else: + origin = repo.create_remote( + "origin", remote_url + ) # Create origin remote if it doesn't exist + + self.logger.debug(f"Origin set to: {remote_url}") + + # Create and checkout the new branch + new_branch = repo.create_head(self.test_branch) # Create branch + new_branch.checkout() # Switch to the new branch + + # Push the new branch to the remote + origin.push(refspec=f"{self.test_branch}:{self.test_branch}") + + self.logger.info( + f"Branch '{self.test_branch}' successfully created and pushed." 
+ ) + except GitCommandError as e: + self.logger.error(f"Git command failed: {e}") + except Exception as e: + self.logger.error(f"An error occurred: {e}") + + def cleanup(self, delete_repo=True, delete_ssh_key=True): + subprocess.run(["rm", "-rf", "./opal-example-policy-repo"], check=True) + + self.delete_test_branches() + + if delete_repo: + self.delete_repo() + + if delete_ssh_key: + self.delete_ssh_key() + + def delete_ssh_key(self): + gh = Github(auth=Auth.Token(self.github_pat)) + user = gh.get_user() + keys = user.get_keys() + for key in keys: + if key.title == self.ssh_key_name: + key.delete() + self.logger.debug(f"SSH key deleted: {key.title}") + break + + self.logger.debug("All OPAL SSH keys have been deleted successfully.") + + return + + def delete_repo(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(self.repo) + repo.delete() + self.logger.debug(f"Repository {self.repo} deleted successfully.") + except Exception as e: + self.logger.error(f"Error deleting repository: {e}") + + def setup(self): + self.clone_initial_repo() + + if self.should_fork: + self.fork_target_repo() + else: + self.create_target_repo() + + self.generate_test_branch() + self.create_test_branch() + + def add_ssh_key(self): + gh = Github(auth=Auth.Token(self.github_pat)) + user = gh.get_user() + keys = user.get_keys() + for key in keys: + if key.title == self.ssh_key_name: + return + + key = user.create_key(self.ssh_key_name, self.ssh_key) + self.logger.info(f"SSH key added: {key.title}") + + def create_webhook(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + self.logger.info( + f"Creating webhook for repository {self.owner}/{self.repo}" + ) + repo = gh.get_user().get_repo(f"{self.repo}") + url = utils.create_localtunnel(self.webhook_port) + self.logger.info(f"Webhook URL: {url}") + self.github_webhook = repo.create_hook( + "web", + { + "url": f"{url}/webhook", + "content_type": "json", + f"secret": "abc123", + "insecure_ssl": 
"1", + }, + events=["push"], + active=True, + ) + self.logger.info("Webhook created successfully.") + except Exception as e: + self.logger.error(f"Error creating webhook: {e}") + + def delete_webhook(self): + try: + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(f"{self.repo}") + repo.delete_hook(self.github_webhook.id) + self.logger.info("Webhook deleted successfully.") + except Exception as e: + self.logger.error(f"Error deleting webhook: {e}") + + def update_branch(self, file_name, file_content): + self.logger.info( + f"Updating branch '{self.test_branch}' with file '{file_name}' content..." + ) + + # Decode escape sequences in the file content + if file_content is not None: + file_content = codecs.decode(file_content, "unicode_escape") + + # Create or update the specified file with the provided content + file_path = os.path.join(self.local_repo_path, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + if file_content is None: + with open(file_path, "r") as f: + file_content = f.read() + + try: + # Stage the changes + self.logger.debug(f"Staging changes for branch {self.test_branch}...") + gh = Github(auth=Auth.Token(self.github_pat)) + repo = gh.get_user().get_repo(self.repo) + branch_ref = f"heads/{self.test_branch}" + ref = repo.get_git_ref(branch_ref) + latest_commit = repo.get_git_commit(ref.object.sha) + base_tree = latest_commit.commit.tree + new_tree = repo.create_git_tree( + [ + { + "path": file_name, + "mode": "100644", + "type": "blob", + "content": file_content, + } + ], + base_tree, + ) + new_commit = repo.create_git_commit( + f"Commit changes for branch {self.test_branch}", + new_tree, + [latest_commit], + ) + ref.edit(new_commit.sha) + self.logger.debug(f"Changes pushed for branch {self.test_branch}.") + + except Exception as e: + self.logger.error(f"Error updating branch: {e}") + return False + + return True + + def remove_webhook(self): + self.github_webhook.delete() diff --git 
a/tests/policy_repos/gitlab_policy_repo.py b/tests/policy_repos/gitlab_policy_repo.py new file mode 100644 index 00000000..00dc0f7f --- /dev/null +++ b/tests/policy_repos/gitlab_policy_repo.py @@ -0,0 +1,103 @@ +import codecs + +from tests.policy_repos.policy_repo_base import PolicyRepoBase + + +class GitlabPolicyRepo(PolicyRepoBase): + def __init__(self, owner, repo, token): + self.owner = owner + self.repo = repo + self.token = token + + def clone_and_update( + self, + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ): + """Clone the repository, update the specified branch, and push + changes.""" + self.prepare_directory(CLONE_DIR) # Clean up and prepare the directory + print(f"Processing branch: {branch}") + + # Clone the repository for the specified branch + print(f"Cloning branch {branch}...") + repo = Repo.clone_from(authenticated_url, CLONE_DIR, branch=branch) + + # Create or update the specified file with the provided content + file_path = os.path.join(CLONE_DIR, file_name) + with open(file_path, "w") as f: + f.write(file_content) + + # Stage the changes + print(f"Staging changes for branch {branch}...") + repo.git.add(A=True) # Add all changes + + # Commit the changes if there are modifications + if repo.is_dirty(): + print(f"Committing changes for branch {branch}...") + repo.index.commit(COMMIT_MESSAGE) + repo.git.push("origin", branch) + + # Clean up the cloned repository + print(f"Cleaning up branch {branch}...") + shutil.rmtree(CLONE_DIR) + + print(f"Branch {branch} processed successfully.") + + def update_branch(self, branch, file_name, file_content): + temp_dir = self.settings.temp_dir + + self.logger.debug( + f"Updating branch '{branch}' with file '{file_name}' content..." 
+ ) + + # Decode escape sequences in the file content + file_content = codecs.decode(file_content, "unicode_escape") + + GITHUB_REPO_URL = ( + f"https://github.com/{self.settings.username}/{self.settings.repo_name}.git" + ) + username = self.settings.username + PASSWORD = self.settings.password + CLONE_DIR = os.path.join(temp_dir, "branch_update") + COMMIT_MESSAGE = "Automated update commit" + + # Append credentials to the repository URL + authenticated_url = GITHUB_REPO_URL.replace( + "https://", f"https://{username}:{PASSWORD}@" + ) + + try: + self.clone_and_update( + branch, + file_name, + file_content, + CLONE_DIR, + authenticated_url, + COMMIT_MESSAGE, + ) + except Exception as e: + self.logger.error(f"Error updating branch: {e}") + return False + return True + + # implementation using git subprocess + # try: + # # Change to the policy repository directory + # os.chdir(opal_repo_path) + + # # Create a .rego file with the policy name as the package + # with open(regofile, "w") as f: + # f.write(f"package {policy_name}\n") + + # # Run Git commands to add, commit, and push the policy file + # subprocess.run(["git", "add", regofile], check=True) + # subprocess.run(["git", "commit", "-m", f"Add {regofile}"], check=True) + # subprocess.run(["git", "push"], check=True) + # finally: + # # Change back to the previous directory + # os.chdir("..") diff --git a/tests/policy_repos/policy_repo_base.py b/tests/policy_repos/policy_repo_base.py new file mode 100644 index 00000000..65701caf --- /dev/null +++ b/tests/policy_repos/policy_repo_base.py @@ -0,0 +1,27 @@ +from abc import ABC, abstractmethod + + +class PolicyRepoBase(ABC): + @abstractmethod + def get_repo_url(self) -> str: + pass + + @abstractmethod + def setup_webhook(self, host, port): + pass + + @abstractmethod + def setup(self) -> None: + pass + + @abstractmethod + def cleanup(self) -> None: + pass + + @abstractmethod + def update_branch(self, file_name, file_content) -> None: + pass + + @abstractmethod + def 
create_webhook(self): + pass diff --git a/tests/policy_repos/policy_repo_factory.py b/tests/policy_repos/policy_repo_factory.py new file mode 100644 index 00000000..e3fe2aa5 --- /dev/null +++ b/tests/policy_repos/policy_repo_factory.py @@ -0,0 +1,51 @@ +import logging +import os +from enum import Enum + +from testcontainers.core.utils import setup_logger + +from tests.policy_repos.gitea_policy_repo import GiteaPolicyRepo +from tests.policy_repos.github_policy_repo import GithubPolicyRepo +from tests.policy_repos.gitlab_policy_repo import GitlabPolicyRepo +from tests.policy_repos.policy_repo_base import PolicyRepoBase +from tests.policy_repos.policy_repo_settings import PolicyRepoSettings + + +class SupportedPolicyRepo(Enum): + GITEA = "Gitea" + GITHUB = "Github" + GITLAB = "Gitlab" + # BITBUCKET = "Bitbucket" + # AZURE_DEVOPS = "AzureDevOps" + + +# Factory class to create a policy repository object based on the type of policy repository. +class PolicyRepoFactory: + def __init__(self, policy_repo: str = SupportedPolicyRepo.GITEA): + """ + :param policy_repo: The type of policy repository. Defaults to GITEA. + """ + self.assert_exists(policy_repo) + + self.policy_repo = policy_repo + + def get_policy_repo( + self, + settings: PolicyRepoSettings, + logger: logging.Logger = setup_logger(__name__), + ) -> PolicyRepoBase: + factory = { + SupportedPolicyRepo.GITEA: GiteaPolicyRepo, + SupportedPolicyRepo.GITHUB: GithubPolicyRepo, + SupportedPolicyRepo.GITLAB: GitlabPolicyRepo, + } + + return factory[SupportedPolicyRepo(self.policy_repo)](settings) + + def assert_exists(self, policy_repo: str) -> bool: + try: + source_enum = SupportedPolicyRepo(policy_repo) + except ValueError: + raise ValueError( + f"Unsupported REPO_SOURCE value: {policy_repo}. 
Must be one of {[e.value for e in SupportedPolicyRepo]}" + ) diff --git a/tests/policy_repos/policy_repo_settings.py b/tests/policy_repos/policy_repo_settings.py new file mode 100644 index 00000000..975c0edb --- /dev/null +++ b/tests/policy_repos/policy_repo_settings.py @@ -0,0 +1,36 @@ +class PolicyRepoSettings: + def __init__( + self, + local_clone_path: str | None = None, + owner: str | None = None, + repo_name: str | None = None, + branch_name: str | None = None, + repo_host: str | None = None, + repo_port_http: int | None = None, + repo_port_ssh: int | None = None, + password: str | None = None, + pat: str | None = None, + ssh_key_path: str | None = None, + source_repo_owner: str | None = None, + source_repo_name: str | None = None, + should_fork: bool = False, + should_create_repo: bool = False, # if True, will create the repo, if the should_fork is False. + # If should_fork is True, it will fork and not create the repo from scratch. + # if False, the an existing repository is expected + webhook_secret: str | None = None, + ): + self.local_clone_path = local_clone_path + self.owner = owner + self.repo_name = repo_name + self.branch_name = branch_name + self.repo_host = repo_host + self.repo_port_http = repo_port_http + self.repo_port_ssh = repo_port_ssh + self.password = password + self.pat = pat + self.ssh_key_path = ssh_key_path + self.source_repo_owner = source_repo_owner + self.source_repo_name = source_repo_name + self.should_fork = should_fork + self.should_create_repo = should_create_repo + self.webhook_secret = webhook_secret diff --git a/tests/pytest.ini b/tests/pytest.ini new file mode 100644 index 00000000..87ffbfda --- /dev/null +++ b/tests/pytest.ini @@ -0,0 +1,8 @@ +[pytest] +asyncio_default_fixture_loop_scope = function +log_cli = true +log_level = INFO +log_cli_level = INFO +log_file = pytest_logs.log +log_file_level = DEBUG +pythonpath = fixtures diff --git a/tests/requirements.txt b/tests/requirements.txt new file mode 100644 index 
00000000..a1a6b5ae --- /dev/null +++ b/tests/requirements.txt @@ -0,0 +1,4 @@ +PyGithub +debugpy +pytest +testcontainers diff --git a/tests/run.sh b/tests/run.sh new file mode 100755 index 00000000..38922a91 --- /dev/null +++ b/tests/run.sh @@ -0,0 +1,25 @@ +#!/bin/bash +set -e + +if [[ -f ".env" ]]; then + # shellcheck disable=SC1091 + source .env +fi + +function main { + + echo "Running tests..." + + # Check if a specific test is provided + if [[ -n "$1" ]]; then + echo "Running specific test: $1" + python -Xfrozen_modules=off -m debugpy --listen 5678 -m pytest -s "$@" + else + echo "Running all tests..." + python -Xfrozen_modules=off -m debugpy --listen 5678 -m pytest -s + fi + + echo "Done!" +} + +main "$@" diff --git a/tests/settings.py b/tests/settings.py new file mode 100644 index 00000000..4122ba52 --- /dev/null +++ b/tests/settings.py @@ -0,0 +1,171 @@ +import io +import json +import os +from contextlib import redirect_stdout +from secrets import token_hex +from typing import List + +import pytest +from dotenv import load_dotenv +from opal_common.cli.commands import obtain_token +from opal_common.schemas.security import PeerType +from testcontainers.core.generic import DockerContainer +from testcontainers.core.waiting_utils import wait_for_logs + +from tests.policy_repos.policy_repo_factory import SupportedPolicyRepo + + +class TestSettings: + def __init__(self): + """Initialize settings for the test session. + + This method creates a new session ID, then loads settings from environment + variables. The session ID is a 2-character hexadecimal string, and is used to + identify the test session for logging and debugging purposes. + + The settings loaded from environment variables are as follows: + + - OPAL_PYTEST_POLICY_REPO_PROVIDER: The policy repository provider to use + for the test session. Valid values are 'GITEA' and 'GITHUB'. If not set, + defaults to 'GITEA'. 
+ """ + self.session_id = token_hex(2) + + self.load_from_env() + + def load_from_env(self): + """Loads environment variables into the test settings. + + This function loads the environment variables using the `load_dotenv` function + and assigns them to various attributes of the settings object. The environment + variables control various aspects of the test session, such as the policy + repository provider, repository details, authentication credentials, and + configuration options for the test environment. + + Attributes set by this function: + - policy_repo_provider: The provider for the policy repository. Defaults to GITEA. + - repo_owner: The owner of the policy repository. Defaults to "iwphonedo". + - repo_name: The name of the policy repository. Defaults to "opal-example-policy-repo". + - repo_password: The password for accessing the policy repository. + - github_pat: The GitHub personal access token for accessing the repository. + - ssh_key_path: The path to the SSH key used for repository access. + - source_repo_owner: The owner of the source repository. Defaults to "permitio". + - source_repo_name: The name of the source repository. Defaults to "opal-example-policy-repo". + - webhook_secret: The secret used for authenticating webhooks. Defaults to "xxxxx". + - should_fork: Whether to fork the repository. Defaults to "true". + - use_webhook: Whether to use webhooks for triggering updates. Defaults to "true". + - wait_for_debugger: Whether to wait for a debugger. Defaults to "false". + - skip_rebuild_images: Whether to skip rebuilding Docker images. Defaults to "false". + - keep_images: Whether to keep Docker images after tests. Defaults to "true". 
+ """ + + load_dotenv() + + self.policy_repo_provider = os.getenv( + "OPAL_PYTEST_POLICY_REPO_PROVIDER", SupportedPolicyRepo.GITEA + ) + self.repo_owner = os.getenv("OPAL_PYTEST_REPO_OWNER", "iwphonedo") + self.repo_name = os.getenv("OPAL_PYTEST_REPO_NAME", "opal-example-policy-repo") + self.repo_password = os.getenv("OPAL_PYTEST_REPO_PASSWORD") + self.github_pat = os.getenv("OPAL_PYTEST_GITHUB_PAT") + self.ssh_key_path = os.getenv("OPAL_PYTEST_SSH_KEY_PATH") + self.source_repo_owner = os.getenv("OPAL_PYTEST_SOURCE_ACCOUNT", "permitio") + self.source_repo_name = os.getenv( + "OPAL_PYTEST_SOURCE_REPO", "opal-example-policy-repo" + ) + self.webhook_secret = os.getenv("OPAL_PYTEST_WEBHOOK_SECRET", "xxxxx") + self.should_fork = os.getenv("OPAL_PYTEST_SHOULD_FORK", "true") + self.use_webhook = os.getenv("OPAL_PYTEST_USE_WEBHOOK", "true") + self.wait_for_debugger = os.getenv("OPAL_PYTEST_WAIT_FOR_DEBUGGER", False) + + # This will fallback to the official permitio images of opal-server and opal-client, you could use it to fallback also opa and cedar + self.do_not_build_images = os.getenv("OPAL_PYTEST_DO_NOT_BUILD_IMAGES", False) + + # This will use the same image between test sessions. Otherwise, it will rebuild the images with every execution. + # Don't use it if you changed the code, as your changes won't be deployed. + # In order to use this flag, you should first set the keep_images flag to true, and for the following execution you will have the images. + self.skip_rebuild_images = os.getenv("OPAL_PYTEST_SKIP_REBUILD_IMAGES", False) + + # This will keep the images after the test session. If you use it, you will be able to use skip_rebuild_images the next time. 
+ self.keep_images = os.getenv("OPAL_PYTEST_KEEP_IMAGES", True) + + def dump_settings(self): + with open(f"pytest_{self.session_id}.env", "w") as envfile: + envfile.write("#!/usr/bin/env bash\n\n") + for key, val in globals().items(): + if key.startswith("OPAL") or key.startswith("UVICORN"): + envfile.write(f"export {key}='{val}'\n\n") + + +pytest_settings = TestSettings() +from testcontainers.core.utils import setup_logger + + +class PyTestSessionSettings(List): + repo_providers = ["gitea"] + modes = ["without_webhook"] + broadcasters = ["postgres"] + broadcaster = "fgsfdg" + repo_provider = "fdgdfg" + mode = "rgrtre" + + def __init__( + self, + session_id: str = None, + repo_provider: str = None, + broadcaster: str = None, + mode: str = None, + ): + super().__init__() + + self.session_id = session_id + self.repo_provider = repo_provider + self.broadcaster = broadcaster + self.mode = mode + + self.current_broadcaster = 0 + self.current_repo_provider = 0 + self.current_mode = 0 + + def __iter__(self): + return self + + def __next__(self): + if self.current_broadcaster >= len(self.broadcasters): + raise StopIteration + + while self.current_broadcaster < len(self.broadcasters): + is_first = ( + (self.current_broadcaster <= 0) + and (self.current_repo_provider <= 0) + and (self.current_mode <= 0) + ) + + # Update settings + self.broadcaster = self.broadcasters[self.current_broadcaster] + self.repo_provider = self.repo_providers[self.current_repo_provider] + self.mode = self.modes[self.current_mode] + # Move to the next combination + self.current_mode += 1 + if self.current_mode >= len(self.modes): + self.current_mode = 0 + self.current_repo_provider += 1 + if self.current_repo_provider >= len(self.repo_providers): + self.current_repo_provider = 0 + self.current_broadcaster += 1 + + return { + "session_id": self.session_id, + "repo_provider": self.repo_provider, + "broadcaster": self.broadcaster, + "mode": self.mode, + "is_final": (self.current_broadcaster >= 
len(self.broadcasters)), + "is_first": is_first, + } + + print("Finished iterating over PyTestSessionSettings...") + + +@pytest.fixture(params=list(PyTestSessionSettings()), scope="session") +def session_matrix(request): + return request.param diff --git a/tests/start_debug.sh b/tests/start_debug.sh new file mode 100644 index 00000000..b2ef3f26 --- /dev/null +++ b/tests/start_debug.sh @@ -0,0 +1,29 @@ +#!/bin/bash + +echo "Starting Opal Server or Client with Debugpy in Debug Mode..." + +# Set default values for variables if not already set +export GUNICORN_CONF=${GUNICORN_CONF:-./gunicorn_conf.py} +export UVICORN_PORT=${UVICORN_PORT:-8000} +export UVICORN_NUM_WORKERS=${UVICORN_NUM_WORKERS:-1} +export GUNICORN_TIMEOUT=${GUNICORN_TIMEOUT:-30} +export GUNICORN_KEEP_ALIVE_TIMEOUT=${GUNICORN_KEEP_ALIVE_TIMEOUT:-5} +#export UVICORN_ASGI_APP=${UVICORN_ASGI_APP:-opal_server.main:app} + +# Check for OPAL_BROADCAST_URI when multiple workers are enabled +if [[ -z "${OPAL_BROADCAST_URI}" && "${UVICORN_NUM_WORKERS}" != "1" ]]; then + echo "OPAL_BROADCAST_URI must be set when having multiple workers" + exit 1 +fi + +# Ensure PYTHONPATH includes the directory for `opal_server` +export PYTHONPATH=/opal/packages/opal-server:$PYTHONPATH +export PYTHONPATH=/opal/packages/opal-client:$PYTHONPATH +echo "PYTHONPATH: $PYTHONPATH" + +# Start Gunicorn with Debugpy +#exec python -m debugpy --listen 0.0.0.0:5678 --wait-for-client \ +exec python -m debugpy --listen 0.0.0.0:5678 \ + -m gunicorn -b 0.0.0.0:${UVICORN_PORT} -k uvicorn.workers.UvicornWorker \ + --workers=${UVICORN_NUM_WORKERS} -c ${GUNICORN_CONF} ${UVICORN_ASGI_APP} \ + -t ${GUNICORN_TIMEOUT} --keep-alive ${GUNICORN_KEEP_ALIVE_TIMEOUT} diff --git a/tests/test_app.py b/tests/test_app.py new file mode 100644 index 00000000..acb20717 --- /dev/null +++ b/tests/test_app.py @@ -0,0 +1,311 @@ +import asyncio +import subprocess +import time +from datetime import datetime, timezone +from typing import List + +import pytest +import requests 
+from testcontainers.core.utils import setup_logger + +from tests import utils +from tests.containers.broadcast_container_base import BroadcastContainerBase +from tests.containers.gitea_container import GiteaContainer +from tests.containers.opal_client_container import ( + OpalClientContainer, + OpalTestContainer, +) +from tests.containers.opal_server_container import OpalServerContainer +from tests.policy_repos.policy_repo_factory import SupportedPolicyRepo +from tests.settings import PyTestSessionSettings, session_matrix + +logger = setup_logger(__name__) + +OPAL_DISTRIBUTION_TIME_SECONDS = 2 +ip_to_location_base_url = "https://api.country.is/" + + +def publish_data_user_location( + src, user, DATASOURCE_TOKEN: str, port: int, topics: str = "policy_data" +): + """Publish user location data to OPAL.""" + # Construct the command to publish data update + publish_data_user_location_command = ( + f"opal-client publish-data-update --server-url http://localhost:{port} --src-url {src} " + f"-t {topics} --dst-path /users/{user}/location {DATASOURCE_TOKEN}" + ) + + # Execute the command + result = subprocess.run(publish_data_user_location_command, shell=True) + # Check command execution result + if result.returncode != 0: + logger.error("Error: Failed to update user location!") + else: + logger.info(f"Successfully updated user location with source: {src}") + + +async def data_publish_and_test( + user, + allowed_country, + locations, + DATASOURCE_TOKEN: str, + opal_client: OpalClientContainer, + port: int, +): + """Run the user location policy tests multiple times.""" + + for location in locations: + ip = location[0] + user_country = location[1] + + publish_data_user_location( + f"{ip_to_location_base_url}{ip}", user, DATASOURCE_TOKEN, port + ) + + if allowed_country == user_country: + print( + f"{user}'s location set to: {user_country}. current_country is set to: {allowed_country} Expected outcome: ALLOWED." 
+ ) + else: + print( + f"{user}'s location set to: {user_country}. current_country is set to: {allowed_country} Expected outcome: NOT ALLOWED." + ) + + await asyncio.sleep(1) + + assert await utils.opal_authorize( + user, + f"http://localhost:{opal_client.settings.opa_port}/v1/data/app/rbac/allow", + ) == (allowed_country == user_country) + return True + + +def update_policy( + gitea_container: GiteaContainer, + opal_server_container: OpalServerContainer, + country_value, +): + """Update the policy file dynamically.""" + + gitea_container.update_branch( + opal_server_container.settings.policy_repo_main_branch, + "rbac.rego", + ( + "package app.rbac\n" + "import rego.v1\n" + "default allow := false\n\n" + "# Allow the action if the user is granted permission to perform the action.\n" + "allow if {\n" + "\t# unless user location is outside US\n" + "\tcountry := data.users[input.user].location.country\n" + '\tcountry == "' + country_value + '"\n' + "}" + ), + ) + + utils.wait_policy_repo_polling_interval(opal_server_container) + + +def test_topiced_user_location( + opal_servers: list[OpalServerContainer], + topiced_clients: dict[str, OpalClientContainer], +): + """Test data publishing.""" + + for topic, clients in topiced_clients.items(): + # Generate the reference timestamp + reference_timestamp = datetime.now(timezone.utc) + logger.info(f"Reference timestamp: {reference_timestamp}") + + # Publish data to the OPAL server + publish_data_user_location( + f"{ip_to_location_base_url}8.8.8.8", + "bob", + opal_servers[0].obtain_OPAL_tokens("test_user_location")["datasource"], + opal_servers[0].settings.port, + topic, + ) + + logger.info(f"Published user location for 'bob'. | topic: {topic}") + + for client in clients: + log_found = client.wait_for_log( + "PUT /v1/data/users/bob/location -> 204", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert ( + log_found + ), "Expected log entry not found after the reference timestamp." 
+ + +def test_user_location( + opal_servers: list[OpalServerContainer], + connected_clients: list[OpalClientContainer], + session_matrix: PyTestSessionSettings, +): + """Test data publishing.""" + + # Generate the reference timestamp + reference_timestamp = datetime.now(timezone.utc) + logger.info(f"Reference timestamp: {reference_timestamp}") + + # Publish data to the OPAL server + logger.info(ip_to_location_base_url) + publish_data_user_location( + f"{ip_to_location_base_url}8.8.8.8", + "bob", + opal_servers[0].obtain_OPAL_tokens("test_user_locaation")["datasource"], + opal_servers[0].settings.port, + ) + logger.info("Published user location for 'bob'.") + + for client in connected_clients: + log_found = client.wait_for_log( + "PUT /v1/data/users/bob/location -> 204", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert log_found, "Expected log entry not found after the reference timestamp." + + +# @pytest.mark.parametrize("location", ["CN", "US", "SE"]) +@pytest.mark.asyncio +async def test_policy_and_data_updates( + gitea_server: GiteaContainer, + opal_servers: list[OpalServerContainer], + opal_clients: list[OpalClientContainer], + temp_dir, +): + """This script updates policy configurations and tests access based on + specified settings and locations. + + It integrates with Gitea and OPA for policy management and testing. 
+ """ + + # Parse locations into separate lists of IPs and countries + locations = [("8.8.8.8", "US"), ("77.53.31.138", "SE")] + for server in opal_servers: + DATASOURCE_TOKEN = server.obtain_OPAL_tokens("test_policy_and_data_updates")[ + "datasource" + ] + + for location in locations: + # Update policy to allow only non-US users + print(f"Updating policy to allow only users from {location[1]}...") + update_policy(gitea_server, server, location[1]) + + for client in opal_clients: + assert await data_publish_and_test( + "bob", + location[1], + locations, + DATASOURCE_TOKEN, + client, + server.settings.port, + ) + + +@pytest.mark.parametrize("attempts", [10]) # Number of attempts to repeat the check +def test_read_statistics( + attempts, + opal_servers: list[OpalServerContainer], + number_of_opal_servers: int, + number_of_opal_clients: int, +): + """Tests the statistics feature by verifying the number of clients and + servers.""" + + print("- Testing statistics feature") + + time.sleep(15) + + for server in opal_servers: + print(f"OPAL Server: {server.settings.container_name}:7002") + + # The URL for statistics + stats_url = f"http://localhost:{server.settings.port}/stats" + + headers = { + "Authorization": f"Bearer {server.obtain_OPAL_tokens('test_read_statistics')['datasource']}" + } + + # Repeat the request multiple times + for attempt in range(attempts): + print(f"Attempt {attempt + 1}/{attempts} - Checking statistics...") + + try: + time.sleep(1) + # Send a request to the statistics endpoint + response = requests.get(stats_url, headers=headers) + response.raise_for_status() # Raise an error for HTTP status codes 4xx/5xx + + print(f"Response: {response.status_code} {response.text}") + + # Look for the expected data in the response + stats = utils.get_client_and_server_count(response.text) + if stats is None: + pytest.fail( + f"Expected statistics not found in response: {response.text}" + ) + + client_count = stats["client_count"] + server_count = 
stats["server_count"] + print( + f"Number of OPAL servers expected: {number_of_opal_servers}, found: {server_count}" + ) + print( + f"Number of OPAL clients expected: {number_of_opal_clients}, found: {client_count}" + ) + + if server_count < number_of_opal_servers: + pytest.fail( + f"Expected number of servers not found in response: {response.text}" + ) + + if client_count < number_of_opal_clients: + pytest.fail( + f"Expected number of clients not found in response: {response.text}" + ) + + except requests.RequestException as e: + if response is not None: + print(f"Request failed: {response.status_code} {response.text}") + pytest.fail(f"Failed to fetch statistics: {e}") + + print("Statistics check passed in all attempts.") + + +#@pytest.mark.asyncio +def test_policy_update( + gitea_server: GiteaContainer, + opal_servers: list[OpalServerContainer], + opal_clients: list[OpalClientContainer], + temp_dir, +): + # Parse locations into separate lists of IPs and countries + location = "CN" + + # Generate the reference timestamp + reference_timestamp = datetime.now(timezone.utc) + logger.info(f"Reference timestamp: {reference_timestamp}") + + for server in opal_servers: + # Update policy to allow only non-US users + print(f"Updating policy to allow only users from {location}...") + update_policy(gitea_server, server, "location") + + log_found = server.wait_for_log( + "Found new commits: old HEAD was", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert ( + log_found + ), f"Expected log entry not found in server '{server.settings.container_name}' after the reference timestamp." + + for client in opal_clients: + log_found = client.wait_for_log( + "Fetching policy bundle from", 30, reference_timestamp + ) + logger.info("Finished processing logs.") + assert ( + log_found + ), f"Expected log entry not found in client '{client.settings.container_name}' after the reference timestamp." 
diff --git a/tests/test_opal_server_config.py b/tests/test_opal_server_config.py new file mode 100644 index 00000000..7353b652 --- /dev/null +++ b/tests/test_opal_server_config.py @@ -0,0 +1,4 @@ +# Test each config value from /packages.opal-server/opal_server/config.py OpalServerConfig +print( + "Test each config value from /packages.opal-server/opal_server/config.py OpalServerConfig" +) diff --git a/tests/utils.py b/tests/utils.py new file mode 100644 index 00000000..2cab0962 --- /dev/null +++ b/tests/utils.py @@ -0,0 +1,511 @@ +import asyncio +import json +import os +import platform +import re +import subprocess +import sys +import time + +import aiohttp +import requests +from cryptography.hazmat.primitives import serialization +from cryptography.hazmat.primitives.asymmetric import rsa +from git import Repo +from testcontainers.core.utils import setup_logger + +import docker +from tests.containers.opal_server_container import OpalServerContainer +from tests.settings import pytest_settings + +logger = setup_logger(__name__) + + +def compose(filename="docker-compose-app-tests.yml", *args): + """Helper function to run docker compose commands with the given arguments. + + Assumes `docker-compose-app-tests.yml` is the compose file and `.env` is the environment file. 
+ """ + command = [ + "docker", + "compose", + "-f", + filename, + "--env-file", + ".env", + ] + list(args) + result = subprocess.run( + command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True + ) + if result.returncode != 0: + raise RuntimeError(f"Compose command failed: {result.stderr.strip()}") + return result.stdout + + +def build_docker_image(docker_file: str, image_name: str, session_matrix: dict): + """Build the Docker image from the Dockerfile.server.local file in the + tests/docker directory.""" + + docker_client = docker.from_env() + + print(f"Building Docker image '{image_name}'...") + + image = None + if (not session_matrix["is_first"]) or (pytest_settings.skip_rebuild_images): + exists = any(image_name in image.tags for image in docker_client.images.list()) + if exists: + image = docker_client.images.get(image_name) + + if not image: + if "tests" in os.path.abspath(__file__): + logger.info(f"Right now the file is {os.path.abspath(__file__)}") + context_path = os.path.abspath( + os.path.join(os.path.dirname(__file__), "..", "..", "opal") + ) + else: + context_path = ".." 
+ dockerfile_path = os.path.join(os.path.dirname(__file__), "docker", docker_file) + logger.info(f"Context path: {context_path}, Dockerfile path: {dockerfile_path}") + + # Ensure the Dockerfile exists + if not os.path.exists(dockerfile_path): + raise FileNotFoundError(f"Dockerfile not found at {dockerfile_path}") + + logger.debug(f"Building Docker image from {dockerfile_path}...") + + try: + # Build the Docker image + image, logs = docker_client.images.build( + path=context_path, + dockerfile=dockerfile_path, + tag=image_name, + rm=True, + ) + # Print build logs + for log in logs: + logger.debug(log.get("stream", "").strip()) + except Exception as e: + raise RuntimeError(f"Failed to build Docker image: {e}") + + logger.debug(f"Docker image '{image_name}' built successfully.") + + yield image_name + + if session_matrix["is_final"]: + # Optionally, clean up the image after the test session + try: + if pytest_settings.keep_images: + return + + image.remove(force=True) + print(f"Docker image '{image.id}' removed.") + except Exception as cleanup_error: + print( + f"Failed to remove Docker image '{image_name}'{image.id}: {cleanup_error}" + ) + + +def remove_pytest_opal_networks(): + """Remove all Docker networks with names starting with 'pytest_opal_'.""" + try: + client = docker.from_env() + networks = client.networks.list() + + for network in networks: + if network.name.startswith("pytest_opal_"): + try: + logger.debug(f"Removing network: {network.name}") + network.remove() + except Exception as e: + logger.debug(f"Failed to remove network {network.name}: {e}") + logger.debug("Cleanup complete!") + except Exception as e: + logger.debug(f"Error while accessing Docker: {e}") + + +def generate_ssh_key_pair(): + # Generate a private key + private_key = rsa.generate_private_key( + public_exponent=65537, # Standard public exponent + key_size=2048, # Key size in bits + ) + + # Serialize the private key in PEM format + private_key_pem = private_key.private_bytes( + 
encoding=serialization.Encoding.PEM, + format=serialization.PrivateFormat.TraditionalOpenSSL, + encryption_algorithm=serialization.NoEncryption(), # No passphrase + ) + + # Generate the corresponding public key + public_key = private_key.public_key() + + # Serialize the public key in OpenSSH format + public_key_openssh = public_key.public_bytes( + encoding=serialization.Encoding.OpenSSH, + format=serialization.PublicFormat.OpenSSH, + ) + + # Return the keys as strings + return private_key_pem.decode("utf-8"), public_key_openssh.decode("utf-8") + + +async def opal_authorize(user: str, policy_url: str): + """Test if the user is authorized based on the current policy.""" + + # HTTP headers and request payload + headers = {"Content-Type": "application/json"} + data = { + "input": {"user": user, "action": "read", "object": "id123", "type": "finance"} + } + + # Send POST request to OPA + response = requests.post(policy_url, headers=headers, json=data) + + allowed = False + # Parse the JSON response + response_json = response.json() + assert "result" in response_json, response_json + allowed = response.json()["result"] + logger.debug( + f"Authorization test result: {user} is {'ALLOWED' if allowed else 'NOT ALLOWED'}." 
+ ) + + return allowed + + +def wait_policy_repo_polling_interval(opal_server_container: OpalServerContainer): + # Allow time for the update to propagate + propagation_time = 5 # seconds + for i in range( + int(opal_server_container.settings.polling_interval) + propagation_time, 0, -1 + ): + logger.debug( + f"waiting for OPAL server to pull the new policy {i} secondes left", + end="\r", + ) + time.sleep(1) + + +def is_port_available(port): + # Determine the platform (Linux or macOS) + system_platform = platform.system().lower() + + # Run the appropriate netstat command based on the platform + if system_platform == "darwin": # macOS + result = subprocess.run( + ["netstat", "-an"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + # macOS 'netstat' shows *. format for listening ports + if f".{port} " in result.stdout: + return False # Port is in use + else: # Linux + result = subprocess.run( + ["netstat", "-an"], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + # Linux 'netstat' shows 0.0.0.0: or ::: format for listening ports + if f":{port} " in result.stdout or f"::{port} " in result.stdout: + return False # Port is in use + + return True # Port is available + + +def find_available_port(starting_port=5001): + port = starting_port + while True: + if is_port_available(port): + return port + port += 1 + + +def publish_data_update( + server_url: str, + server_route: str, + token: str, + src_url: str = None, + reason: str = "", + topics: list[str] = ["policy_data"], + data: str = None, + src_config: dict[str, any] = None, + dst_path: str = "", + save_method: str = "PUT", +): + """Publish a DataUpdate through an OPAL-server. + + Args: + server_url (str): URL of the OPAL-server. + server_route (str): Route in the server for updates. + token (str): JWT token for authentication. + src_url (Optional[str]): URL of the data source. + reason (str): Reason for the update. + topics (Optional[List[str]]): Topics for the update. 
+ data (Optional[str]): Data to include in the update. + src_config (Optional[Dict[str, Any]]): Fetching config as JSON. + dst_path (str): Destination path in the client data store. + save_method (str): Method to save data (e.g., "PUT"). + """ + entries = [] + if src_url: + entries.append( + { + "url": src_url, + "data": json.loads(data) if data else None, + "topics": topics or ["policy_data"], # Ensure topics is not None + "dst_path": dst_path, + "save_method": save_method, + "config": src_config, + } + ) + + update_payload = {"entries": entries, "reason": reason} + + async def send_update(): + headers = {"content-type": "application/json"} + if token: + headers["Authorization"] = f"Bearer {token}" + + async with aiohttp.ClientSession(headers=headers) as session: + async with session.post( + f"{server_url}{server_route}", json=update_payload + ) as response: + if response.status == 200: + return "Event Published Successfully" + else: + error_text = await response.text() + raise RuntimeError( + f"Failed with status {response.status}: {error_text}" + ) + + return asyncio.run(send_update()) + + +def publish_data_update_with_curl( + server_url: str, + server_route: str, + token: str, + src_url: str = None, + reason: str = "", + topics: list[str] = ["policy_data"], + data: str = None, + src_config: dict[str, any] = None, + dst_path: str = "", + save_method: str = "PUT", +): + """Publish a DataUpdate through an OPAL-server using curl. + # Example usage + # publish_data_update_with_curl("http://example.com", "/update", "your-token", src_url="http://data-source") + + Args: + server_url (str): URL of the OPAL-server. + server_route (str): Route in the server for updates. + token (str): JWT token for authentication. + src_url (Optional[str]): URL of the data source. + reason (str): Reason for the update. + topics (Optional[List[str]]): Topics for the update. + data (Optional[str]): Data to include in the update. 
+ src_config (Optional[Dict[str, Any]]): Fetching config as JSON. + dst_path (str): Destination path in the client data store. + save_method (str): Method to save data (e.g., "PUT"). + """ + entries = [] + if src_url: + entries.append( + { + "url": src_url, + "data": json.loads(data) if data else None, + "topics": topics or ["policy_data"], # Ensure topics is not None + "dst_path": dst_path, + "save_method": save_method, + "config": src_config, + } + ) + + update_payload = {"entries": entries, "reason": reason} + + # Prepare headers for the curl command + headers = [ + "Content-Type: application/json", + ] + if token: + headers.append(f"Authorization: Bearer {token}") + + # Build the curl command + curl_command = [ + "curl", + "-X", + "POST", + f"{server_url}{server_route}", + "-H", + " -H ".join([f'"{header}"' for header in headers]), + "-d", + json.dumps(update_payload), + ] + + # Execute the curl command + try: + result = subprocess.run( + curl_command, capture_output=True, text=True, check=True + ) + if result.returncode == 0: + return "Event Published Successfully" + else: + raise RuntimeError( + f"Failed with status {result.returncode}: {result.stderr}" + ) + except subprocess.CalledProcessError as e: + raise RuntimeError(f"Error executing curl: {e.stderr}") + + +def get_client_and_server_count(json_data): + """Extracts the client_count and server_count from a given JSON string. + + Args: + json_data (str): A JSON string containing the client and server counts. + + Returns: + dict: A dictionary with keys 'client_count' and 'server_count'. 
+ """ + try: + # Parse the JSON string + data = json.loads(json_data) + + # Extract client and server counts + client_count = data.get("client_count", 0) + server_count = data.get("server_count", 0) + + return {"client_count": client_count, "server_count": server_count} + except json.JSONDecodeError: + raise ValueError("Invalid JSON input.") + + +def install_opal_server_and_client(): + logger.debug("- Installing opal-server and opal-client from pip...") + + try: + # Install opal-server and opal-client + subprocess.run( + [sys.executable, "-m", "pip", "install", "opal-server", "opal-client"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + check=True, + ) + + # Verify installation + opal_server_installed = ( + subprocess.run( + ["opal-server"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True, + ).returncode + == 0 + ) + + opal_client_installed = ( + subprocess.run( + ["opal-client"], + stdout=subprocess.DEVNULL, + stderr=subprocess.DEVNULL, + shell=True, + ).returncode + == 0 + ) + + if not opal_server_installed or not opal_client_installed: + logger.debug( + "Installation failed: opal-server or opal-client is not available." + ) + sys.exit(1) + + logger.debug("- opal-server and opal-client successfully installed.") + + except subprocess.CalledProcessError: + logger.debug("Installation failed: pip command encountered an error.") + sys.exit(1) + + +def export_env(varname, value): + """Exports an environment variable with a given value and updates the + current environment. + + Args: + varname (str): The name of the environment variable to set. + value (str): The value to assign to the environment variable. + + Returns: + str: The value assigned to the environment variable. + + Side Effects: + Prints the export statement to the console and sets the environment variable. 
+ """ + + logger.debug(f"export {varname}={value}") + os.environ[varname] = value + + return value + + +def remove_env(varname): + """Removes an environment variable from the current environment. + + Args: + varname (str): The name of the environment variable to remove. + + Returns: + None + + Side Effects: + Prints the unset statement to the console and removes the environment variable. + """ + logger.debug(f"unset {varname}") + del os.environ[varname] + + return + + +def create_localtunnel(port=8000): + try: + # Run the LocalTunnel command + process = subprocess.Popen( + ["lt", "--port", str(port)], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, + ) + + # Read output line by line + for line in iter(process.stdout.readline, ""): + # Match the public URL from LocalTunnel output + match = re.search(r"https://[a-z0-9\-]+\.loca\.lt", line) + if match: + public_url = match.group(0) + logger.debug(f"Public URL: {public_url}") + return public_url + + except Exception as e: + logger.debug(f"Error starting LocalTunnel: {e}") + + return None + + +import sys + + +def global_exception_handler(exc_type, exc_value, exc_traceback): + if issubclass(exc_type, KeyboardInterrupt): + # Allow Ctrl+C to exit the program without a traceback + sys.__excepthook__(exc_type, exc_value, exc_traceback) + return + + # Log or print the exception details + logger.debug(f"Uncaught exception: {exc_type.__name__}: {exc_value}") + + +# Set the global exception handler +sys.excepthook = global_exception_handler