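# Reusable integration-test workflow for the dbt adapter packages in this repo.
# A minimal caller sketch, assuming this file lives at
# .github/workflows/integration-tests.yml (the path is an assumption):
#
#   jobs:
#     integration-tests:
#       uses: dbt-labs/dbt-adapters/.github/workflows/integration-tests.yml@main
#       with:
#         package: "dbt-postgres"
#         python-version: "3.9"
#       secrets: inherit
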
name: "Integration tests"
on:
workflow_call:
inputs:
package:
description: "Choose the package to test"
type: string
required: true
branch:
description: "Choose the branch to test"
type: string
default: "main"
repository:
description: "Choose the repository to test, when using a fork"
type: string
default: "dbt-labs/dbt-adapters"
os:
description: "Choose the OS to test against"
type: string
default: "ubuntu-22.04"
python-version:
description: "Choose the Python version to test against"
type: string
default: "3.9"
workflow_dispatch:
inputs:
package:
description: "Choose the package to test"
type: choice
options:
- "dbt-athena"
- "dbt-athena-community"
- "dbt-bigquery"
- "dbt-postgres"
- "dbt-redshift"
- "dbt-spark"
branch:
description: "Choose the branch to test"
type: string
default: "main"
repository:
description: "Choose the repository to test, when using a fork"
type: string
default: "dbt-labs/dbt-adapters"
os:
description: "Choose the OS to test against"
type: string
default: "ubuntu-22.04"
python-version:
description: "Choose the Python version to test against"
type: choice
options: ["3.9", "3.10", "3.11", "3.12"]
permissions:
  id-token: write
  contents: read
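
# The DD_* values configure Datadog CI Visibility in agentless mode; the
# --ddtrace flag on several pytest invocations below reports test results there.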
env:
  DBT_INVOCATION_ENV: ${{ vars.DBT_INVOCATION_ENV }}
  DD_CIVISIBILITY_AGENTLESS_ENABLED: ${{ vars.DD_CIVISIBILITY_AGENTLESS_ENABLED }}
  DD_API_KEY: ${{ secrets.DD_API_KEY }}
  DD_SITE: ${{ vars.DD_SITE }}
  DD_ENV: ${{ vars.DD_ENV }}
  DD_SERVICE: ${{ github.event.repository.name }} # this can change per run because of forks
  DBT_TEST_USER_1: ${{ vars.DBT_TEST_USER_1 }}
  DBT_TEST_USER_2: ${{ vars.DBT_TEST_USER_2 }}
  DBT_TEST_USER_3: ${{ vars.DBT_TEST_USER_3 }}

jobs:
  integration-tests-athena:
    if: ${{ inputs.package == 'dbt-athena' || inputs.package == 'dbt-athena-community' }}
    runs-on: ${{ inputs.os }}
    environment:
      name: "dbt-athena"
    env:
      DBT_TEST_ATHENA_DATABASE: ${{ vars.DBT_TEST_ATHENA_DATABASE }}
      DBT_TEST_ATHENA_NUM_RETRIES: ${{ vars.DBT_TEST_ATHENA_NUM_RETRIES }}
      DBT_TEST_ATHENA_POLL_INTERVAL: ${{ vars.DBT_TEST_ATHENA_POLL_INTERVAL }}
      DBT_TEST_ATHENA_REGION_NAME: ${{ vars.DBT_TEST_ATHENA_REGION_NAME }}
      DBT_TEST_ATHENA_S3_STAGING_DIR: ${{ vars.DBT_TEST_ATHENA_S3_STAGING_DIR }}
      DBT_TEST_ATHENA_S3_TMP_TABLE_DIR: ${{ vars.DBT_TEST_ATHENA_S3_TMP_TABLE_DIR }}
      DBT_TEST_ATHENA_SCHEMA: ${{ vars.DBT_TEST_ATHENA_SCHEMA }}
      DBT_TEST_ATHENA_THREADS: ${{ vars.DBT_TEST_ATHENA_THREADS }}
      DBT_TEST_ATHENA_WORK_GROUP: ${{ vars.DBT_TEST_ATHENA_WORK_GROUP }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
          repository: ${{ inputs.repository }}
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
      - uses: pypa/hatch@install
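      # Exchanges the GitHub OIDC token for short-lived AWS credentials; this is
      # what the id-token: write permission above is for, so no static keys are needed.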
      - uses: aws-actions/configure-aws-credentials@v4
        with:
          role-to-assume: arn:aws:iam::${{ secrets.AWS_ACCOUNT_ID }}:role/${{ secrets.ASSUMABLE_ROLE_NAME }}
          aws-region: ${{ vars.DBT_TEST_ATHENA_REGION_NAME }}
      - run: hatch run integration-tests
        working-directory: ./${{ inputs.package }}

  integration-tests-bigquery:
    if: ${{ inputs.package == 'dbt-bigquery' }}
    runs-on: ${{ inputs.os }}
    environment:
      name: "dbt-bigquery"
    env:
      BIGQUERY_TEST_SERVICE_ACCOUNT_JSON: ${{ secrets.BIGQUERY_TEST_SERVICE_ACCOUNT_JSON }}
      BIGQUERY_TEST_ALT_DATABASE: ${{ vars.BIGQUERY_TEST_ALT_DATABASE }}
      BIGQUERY_TEST_NO_ACCESS_DATABASE: ${{ vars.BIGQUERY_TEST_NO_ACCESS_DATABASE }}
      DBT_TEST_USER_1: ${{ vars.DBT_TEST_USER_1 }}
      DBT_TEST_USER_2: ${{ vars.DBT_TEST_USER_2 }}
      DBT_TEST_USER_3: ${{ vars.DBT_TEST_USER_3 }}
      DATAPROC_REGION: ${{ vars.DATAPROC_REGION }}
      DATAPROC_CLUSTER_NAME: ${{ vars.DATAPROC_CLUSTER_NAME }}
      GCS_BUCKET: ${{ vars.GCS_BUCKET }}
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
          repository: ${{ inputs.repository }}
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
      - uses: pypa/hatch@install
      - run: hatch run integration-tests tests/functional -k "not TestPython"
        working-directory: ./${{ inputs.package }}
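      # -n1 is the pytest-xdist worker count; a single worker keeps the
      # TestPython cases from running concurrently with each other.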
      - run: hatch run integration-tests tests/functional -n1 -k "TestPython"
        if: ${{ inputs.python-version == '3.9' }} # run for only one Python version; these tests must run in series
        working-directory: ./${{ inputs.package }}

  integration-tests-postgres:
    if: inputs.package == 'dbt-postgres'
    runs-on: ${{ inputs.os }}
    environment:
      name: ${{ inputs.package }}
    env:
      POSTGRES_TEST_HOST: ${{ vars.POSTGRES_TEST_HOST }}
      POSTGRES_TEST_PORT: ${{ vars.POSTGRES_TEST_PORT }}
      POSTGRES_TEST_USER: ${{ vars.POSTGRES_TEST_USER }}
      POSTGRES_TEST_PASS: ${{ secrets.POSTGRES_TEST_PASS }}
      POSTGRES_TEST_DATABASE: ${{ vars.POSTGRES_TEST_DATABASE }}
      POSTGRES_TEST_THREADS: ${{ vars.POSTGRES_TEST_THREADS }}
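    # Throwaway Postgres service container; the host port comes from repo vars
    # and maps onto the container's default port 5432.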
    services:
      postgres:
        image: postgres
        env:
          POSTGRES_PASSWORD: postgres
        options: >-
          --health-cmd pg_isready
          --health-interval 10s
          --health-timeout 5s
          --health-retries 5
        ports:
          - ${{ vars.POSTGRES_TEST_PORT }}:5432
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
          repository: ${{ inputs.repository }}
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
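      # Seed the service container with the test database objects, connecting
      # as the superuser configured on the service above.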
      - run: psql -f ./scripts/setup_test_database.sql
        working-directory: ./dbt-postgres
        env:
          PGHOST: ${{ vars.POSTGRES_TEST_HOST }}
          PGPORT: ${{ vars.POSTGRES_TEST_PORT }}
          PGUSER: postgres
          PGPASSWORD: postgres
          PGDATABASE: postgres
      - uses: pypa/hatch@install
      - run: hatch run integration-tests tests/functional --ddtrace
        working-directory: ./${{ inputs.package }}
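
  # Verifies which psycopg2 distribution actually ends up installed. Presumably
  # scripts/psycopg2-check.sh installs the package and asserts its name matches
  # PSYCOPG2_EXPECTED_NAME, with DBT_PSYCOPG2_NAME and PSYCOPG2_WORKAROUND
  # selecting the override paths exercised by the steps below.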
  integration-tests-postgres-psycopg2-check:
    if: inputs.package == 'dbt-postgres'
    runs-on: ${{ inputs.os }}
    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
      - if: startsWith(inputs.os, 'macos')
        run: brew install postgresql
      - name: "Test psycopg2 - default"
        run: scripts/psycopg2-check.sh
        working-directory: ./${{ inputs.package }}
        env:
          PSYCOPG2_EXPECTED_NAME: psycopg2-binary
      - name: "Test psycopg2 - invalid override"
        run: scripts/psycopg2-check.sh
        working-directory: ./${{ inputs.package }}
        env:
          DBT_PSYCOPG2_NAME: rubber-baby-buggy-bumpers
          PSYCOPG2_EXPECTED_NAME: psycopg2-binary
      - name: "Test psycopg2 - override"
        run: scripts/psycopg2-check.sh
        working-directory: ./${{ inputs.package }}
        env:
          DBT_PSYCOPG2_NAME: psycopg2
          PSYCOPG2_EXPECTED_NAME: psycopg2-binary # the override hook is not implemented yet, so the default is still expected
- name: "Test psycopg2 - manual override"
# verify that the workaround documented in the `README.md` continues to work
run: scripts/psycopg2-check.sh
working-directory: ./${{ inputs.package }}
env:
PSYCOPG2_WORKAROUND: true
PSYCOPG2_EXPECTED_NAME: psycopg2
integration-tests-redshift:
if: ${{ inputs.package == 'dbt-redshift' }}
runs-on: ${{ inputs.os }}
environment:
name: "dbt-redshift"
env:
AWS_USER_PROFILE: ${{ vars.AWS_USER_PROFILE }}
AWS_USER_ACCESS_KEY_ID: ${{ vars.AWS_USER_ACCESS_KEY_ID }}
AWS_USER_SECRET_ACCESS_KEY: ${{ secrets.AWS_USER_SECRET_ACCESS_KEY }}
AWS_SOURCE_PROFILE: ${{ vars.AWS_SOURCE_PROFILE }}
AWS_ROLE_PROFILE: ${{ vars.AWS_ROLE_PROFILE }}
AWS_ROLE_ACCESS_KEY_ID: ${{ vars.AWS_ROLE_ACCESS_KEY_ID }}
AWS_ROLE_SECRET_ACCESS_KEY: ${{ secrets.AWS_ROLE_SECRET_ACCESS_KEY }}
AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }}
AWS_REGION: ${{ vars.AWS_REGION }}
REDSHIFT_TEST_DBNAME: ${{ vars.REDSHIFT_TEST_DBNAME }}
REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
REDSHIFT_TEST_USER: ${{ vars.REDSHIFT_TEST_USER }}
REDSHIFT_TEST_PORT: ${{ vars.REDSHIFT_TEST_PORT }}
REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
REDSHIFT_TEST_CLUSTER_ID: ${{ vars.REDSHIFT_TEST_CLUSTER_ID }}
REDSHIFT_TEST_REGION: ${{ vars.AWS_REGION }}
REDSHIFT_TEST_IAM_USER_PROFILE: ${{ vars.AWS_USER_PROFILE }}
REDSHIFT_TEST_IAM_USER_ACCESS_KEY_ID: ${{ vars.AWS_USER_ACCESS_KEY_ID }}
REDSHIFT_TEST_IAM_USER_SECRET_ACCESS_KEY: ${{ secrets.AWS_USER_SECRET_ACCESS_KEY }}
REDSHIFT_TEST_IAM_ROLE_PROFILE: ${{ vars.AWS_ROLE_PROFILE }}
DBT_TEST_USER_1: ${{ vars.DBT_TEST_USER_1 }}
DBT_TEST_USER_2: ${{ vars.DBT_TEST_USER_2 }}
DBT_TEST_USER_3: ${{ vars.DBT_TEST_USER_3 }}
steps:
- uses: actions/checkout@v4
with:
ref: ${{ inputs.branch }}
repository: ${{ inputs.repository }}
- uses: actions/setup-python@v5
with:
python-version: ${{ inputs.python-version }}
- uses: pypa/hatch@install
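      # The step below chains three profiles: an IAM user, a source profile, and
      # a role profile that assumes AWS_ROLE_ARN via that source profile. It is
      # roughly equivalent to writing this to ~/.aws/config (a sketch):
      #
      #   [profile <AWS_ROLE_PROFILE>]
      #   source_profile = <AWS_SOURCE_PROFILE>
      #   role_arn = <AWS_ROLE_ARN>
      #   region = <AWS_REGION>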
      - name: Create AWS IAM profiles
        run: |
          aws configure --profile $AWS_USER_PROFILE set aws_access_key_id $AWS_USER_ACCESS_KEY_ID
          aws configure --profile $AWS_USER_PROFILE set aws_secret_access_key $AWS_USER_SECRET_ACCESS_KEY
          aws configure --profile $AWS_USER_PROFILE set region $AWS_REGION
          aws configure --profile $AWS_USER_PROFILE set output json

          aws configure --profile $AWS_SOURCE_PROFILE set aws_access_key_id $AWS_ROLE_ACCESS_KEY_ID
          aws configure --profile $AWS_SOURCE_PROFILE set aws_secret_access_key $AWS_ROLE_SECRET_ACCESS_KEY
          aws configure --profile $AWS_SOURCE_PROFILE set region $AWS_REGION
          aws configure --profile $AWS_SOURCE_PROFILE set output json

          aws configure --profile $AWS_ROLE_PROFILE set source_profile $AWS_SOURCE_PROFILE
          aws configure --profile $AWS_ROLE_PROFILE set role_arn $AWS_ROLE_ARN
          aws configure --profile $AWS_ROLE_PROFILE set region $AWS_REGION
          aws configure --profile $AWS_ROLE_PROFILE set output json
      - run: hatch run integration-tests tests/functional -m "not flaky" --ddtrace
        working-directory: ./${{ inputs.package }}
      - run: hatch run integration-tests tests/functional -m flaky -n1 --ddtrace
        working-directory: ./${{ inputs.package }}

  integration-tests-spark:
    if: ${{ inputs.package == 'dbt-spark' }}
    runs-on: ${{ inputs.os }}
    environment:
      name: "dbt-spark"
    env:
      DBT_DATABRICKS_CLUSTER_NAME: ${{ secrets.DBT_DATABRICKS_CLUSTER_NAME }}
      DBT_DATABRICKS_HOST_NAME: ${{ secrets.DBT_DATABRICKS_HOST_NAME }}
      DBT_DATABRICKS_ENDPOINT: ${{ secrets.DBT_DATABRICKS_ENDPOINT }}
      DBT_DATABRICKS_TOKEN: ${{ secrets.DBT_DATABRICKS_TOKEN }}
      DBT_DATABRICKS_USER: ${{ secrets.DBT_DATABRICKS_USER }}
    strategy:
      fail-fast: false
      matrix:
        profile:
          - "apache_spark"
          - "spark_session"
          - "spark_http_odbc"
          - "databricks_sql_endpoint"
          - "databricks_cluster"
          - "databricks_http_cluster"
    steps:
      - uses: actions/checkout@v4
        with:
          ref: ${{ inputs.branch }}
          repository: ${{ inputs.repository }}
      - uses: actions/setup-python@v5
        with:
          python-version: ${{ inputs.python-version }}
      - uses: pypa/hatch@install
      - run: hatch run pip install -r dagger/requirements.txt
        working-directory: ./${{ inputs.package }}
      - run: hatch run integration-tests --profile ${{ matrix.profile }}
        working-directory: ./${{ inputs.package }}