diff --git a/.github/ISSUE_TEMPLATE/bug-report.yml b/.github/ISSUE_TEMPLATE/bug-report.yml
index 3fcfe3d37..30cbba577 100644
--- a/.github/ISSUE_TEMPLATE/bug-report.yml
+++ b/.github/ISSUE_TEMPLATE/bug-report.yml
@@ -37,6 +37,8 @@ body:
required: false
- label: dbt-bigquery
required: false
+ - label: dbt-redshift
+ required: false
- label: dbt-spark
required: false
- type: textarea
diff --git a/.github/ISSUE_TEMPLATE/regression-report.yml b/.github/ISSUE_TEMPLATE/regression-report.yml
index 1df804d6e..eefc1e41f 100644
--- a/.github/ISSUE_TEMPLATE/regression-report.yml
+++ b/.github/ISSUE_TEMPLATE/regression-report.yml
@@ -32,6 +32,8 @@ body:
required: false
- label: dbt-bigquery
required: false
+ - label: dbt-redshift
+ required: false
- label: dbt-spark
required: false
- type: textarea
diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index cea36c85c..7954afb6f 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -7,6 +7,7 @@ updates:
- "/dbt-athena"
- "/dbt-athena-community"
- "/dbt-bigquery"
+ - "/dbt-redshift"
- "/dbt-spark"
schedule:
interval: "daily"
@@ -18,6 +19,7 @@ updates:
- package-ecosystem: "docker"
directories:
- "/dbt-bigquery/docker"
+ - "/dbt-redshift/docker"
- "/dbt-spark/docker"
schedule:
interval: "weekly"
diff --git a/.github/workflows/_integration-tests.yml b/.github/workflows/_integration-tests.yml
index c6a40f13b..856ba1b44 100644
--- a/.github/workflows/_integration-tests.yml
+++ b/.github/workflows/_integration-tests.yml
@@ -127,6 +127,65 @@ jobs:
if: ${{ inputs.python-version == '3.9' }} # we only run this for one version to run in series
working-directory: ./${{ inputs.package }}
+ integration-tests-redshift:
+ if: ${{ inputs.package == 'dbt-redshift' }}
+ runs-on: ${{ inputs.os }}
+ environment:
+ name: "dbt-redshift"
+ env:
+ AWS_USER_PROFILE: ${{ vars.AWS_USER_PROFILE }}
+ AWS_USER_ACCESS_KEY_ID: ${{ vars.AWS_USER_ACCESS_KEY_ID }}
+ AWS_USER_SECRET_ACCESS_KEY: ${{ secrets.AWS_USER_SECRET_ACCESS_KEY }}
+ AWS_SOURCE_PROFILE: ${{ vars.AWS_SOURCE_PROFILE }}
+ AWS_ROLE_PROFILE: ${{ vars.AWS_ROLE_PROFILE }}
+ AWS_ROLE_ACCESS_KEY_ID: ${{ vars.AWS_ROLE_ACCESS_KEY_ID }}
+ AWS_ROLE_SECRET_ACCESS_KEY: ${{ secrets.AWS_ROLE_SECRET_ACCESS_KEY }}
+ AWS_ROLE_ARN: ${{ secrets.AWS_ROLE_ARN }}
+ AWS_REGION: ${{ vars.AWS_REGION }}
+ REDSHIFT_TEST_DBNAME: ${{ vars.REDSHIFT_TEST_DBNAME }}
+ REDSHIFT_TEST_PASS: ${{ secrets.REDSHIFT_TEST_PASS }}
+ REDSHIFT_TEST_USER: ${{ vars.REDSHIFT_TEST_USER }}
+ REDSHIFT_TEST_PORT: ${{ vars.REDSHIFT_TEST_PORT }}
+ REDSHIFT_TEST_HOST: ${{ secrets.REDSHIFT_TEST_HOST }}
+ REDSHIFT_TEST_CLUSTER_ID: ${{ vars.REDSHIFT_TEST_CLUSTER_ID }}
+ REDSHIFT_TEST_REGION: ${{ vars.AWS_REGION }}
+ REDSHIFT_TEST_IAM_USER_PROFILE: ${{ vars.AWS_USER_PROFILE }}
+ REDSHIFT_TEST_IAM_USER_ACCESS_KEY_ID: ${{ vars.AWS_USER_ACCESS_KEY_ID }}
+ REDSHIFT_TEST_IAM_USER_SECRET_ACCESS_KEY: ${{ secrets.AWS_USER_SECRET_ACCESS_KEY }}
+ REDSHIFT_TEST_IAM_ROLE_PROFILE: ${{ vars.AWS_ROLE_PROFILE }}
+ DBT_TEST_USER_1: ${{ vars.DBT_TEST_USER_1 }}
+ DBT_TEST_USER_2: ${{ vars.DBT_TEST_USER_2 }}
+ DBT_TEST_USER_3: ${{ vars.DBT_TEST_USER_3 }}
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ ref: ${{ inputs.branch }}
+ repository: ${{ inputs.repository }}
+ - uses: actions/setup-python@v5
+ with:
+ python-version: ${{ inputs.python-version }}
+ - uses: pypa/hatch@install
+ - name: Create AWS IAM profiles
+ run: |
+ aws configure --profile $AWS_USER_PROFILE set aws_access_key_id $AWS_USER_ACCESS_KEY_ID
+ aws configure --profile $AWS_USER_PROFILE set aws_secret_access_key $AWS_USER_SECRET_ACCESS_KEY
+ aws configure --profile $AWS_USER_PROFILE set region $AWS_REGION
+ aws configure --profile $AWS_USER_PROFILE set output json
+
+ aws configure --profile $AWS_SOURCE_PROFILE set aws_access_key_id $AWS_ROLE_ACCESS_KEY_ID
+ aws configure --profile $AWS_SOURCE_PROFILE set aws_secret_access_key $AWS_ROLE_SECRET_ACCESS_KEY
+ aws configure --profile $AWS_SOURCE_PROFILE set region $AWS_REGION
+ aws configure --profile $AWS_SOURCE_PROFILE set output json
+
+ aws configure --profile $AWS_ROLE_PROFILE set source_profile $AWS_SOURCE_PROFILE
+ aws configure --profile $AWS_ROLE_PROFILE set role_arn $AWS_ROLE_ARN
+ aws configure --profile $AWS_ROLE_PROFILE set region $AWS_REGION
+ aws configure --profile $AWS_ROLE_PROFILE set output json
+ - run: hatch run integration-tests tests/functional -m "not flaky" --ddtrace
+ working-directory: ./${{ inputs.package }}
+ - run: hatch run integration-tests tests/functional -m flaky -n1 --ddtrace
+ working-directory: ./${{ inputs.package }}
+
integration-tests-spark:
if: ${{ inputs.package == 'dbt-spark' }}
runs-on: ${{ inputs.os }}
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 112c7c94f..41b85b39b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -34,12 +34,13 @@ repos:
rev: 7.1.1
hooks:
- id: flake8
- exclude: dbt/adapters/events/adapter_types_pb2.py|tests/functional/|dbt-spark/tests/|dbt-bigquery/tests/
+ exclude: dbt/adapters/events/adapter_types_pb2.py|tests/functional/|dbt-spark/tests/|dbt-bigquery/tests/|dbt-redshift/tests
args:
- --max-line-length=99
- --select=E,F,W
- --ignore=E203,E501,E704,E741,W503,W504
- --per-file-ignores=*/__init__.py:F401,*/conftest.py:F401
+ additional_dependencies: [flaky]
- repo: https://github.com/pre-commit/mirrors-mypy
rev: v1.11.2
diff --git a/dbt-redshift/.changes/0.0.0.md b/dbt-redshift/.changes/0.0.0.md
new file mode 100644
index 000000000..f6d8d31d5
--- /dev/null
+++ b/dbt-redshift/.changes/0.0.0.md
@@ -0,0 +1,9 @@
+## Previous Releases
+For information on prior major and minor releases, see their changelogs:
+- [1.6](https://github.com/dbt-labs/dbt-redshift/blob/1.6.latest/CHANGELOG.md)
+- [1.5](https://github.com/dbt-labs/dbt-redshift/blob/1.5.latest/CHANGELOG.md)
+- [1.4](https://github.com/dbt-labs/dbt-redshift/blob/1.4.latest/CHANGELOG.md)
+- [1.3](https://github.com/dbt-labs/dbt-redshift/blob/1.3.latest/CHANGELOG.md)
+- [1.2](https://github.com/dbt-labs/dbt-redshift/blob/1.2.latest/CHANGELOG.md)
+- [1.1](https://github.com/dbt-labs/dbt-redshift/blob/1.1.latest/CHANGELOG.md)
+- [1.0](https://github.com/dbt-labs/dbt-redshift/blob/1.0.latest/CHANGELOG.md)
diff --git a/dbt-redshift/.changes/README.md b/dbt-redshift/.changes/README.md
new file mode 100644
index 000000000..6fd5866b8
--- /dev/null
+++ b/dbt-redshift/.changes/README.md
@@ -0,0 +1,3 @@
+# CHANGELOG
+
+For information about how the changelog operates, we suggest reading the [README](https://github.com/dbt-labs/dbt-redshift/blob/main/.changes/README.md) found in `dbt-redshift`.
diff --git a/dbt-redshift/.changes/header.tpl.md b/dbt-redshift/.changes/header.tpl.md
new file mode 100644
index 000000000..d77a00b69
--- /dev/null
+++ b/dbt-redshift/.changes/header.tpl.md
@@ -0,0 +1,6 @@
+# dbt-redshift Changelog
+
+- This file provides a full account of all changes to `dbt-redshift`.
+- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
+- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
+- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-redshift/blob/main/CONTRIBUTING.md#adding-changelog-entry)
diff --git a/dbt-redshift/.changes/unreleased/unreleased/.gitkeep b/dbt-redshift/.changes/unreleased/unreleased/.gitkeep
new file mode 100644
index 000000000..e69de29bb
diff --git a/dbt-redshift/.changes/unreleased/unreleased/Features-20241217-181340.yaml b/dbt-redshift/.changes/unreleased/unreleased/Features-20241217-181340.yaml
new file mode 100644
index 000000000..a2c1c523f
--- /dev/null
+++ b/dbt-redshift/.changes/unreleased/unreleased/Features-20241217-181340.yaml
@@ -0,0 +1,6 @@
+kind: Features
+body: Add IdpTokenAuthPlugin authentication method.
+time: 2024-12-17T18:13:40.281494-08:00
+custom:
+ Author: versusfacit
+ Issue: "898"
diff --git a/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241204-185729.yaml b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241204-185729.yaml
new file mode 100644
index 000000000..276308126
--- /dev/null
+++ b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241204-185729.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Add retry logic for retryable exceptions
+time: 2024-12-04T18:57:29.925299-08:00
+custom:
+ Author: versusfacit colin-rogers-dbt
+ Issue: "960"
diff --git a/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241207-165918.yaml b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241207-165918.yaml
new file mode 100644
index 000000000..96d7da708
--- /dev/null
+++ b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241207-165918.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Move from setup.py to pyproject.toml and to hatch as a dev tool
+time: 2024-12-07T16:59:18.731819-05:00
+custom:
+ Author: mikealfare
+ Issue: "951"
diff --git a/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241211-145132.yaml b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241211-145132.yaml
new file mode 100644
index 000000000..561b03f83
--- /dev/null
+++ b/dbt-redshift/.changes/unreleased/unreleased/Under the Hood-20241211-145132.yaml
@@ -0,0 +1,6 @@
+kind: Under the Hood
+body: Refactor to use new batch context variables
+time: 2024-12-11T14:51:32.239224-06:00
+custom:
+ Author: QMalcolm
+ Issue: "966"
diff --git a/dbt-redshift/.changie.yaml b/dbt-redshift/.changie.yaml
new file mode 100644
index 000000000..620305ac4
--- /dev/null
+++ b/dbt-redshift/.changie.yaml
@@ -0,0 +1,130 @@
+changesDir: .changes
+unreleasedDir: unreleased
+headerPath: header.tpl.md
+versionHeaderPath: ""
+changelogPath: CHANGELOG.md
+versionExt: md
+envPrefix: "CHANGIE_"
+versionFormat: '## dbt-redshift {{.Version}} - {{.Time.Format "January 02, 2006"}}'
+kindFormat: '### {{.Kind}}'
+changeFormat: |-
+ {{- $IssueList := list }}
+ {{- $changes := splitList " " $.Custom.Issue }}
+ {{- range $issueNbr := $changes }}
+ {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-redshift/issues/nbr)" | replace "nbr" $issueNbr }}
+ {{- $IssueList = append $IssueList $changeLink }}
+ {{- end -}}
+ - {{.Body}} ({{ range $index, $element := $IssueList }}{{if $index}}, {{end}}{{$element}}{{end}})
+kinds:
+- label: Breaking Changes
+- label: Features
+- label: Fixes
+- label: Under the Hood
+- label: Dependencies
+ changeFormat: |-
+ {{- $PRList := list }}
+ {{- $changes := splitList " " $.Custom.PR }}
+ {{- range $pullrequest := $changes }}
+ {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-redshift/pull/nbr)" | replace "nbr" $pullrequest }}
+ {{- $PRList = append $PRList $changeLink }}
+ {{- end -}}
+ - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
+ skipGlobalChoices: true
+ additionalChoices:
+ - key: Author
+ label: GitHub Username(s) (separated by a single space if multiple)
+ type: string
+ minLength: 3
+ - key: PR
+ label: GitHub Pull Request Number (separated by a single space if multiple)
+ type: string
+ minLength: 1
+- label: Security
+ changeFormat: |-
+ {{- $PRList := list }}
+ {{- $changes := splitList " " $.Custom.PR }}
+ {{- range $pullrequest := $changes }}
+ {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-redshift/pull/nbr)" | replace "nbr" $pullrequest }}
+ {{- $PRList = append $PRList $changeLink }}
+ {{- end -}}
+ - {{.Body}} ({{ range $index, $element := $PRList }}{{if $index}}, {{end}}{{$element}}{{end}})
+ skipGlobalChoices: true
+ additionalChoices:
+ - key: Author
+ label: GitHub Username(s) (separated by a single space if multiple)
+ type: string
+ minLength: 3
+ - key: PR
+ label: GitHub Pull Request Number (separated by a single space if multiple)
+ type: string
+ minLength: 1
+
+newlines:
+ afterChangelogHeader: 1
+ afterKind: 1
+ afterChangelogVersion: 1
+ beforeKind: 1
+ endOfVersion: 1
+
+custom:
+- key: Author
+ label: GitHub Username(s) (separated by a single space if multiple)
+ type: string
+ minLength: 3
+- key: Issue
+ label: GitHub Issue Number (separated by a single space if multiple)
+ type: string
+ minLength: 1
+
+footerFormat: |
+ {{- $contributorDict := dict }}
+ {{- /* ensure all names in this list are all lowercase for later matching purposes */}}
+ {{- $core_team := splitList " " .Env.CORE_TEAM }}
+ {{- /* ensure we always skip snyk and dependabot in addition to the core team */}}
+ {{- $maintainers := list "dependabot[bot]" "snyk-bot"}}
+ {{- range $team_member := $core_team }}
+ {{- $team_member_lower := lower $team_member }}
+ {{- $maintainers = append $maintainers $team_member_lower }}
+ {{- end }}
+ {{- range $change := .Changes }}
+ {{- $authorList := splitList " " $change.Custom.Author }}
+ {{- /* loop through all authors for a single changelog */}}
+ {{- range $author := $authorList }}
+ {{- $authorLower := lower $author }}
+ {{- /* we only want to include non-core team contributors */}}
+ {{- if not (has $authorLower $maintainers)}}
+ {{- $changeList := splitList " " $change.Custom.Author }}
+ {{- $IssueList := list }}
+ {{- $changeLink := $change.Kind }}
+ {{- if or (eq $change.Kind "Dependencies") (eq $change.Kind "Security") }}
+ {{- $changes := splitList " " $change.Custom.PR }}
+ {{- range $issueNbr := $changes }}
+ {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-redshift/pull/nbr)" | replace "nbr" $issueNbr }}
+ {{- $IssueList = append $IssueList $changeLink }}
+ {{- end -}}
+ {{- else }}
+ {{- $changes := splitList " " $change.Custom.Issue }}
+ {{- range $issueNbr := $changes }}
+ {{- $changeLink := "[#nbr](https://github.com/dbt-labs/dbt-redshift/issues/nbr)" | replace "nbr" $issueNbr }}
+ {{- $IssueList = append $IssueList $changeLink }}
+ {{- end -}}
+ {{- end }}
+ {{- /* check if this contributor has other changes associated with them already */}}
+ {{- if hasKey $contributorDict $author }}
+ {{- $contributionList := get $contributorDict $author }}
+ {{- $contributionList = concat $contributionList $IssueList }}
+ {{- $contributorDict := set $contributorDict $author $contributionList }}
+ {{- else }}
+ {{- $contributionList := $IssueList }}
+ {{- $contributorDict := set $contributorDict $author $contributionList }}
+ {{- end }}
+ {{- end}}
+ {{- end}}
+ {{- end }}
+ {{- /* no indentation here for formatting so the final markdown doesn't have unneeded indentations */}}
+ {{- if $contributorDict}}
+ ### Contributors
+ {{- range $k,$v := $contributorDict }}
+ - [@{{$k}}](https://github.com/{{$k}}) ({{ range $index, $element := $v }}{{if $index}}, {{end}}{{$element}}{{end}})
+ {{- end }}
+ {{- end }}
diff --git a/dbt-redshift/CHANGELOG.md b/dbt-redshift/CHANGELOG.md
new file mode 100644
index 000000000..04a8e7db1
--- /dev/null
+++ b/dbt-redshift/CHANGELOG.md
@@ -0,0 +1,16 @@
+# dbt-redshift Changelog
+
+- This file provides a full account of all changes to `dbt-redshift`.
+- Changes are listed under the (pre)release in which they first appear. Subsequent releases include changes from previous releases.
+- "Breaking changes" listed under a version may require action from end users or external maintainers when upgrading to that version.
+- Do not edit this file directly. This file is auto-generated using [changie](https://github.com/miniscruff/changie). For details on how to document a change, see [the contributing guide](https://github.com/dbt-labs/dbt-redshift/blob/main/CONTRIBUTING.md#adding-changelog-entry)
+
+## Previous Releases
+For information on prior major and minor releases, see their changelogs:
+- [1.6](https://github.com/dbt-labs/dbt-redshift/blob/1.6.latest/CHANGELOG.md)
+- [1.5](https://github.com/dbt-labs/dbt-redshift/blob/1.5.latest/CHANGELOG.md)
+- [1.4](https://github.com/dbt-labs/dbt-redshift/blob/1.4.latest/CHANGELOG.md)
+- [1.3](https://github.com/dbt-labs/dbt-redshift/blob/1.3.latest/CHANGELOG.md)
+- [1.2](https://github.com/dbt-labs/dbt-redshift/blob/1.2.latest/CHANGELOG.md)
+- [1.1](https://github.com/dbt-labs/dbt-redshift/blob/1.1.latest/CHANGELOG.md)
+- [1.0](https://github.com/dbt-labs/dbt-redshift/blob/1.0.latest/CHANGELOG.md)
diff --git a/dbt-redshift/CONTRIBUTING.md b/dbt-redshift/CONTRIBUTING.md
new file mode 100644
index 000000000..11a88a1cd
--- /dev/null
+++ b/dbt-redshift/CONTRIBUTING.md
@@ -0,0 +1,115 @@
+# Contributing to `dbt-redshift`
+
+1. [About this document](#about-this-document)
+2. [Getting the code](#getting-the-code)
+3. [Running `dbt-redshift` in development](#running-dbt-redshift-in-development)
+4. [Testing](#testing)
+5. [Updating Docs](#updating-docs)
+6. [Adding CHANGELOG Entry](#adding-changelog-entry)
+7. [Submitting a Pull Request](#submitting-a-pull-request)
+
+## About this document
+This document is a guide intended for folks interested in contributing to `dbt-redshift`. Below, we document the process by which members of the community should create issues and submit pull requests (PRs) in this repository. It is not intended as a guide for using `dbt-redshift`, and it assumes a certain level of familiarity with Python concepts such as virtualenvs, `pip`, python modules, filesystems, and so on. This guide assumes you are using macOS or Linux and are comfortable with the command line.
+
+For those wishing to contribute, we highly suggest reading the [dbt-core contributing guide](https://github.com/dbt-labs/dbt-core/blob/main/CONTRIBUTING.md), if you haven't already. Almost all of the information there is applicable to contributing here, too!
+
+### Signing the CLA
+
+Please note that all contributors to `dbt-redshift` must sign the [Contributor License Agreement](https://docs.getdbt.com/docs/contributor-license-agreements) to have their Pull Request merged into the `dbt-redshift` codebase. If you are unable to sign the CLA, the `dbt-redshift` maintainers will unfortunately be unable to merge your Pull Request. You are, however, welcome to open issues and comment on existing ones.
+
+
+## Getting the code
+
+You will need `git` in order to download and modify the `dbt-redshift` source code. You can find instructions on how to install `git` [here](https://github.com/git-guides/install-git).
+
+### External contributors
+
+If you are not a member of the `dbt-labs` GitHub organization, you can contribute to `dbt-redshift` by forking the `dbt-redshift` repository. For a detailed overview on forking, check out the [GitHub docs on forking](https://help.github.com/en/articles/fork-a-repo). In short, you will need to:
+
+1. fork the `dbt-redshift` repository
+2. clone your fork locally
+3. check out a new branch for your proposed changes
+4. push changes to your fork
+5. open a pull request against `dbt-labs/dbt-redshift` from your forked repository
+
+### dbt Labs contributors
+
+If you are a member of the `dbt Labs` GitHub organization, you will have push access to the `dbt-redshift` repo. Rather than forking `dbt-redshift` to make your changes, just clone the repository, check out a new branch, and push directly to that branch.
+
+
+## Running `dbt-redshift` in development
+
+### Installation
+
+First, make sure that you set up your `virtualenv` as described in [Setting up an environment](https://github.com/dbt-labs/dbt-core/blob/HEAD/CONTRIBUTING.md#setting-up-an-environment). Ensure you have the latest version of `pip` installed with `pip install --upgrade pip`. Next, install `dbt-redshift` and its development dependencies:
+
+```sh
+pip install -e . -r ./dev-requirements.txt
+```
+
+When `dbt-redshift` is installed this way, any changes you make to the `dbt-redshift` source code will be reflected immediately in your next `dbt run` command that uses `dbt-redshift`.
+
+To confirm you have the correct `dbt-core` and adapter versions installed, run `dbt --version`. Run `which dbt` to verify that the `dbt` executable on your path is the one from the virtualenv you intend to use.
+
+
+## Testing
+
+### Initial Setup
+
+`dbt-redshift` contains [unit](https://github.com/dbt-labs/dbt-redshift/tree/main/tests/unit) and [functional](https://github.com/dbt-labs/dbt-redshift/tree/main/tests/functional) tests. Functional tests require testing against an actual Redshift warehouse. We have CI set up to test against a Redshift warehouse during PR checks.
+
+In order to run functional tests locally, you will need a `test.env` file in the root of the repository that contains credentials for your Redshift warehouse.
+
+Note: This `test.env` file is git-ignored, but please be extra careful to never check in credentials or other sensitive information when developing. To create your `test.env` file, copy the provided example file, then supply your relevant credentials.
+
+```
+cp test.env.example test.env
+$EDITOR test.env
+```
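+
+As a rough sketch, the variables a `test.env` usually needs mirror the ones exported by this package's CI workflow (`REDSHIFT_TEST_*` and `DBT_TEST_USER_*`); the values below are placeholders only:
+
+```
+REDSHIFT_TEST_HOST=<your cluster or workgroup host>
+REDSHIFT_TEST_PORT=5439
+REDSHIFT_TEST_DBNAME=<test database name>
+REDSHIFT_TEST_USER=<test user>
+REDSHIFT_TEST_PASS=<test password>
+REDSHIFT_TEST_REGION=<aws region>
+DBT_TEST_USER_1=dbt_test_user_1
+DBT_TEST_USER_2=dbt_test_user_2
+DBT_TEST_USER_3=dbt_test_user_3
+```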
+
+### Test commands
+There are a few methods for running tests locally.
+
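+#### `hatch`
+This package also defines `hatch` environments and scripts in its `hatch.toml`; the commands below are a sketch based on the script names in that file:
+
+```sh
+hatch run setup              # install the pre-commit hooks
+hatch run code-quality       # run pre-commit against all files
+hatch run unit-tests         # python -m pytest tests/unit
+hatch run integration-tests  # python -m pytest tests/functional
+```
+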
+#### `tox`
+`tox` takes care of managing Python virtualenvs and installing dependencies in order to run tests. It can also run tests in parallel: for example, `tox -p` runs unit tests for Python 3.9 and Python 3.10 along with `flake8` checks in parallel, and `tox -e py39` runs unit tests for a specific Python version. The configuration for these tests is located in `tox.ini`.
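+
+For example (these environment names assume the `tox.ini` shipped with the package):
+
+```sh
+tox -p          # unit tests for multiple Python versions plus flake8, in parallel
+tox -e py39     # unit tests for Python 3.9 only
+```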
+
+#### `pytest`
+Finally, you can also run a specific test or group of tests using `pytest` directly. With a Python virtualenv active and dev dependencies installed you can do things like:
+
+```sh
+# run specific redshift functional tests
+python -m pytest tests/functional/adapter/concurrent_transactions
+# run specific redshift functional tests in a file
+python -m pytest tests/functional/adapter/test_basic.py
+# run all unit tests in a file
+python -m pytest tests/unit/test_redshift_adapter.py
+# run a specific unit test
+python -m pytest tests/unit/test_redshift_adapter.py::TestRedshiftAdapterConversions::test_convert_date_type
+```
+
+## Updating Docs
+
+Many changes will require an update to the `dbt-redshift` docs. If so, here are some useful resources to find where the current behavior is documented.
+
+- Docs are [here](https://docs.getdbt.com/).
+- The docs repo for making changes is located [here]( https://github.com/dbt-labs/docs.getdbt.com).
+- The changes made are likely to impact one or both of [Redshift Profile](https://docs.getdbt.com/reference/warehouse-profiles/redshift-profile), or [Redshift Configs](https://docs.getdbt.com/reference/resource-configs/redshift-configs).
+- We ask every community member who makes a user-facing change to open an issue or PR regarding doc changes.
+
+## Adding CHANGELOG Entry
+
+We use [changie](https://changie.dev) to generate `CHANGELOG` entries. **Note:** Do not edit the `CHANGELOG.md` directly. Your modifications will be lost.
+
+Follow the steps to [install `changie`](https://changie.dev/guide/installation/) for your system.
+
+Once changie is installed and your PR is created, simply run `changie new` and changie will walk you through the process of creating a changelog entry. Commit the file that's created and your changelog entry is complete!
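+
+For reference, the generated entry is a small YAML file under `.changes/unreleased/`; the entries already in this repository look like:
+
+```yaml
+kind: Features
+body: Add IdpTokenAuthPlugin authentication method.
+time: 2024-12-17T18:13:40.281494-08:00
+custom:
+  Author: versusfacit
+  Issue: "898"
+```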
+
+You don't need to worry about which `dbt-redshift` version your change will go into. Just create the changelog entry with `changie`, and open your PR against the `main` branch. All merged changes will be included in the next minor version of `dbt-redshift`. The Core maintainers _may_ choose to "backport" specific changes in order to patch older minor versions. In that case, a maintainer will take care of that backport after merging your PR, before releasing the new version of `dbt-redshift`.
+
+
+## Submitting a Pull Request
+
+dbt Labs provides a CI environment, run through GitHub Actions, to test changes to the `dbt-redshift` adapter, along with periodic checks against the development version of `dbt-core`.
+
+A `dbt-redshift` maintainer will review your PR. They may suggest code revision for style or clarity, or request that you add unit or functional test(s). These are good things! We believe that, with a little bit of help, anyone can contribute high-quality code.
+
+Once all tests are passing and your PR has been approved, a `dbt-redshift` maintainer will merge your changes into the active development branch. And that's it! Happy developing :tada:
diff --git a/dbt-redshift/README.md b/dbt-redshift/README.md
new file mode 100644
index 000000000..e405aa190
--- /dev/null
+++ b/dbt-redshift/README.md
@@ -0,0 +1,39 @@
+**[dbt](https://www.getdbt.com/)** enables data analysts and engineers to transform their data using the same practices that software engineers use to build applications.
+
+dbt is the T in ELT. Organize, cleanse, denormalize, filter, rename, and pre-aggregate the raw data in your warehouse so that it's ready for analysis.
+
+## dbt-redshift
+
+The `dbt-redshift` package contains all of the code enabling dbt to work with Amazon Redshift. For
+more information on using dbt with Redshift, consult [the docs](https://docs.getdbt.com/docs/profile-redshift).
+
+## Getting started
+
+- [Install dbt](https://docs.getdbt.com/docs/installation)
+- Read the [introduction](https://docs.getdbt.com/docs/introduction/) and [viewpoint](https://docs.getdbt.com/docs/about/viewpoint/)
+
+## Join the dbt Community
+
+- Be part of the conversation in the [dbt Community Slack](http://community.getdbt.com/)
+- Read more on the [dbt Community Discourse](https://discourse.getdbt.com)
+
+## Reporting bugs and contributing code
+
+- Want to report a bug or request a feature? Let us know on [Slack](http://community.getdbt.com/), or open [an issue](https://github.com/dbt-labs/dbt-redshift/issues/new)
+- Want to help us build dbt? Check out the [Contributing Guide](https://github.com/dbt-labs/dbt/blob/HEAD/CONTRIBUTING.md)
+
+## Code of Conduct
+
+Everyone interacting in the dbt project's codebases, issue trackers, chat rooms, and mailing lists is expected to follow the [dbt Code of Conduct](https://community.getdbt.com/code-of-conduct).
diff --git a/dbt-redshift/docker/Dockerfile b/dbt-redshift/docker/Dockerfile
new file mode 100644
index 000000000..a0f987e45
--- /dev/null
+++ b/dbt-redshift/docker/Dockerfile
@@ -0,0 +1,37 @@
+# this image gets published to GHCR for production use
+ARG py_version=3.11.2
+
+FROM python:$py_version-slim-bullseye AS base
+
+RUN apt-get update \
+ && apt-get dist-upgrade -y \
+ && apt-get install -y --no-install-recommends \
+ build-essential=12.9 \
+ ca-certificates=20210119 \
+ git=1:2.30.2-1+deb11u2 \
+ libpq-dev=13.18-0+deb11u1 \
+ make=4.3-4.1 \
+ openssh-client=1:8.4p1-5+deb11u3 \
+ software-properties-common=0.96.20.2-2.1 \
+ && apt-get clean \
+ && rm -rf \
+ /var/lib/apt/lists/* \
+ /tmp/* \
+ /var/tmp/*
+
+ENV PYTHONIOENCODING=utf-8
+ENV LANG=C.UTF-8
+
+RUN python -m pip install --upgrade "pip==24.0" "setuptools==69.2.0" "wheel==0.43.0" --no-cache-dir
+
+
+FROM base AS dbt-redshift
+
+ARG commit_ref=main
+
+HEALTHCHECK CMD dbt --version || exit 1
+
+WORKDIR /usr/app/dbt/
+ENTRYPOINT ["dbt"]
+
+RUN python -m pip install --no-cache-dir "dbt-redshift @ git+https://github.com/dbt-labs/dbt-redshift@${commit_ref}"
diff --git a/dbt-redshift/docker/README.md b/dbt-redshift/docker/README.md
new file mode 100644
index 000000000..5be9e56ef
--- /dev/null
+++ b/dbt-redshift/docker/README.md
@@ -0,0 +1,58 @@
+# Docker for dbt
+This docker file is suitable for building dbt Docker images locally or using with CI/CD to automate populating a container registry.
+
+
+## Building an image:
+This Dockerfile can create images for the following target: `dbt-redshift`
+
+In order to build a new image, run the following docker command.
+```shell
+docker build --tag <your_image_name> --target dbt-redshift <path/to/dockerfile>
+```
+---
+> **Note:** Docker must be configured to use [BuildKit](https://docs.docker.com/develop/develop-images/build_enhancements/) in order for images to build properly!
+
+---
+
+By default the image will be populated with the latest version of `dbt-redshift` on `main`.
+If you need to use a different version you can specify it by git ref using the `--build-arg` flag:
+```shell
+docker build --tag <your_image_name> \
+  --target dbt-redshift \
+  --build-arg commit_ref=<commit_ref> \
+  <path/to/dockerfile>
+```
+
+### Examples:
+To build an image named "my-dbt" that supports Redshift using the latest releases:
+```shell
+cd dbt-redshift/docker
+docker build --tag my-dbt --target dbt-redshift .
+```
+
+To build an image named "my-other-dbt" that supports Redshift using the adapter version 1.0.0b1:
+```shell
+cd dbt-redshift/docker
+docker build \
+ --tag my-other-dbt \
+ --target dbt-redshift \
+ --build-arg commit_ref=v1.0.0b1 \
+ .
+```
+
+## Running an image in a container:
+The `ENTRYPOINT` for this Dockerfile is the command `dbt` so you can bind-mount your project to `/usr/app` and use dbt as normal:
+```shell
+docker run \
+ --network=host \
+ --mount type=bind,source=path/to/project,target=/usr/app \
+ --mount type=bind,source=path/to/profiles.yml,target=/root/.dbt/profiles.yml \
+ my-dbt \
+ ls
+```
+---
+**Notes:**
+* Bind-mount sources _must_ be an absolute path
+* You may need to make adjustments to the docker networking setting depending on the specifics of your data warehouse/database host.
+
+---
diff --git a/dbt-redshift/docker/dev.Dockerfile b/dbt-redshift/docker/dev.Dockerfile
new file mode 100644
index 000000000..a982d2597
--- /dev/null
+++ b/dbt-redshift/docker/dev.Dockerfile
@@ -0,0 +1,50 @@
+# this image does not get published, it is intended for local development only, see `Makefile` for usage
+FROM ubuntu:24.04 AS base
+
+# prevent python installation from asking for time zone region
+ARG DEBIAN_FRONTEND=noninteractive
+
+# add python repository
+RUN apt-get update \
+ && apt-get install -y software-properties-common=0.99.48 \
+ && add-apt-repository -y ppa:deadsnakes/ppa \
+ && apt-get clean \
+ && rm -rf \
+ /var/lib/apt/lists/* \
+ /tmp/* \
+ /var/tmp/*
+
+# install python
+RUN apt-get update \
+ && apt-get install -y --no-install-recommends \
+ build-essential=12.10ubuntu1 \
+ git-all=1:2.43.0-1ubuntu7.1 \
+ python3.9=3.9.20-1+noble1 \
+ python3.9-dev=3.9.20-1+noble1 \
+ python3.9-distutils=3.9.20-1+noble1 \
+ python3.9-venv=3.9.20-1+noble1 \
+ python3-pip=24.0+dfsg-1ubuntu1 \
+ python3-wheel=0.42.0-2 \
+ && apt-get clean \
+ && rm -rf \
+ /var/lib/apt/lists/* \
+ /tmp/* \
+ /var/tmp/*
+
+# update the default system interpreter to the newly installed version
+RUN update-alternatives --install /usr/bin/python python /usr/bin/python3.9 1
+
+
+FROM base AS dbt-redshift-dev
+
+HEALTHCHECK CMD python --version || exit 1
+
+# send stdout/stderr to terminal
+ENV PYTHONUNBUFFERED=1
+
+# setup mount for local code
+WORKDIR /opt/code
+VOLUME /opt/code
+
+# create a virtual environment
+RUN python3 -m venv /opt/venv
diff --git a/dbt-redshift/hatch.toml b/dbt-redshift/hatch.toml
new file mode 100644
index 000000000..0d78b8a1d
--- /dev/null
+++ b/dbt-redshift/hatch.toml
@@ -0,0 +1,64 @@
+[version]
+path = "src/dbt/adapters/redshift/__version__.py"
+
+[build.targets.sdist]
+packages = ["src/dbt"]
+sources = ["src"]
+
+[build.targets.wheel]
+packages = ["src/dbt"]
+sources = ["src"]
+
+[envs.default]
+dependencies = [
+ "dbt-adapters @ git+https://github.com/dbt-labs/dbt-adapters.git",
+ "dbt-common @ git+https://github.com/dbt-labs/dbt-common.git",
+ "dbt-core @ git+https://github.com/dbt-labs/dbt-core.git#subdirectory=core",
+ "dbt-tests-adapter @ git+https://github.com/dbt-labs/dbt-adapters.git#subdirectory=dbt-tests-adapter",
+ "ddtrace==2.3.0",
+ "freezegun",
+ "ipdb~=0.13.13",
+ "pre-commit==3.7.0",
+ "pytest-csv~=3.0",
+ "pytest-dotenv",
+ "pytest-logbook~=1.2",
+ "pytest-mock",
+ "pytest-xdist",
+ "pytest>=7.0,<8.0",
+ "requests",
+]
+
+[envs.default.scripts]
+setup = "pre-commit install"
+code-quality = "pre-commit run --all-files"
+unit-tests = "python -m pytest {args:tests/unit}"
+integration-tests = "python -m pytest {args:tests/functional}"
+docker-dev = [
+ "docker build -f docker/dev.Dockerfile -t dbt-redshift-dev .",
+    "docker run --rm -it --name dbt-redshift-dev -v $(pwd):/opt/code dbt-redshift-dev",
+]
+
+[envs.build]
+detached = true
+dependencies = [
+ "wheel",
+ "twine",
+ "check-wheel-contents",
+]
+
+[envs.build.scripts]
+check-all = [
+ "- check-wheel",
+ "- check-sdist",
+]
+check-wheel = [
+ "twine check dist/*",
+ "find ./dist/dbt_redshift-*.whl -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/",
+ "pip freeze | grep dbt-redshift",
+]
+check-sdist = [
+ "check-wheel-contents dist/*.whl --ignore W007,W008",
+ "find ./dist/dbt_redshift-*.gz -maxdepth 1 -type f | xargs python -m pip install --force-reinstall --find-links=dist/",
+ "pip freeze | grep dbt-redshift",
+]
+docker-prod = "docker build -f docker/Dockerfile -t dbt-redshift ."
diff --git a/dbt-redshift/pyproject.toml b/dbt-redshift/pyproject.toml
new file mode 100644
index 000000000..77f86fd33
--- /dev/null
+++ b/dbt-redshift/pyproject.toml
@@ -0,0 +1,57 @@
+[build-system]
+requires = ["hatchling"]
+build-backend = "hatchling.build"
+
+[project]
+dynamic = ["version"]
+name = "dbt-redshift"
+description = "The Redshift adapter plugin for dbt"
+readme = "README.md"
+keywords = ["dbt", "adapter", "adapters", "database", "elt", "dbt-core", "dbt Core", "dbt Cloud", "dbt Labs", "redshift", "aws", "amazon", "amazon web services"]
+requires-python = ">=3.9.0"
+authors = [{ name = "dbt Labs", email = "info@dbtlabs.com" }]
+maintainers = [{ name = "dbt Labs", email = "info@dbtlabs.com" }]
+classifiers = [
+ "Development Status :: 5 - Production/Stable",
+ "License :: OSI Approved :: Apache Software License",
+ "Operating System :: MacOS :: MacOS X",
+ "Operating System :: Microsoft :: Windows",
+ "Operating System :: POSIX :: Linux",
+ "Programming Language :: Python :: 3.9",
+ "Programming Language :: Python :: 3.10",
+ "Programming Language :: Python :: 3.11",
+ "Programming Language :: Python :: 3.12",
+]
+dependencies = [
+ "dbt-common>=1.10,<2.0",
+ "dbt-adapters>=1.11,<2.0",
+ "dbt-postgres>=1.8,<1.10",
+    # dbt-redshift depends deeply on this package. It does not follow SemVer, so there have been breaking changes in previous patch releases.
+ # Pin to the patch or minor version, and bump in each new minor version of dbt-redshift.
+ "redshift-connector<2.1.1,>=2.0.913,!=2.0.914",
+ # add dbt-core to ensure backwards compatibility of installation, this is not a functional dependency
+ "dbt-core>=1.8.0b3",
+ # installed via dbt-core but referenced directly; don't pin to avoid version conflicts with dbt-core
+ "sqlparse>=0.5.0,<0.6.0",
+ "agate",
+ "requests",
+]
+
+[project.urls]
+Homepage = "https://github.com/dbt-labs/dbt-redshift"
+Documentation = "https://docs.getdbt.com"
+Repository = "https://github.com/dbt-labs/dbt-redshift.git"
+Issues = "https://github.com/dbt-labs/dbt-redshift/issues"
+Changelog = "https://github.com/dbt-labs/dbt-redshift/blob/main/CHANGELOG.md"
+
+[tool.mypy]
+mypy_path = "third-party-stubs/"
+
+[tool.pytest.ini_options]
+testpaths = ["tests/functional", "tests/unit"]
+env_files = ["test.env"]
+addopts = "-v --color=yes -n auto"
+filterwarnings = [
+ "ignore:.*'soft_unicode' has been renamed to 'soft_str'*:DeprecationWarning",
+ "ignore:unclosed file .*:ResourceWarning",
+]
diff --git a/dbt-redshift/scripts/build-dist.sh b/dbt-redshift/scripts/build-dist.sh
new file mode 100755
index 000000000..3c3808399
--- /dev/null
+++ b/dbt-redshift/scripts/build-dist.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -eo pipefail
+
+DBT_PATH="$( cd "$(dirname "$0")/.." ; pwd -P )"
+
+PYTHON_BIN=${PYTHON_BIN:-python}
+
+echo "$PYTHON_BIN"
+
+set -x
+
+rm -rf "$DBT_PATH"/dist
+rm -rf "$DBT_PATH"/build
+mkdir -p "$DBT_PATH"/dist
+
+cd "$DBT_PATH"
+$PYTHON_BIN setup.py sdist bdist_wheel
+
+set +x
diff --git a/dbt-redshift/scripts/env-setup.sh b/dbt-redshift/scripts/env-setup.sh
new file mode 100644
index 000000000..866d8f749
--- /dev/null
+++ b/dbt-redshift/scripts/env-setup.sh
@@ -0,0 +1,10 @@
+#!/bin/bash
+# Set TOXENV environment variable for subsequent steps
+echo "TOXENV=integration-redshift" >> $GITHUB_ENV
+# Set INTEGRATION_TESTS_SECRETS_PREFIX environment variable for subsequent steps
+# All GH secrets that have this prefix will be set as environment variables
+echo "INTEGRATION_TESTS_SECRETS_PREFIX=REDSHIFT_TEST" >> $GITHUB_ENV
+# Set environment variables required for integration tests
+echo "DBT_TEST_USER_1=dbt_test_user_1" >> $GITHUB_ENV
+echo "DBT_TEST_USER_2=dbt_test_user_2" >> $GITHUB_ENV
+echo "DBT_TEST_USER_3=dbt_test_user_3" >> $GITHUB_ENV
diff --git a/dbt-redshift/scripts/update_dependencies.sh b/dbt-redshift/scripts/update_dependencies.sh
new file mode 100644
index 000000000..1205d80a8
--- /dev/null
+++ b/dbt-redshift/scripts/update_dependencies.sh
@@ -0,0 +1,18 @@
+#!/bin/bash -e
+set -e
+
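+# usage: ./update_dependencies.sh <git_branch>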
+git_branch=$1
+target_req_file="hatch.toml"
+core_req_sed_pattern="s|dbt-core.git.*#subdirectory=core|dbt-core.git@${git_branch}#subdirectory=core|g"
+postgres_req_sed_pattern="s|dbt-postgres.git|dbt-postgres.git@${git_branch}|g"
+tests_req_sed_pattern="s|dbt-adapters.git.*#subdirectory=dbt-tests-adapter|dbt-adapters.git@${git_branch}#subdirectory=dbt-tests-adapter|g"
+if [[ "$OSTYPE" == darwin* ]]; then
+ # mac ships with a different version of sed that requires a delimiter arg
+ sed -i "" "$core_req_sed_pattern" $target_req_file
+ sed -i "" "$postgres_req_sed_pattern" $target_req_file
+ sed -i "" "$tests_req_sed_pattern" $target_req_file
+else
+ sed -i "$core_req_sed_pattern" $target_req_file
+ sed -i "$postgres_req_sed_pattern" $target_req_file
+ sed -i "$tests_req_sed_pattern" $target_req_file
+fi
diff --git a/dbt-redshift/scripts/update_dev_dependency_branches.sh b/dbt-redshift/scripts/update_dev_dependency_branches.sh
new file mode 100755
index 000000000..9385cf885
--- /dev/null
+++ b/dbt-redshift/scripts/update_dev_dependency_branches.sh
@@ -0,0 +1,21 @@
+#!/bin/bash -e
+set -e
+
+
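+# usage: ./update_dev_dependency_branches.sh <dbt_adapters_branch> <dbt_core_branch> <dbt_common_branch>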
+dbt_adapters_branch=$1
+dbt_core_branch=$2
+dbt_common_branch=$3
+target_req_file="hatch.toml"
+core_req_sed_pattern="s|dbt-core.git.*#subdirectory=core|dbt-core.git@${dbt_core_branch}#subdirectory=core|g"
+adapters_req_sed_pattern="s|dbt-adapters.git|dbt-adapters.git@${dbt_adapters_branch}|g"
+common_req_sed_pattern="s|dbt-common.git|dbt-common.git@${dbt_common_branch}|g"
+if [[ "$OSTYPE" == darwin* ]]; then
+ # mac ships with a different version of sed that requires a delimiter arg
+ sed -i "" "$adapters_req_sed_pattern" $target_req_file
+ sed -i "" "$core_req_sed_pattern" $target_req_file
+ sed -i "" "$common_req_sed_pattern" $target_req_file
+else
+ sed -i "$adapters_req_sed_pattern" $target_req_file
+ sed -i "$core_req_sed_pattern" $target_req_file
+ sed -i "$common_req_sed_pattern" $target_req_file
+fi
diff --git a/dbt-redshift/scripts/update_release_branch.sh b/dbt-redshift/scripts/update_release_branch.sh
new file mode 100644
index 000000000..75b9ccef6
--- /dev/null
+++ b/dbt-redshift/scripts/update_release_branch.sh
@@ -0,0 +1,11 @@
+#!/bin/bash -e
+set -e
+
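+# usage: ./update_release_branch.sh <release_branch>  (e.g. 1.9.latest)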
+release_branch=$1
+target_req_file=".github/workflows/nightly-release.yml"
+if [[ "$OSTYPE" == darwin* ]]; then
+ # mac ships with a different version of sed that requires a delimiter arg
+ sed -i "" "s|[0-9].[0-9].latest|$release_branch|" $target_req_file
+else
+ sed -i "s|[0-9].[0-9].latest|$release_branch|" $target_req_file
+fi
diff --git a/dbt-redshift/src/dbt/__init__.py b/dbt-redshift/src/dbt/__init__.py
new file mode 100644
index 000000000..b36383a61
--- /dev/null
+++ b/dbt-redshift/src/dbt/__init__.py
@@ -0,0 +1,3 @@
+from pkgutil import extend_path
+
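+# `dbt` is a namespace package shared by dbt-core and adapter plugins; extend_path lets them coexist under the same import root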
+__path__ = extend_path(__path__, __name__)
diff --git a/dbt-redshift/src/dbt/adapters/redshift/__init__.py b/dbt-redshift/src/dbt/adapters/redshift/__init__.py
new file mode 100644
index 000000000..70e77ef5e
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/__init__.py
@@ -0,0 +1,18 @@
+from dbt.adapters.base import AdapterPlugin
+
+from dbt.adapters.redshift.connections import ( # noqa: F401
+ RedshiftConnectionManager,
+ RedshiftCredentials,
+)
+
+from dbt.adapters.redshift.relation import RedshiftRelation # noqa: F401
+from dbt.adapters.redshift.impl import RedshiftAdapter
+from dbt.include import redshift
+
+
+Plugin: AdapterPlugin = AdapterPlugin(
+ adapter=RedshiftAdapter, # type: ignore
+ credentials=RedshiftCredentials,
+ include_path=redshift.PACKAGE_PATH,
+ dependencies=["postgres"],
+)
diff --git a/dbt-redshift/src/dbt/adapters/redshift/__version__.py b/dbt-redshift/src/dbt/adapters/redshift/__version__.py
new file mode 100644
index 000000000..1af777a62
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/__version__.py
@@ -0,0 +1 @@
+version = "1.10.0a1"
diff --git a/dbt-redshift/src/dbt/adapters/redshift/auth_providers.py b/dbt-redshift/src/dbt/adapters/redshift/auth_providers.py
new file mode 100644
index 000000000..bd4a7a309
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/auth_providers.py
@@ -0,0 +1,87 @@
+import requests
+from abc import ABC, abstractmethod
+from enum import Enum
+from typing import Dict, Any
+
+from dbt.adapters.exceptions import FailedToConnectError
+from dbt_common.exceptions import DbtRuntimeError
+
+
+# Base class for token services that exchange configured credentials for an access token at an identity provider's token endpoint
+class TokenServiceBase(ABC):
+ def __init__(self, token_endpoint: Dict[str, Any]):
+ expected_keys = {"type", "request_url", "request_data"}
+ for key in expected_keys:
+ if key not in token_endpoint:
+ raise FailedToConnectError(f"Missing required key in token_endpoint: '{key}'")
+
+ self.type: str = token_endpoint["type"]
+ self.url: str = token_endpoint["request_url"]
+ self.data: str = token_endpoint["request_data"]
+
+ self.other_params = {k: v for k, v in token_endpoint.items() if k not in expected_keys}
+
+ @abstractmethod
+ def build_header_payload(self) -> Dict[str, Any]:
+ pass
+
+ def handle_request(self) -> requests.Response:
+ """
+ Handles the request with rate limiting and error handling.
+ """
+ response = requests.post(self.url, headers=self.build_header_payload(), data=self.data)
+
+ if response.status_code == 429:
+ raise DbtRuntimeError(
+ "Rate limit on identity provider's token dispatch has been reached. "
+ "Consider increasing your identity provider's refresh token rate or "
+                "lowering dbt's maximum concurrent thread count."
+ )
+
+ response.raise_for_status()
+ return response
+
+
+class OktaIdpTokenService(TokenServiceBase):
+ def build_header_payload(self) -> Dict[str, Any]:
+ if encoded_idp_client_creds := self.other_params.get("idp_auth_credentials"):
+ return {
+ "accept": "application/json",
+ "authorization": f"Basic {encoded_idp_client_creds}",
+ "content-type": "application/x-www-form-urlencoded",
+ }
+ else:
+ raise FailedToConnectError(
+ "Missing 'idp_auth_credentials' from token_endpoint. Please provide client_id:client_secret in base64 encoded format as a profile entry under token_endpoint."
+ )
+
+
+class EntraIdpTokenService(TokenServiceBase):
+ """
+ formatted based on docs: https://learn.microsoft.com/en-us/entra/identity-platform/v2-oauth2-auth-code-flow#refresh-the-access-token
+ """
+
+ def build_header_payload(self) -> Dict[str, Any]:
+ return {
+ "accept": "application/json",
+ "content-type": "application/x-www-form-urlencoded",
+ }
+
+
+class TokenServiceType(Enum):
+ OKTA = "okta"
+ ENTRA = "entra"
+
+
+def create_token_service_client(token_endpoint: Dict[str, Any]) -> TokenServiceBase:
+ if (service_type := token_endpoint.get("type")) is None:
+ raise FailedToConnectError("Missing required key in token_endpoint: 'type'")
+
+ if service_type == TokenServiceType.OKTA.value:
+ return OktaIdpTokenService(token_endpoint)
+ elif service_type == TokenServiceType.ENTRA.value:
+ return EntraIdpTokenService(token_endpoint)
+ else:
+ raise ValueError(
+            f"Unsupported identity provider type: {service_type}. Select 'okta' or 'entra'."
+ )
diff --git a/dbt-redshift/src/dbt/adapters/redshift/connections.py b/dbt-redshift/src/dbt/adapters/redshift/connections.py
new file mode 100644
index 000000000..9632be77b
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/connections.py
@@ -0,0 +1,566 @@
+import re
+import redshift_connector
+import sqlparse
+
+from multiprocessing import Lock
+from contextlib import contextmanager
+from typing import Any, Callable, Dict, Tuple, Union, Optional, List, TYPE_CHECKING
+from dataclasses import dataclass, field
+
+from dbt.adapters.exceptions import FailedToConnectError
+from redshift_connector.utils.oids import get_datatype_name
+
+from dbt.adapters.sql import SQLConnectionManager
+from dbt.adapters.contracts.connection import AdapterResponse, Connection, Credentials
+from dbt.adapters.events.logging import AdapterLogger
+from dbt.adapters.redshift.auth_providers import create_token_service_client
+from dbt_common.contracts.util import Replaceable
+from dbt_common.dataclass_schema import dbtClassMixin, StrEnum, ValidationError
+from dbt_common.helper_types import Port
+from dbt_common.exceptions import DbtRuntimeError, CompilationError, DbtDatabaseError
+
+if TYPE_CHECKING:
+    # Indirectly imported via agate_helper, which is lazy loaded further down the file.
+ # Used by mypy for earlier type hints.
+ import agate
+
+
+class SSLConfigError(CompilationError):
+ def __init__(self, exc: ValidationError):
+ self.exc = exc
+ super().__init__(msg=self.get_message())
+
+ def get_message(self) -> str:
+ validator_msg = self.validator_error_message(self.exc)
+ msg = f"Could not parse SSL config: {validator_msg}"
+ return msg
+
+
+logger = AdapterLogger("Redshift")
+
+
+class RedshiftConnectionMethod(StrEnum):
+ DATABASE = "database"
+ IAM = "iam"
+ IAM_ROLE = "iam_role"
+ IAM_IDENTITY_CENTER_BROWSER = "browser_identity_center"
+ IAM_IDENTITY_CENTER_TOKEN = "oauth_token_identity_center"
+
+ @classmethod
+ def uses_identity_center(cls, method: str) -> bool:
+ return method in (cls.IAM_IDENTITY_CENTER_BROWSER, cls.IAM_IDENTITY_CENTER_TOKEN)
+
+ @classmethod
+ def is_iam(cls, method: str) -> bool:
+ return not cls.uses_identity_center(method)
+
+
+class UserSSLMode(StrEnum):
+ disable = "disable"
+ allow = "allow"
+ prefer = "prefer"
+ require = "require"
+ verify_ca = "verify-ca"
+ verify_full = "verify-full"
+
+ @classmethod
+ def default(cls) -> "UserSSLMode":
+ # default for `psycopg2`, which aligns with dbt-redshift 1.4 and provides backwards compatibility
+ return cls("prefer")
+
+
+class RedshiftSSLMode(StrEnum):
+ verify_ca = "verify-ca"
+ verify_full = "verify-full"
+
+
+SSL_MODE_TRANSLATION = {
+ UserSSLMode.disable: None,
+ UserSSLMode.allow: RedshiftSSLMode("verify-ca"),
+ UserSSLMode.prefer: RedshiftSSLMode("verify-ca"),
+ UserSSLMode.require: RedshiftSSLMode("verify-ca"),
+ UserSSLMode.verify_ca: RedshiftSSLMode("verify-ca"),
+ UserSSLMode.verify_full: RedshiftSSLMode("verify-full"),
+}
+
+
+@dataclass
+class RedshiftSSLConfig(dbtClassMixin, Replaceable): # type: ignore
+ ssl: bool = True
+ sslmode: Optional[RedshiftSSLMode] = SSL_MODE_TRANSLATION[UserSSLMode.default()]
+
+ @classmethod
+ def parse(cls, user_sslmode: UserSSLMode) -> "RedshiftSSLConfig":
+ try:
+ raw_redshift_ssl = {
+ "ssl": user_sslmode != UserSSLMode.disable,
+ "sslmode": SSL_MODE_TRANSLATION[user_sslmode],
+ }
+ cls.validate(raw_redshift_ssl)
+ except ValidationError as exc:
+ raise SSLConfigError(exc)
+
+ redshift_ssl = cls.from_dict(raw_redshift_ssl)
+
+ if redshift_ssl.ssl:
+ message = (
+                f"Establishing connection using ssl with `sslmode` set to '{user_sslmode}'. "
+                f"To connect without ssl, set `sslmode` to 'disable'."
+ )
+ else:
+ message = "Establishing connection without ssl."
+
+ logger.debug(message)
+
+ return redshift_ssl
+
+
+@dataclass
+class RedshiftCredentials(Credentials):
+ host: str
+ port: Port
+ method: str = RedshiftConnectionMethod.DATABASE # type: ignore
+ user: Optional[str] = None
+ password: Optional[str] = None # type: ignore
+ cluster_id: Optional[str] = field(
+ default=None,
+ metadata={"description": "If using IAM auth, the name of the cluster"},
+ )
+ iam_profile: Optional[str] = None
+ autocreate: bool = False
+ db_groups: List[str] = field(default_factory=list)
+ ra3_node: Optional[bool] = False
+ connect_timeout: Optional[int] = None
+ role: Optional[str] = None
+ sslmode: UserSSLMode = field(default_factory=UserSSLMode.default)
+ retries: int = 1
+ region: Optional[str] = None
+ # opt-in by default per team deliberation on https://peps.python.org/pep-0249/#autocommit
+ autocommit: Optional[bool] = True
+ access_key_id: Optional[str] = None
+ secret_access_key: Optional[str] = None
+
+ #
+ # IAM identity center methods
+ #
+
+ # browser
+ idc_region: Optional[str] = None
+ issuer_url: Optional[str] = None
+ idp_listen_port: Optional[int] = 7890
+ idc_client_display_name: Optional[str] = "Amazon Redshift driver"
+ idp_response_timeout: Optional[int] = None
+
+ # token_endpoint
+    # a field that we expect to be a dictionary of values used to create
+    # access tokens from an external identity provider integrated with a Redshift
+    # and AWS org- or account-level IAM Identity Center instance
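+    # An illustrative shape (required keys come from TokenServiceBase in auth_providers.py;
+    # values are placeholders, not validated examples):
+    #   token_endpoint:
+    #     type: okta  # or "entra"
+    #     request_url: <identity provider token endpoint URL>
+    #     request_data: <url-encoded form body sent to the endpoint>
+    #     idp_auth_credentials: <base64-encoded client_id:client_secret>  # okta only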
+ token_endpoint: Optional[Dict[str, str]] = None
+
+ _ALIASES = {"dbname": "database", "pass": "password"}
+
+ @property
+ def type(self):
+ return "redshift"
+
+ def _connection_keys(self):
+ return (
+ "host",
+ "user",
+ "port",
+ "database",
+ "method",
+ "cluster_id",
+ "iam_profile",
+ "schema",
+            "sslmode",
+            "region",
+ "autocreate",
+ "db_groups",
+ "ra3_node",
+ "connect_timeout",
+ "role",
+ "retries",
+ "autocommit",
+ "access_key_id",
+ )
+
+ @property
+ def unique_field(self) -> str:
+ return self.host
+
+
+def get_connection_method(
+ credentials: RedshiftCredentials,
+) -> Callable[[], redshift_connector.Connection]:
+ #
+ # Helper Methods
+ #
+ def __validate_required_fields(method_name: str, required_fields: Tuple[str, ...]):
+ missing_fields: List[str] = [
+ field for field in required_fields if getattr(credentials, field, None) is None
+ ]
+ if missing_fields:
+ fields_str: str = "', '".join(missing_fields)
+ raise FailedToConnectError(
+ f"'{fields_str}' field(s) are required for '{method_name}' credentials method"
+ )
+
+ def __base_kwargs(credentials) -> Dict[str, Any]:
+ redshift_ssl_config: Dict[str, Any] = RedshiftSSLConfig.parse(
+ credentials.sslmode
+ ).to_dict()
+ return {
+ "host": credentials.host,
+ "port": int(credentials.port) if credentials.port else 5439,
+ "database": credentials.database,
+ "region": credentials.region,
+ "auto_create": credentials.autocreate,
+ "db_groups": credentials.db_groups,
+ "timeout": credentials.connect_timeout,
+ **redshift_ssl_config,
+ }
+
+ def __iam_kwargs(credentials) -> Dict[str, Any]:
+
+ # iam True except for identity center methods
+ iam: bool = RedshiftConnectionMethod.is_iam(credentials.method)
+
+ cluster_identifier: Optional[str]
+ if "serverless" in credentials.host or RedshiftConnectionMethod.uses_identity_center(
+ credentials.method
+ ):
+ cluster_identifier = None
+ elif credentials.cluster_id:
+ cluster_identifier = credentials.cluster_id
+ else:
+ raise FailedToConnectError(
+ "Failed to use IAM method:"
+                " 'cluster_id' must be provided for a provisioned cluster, or"
+                " 'host' must be provided for a serverless endpoint"
+ )
+
+ iam_specific_kwargs: Dict[str, Any] = {
+ "iam": iam,
+ "user": "",
+ "password": "",
+ "cluster_identifier": cluster_identifier,
+ }
+
+ return __base_kwargs(credentials) | iam_specific_kwargs
+
+ def __database_kwargs(credentials) -> Dict[str, Any]:
+ logger.debug("Connecting to Redshift with 'database' credentials method")
+
+ __validate_required_fields("database", ("user", "password"))
+
+ db_credentials: Dict[str, Any] = {
+ "user": credentials.user,
+ "password": credentials.password,
+ }
+
+ return __base_kwargs(credentials) | db_credentials
+
+ def __iam_user_kwargs(credentials) -> Dict[str, Any]:
+ logger.debug("Connecting to Redshift with 'iam' credentials method")
+
+ iam_credentials: Dict[str, Any]
+ if credentials.access_key_id and credentials.secret_access_key:
+ iam_credentials = {
+ "access_key_id": credentials.access_key_id,
+ "secret_access_key": credentials.secret_access_key,
+ }
+ elif credentials.access_key_id or credentials.secret_access_key:
+ raise FailedToConnectError(
+ "'access_key_id' and 'secret_access_key' are both needed if providing explicit credentials"
+ )
+ else:
+ iam_credentials = {"profile": credentials.iam_profile}
+
+ __validate_required_fields("iam", ("user",))
+ iam_credentials["db_user"] = credentials.user
+
+ return __iam_kwargs(credentials) | iam_credentials
+
+ def __iam_role_kwargs(credentials) -> Dict[str, Any]:
+ logger.debug("Connecting to Redshift with 'iam_role' credentials method")
+ role_kwargs = {
+ "db_user": None,
+ "group_federation": "serverless" not in credentials.host,
+ }
+
+ if credentials.iam_profile:
+ role_kwargs["profile"] = credentials.iam_profile
+
+ return __iam_kwargs(credentials) | role_kwargs
+
+ def __iam_idc_browser_kwargs(credentials) -> Dict[str, Any]:
+        logger.debug(f"Connecting to Redshift with '{credentials.method}' credentials method")
+
+ __IDP_TIMEOUT: int = 60
+ __LISTEN_PORT_DEFAULT: int = 7890
+
+ __validate_required_fields(
+ "browser_identity_center", ("method", "idc_region", "issuer_url")
+ )
+
+ idp_timeout: int = (
+ timeout
+ if (timeout := credentials.idp_response_timeout) or timeout == 0
+ else __IDP_TIMEOUT
+ )
+
+ idp_listen_port: int = (
+ port if (port := credentials.idp_listen_port) else __LISTEN_PORT_DEFAULT
+ )
+
+ idc_kwargs: Dict[str, Any] = {
+ "credentials_provider": "BrowserIdcAuthPlugin",
+ "issuer_url": credentials.issuer_url,
+ "listen_port": idp_listen_port,
+ "idc_region": credentials.idc_region,
+ "idc_client_display_name": credentials.idc_client_display_name,
+ "idp_response_timeout": idp_timeout,
+ }
+
+ return __iam_kwargs(credentials) | idc_kwargs
+
+ def __iam_idc_token_kwargs(credentials) -> Dict[str, Any]:
+ """
+ Accepts a `credentials` object with a `token_endpoint` field that corresponds to
+ either Okta or Entra authentication services.
+
+        We only support token_type=EXT_JWT tokens; token_type=ACCESS_TOKEN has not been
+        tested and can be added if a use case presents itself.
+ """
+
+        logger.debug(f"Connecting to Redshift with '{credentials.method}' credentials method")
+
+ __validate_required_fields("oauth_token_identity_center", ("token_endpoint",))
+
+ token_service = create_token_service_client(credentials.token_endpoint)
+ response = token_service.handle_request()
+ try:
+ access_token = response.json()["access_token"]
+ except KeyError:
+ raise FailedToConnectError(
+                "access_token missing from the IdP token response. Please confirm correct configuration of the token_endpoint field in profiles.yml and that your IdP can use a refresh token to obtain an OIDC-compliant access token."
+ )
+
+ return __iam_kwargs(credentials) | {
+ "credentials_provider": "IdpTokenAuthPlugin",
+ "token": access_token,
+ "token_type": "EXT_JWT",
+ }
+
+ #
+ # Head of function execution
+ #
+
+ method_to_kwargs_function = {
+ None: __database_kwargs,
+ RedshiftConnectionMethod.DATABASE: __database_kwargs,
+ RedshiftConnectionMethod.IAM: __iam_user_kwargs,
+ RedshiftConnectionMethod.IAM_ROLE: __iam_role_kwargs,
+ RedshiftConnectionMethod.IAM_IDENTITY_CENTER_BROWSER: __iam_idc_browser_kwargs,
+ RedshiftConnectionMethod.IAM_IDENTITY_CENTER_TOKEN: __iam_idc_token_kwargs,
+ }
+
+ try:
+ kwargs_function: Callable[[RedshiftCredentials], Dict[str, Any]] = (
+ method_to_kwargs_function[credentials.method]
+ )
+ except KeyError:
+ raise FailedToConnectError(f"Invalid 'method' in profile: '{credentials.method}'")
+
+ kwargs: Dict[str, Any] = kwargs_function(credentials)
+
+ def connect() -> redshift_connector.Connection:
+ c = redshift_connector.connect(**kwargs)
+ if credentials.autocommit:
+ c.autocommit = True
+ if credentials.role:
+ c.cursor().execute(f"set role {credentials.role}")
+ return c
+
+ return connect
+
+
+class RedshiftConnectionManager(SQLConnectionManager):
+ TYPE = "redshift"
+
+ def cancel(self, connection: Connection):
+ pid = connection.backend_pid # type: ignore
+ sql = f"select pg_terminate_backend({pid})"
+ logger.debug(f"Cancel query on: '{connection.name}' with PID: {pid}")
+ logger.debug(sql)
+
+ try:
+ self.add_query(sql)
+ except redshift_connector.InterfaceError as e:
+ if "is closed" in str(e):
+ logger.debug(f"Connection {connection.name} was already closed")
+ return
+ raise
+
+ @classmethod
+ def _get_backend_pid(cls, connection):
+ with connection.handle.cursor() as c:
+ sql = "select pg_backend_pid()"
+ res = c.execute(sql).fetchone()
+ return res[0]
+
+ @classmethod
+ def get_response(cls, cursor: redshift_connector.Cursor) -> AdapterResponse:
+        # redshift_connector.Cursor doesn't have a status message attribute, and
+        # this function is only used for successful runs, so we can just return a dummy
+ rows = cursor.rowcount
+ message = "SUCCESS"
+ return AdapterResponse(_message=message, rows_affected=rows)
+
+ @contextmanager
+ def exception_handler(self, sql):
+ try:
+ yield
+ except redshift_connector.DatabaseError as e:
+ try:
+ err_msg = e.args[0]["M"] # this is a type redshift sets, so we must use these keys
+ except Exception:
+ err_msg = str(e).strip()
+ logger.debug(f"Redshift error: {err_msg}")
+ self.rollback_if_open()
+ raise DbtDatabaseError(err_msg) from e
+
+ except Exception as e:
+ logger.debug("Error running SQL: {}", sql)
+ logger.debug("Rolling back transaction.")
+ self.rollback_if_open()
+ # Raise DBT native exceptions as is.
+ if isinstance(e, DbtRuntimeError):
+ raise
+ raise DbtRuntimeError(str(e)) from e
+
+ @contextmanager
+ def fresh_transaction(self):
+ """On entrance to this context manager, hold an exclusive lock and
+ create a fresh transaction for redshift, then commit and begin a new
+ one before releasing the lock on exit.
+
+ See drop_relation in RedshiftAdapter for more information.
+ """
+ drop_lock: Lock = self.lock
+
+ with drop_lock:
+ connection = self.get_thread_connection()
+
+ if connection.transaction_open:
+ self.commit()
+
+ self.begin()
+ yield
+ self.commit()
+
+ self.begin()
+
+ @classmethod
+ def open(cls, connection):
+ if connection.state == "open":
+ logger.debug("Connection is already open, skipping open.")
+ return connection
+
+ credentials = connection.credentials
+
+ retryable_exceptions = (
+ redshift_connector.OperationalError,
+ redshift_connector.DatabaseError,
+ redshift_connector.DataError,
+ redshift_connector.InterfaceError,
+ )
+
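+ # Open the connection, retrying on the transient connector errors above up to credentials.retries times.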
+ open_connection = cls.retry_connection(
+ connection,
+ connect=get_connection_method(credentials),
+ logger=logger,
+ retry_limit=credentials.retries,
+ retryable_exceptions=retryable_exceptions,
+ )
+ open_connection.backend_pid = cls._get_backend_pid(open_connection) # type: ignore
+ return open_connection
+
+ def execute(
+ self,
+ sql: str,
+ auto_begin: bool = False,
+ fetch: bool = False,
+ limit: Optional[int] = None,
+ ) -> Tuple[AdapterResponse, "agate.Table"]:
+ sql = self._add_query_comment(sql)
+ _, cursor = self.add_query(sql, auto_begin)
+ response = self.get_response(cursor)
+ if fetch:
+ table = self.get_result_from_cursor(cursor, limit)
+ else:
+ from dbt_common.clients import agate_helper
+
+ table = agate_helper.empty_table()
+ return response, table
+
+ def add_query(self, sql, auto_begin=True, bindings=None, abridge_sql_log=False):
+ connection = None
+ cursor = None
+
+ self._initialize_sqlparse_lexer()
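+ # Split multi-statement SQL so each statement is submitted to the cursor individually.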
+ queries = sqlparse.split(sql)
+
+ for query in queries:
+ # Strip off comments from the current query
+ without_comments = re.sub(
+ re.compile(r"(\".*?\"|\'.*?\')|(/\*.*?\*/|--[^\r\n]*$)", re.MULTILINE),
+ "",
+ query,
+ ).strip()
+
+ if without_comments == "":
+ continue
+
+ retryable_exceptions = (
+ redshift_connector.InterfaceError,
+ redshift_connector.InternalError,
+ )
+
+ connection, cursor = super().add_query(
+ query,
+ auto_begin,
+ bindings=bindings,
+ abridge_sql_log=abridge_sql_log,
+ retryable_exceptions=retryable_exceptions,
+ retry_limit=self.profile.credentials.retries,
+ )
+
+ if cursor is None:
+ conn = self.get_thread_connection()
+ conn_name = conn.name if conn and conn.name else ""
+ raise DbtRuntimeError(f"Tried to run invalid SQL: {sql} on {conn_name}")
+
+ return connection, cursor
+
+ @classmethod
+ def get_credentials(cls, credentials):
+ return credentials
+
+ @classmethod
+ def data_type_code_to_name(cls, type_code: Union[int, str]) -> str:
+ return get_datatype_name(type_code)
+
+ @staticmethod
+ def _initialize_sqlparse_lexer():
+ """
+ Resolves: https://github.com/dbt-labs/dbt-redshift/issues/710
+ Implementation of this fix: https://github.com/dbt-labs/dbt-core/pull/8215
+ """
+ from sqlparse.lexer import Lexer # type: ignore
+
+ if hasattr(Lexer, "get_default_instance"):
+ Lexer.get_default_instance()
diff --git a/dbt-redshift/src/dbt/adapters/redshift/impl.py b/dbt-redshift/src/dbt/adapters/redshift/impl.py
new file mode 100644
index 000000000..aaf3d46ca
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/impl.py
@@ -0,0 +1,190 @@
+import os
+from dataclasses import dataclass
+
+from dbt_common.contracts.constraints import ConstraintType
+from typing import Optional, Set, Any, Dict, Type, TYPE_CHECKING
+from collections import namedtuple
+from dbt.adapters.base import PythonJobHelper
+from dbt.adapters.base.impl import AdapterConfig, ConstraintSupport
+from dbt.adapters.base.meta import available
+from dbt.adapters.capability import Capability, CapabilityDict, CapabilitySupport, Support
+from dbt.adapters.sql import SQLAdapter
+from dbt.adapters.contracts.connection import AdapterResponse
+from dbt.adapters.events.logging import AdapterLogger
+
+
+import dbt_common.exceptions
+
+from dbt.adapters.redshift import RedshiftConnectionManager, RedshiftRelation
+
+logger = AdapterLogger("Redshift")
+packages = ["redshift_connector", "redshift_connector.core"]
+if os.getenv("DBT_REDSHIFT_CONNECTOR_DEBUG_LOGGING"):
+ level = "DEBUG"
+else:
+ level = "ERROR"
+for package in packages:
+ logger.debug(f"Setting {package} to {level}")
+ logger.set_adapter_dependency_log_level(package, level)
+
+GET_RELATIONS_MACRO_NAME = "redshift__get_relations"
+
+if TYPE_CHECKING:
+ import agate
+
+
+@dataclass
+class RedshiftConfig(AdapterConfig):
+ sort_type: Optional[str] = None
+ dist: Optional[str] = None
+ sort: Optional[str] = None
+ bind: Optional[bool] = None
+ backup: Optional[bool] = True
+ auto_refresh: Optional[bool] = False
+
+
+class RedshiftAdapter(SQLAdapter):
+ Relation = RedshiftRelation # type: ignore
+ ConnectionManager = RedshiftConnectionManager
+ connections: RedshiftConnectionManager
+
+ AdapterSpecificConfigs = RedshiftConfig # type: ignore
+
+ CONSTRAINT_SUPPORT = {
+ ConstraintType.check: ConstraintSupport.NOT_SUPPORTED,
+ ConstraintType.not_null: ConstraintSupport.ENFORCED,
+ ConstraintType.unique: ConstraintSupport.NOT_ENFORCED,
+ ConstraintType.primary_key: ConstraintSupport.NOT_ENFORCED,
+ ConstraintType.foreign_key: ConstraintSupport.NOT_ENFORCED,
+ }
+
+ _capabilities = CapabilityDict(
+ {
+ Capability.SchemaMetadataByRelations: CapabilitySupport(support=Support.Full),
+ Capability.TableLastModifiedMetadata: CapabilitySupport(support=Support.Full),
+ Capability.TableLastModifiedMetadataBatch: CapabilitySupport(support=Support.Full),
+ }
+ )
+
+ @classmethod
+ def date_function(cls):
+ return "getdate()"
+
+ def drop_relation(self, relation):
+ """
+ In Redshift, DROP TABLE ... CASCADE should not be used
+ inside a transaction. Redshift doesn't prevent the CASCADE
+ part from conflicting with concurrent transactions. If we do
+ attempt to drop two tables with CASCADE at once, we'll often
+ get the dreaded:
+
+ table was dropped by a concurrent transaction
+
+ So, we need to lock around calls to the underlying
+ drop_relation() function.
+
+ https://docs.aws.amazon.com/redshift/latest/dg/r_DROP_TABLE.html
+ """
+ with self.connections.fresh_transaction():
+ return super().drop_relation(relation)
+
+ @classmethod
+ def convert_text_type(cls, agate_table: "agate.Table", col_idx):
+ column = agate_table.columns[col_idx]
+ # `lens` must be a list, so this can't be a generator expression,
+ # because max() raises an exception if its argument has no members.
+ lens = [len(d.encode("utf-8")) for d in column.values_without_nulls()]
+ max_len = max(lens) if lens else 64
+ return "varchar({})".format(max_len)
+
+ @classmethod
+ def convert_time_type(cls, agate_table: "agate.Table", col_idx):
+ return "varchar(24)"
+
+ @available
+ def verify_database(self, database):
+ if database.startswith('"'):
+ database = database.strip('"')
+ expected = self.config.credentials.database
+ ra3_node = self.config.credentials.ra3_node
+
+ if database.lower() != expected.lower() and not ra3_node:
+ raise dbt_common.exceptions.NotImplementedError(
+ "Cross-db references allowed only in RA3.* node. ({} vs {})".format(
+ database, expected
+ )
+ )
+ # return an empty string on success so macros can call this
+ return ""
+
+ def _get_catalog_schemas(self, manifest):
+ # Redshift (except on RA3 nodes) only allows one database (the main one)
+ schemas = super(SQLAdapter, self)._get_catalog_schemas(manifest)
+ try:
+ return schemas.flatten(allow_multiple_databases=self.config.credentials.ra3_node)
+ except dbt_common.exceptions.DbtRuntimeError as exc:
+ msg = f"Cross-db references allowed only in {self.type()} RA3.* node. Got {exc.msg}"
+ raise dbt_common.exceptions.CompilationError(msg)
+
+ def valid_incremental_strategies(self):
+ """The set of standard builtin strategies which this adapter supports out-of-the-box.
+ Not used to validate custom strategies defined by end users.
+ """
+ return ["append", "delete+insert", "merge", "microbatch"]
+
+ def timestamp_add_sql(self, add_to: str, number: int = 1, interval: str = "hour") -> str:
+ return f"{add_to} + interval '{number} {interval}'"
+
+ def _link_cached_database_relations(self, schemas: Set[str]):
+ """
+ :param schemas: The set of schemas that should have links added.
+ """
+ database = self.config.credentials.database
+ _Relation = namedtuple("_Relation", "database schema identifier")
+ links = [
+ (
+ _Relation(database, dep_schema, dep_identifier),
+ _Relation(database, ref_schema, ref_identifier),
+ )
+ for dep_schema, dep_identifier, ref_schema, ref_identifier in self.execute_macro(
+ GET_RELATIONS_MACRO_NAME
+ )
+ # don't record in cache if this relation isn't in a relevant schema
+ if ref_schema in schemas
+ ]
+
+ for dependent, referenced in links:
+ self.cache.add_link(
+ referenced=self.Relation.create(**referenced._asdict()),
+ dependent=self.Relation.create(**dependent._asdict()),
+ )
+
+ def _link_cached_relations(self, manifest):
+ schemas = set(
+ relation.schema.lower()
+ for relation in self._get_cache_schemas(manifest)
+ if self.verify_database(relation.database) == ""
+ )
+ self._link_cached_database_relations(schemas)
+
+ def _relations_cache_for_schemas(self, manifest, cache_schemas=None):
+ super()._relations_cache_for_schemas(manifest, cache_schemas)
+ self._link_cached_relations(manifest)
+
+ # avoid warnings about non-implemented abstract methods
+ # make it clear what needs to be implemented while still raising the error in super()
+ # we can update these with Redshift-specific messages if needed
+ @property
+ def python_submission_helpers(self) -> Dict[str, Type[PythonJobHelper]]:
+ return super().python_submission_helpers
+
+ @property
+ def default_python_submission_method(self) -> str:
+ return super().default_python_submission_method
+
+ def generate_python_submission_response(self, submission_result: Any) -> AdapterResponse:
+ return super().generate_python_submission_response(submission_result)
+
+ def debug_query(self):
+ """Override for DebugTask method"""
+ self.execute("select 1 as id")
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation.py b/dbt-redshift/src/dbt/adapters/redshift/relation.py
new file mode 100644
index 000000000..eaf60f54c
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation.py
@@ -0,0 +1,110 @@
+from dataclasses import dataclass, field
+from dbt.adapters.contracts.relation import RelationConfig
+from typing import FrozenSet, Optional
+
+from dbt.adapters.base.relation import BaseRelation
+from dbt.adapters.relation_configs import (
+ RelationConfigBase,
+ RelationConfigChangeAction,
+ RelationResults,
+)
+from dbt.adapters.base import RelationType
+from dbt_common.exceptions import DbtRuntimeError
+
+from dbt.adapters.redshift.relation_configs import (
+ RedshiftMaterializedViewConfig,
+ RedshiftMaterializedViewConfigChangeset,
+ RedshiftAutoRefreshConfigChange,
+ RedshiftDistConfigChange,
+ RedshiftSortConfigChange,
+ RedshiftIncludePolicy,
+ RedshiftQuotePolicy,
+ MAX_CHARACTERS_IN_IDENTIFIER,
+)
+
+
+@dataclass(frozen=True, eq=False, repr=False)
+class RedshiftRelation(BaseRelation):
+ include_policy = RedshiftIncludePolicy # type: ignore
+ quote_policy = RedshiftQuotePolicy # type: ignore
+ require_alias: bool = False
+ relation_configs = {
+ RelationType.MaterializedView.value: RedshiftMaterializedViewConfig,
+ }
+ renameable_relations: FrozenSet[RelationType] = field(
+ default_factory=lambda: frozenset(
+ {
+ RelationType.View,
+ RelationType.Table,
+ }
+ )
+ )
+ replaceable_relations: FrozenSet[RelationType] = field(
+ default_factory=lambda: frozenset(
+ {
+ RelationType.View,
+ }
+ )
+ )
+
+ def __post_init__(self):
+ # Check for length of Redshift table/view names.
+ # Check self.type to exclude test relation identifiers
+ if (
+ self.identifier is not None
+ and self.type is not None
+ and len(self.identifier) > MAX_CHARACTERS_IN_IDENTIFIER
+ ):
+ raise DbtRuntimeError(
+ f"Relation name '{self.identifier}' "
+ f"is longer than {MAX_CHARACTERS_IN_IDENTIFIER} characters"
+ )
+
+ def relation_max_name_length(self):
+ return MAX_CHARACTERS_IN_IDENTIFIER
+
+ @classmethod
+ def from_config(cls, config: RelationConfig) -> RelationConfigBase:
+ relation_type: str = config.config.materialized # type: ignore
+
+ if relation_config := cls.relation_configs.get(relation_type):
+ return relation_config.from_relation_config(config)
+
+ raise DbtRuntimeError(
+ f"from_config() is not supported for the provided relation type: {relation_type}"
+ )
+
+ @classmethod
+ def materialized_view_config_changeset(
+ cls, relation_results: RelationResults, relation_config: RelationConfig
+ ) -> Optional[RedshiftMaterializedViewConfigChangeset]:
+ config_change_collection = RedshiftMaterializedViewConfigChangeset()
+
+ existing_materialized_view = RedshiftMaterializedViewConfig.from_relation_results(
+ relation_results
+ )
+ new_materialized_view = RedshiftMaterializedViewConfig.from_relation_config(
+ relation_config
+ )
+
+ if new_materialized_view.autorefresh != existing_materialized_view.autorefresh:
+ config_change_collection.autorefresh = RedshiftAutoRefreshConfigChange(
+ action=RelationConfigChangeAction.alter,
+ context=new_materialized_view.autorefresh,
+ )
+
+ if new_materialized_view.dist != existing_materialized_view.dist:
+ config_change_collection.dist = RedshiftDistConfigChange(
+ action=RelationConfigChangeAction.alter,
+ context=new_materialized_view.dist,
+ )
+
+ if new_materialized_view.sort != existing_materialized_view.sort:
+ config_change_collection.sort = RedshiftSortConfigChange(
+ action=RelationConfigChangeAction.alter,
+ context=new_materialized_view.sort,
+ )
+
+ if config_change_collection.has_changes:
+ return config_change_collection
+ return None
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/__init__.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/__init__.py
new file mode 100644
index 000000000..0bc69ef4a
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/__init__.py
@@ -0,0 +1,18 @@
+from dbt.adapters.redshift.relation_configs.sort import (
+ RedshiftSortConfig,
+ RedshiftSortConfigChange,
+)
+from dbt.adapters.redshift.relation_configs.dist import (
+ RedshiftDistConfig,
+ RedshiftDistConfigChange,
+)
+from dbt.adapters.redshift.relation_configs.materialized_view import (
+ RedshiftMaterializedViewConfig,
+ RedshiftAutoRefreshConfigChange,
+ RedshiftMaterializedViewConfigChangeset,
+)
+from dbt.adapters.redshift.relation_configs.policies import (
+ RedshiftIncludePolicy,
+ RedshiftQuotePolicy,
+ MAX_CHARACTERS_IN_IDENTIFIER,
+)
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/base.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/base.py
new file mode 100644
index 000000000..6f1409659
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/base.py
@@ -0,0 +1,75 @@
+from dataclasses import dataclass
+from typing import Optional, Dict, TYPE_CHECKING
+
+from dbt.adapters.base.relation import Policy
+from dbt.adapters.contracts.relation import ComponentName, RelationConfig
+from dbt.adapters.relation_configs import (
+ RelationConfigBase,
+ RelationResults,
+)
+from typing_extensions import Self
+
+from dbt.adapters.redshift.relation_configs.policies import (
+ RedshiftIncludePolicy,
+ RedshiftQuotePolicy,
+)
+
+if TYPE_CHECKING:
+ # agate is imported lazily inside the row-gathering helper below.
+ import agate
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftRelationConfigBase(RelationConfigBase):
+ """
+ This base class implements a few boilerplate methods and provides some light structure for Redshift relations.
+ """
+
+ @classmethod
+ def include_policy(cls) -> Policy:
+ return RedshiftIncludePolicy()
+
+ @classmethod
+ def quote_policy(cls) -> Policy:
+ return RedshiftQuotePolicy()
+
+ @classmethod
+ def from_relation_config(cls, relation_config: RelationConfig) -> Self:
+ relation_config_dict = cls.parse_relation_config(relation_config)
+ relation = cls.from_dict(relation_config_dict)
+ return relation # type: ignore
+
+ @classmethod
+ def parse_relation_config(cls, relation_config: RelationConfig) -> Dict:
+ raise NotImplementedError(
+ "`parse_relation_config()` needs to be implemented on this RelationConfigBase instance"
+ )
+
+ @classmethod
+ def from_relation_results(cls, relation_results: RelationResults) -> Self:
+ relation_config = cls.parse_relation_results(relation_results)
+ relation = cls.from_dict(relation_config)
+ return relation # type: ignore
+
+ @classmethod
+ def parse_relation_results(cls, relation_results: RelationResults) -> Dict:
+ raise NotImplementedError(
+ "`parse_relation_results()` needs to be implemented on this RelationConfigBase instance"
+ )
+
+ @classmethod
+ def _render_part(cls, component: ComponentName, value: Optional[str]) -> Optional[str]:
+ if cls.include_policy().get_part(component) and value:
+ if cls.quote_policy().get_part(component):
+ return f'"{value}"'
+ return value.lower()
+ return None
+
+ @classmethod
+ def _get_first_row(cls, results: "agate.Table") -> "agate.Row":
+ try:
+ return results.rows[0]
+ except IndexError:
+ import agate
+
+ return agate.Row(values=set())
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/dist.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/dist.py
new file mode 100644
index 000000000..2bcdb9566
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/dist.py
@@ -0,0 +1,167 @@
+from dataclasses import dataclass
+from dbt.adapters.contracts.relation import RelationConfig
+from typing import Optional, Set, Dict, TYPE_CHECKING
+
+from dbt.adapters.relation_configs import (
+ RelationConfigChange,
+ RelationConfigChangeAction,
+ RelationConfigValidationMixin,
+ RelationConfigValidationRule,
+)
+from dbt_common.dataclass_schema import StrEnum
+from dbt_common.exceptions import DbtRuntimeError
+from typing_extensions import Self
+
+from dbt.adapters.redshift.relation_configs.base import RedshiftRelationConfigBase
+
+if TYPE_CHECKING:
+ import agate
+
+
+class RedshiftDistStyle(StrEnum):
+ auto = "auto"
+ even = "even"
+ all = "all"
+ key = "key"
+
+ @classmethod
+ def default(cls) -> "RedshiftDistStyle":
+ return cls("auto")
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftDistConfig(RedshiftRelationConfigBase, RelationConfigValidationMixin):
+ """
+ This config follows the specs found here:
+ https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
+
+ The following parameters are configurable by dbt:
+ - diststyle: the type of data distribution style to use on the table/materialized view
+ - distkey: the column to use for the dist key if `diststyle` is `key`
+ """
+
+ diststyle: Optional[RedshiftDistStyle] = RedshiftDistStyle.default()
+ distkey: Optional[str] = None
+
+ @property
+ def validation_rules(self) -> Set[RelationConfigValidationRule]:
+ # index rules get run by default with the mixin
+ return {
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.diststyle == RedshiftDistStyle.key and self.distkey is None
+ ),
+ validation_error=DbtRuntimeError(
+ "A `RedshiftDistConfig` that specifies a `diststyle` of `key` must provide a value for `distkey`."
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.diststyle
+ in (RedshiftDistStyle.auto, RedshiftDistStyle.even, RedshiftDistStyle.all)
+ and self.distkey is not None
+ ),
+ validation_error=DbtRuntimeError(
+ "A `RedshiftDistConfig` that specifies a `distkey` must be of `diststyle` `key`."
+ ),
+ ),
+ }
+
+ @classmethod
+ def from_dict(cls, config_dict) -> Self:
+ kwargs_dict = {
+ "diststyle": config_dict.get("diststyle"),
+ "distkey": config_dict.get("distkey"),
+ }
+ dist: Self = super().from_dict(kwargs_dict) # type: ignore
+ return dist
+
+ @classmethod
+ def parse_relation_config(cls, relation_config: RelationConfig) -> dict:
+ """
+ Translate ModelNode objects from the user-provided config into a standard dictionary.
+
+ Args:
+ relation_config: the description of the distkey and diststyle from the user in this format:
+
+ {
+ "dist": any("auto", "even", "all") or ""
+ }
+
+ Returns: a standard dictionary describing this `RedshiftDistConfig` instance
+ """
+ dist = relation_config.config.extra.get("dist", "") # type: ignore
+
+ diststyle = dist.lower()
+
+ if diststyle == "":
+ config = {}
+
+ elif diststyle in (
+ RedshiftDistStyle.auto,
+ RedshiftDistStyle.even,
+ RedshiftDistStyle.all,
+ ):
+ config = {"diststyle": diststyle}
+
+ else:
+ config = {"diststyle": RedshiftDistStyle.key.value, "distkey": dist} # type: ignore
+
+ return config
+
+ @classmethod
+ def parse_relation_results(cls, relation_results_entry: "agate.Row") -> Dict:
+ """
+ Translate agate objects from the database into a standard dictionary.
+
+ Args:
+ relation_results_entry: the description of the distkey and diststyle from the database in this format:
+
+ agate.Row({
+ "diststyle": "", # e.g. EVEN | KEY(column1) | AUTO(ALL) | AUTO(KEY(id))
+ })
+
+ Returns: a standard dictionary describing this `RedshiftDistConfig` instance
+ """
+ dist: str = relation_results_entry.get("diststyle")
+
+ try:
+ # covers `AUTO`, `ALL`, `EVEN`, `KEY`, '',
+ diststyle = dist.split("(")[0].lower()
+ except AttributeError:
+ # covers None
+ diststyle = ""
+
+ if dist == "":
+ config = {}
+
+ elif diststyle == RedshiftDistStyle.key:
+ open_paren = len("KEY(")
+ close_paren = -len(")")
+ distkey = dist[open_paren:close_paren] # e.g. KEY(column1)
+ config = {"diststyle": diststyle, "distkey": distkey}
+
+ else:
+ config = {"diststyle": diststyle}
+
+ return config
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftDistConfigChange(RelationConfigChange, RelationConfigValidationMixin):
+ context: RedshiftDistConfig
+
+ @property
+ def requires_full_refresh(self) -> bool:
+ return True
+
+ @property
+ def validation_rules(self) -> Set[RelationConfigValidationRule]:
+ return {
+ RelationConfigValidationRule(
+ validation_check=(self.action == RelationConfigChangeAction.alter),
+ validation_error=DbtRuntimeError(
+ "Invalid operation, only `alter` changes are supported for `distkey` / `diststyle`."
+ ),
+ ),
+ }
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/materialized_view.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/materialized_view.py
new file mode 100644
index 000000000..a01185f22
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/materialized_view.py
@@ -0,0 +1,278 @@
+from dataclasses import dataclass, field
+from typing import Optional, Set, Dict, Any, TYPE_CHECKING
+
+from dbt.adapters.relation_configs import (
+ RelationResults,
+ RelationConfigChange,
+ RelationConfigValidationMixin,
+ RelationConfigValidationRule,
+)
+from dbt.adapters.contracts.relation import ComponentName, RelationConfig
+from dbt_common.exceptions import DbtRuntimeError
+from typing_extensions import Self
+
+from dbt.adapters.redshift.relation_configs.base import RedshiftRelationConfigBase
+from dbt.adapters.redshift.relation_configs.dist import (
+ RedshiftDistConfig,
+ RedshiftDistStyle,
+ RedshiftDistConfigChange,
+)
+from dbt.adapters.redshift.relation_configs.policies import MAX_CHARACTERS_IN_IDENTIFIER
+from dbt.adapters.redshift.relation_configs.sort import (
+ RedshiftSortConfig,
+ RedshiftSortConfigChange,
+)
+from dbt.adapters.redshift.utility import evaluate_bool
+
+if TYPE_CHECKING:
+ import agate
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftMaterializedViewConfig(RedshiftRelationConfigBase, RelationConfigValidationMixin):
+ """
+ This config follows the specs found here:
+ https://docs.aws.amazon.com/redshift/latest/dg/materialized-view-create-sql-command.html
+
+ The following parameters are configurable by dbt:
+ - mv_name: name of the materialized view
+ - query: the query that defines the view
+ - backup: determines if the materialized view is included in automated and manual cluster snapshots
+ - Note: we cannot currently query this from Redshift, which creates two issues
+ - a model deployed with this set to False will rebuild every run because the database version will always
+ look like True
+ - to deploy this as a change from False to True, a full refresh must be issued since the database version
+ will always look like True (unless there is another full refresh-triggering change)
+ - dist: the distribution configuration for the data behind the materialized view, a combination of
+ a `diststyle` and an optional `distkey`
+ - Note: the default `diststyle` for materialized views is EVEN, despite the default in general being AUTO
+ - sort: the sort configuration for the data behind the materialized view, a combination of
+ a `sortstyle` and an optional `sortkey`
+ - auto_refresh: specifies whether the materialized view should be automatically refreshed
+ with latest changes from its base tables
+
+ There are currently no non-configurable parameters.
+ """
+
+ mv_name: str
+ schema_name: str
+ database_name: str
+ query: str
+ backup: bool = field(default=True, compare=False, hash=False)
+ dist: RedshiftDistConfig = RedshiftDistConfig(diststyle=RedshiftDistStyle("even"))
+ sort: RedshiftSortConfig = RedshiftSortConfig()
+ autorefresh: bool = False
+
+ @property
+ def path(self) -> str:
+ return ".".join(
+ part
+ for part in [self.database_name, self.schema_name, self.mv_name]
+ if part is not None
+ )
+
+ @property
+ def validation_rules(self) -> Set[RelationConfigValidationRule]:
+ # sort and dist rules get run by default with the mixin
+ return {
+ RelationConfigValidationRule(
+ validation_check=len(self.mv_name or "") <= MAX_CHARACTERS_IN_IDENTIFIER,
+ validation_error=DbtRuntimeError(
+ f"The materialized view name is more than {MAX_CHARACTERS_IN_IDENTIFIER} "
+ f"characters: {self.mv_name}"
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=self.dist.diststyle != RedshiftDistStyle.auto,
+ validation_error=DbtRuntimeError(
+ "Redshift materialized views do not support a `diststyle` of `auto`."
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=len(self.mv_name if self.mv_name else "") <= 127,
+ validation_error=DbtRuntimeError(
+ "Redshift does not support object names longer than 127 characters."
+ ),
+ ),
+ }
+
+ @classmethod
+ def from_dict(cls, config_dict) -> Self:
+ kwargs_dict = {
+ "mv_name": cls._render_part(ComponentName.Identifier, config_dict.get("mv_name")),
+ "schema_name": cls._render_part(ComponentName.Schema, config_dict.get("schema_name")),
+ "database_name": cls._render_part(
+ ComponentName.Database, config_dict.get("database_name")
+ ),
+ "query": config_dict.get("query"),
+ "backup": config_dict.get("backup"),
+ "autorefresh": config_dict.get("autorefresh"),
+ }
+
+ # this preserves the materialized view-specific default of `even` over the general default of `auto`
+ if dist := config_dict.get("dist"):
+ kwargs_dict.update({"dist": RedshiftDistConfig.from_dict(dist)})
+
+ if sort := config_dict.get("sort"):
+ kwargs_dict.update({"sort": RedshiftSortConfig.from_dict(sort)})
+
+ materialized_view: Self = super().from_dict(kwargs_dict) # type: ignore
+ return materialized_view
+
+ @classmethod
+ def parse_relation_config(cls, config: RelationConfig) -> Dict[str, Any]:
+ config_dict: Dict[str, Any] = {
+ "mv_name": config.identifier,
+ "schema_name": config.schema,
+ "database_name": config.database,
+ }
+
+ # backup/autorefresh can be bools or strings
+ backup_value = config.config.extra.get("backup") # type: ignore
+ if backup_value is not None:
+ config_dict["backup"] = evaluate_bool(backup_value)
+
+ autorefresh_value = config.config.extra.get("auto_refresh") # type: ignore
+ if autorefresh_value is not None:
+ config_dict["autorefresh"] = evaluate_bool(autorefresh_value)
+
+ if query := config.compiled_code: # type: ignore
+ config_dict.update({"query": query.strip()})
+
+ if config.config.get("dist"): # type: ignore
+ config_dict.update({"dist": RedshiftDistConfig.parse_relation_config(config)})
+
+ if config.config.get("sort"): # type: ignore
+ config_dict.update({"sort": RedshiftSortConfig.parse_relation_config(config)})
+
+ return config_dict
+
+ @classmethod
+ def parse_relation_results(cls, relation_results: RelationResults) -> Dict:
+ """
+ Translate agate objects from the database into a standard dictionary.
+
+ Args:
+ relation_results: the description of the materialized view from the database in this format:
+
+ {
+ "materialized_view": agate.Table(
+ agate.Row({
+ "database": "",
+ "schema": "",
+ "table": "",
+ "diststyle": "", # e.g. EVEN | KEY(column1) | AUTO(ALL) | AUTO(KEY(id)),
+ "sortkey1": "",
+ "autorefresh: any("t", "f"),
+ })
+ ),
+ "query": agate.Table(
+ agate.Row({"definition": "")}
+ ),
+ "columns": agate.Table(
+ agate.Row({
+ "column": "",
+ "sort_key_position": ,
+ "is_dist_key: any(true, false),
+ })
+ ),
+ }
+
+ Additional columns in these results are fine, as long as `sortkey` and `sortstyle` are available.
+
+ Returns: a standard dictionary describing this `RedshiftMaterializedViewConfig` instance
+ """
+ materialized_view: "agate.Row" = cls._get_first_row(
+ relation_results.get("materialized_view")
+ )
+ query: "agate.Row" = cls._get_first_row(relation_results.get("query"))
+
+ config_dict = {
+ "mv_name": materialized_view.get("table"),
+ "schema_name": materialized_view.get("schema"),
+ "database_name": materialized_view.get("database"),
+ "query": cls._parse_query(query.get("definition")),
+ }
+
+ autorefresh_value = materialized_view.get("autorefresh")
+ if autorefresh_value is not None:
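+ # Redshift reports autorefresh as 't'/'f'; map those to booleans and pass anything else through.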
+ bool_filter = {"t": True, "f": False}
+ config_dict["autorefresh"] = bool_filter.get(autorefresh_value, autorefresh_value)
+
+ # the default for materialized views differs from the default for diststyle in general
+ # only set it if we got a value
+ if materialized_view.get("diststyle"):
+ config_dict.update(
+ {"dist": RedshiftDistConfig.parse_relation_results(materialized_view)}
+ )
+
+ if columns := relation_results.get("columns"):
+ sort_columns = [row for row in columns.rows if row.get("sort_key_position", 0) > 0]
+ if sort_columns:
+ config_dict.update(
+ {"sort": RedshiftSortConfig.parse_relation_results(sort_columns)}
+ )
+
+ return config_dict
+
+ @classmethod
+ def _parse_query(cls, query: str) -> str:
+ """
+ Get the select statement from the materialized view definition in Redshift.
+
+ Args:
+ query: the `create materialized view` statement from `pg_views`, for example:
+
+ create materialized view my_materialized_view
+ backup yes
+ diststyle even
+ sortkey (id)
+ auto refresh no
+ as (
+ select * from my_base_table
+ );
+
+ Returns: the `select ...` statement, for example:
+
+ select * from my_base_table
+
+ """
+ open_paren = query.find("as (") + len("as (")
+ close_paren = query.find(");")
+ return query[open_paren:close_paren].strip()
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftAutoRefreshConfigChange(RelationConfigChange):
+ context: Optional[bool] = None
+
+ @property
+ def requires_full_refresh(self) -> bool:
+ return False
+
+
+@dataclass
+class RedshiftMaterializedViewConfigChangeset:
+ dist: Optional[RedshiftDistConfigChange] = None
+ sort: Optional[RedshiftSortConfigChange] = None
+ autorefresh: Optional[RedshiftAutoRefreshConfigChange] = None
+
+ @property
+ def requires_full_refresh(self) -> bool:
+ return any(
+ {
+ self.autorefresh.requires_full_refresh if self.autorefresh else False,
+ self.dist.requires_full_refresh if self.dist else False,
+ self.sort.requires_full_refresh if self.sort else False,
+ }
+ )
+
+ @property
+ def has_changes(self) -> bool:
+ return any(
+ {
+ self.dist if self.dist else False,
+ self.sort if self.sort else False,
+ self.autorefresh if self.autorefresh else False,
+ }
+ )
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/policies.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/policies.py
new file mode 100644
index 000000000..7ec8e8acb
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/policies.py
@@ -0,0 +1,19 @@
+from dataclasses import dataclass
+
+from dbt.adapters.base.relation import Policy
+
+
+MAX_CHARACTERS_IN_IDENTIFIER = 127
+
+
+class RedshiftIncludePolicy(Policy):
+ database: bool = True
+ schema: bool = True
+ identifier: bool = True
+
+
+@dataclass
+class RedshiftQuotePolicy(Policy):
+ database: bool = True
+ schema: bool = True
+ identifier: bool = True
diff --git a/dbt-redshift/src/dbt/adapters/redshift/relation_configs/sort.py b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/sort.py
new file mode 100644
index 000000000..f38d5a1e1
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/relation_configs/sort.py
@@ -0,0 +1,189 @@
+from dataclasses import dataclass
+from dbt.adapters.contracts.relation import RelationConfig
+from typing import Optional, Set, Dict, Any, TYPE_CHECKING, Tuple
+
+from dbt.adapters.relation_configs import (
+ RelationConfigChange,
+ RelationConfigChangeAction,
+ RelationConfigValidationMixin,
+ RelationConfigValidationRule,
+)
+from dbt_common.dataclass_schema import StrEnum
+from dbt_common.exceptions import DbtRuntimeError
+from typing_extensions import Self
+
+from dbt.adapters.redshift.relation_configs.base import RedshiftRelationConfigBase
+
+if TYPE_CHECKING:
+ import agate
+
+
+class RedshiftSortStyle(StrEnum):
+ auto = "auto"
+ compound = "compound"
+ interleaved = "interleaved"
+
+ @classmethod
+ def default(cls) -> "RedshiftSortStyle":
+ return cls("auto")
+
+ @classmethod
+ def default_with_columns(cls) -> "RedshiftSortStyle":
+ return cls("compound")
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftSortConfig(RedshiftRelationConfigBase, RelationConfigValidationMixin):
+ """
+ This config follows the specs found here:
+ https://docs.aws.amazon.com/redshift/latest/dg/r_CREATE_TABLE_NEW.html
+
+ The following parameters are configurable by dbt:
+ - sort_type: the type of sort key on the table/materialized view
+ - defaults to `auto` if no sort config information is provided
+ - defaults to `compound` if columns are provided, but type is omitted
+ - sort_key: the column(s) to use for the sort key; cannot be combined with `sort_type=auto`
+ """
+
+ sortstyle: Optional[RedshiftSortStyle] = None
+ sortkey: Optional[Tuple[str]] = None
+
+ def __post_init__(self):
+ # maintains `frozen=True` while allowing a variable default for `sortstyle`
+ if self.sortstyle is None and self.sortkey is None:
+ object.__setattr__(self, "sortstyle", RedshiftSortStyle.default())
+ elif self.sortstyle is None:
+ object.__setattr__(self, "sortstyle", RedshiftSortStyle.default_with_columns())
+ super().__post_init__()
+
+ @property
+ def validation_rules(self) -> Set[RelationConfigValidationRule]:
+ # index rules get run by default with the mixin
+ return {
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.sortstyle == RedshiftSortStyle.auto and self.sortkey is not None
+ ),
+ validation_error=DbtRuntimeError(
+ "A `RedshiftSortConfig` that specifies a `sortkey` does not support the `sortstyle` of `auto`."
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.sortstyle in (RedshiftSortStyle.compound, RedshiftSortStyle.interleaved)
+ and self.sortkey is None
+ ),
+ validation_error=DbtRuntimeError(
+ "A `sortstyle` of `compound` or `interleaved` requires a `sortkey` to be provided."
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.sortstyle == RedshiftSortStyle.compound
+ and self.sortkey is not None
+ and len(self.sortkey) > 400
+ ),
+ validation_error=DbtRuntimeError(
+ "A compound `sortkey` only supports 400 columns."
+ ),
+ ),
+ RelationConfigValidationRule(
+ validation_check=not (
+ self.sortstyle == RedshiftSortStyle.interleaved
+ and self.sortkey is not None
+ and len(self.sortkey) > 8
+ ),
+ validation_error=DbtRuntimeError(
+ "An interleaved `sortkey` only supports 8 columns."
+ ),
+ ),
+ }
+
+ @classmethod
+ def from_dict(cls, config_dict) -> Self:
+ kwargs_dict = {
+ "sortstyle": config_dict.get("sortstyle"),
+ "sortkey": tuple(column for column in config_dict.get("sortkey", {})),
+ }
+ sort: Self = super().from_dict(kwargs_dict) # type: ignore
+ return sort # type: ignore
+
+ @classmethod
+ def parse_relation_config(cls, relation_config: RelationConfig) -> Dict[str, Any]:
+ """
+ Translate ModelNode objects from the user-provided config into a standard dictionary.
+
+ Args:
+ relation_config: the description of the sortkey and sortstyle from the user in this format:
+
+ {
+ "sort_key": "" or [""] or ["",...]
+ "sort_type": any("compound", "interleaved", "auto")
+ }
+
+ Returns: a standard dictionary describing this `RedshiftSortConfig` instance
+ """
+ config_dict = {}
+
+ if sortstyle := relation_config.config.extra.get("sort_type"): # type: ignore
+ config_dict.update({"sortstyle": sortstyle.lower()})
+
+ if sortkey := relation_config.config.extra.get("sort"): # type: ignore
+ # we allow users to specify the `sort_key` as a string if it's a single column
+ if isinstance(sortkey, str):
+ sortkey = [sortkey]
+
+ config_dict.update({"sortkey": tuple(sortkey)})
+
+ return config_dict
+
+ @classmethod
+ def parse_relation_results(cls, relation_results_entry: "agate.MappedSequence") -> dict:
+ """
+ Translate agate objects from the database into a standard dictionary.
+
+ Note:
+ This was only built for materialized views, which do not specify a sortstyle.
+ Processing of `sortstyle` has been omitted here, which means it's the default (compound).
+
+ Args:
+ relation_results_entry: The list of rows that contains the sortkey in this format:
+ [
+ agate.Row({
+ ...,
+ "column": "",
+ "sort_key_position": ,
+ ...
+ }),
+ ]
+
+ Returns: a standard dictionary describing this `RedshiftSortConfig` instance
+ """
+ sort_config = []
+
+ sorted_columns = sorted(relation_results_entry, key=lambda x: x["sort_key_position"])
+ for column in sorted_columns:
+ if column.get("sort_key_position"):
+ sort_config.append(column.get("column"))
+
+ return {"sortkey": sort_config}
+
+
+@dataclass(frozen=True, eq=True, unsafe_hash=True)
+class RedshiftSortConfigChange(RelationConfigChange, RelationConfigValidationMixin):
+ context: RedshiftSortConfig
+
+ @property
+ def requires_full_refresh(self) -> bool:
+ return True
+
+ @property
+ def validation_rules(self) -> Set[RelationConfigValidationRule]:
+ return {
+ RelationConfigValidationRule(
+ validation_check=(self.action == RelationConfigChangeAction.alter),
+ validation_error=DbtRuntimeError(
+ "Invalid operation, only `alter` changes are supported for `sortkey` / `sortstyle`."
+ ),
+ ),
+ }
diff --git a/dbt-redshift/src/dbt/adapters/redshift/utility.py b/dbt-redshift/src/dbt/adapters/redshift/utility.py
new file mode 100644
index 000000000..64f5e9cd8
--- /dev/null
+++ b/dbt-redshift/src/dbt/adapters/redshift/utility.py
@@ -0,0 +1,25 @@
+from typing import Union
+
+
+def evaluate_bool_str(value: str) -> bool:
+ value = value.strip().lower()
+ if value == "true":
+ return True
+ elif value == "false":
+ return False
+ else:
+ raise ValueError(f"Invalid boolean string value: {value}")
+
+
+def evaluate_bool(value: Union[str, bool]) -> bool:
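+ # Accepts booleans or the strings "true"/"false" (case-insensitive); falsy values return False.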
+ if not value:
+ return False
+ if isinstance(value, bool):
+ return value
+ elif isinstance(value, str):
+ return evaluate_bool_str(value)
+ else:
+ raise TypeError(
+ f"Invalid type for boolean evaluation, "
+ f"expecting boolean or str, recieved: {type(value)}"
+ )
diff --git a/dbt-redshift/src/dbt/include/redshift/__init__.py b/dbt-redshift/src/dbt/include/redshift/__init__.py
new file mode 100644
index 000000000..b177e5d49
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/__init__.py
@@ -0,0 +1,3 @@
+import os
+
+PACKAGE_PATH = os.path.dirname(__file__)
diff --git a/dbt-redshift/src/dbt/include/redshift/dbt_project.yml b/dbt-redshift/src/dbt/include/redshift/dbt_project.yml
new file mode 100644
index 000000000..1efdab2c1
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/dbt_project.yml
@@ -0,0 +1,5 @@
+config-version: 2
+name: dbt_redshift
+version: 1.0
+
+macro-paths: ["macros"]
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/adapters.sql b/dbt-redshift/src/dbt/include/redshift/macros/adapters.sql
new file mode 100644
index 000000000..8b74933bd
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/adapters.sql
@@ -0,0 +1,329 @@
+
+{% macro dist(dist) %}
+ {%- if dist is not none -%}
+ {%- set dist = dist.strip().lower() -%}
+
+ {%- if dist in ['all', 'even'] -%}
+ diststyle {{ dist }}
+ {%- elif dist == "auto" -%}
+ {%- else -%}
+ diststyle key distkey ({{ dist }})
+ {%- endif -%}
+
+ {%- endif -%}
+{%- endmacro -%}
+
+
+{% macro sort(sort_type, sort) %}
+ {%- if sort is not none %}
+ {{ sort_type | default('compound', boolean=true) }} sortkey(
+ {%- if sort is string -%}
+ {%- set sort = [sort] -%}
+ {%- endif -%}
+ {%- for item in sort -%}
+ {{ item }}
+ {%- if not loop.last -%},{%- endif -%}
+ {%- endfor -%}
+ )
+ {%- endif %}
+{%- endmacro -%}
+
+
+{% macro redshift__create_table_as(temporary, relation, sql) -%}
+
+ {%- set _dist = config.get('dist') -%}
+ {%- set _sort_type = config.get(
+ 'sort_type',
+ validator=validation.any['compound', 'interleaved']) -%}
+ {%- set _sort = config.get(
+ 'sort',
+ validator=validation.any[list, basestring]) -%}
+ {%- set sql_header = config.get('sql_header', none) -%}
+ {%- set backup = config.get('backup') -%}
+
+ {{ sql_header if sql_header is not none }}
+
+ {%- set contract_config = config.get('contract') -%}
+ {%- if contract_config.enforced -%}
+
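+ {#-- Contract enforced: create the table from the contracted columns and constraints, then insert the model's rows #}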
+ create {% if temporary -%}temporary{%- endif %} table
+ {{ relation.include(database=(not temporary), schema=(not temporary)) }}
+ {{ get_table_columns_and_constraints() }}
+ {{ get_assert_columns_equivalent(sql) }}
+ {%- set sql = get_select_subquery(sql) %}
+ {% if backup == false -%}backup no{%- endif %}
+ {{ dist(_dist) }}
+ {{ sort(_sort_type, _sort) }}
+ ;
+
+ insert into {{ relation.include(database=(not temporary), schema=(not temporary)) }}
+ (
+ {{ sql }}
+ )
+ ;
+
+ {%- else %}
+
+ create {% if temporary -%}temporary{%- endif %} table
+ {{ relation.include(database=(not temporary), schema=(not temporary)) }}
+ {% if backup == false -%}backup no{%- endif %}
+ {{ dist(_dist) }}
+ {{ sort(_sort_type, _sort) }}
+ as (
+ {{ sql }}
+ );
+
+ {%- endif %}
+{%- endmacro %}
+
+
+{% macro redshift__create_view_as(relation, sql) -%}
+ {%- set binding = config.get('bind', default=True) -%}
+
+ {% set bind_qualifier = '' if binding else 'with no schema binding' %}
+ {%- set sql_header = config.get('sql_header', none) -%}
+
+ {{ sql_header if sql_header is not none }}
+
+ create view {{ relation }}
+ {%- set contract_config = config.get('contract') -%}
+ {%- if contract_config.enforced -%}
+ {{ get_assert_columns_equivalent(sql) }}
+ {%- endif %} as (
+ {{ sql }}
+ ) {{ bind_qualifier }};
+{% endmacro %}
+
+
+{% macro redshift__create_schema(relation) -%}
+ {{ postgres__create_schema(relation) }}
+{% endmacro %}
+
+
+{% macro redshift__drop_schema(relation) -%}
+ {{ postgres__drop_schema(relation) }}
+{% endmacro %}
+
+
+{% macro redshift__get_columns_in_relation(relation) -%}
+ {% call statement('get_columns_in_relation', fetch_result=True) %}
+ with bound_views as (
+ select
+ ordinal_position,
+ table_schema,
+ column_name,
+ data_type,
+ character_maximum_length,
+ numeric_precision,
+ numeric_scale
+
+ from information_schema."columns"
+ where table_name = '{{ relation.identifier }}'
+ ),
+
+ unbound_views as (
+ select
+ ordinal_position,
+ view_schema,
+ col_name,
+ case
+ when col_type ilike 'character varying%' then
+ 'character varying'
+ when col_type ilike 'numeric%' then 'numeric'
+ else col_type
+ end as col_type,
+ case
+ when col_type like 'character%'
+ then nullif(REGEXP_SUBSTR(col_type, '[0-9]+'), '')::int
+ else null
+ end as character_maximum_length,
+ case
+ when col_type like 'numeric%'
+ then nullif(
+ SPLIT_PART(REGEXP_SUBSTR(col_type, '[0-9,]+'), ',', 1),
+ '')::int
+ else null
+ end as numeric_precision,
+ case
+ when col_type like 'numeric%'
+ then nullif(
+ SPLIT_PART(REGEXP_SUBSTR(col_type, '[0-9,]+'), ',', 2),
+ '')::int
+ else null
+ end as numeric_scale
+
+ from pg_get_late_binding_view_cols()
+ cols(view_schema name, view_name name, col_name name,
+ col_type varchar, ordinal_position int)
+ where view_name = '{{ relation.identifier }}'
+ ),
+
+ external_views as (
+ select
+ columnnum,
+ schemaname,
+ columnname,
+ case
+ when external_type ilike 'character varying%' or external_type ilike 'varchar%'
+ then 'character varying'
+ when external_type ilike 'numeric%' then 'numeric'
+ else external_type
+ end as external_type,
+ case
+ when external_type like 'character%' or external_type like 'varchar%'
+ then nullif(
+ REGEXP_SUBSTR(external_type, '[0-9]+'),
+ '')::int
+ else null
+ end as character_maximum_length,
+ case
+ when external_type like 'numeric%'
+ then nullif(
+ SPLIT_PART(REGEXP_SUBSTR(external_type, '[0-9,]+'), ',', 1),
+ '')::int
+ else null
+ end as numeric_precision,
+ case
+ when external_type like 'numeric%'
+ then nullif(
+ SPLIT_PART(REGEXP_SUBSTR(external_type, '[0-9,]+'), ',', 2),
+ '')::int
+ else null
+ end as numeric_scale
+ from
+ pg_catalog.svv_external_columns
+ where
+ schemaname = '{{ relation.schema }}'
+ and tablename = '{{ relation.identifier }}'
+
+ ),
+
+ unioned as (
+ select * from bound_views
+ union all
+ select * from unbound_views
+ union all
+ select * from external_views
+ )
+
+ select
+ column_name,
+ data_type,
+ character_maximum_length,
+ numeric_precision,
+ numeric_scale
+
+ from unioned
+ {% if relation.schema %}
+ where table_schema = '{{ relation.schema }}'
+ {% endif %}
+ order by ordinal_position
+ {% endcall %}
+ {% set table = load_result('get_columns_in_relation').table %}
+ {{ return(sql_convert_columns_in_relation(table)) }}
+{% endmacro %}
+
+{% macro redshift__list_relations_without_caching(schema_relation) %}
+
+ {% call statement('list_relations_without_caching', fetch_result=True) -%}
+ select
+ table_catalog as database,
+ table_name as name,
+ table_schema as schema,
+ 'table' as type
+ from information_schema.tables
+ where table_schema ilike '{{ schema_relation.schema }}'
+ and table_type = 'BASE TABLE'
+ union all
+ select
+ table_catalog as database,
+ table_name as name,
+ table_schema as schema,
+ case
+ when view_definition ilike '%create materialized view%'
+ then 'materialized_view'
+ else 'view'
+ end as type
+ from information_schema.views
+ where table_schema ilike '{{ schema_relation.schema }}'
+ {% endcall %}
+ {{ return(load_result('list_relations_without_caching').table) }}
+{% endmacro %}
+
+{% macro redshift__information_schema_name(database) -%}
+ {{ return(postgres__information_schema_name(database)) }}
+{%- endmacro %}
+
+
+{% macro redshift__list_schemas(database) -%}
+ {{ return(postgres__list_schemas(database)) }}
+{%- endmacro %}
+
+{% macro redshift__check_schema_exists(information_schema, schema) -%}
+ {{ return(postgres__check_schema_exists(information_schema, schema)) }}
+{%- endmacro %}
+
+
+{% macro redshift__persist_docs(relation, model, for_relation, for_columns) -%}
+ {% if for_relation and config.persist_relation_docs() and model.description %}
+ {% do run_query(alter_relation_comment(relation, model.description)) %}
+ {% endif %}
+
+ {# Override: do not set column comments for late-binding views (LBVs) #}
+ {% set is_lbv = config.get('materialized') == 'view' and config.get('bind') == false %}
+ {% if for_columns and config.persist_column_docs() and model.columns and not is_lbv %}
+ {% do run_query(alter_column_comment(relation, model.columns)) %}
+ {% endif %}
+{% endmacro %}
+
+{#
+ Copied from the postgres-adapter.
+#}
+{% macro escape_comment(comment) -%}
+ {% if comment is not string %}
+ {% do exceptions.raise_compiler_error('cannot escape a non-string: ' ~ comment) %}
+ {% endif %}
+ {%- set magic = '$dbt_comment_literal_block$' -%}
+ {%- if magic in comment -%}
+ {%- do exceptions.raise_compiler_error('The string ' ~ magic ~ ' is not allowed in comments.') -%}
+ {%- endif -%}
+ {{ magic }}{{ comment }}{{ magic }}
+{%- endmacro %}
+
+{% macro redshift__alter_relation_comment(relation, comment) %}
+ {%- set escaped_comment = escape_comment(comment) -%}
+ {%- set relation_type = 'view' if relation.type == 'materialized_view' else relation.type -%}
+ comment on {{ relation_type }} {{ relation }} is {{ escaped_comment }};
+{% endmacro %}
+
+
+{% macro redshift__alter_column_comment(relation, column_dict) %}
+ {% do return(postgres__alter_column_comment(relation, column_dict)) %}
+{% endmacro %}
+
+
+{% macro redshift__alter_relation_add_remove_columns(relation, add_columns, remove_columns) %}
+
+ {% if add_columns %}
+
+ {% for column in add_columns %}
+ {% set sql -%}
+ alter {{ relation.type }} {{ relation }} add column {{ column.name }} {{ column.data_type }}
+ {% endset %}
+ {% do run_query(sql) %}
+ {% endfor %}
+
+ {% endif %}
+
+ {% if remove_columns %}
+
+ {% for column in remove_columns %}
+ {% set sql -%}
+ alter {{ relation.type }} {{ relation }} drop column {{ column.name }}
+ {% endset %}
+ {% do run_query(sql) %}
+ {% endfor %}
+
+ {% endif %}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/adapters/apply_grants.sql b/dbt-redshift/src/dbt/include/redshift/macros/adapters/apply_grants.sql
new file mode 100644
index 000000000..fa6523a26
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/adapters/apply_grants.sql
@@ -0,0 +1,27 @@
+{% macro redshift__get_show_grant_sql(relation) %}
+
+with privileges as (
+
+ -- valid options per https://docs.aws.amazon.com/redshift/latest/dg/r_HAS_TABLE_PRIVILEGE.html
+ select 'select' as privilege_type
+ union all
+ select 'insert' as privilege_type
+ union all
+ select 'update' as privilege_type
+ union all
+ select 'delete' as privilege_type
+ union all
+ select 'references' as privilege_type
+
+)
+
+select
+ u.usename as grantee,
+ p.privilege_type
+from pg_user u
+cross join privileges p
+where has_table_privilege(u.usename, '{{ relation }}', privilege_type)
+ and u.usename != current_user
+ and not u.usesuper
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/adapters/unit_testing.sql b/dbt-redshift/src/dbt/include/redshift/macros/adapters/unit_testing.sql
new file mode 100644
index 000000000..5463f4e2b
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/adapters/unit_testing.sql
@@ -0,0 +1,11 @@
+{%- macro redshift__validate_fixture_rows(rows, row_number) -%}
+ {%- if rows is not none and rows|length > 0 -%}
+ {%- set row = rows[0] -%}
+ {%- for key, value in row.items() -%}
+ {%- if value is none -%}
+ {%- set fixture_name = "expected output" if model.resource_type == 'unit_test' else ("'" ~ model.name ~ "'") -%}
+ {{ exceptions.raise_compiler_error("Unit test fixture " ~ fixture_name ~ " in " ~ model.name ~ " does not have any row free of null values, which may cause type mismatch errors during unit test execution.") }}
+ {%- endif -%}
+ {%- endfor -%}
+ {%- endif -%}
+{%- endmacro -%}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_relation.sql b/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_relation.sql
new file mode 100644
index 000000000..d0d79c65a
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_relation.sql
@@ -0,0 +1,82 @@
+{% macro redshift__get_catalog_relations(information_schema, relations) -%}
+
+ {% set database = information_schema.database %}
+ {{ adapter.verify_database(database) }}
+
+ {#-- Compute a left-outer join in memory. Some Redshift queries are
+ -- leader-only, and cannot be joined to other compute-based queries #}
+
+ {% set catalog = _redshift__get_base_catalog_by_relation(database, relations) %}
+
+ {% set select_extended = redshift__can_select_from('svv_table_info') %}
+ {% if select_extended %}
+ {% set extended_catalog = _redshift__get_extended_catalog_by_relation(relations) %}
+ {% set catalog = catalog.join(extended_catalog, ['table_schema', 'table_name']) %}
+ {% else %}
+ {{ redshift__no_svv_table_info_warning() }}
+ {% endif %}
+
+ {{ return(catalog) }}
+
+{% endmacro %}
+
+
+{% macro _redshift__get_base_catalog_by_relation(database, relations) -%}
+ {%- call statement('base_catalog', fetch_result=True) -%}
+ with
+ late_binding as ({{ _redshift__get_late_binding_by_relation_sql(relations) }}),
+ early_binding as ({{ _redshift__get_early_binding_by_relation_sql(database, relations) }}),
+ unioned as (select * from early_binding union all select * from late_binding),
+ table_owners as ({{ redshift__get_table_owners_sql() }})
+ select '{{ database }}' as table_database, *
+ from unioned
+ join table_owners using (table_schema, table_name)
+ order by "column_index"
+ {%- endcall -%}
+ {{ return(load_result('base_catalog').table) }}
+{%- endmacro %}
+
+
+{% macro _redshift__get_late_binding_by_relation_sql(relations) %}
+ {{ redshift__get_late_binding_sql() }}
+ where (
+ {%- for relation in relations -%}
+ (
+ upper(table_schema) = upper('{{ relation.schema }}')
+ and upper(table_name) = upper('{{ relation.identifier }}')
+ )
+ {%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+{% endmacro %}
+
+
+{% macro _redshift__get_early_binding_by_relation_sql(database, relations) %}
+ {{ redshift__get_early_binding_sql(database) }}
+ and (
+ {%- for relation in relations -%}
+ (
+ upper(sch.nspname) = upper('{{ relation.schema }}')
+ and upper(tbl.relname) = upper('{{ relation.identifier }}')
+ )
+ {%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+{% endmacro %}
+
+
+{% macro _redshift__get_extended_catalog_by_relation(relations) %}
+ {%- call statement('extended_catalog', fetch_result=True) -%}
+ {{ redshift__get_extended_catalog_sql() }}
+ where (
+ {%- for relation in relations -%}
+ (
+ upper("schema") = upper('{{ relation.schema }}')
+ and upper("table") = upper('{{ relation.identifier }}')
+ )
+ {%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+ {%- endcall -%}
+ {{ return(load_result('extended_catalog').table) }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_schema.sql b/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_schema.sql
new file mode 100644
index 000000000..99325f765
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/catalog/by_schema.sql
@@ -0,0 +1,70 @@
+{% macro redshift__get_catalog(information_schema, schemas) %}
+
+ {% set database = information_schema.database %}
+ {{ adapter.verify_database(database) }}
+
+ {#-- Compute a left-outer join in memory. Some Redshift queries are
+ -- leader-only, and cannot be joined to other compute-based queries #}
+
+ {% set catalog = _redshift__get_base_catalog_by_schema(database, schemas) %}
+
+ {% set select_extended = redshift__can_select_from('svv_table_info') %}
+ {% if select_extended %}
+ {% set extended_catalog = _redshift__get_extended_catalog_by_schema(schemas) %}
+ {% set catalog = catalog.join(extended_catalog, ['table_schema', 'table_name']) %}
+ {% else %}
+ {{ redshift__no_svv_table_info_warning() }}
+ {% endif %}
+
+ {{ return(catalog) }}
+
+{% endmacro %}
+
+
+{% macro _redshift__get_base_catalog_by_schema(database, schemas) -%}
+ {%- call statement('base_catalog', fetch_result=True) -%}
+ with
+ late_binding as ({{ _redshift__get_late_binding_by_schema_sql(schemas) }}),
+ early_binding as ({{ _redshift__get_early_binding_by_schema_sql(database, schemas) }}),
+ unioned as (select * from early_binding union all select * from late_binding),
+ table_owners as ({{ redshift__get_table_owners_sql() }})
+ select '{{ database }}' as table_database, *
+ from unioned
+ join table_owners using (table_schema, table_name)
+ order by "column_index"
+ {%- endcall -%}
+ {{ return(load_result('base_catalog').table) }}
+{%- endmacro %}
+
+
+{% macro _redshift__get_late_binding_by_schema_sql(schemas) %}
+ {{ redshift__get_late_binding_sql() }}
+ where (
+ {%- for schema in schemas -%}
+ upper(table_schema) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+{% endmacro %}
+
+
+{% macro _redshift__get_early_binding_by_schema_sql(database, schemas) %}
+ {{ redshift__get_early_binding_sql(database) }}
+ and (
+ {%- for schema in schemas -%}
+ upper(sch.nspname) = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+{% endmacro %}
+
+
+{% macro _redshift__get_extended_catalog_by_schema(schemas) %}
+ {%- call statement('extended_catalog', fetch_result=True) -%}
+ {{ redshift__get_extended_catalog_sql() }}
+ where (
+ {%- for schema in schemas -%}
+ upper("schema") = upper('{{ schema }}'){%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+ {%- endcall -%}
+ {{ return(load_result('extended_catalog').table) }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/catalog/catalog.sql b/dbt-redshift/src/dbt/include/redshift/macros/catalog/catalog.sql
new file mode 100644
index 000000000..694a9441b
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/catalog/catalog.sql
@@ -0,0 +1,176 @@
+{% macro redshift__get_late_binding_sql() %}
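+    {#- columns of late binding views are not stored in pg_catalog; the leader-node function pg_get_late_binding_view_cols() returns them instead -#}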
+ select
+ table_schema,
+ table_name,
+ 'LATE BINDING VIEW'::varchar as table_type,
+ null::text as table_comment,
+ column_name,
+ column_index,
+ column_type,
+ null::text as column_comment
+ from pg_get_late_binding_view_cols()
+ cols(
+ table_schema name,
+ table_name name,
+ column_name name,
+ column_type varchar,
+ column_index int
+ )
+{% endmacro %}
+
+
+{% macro redshift__get_early_binding_sql(database) %}
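+    {#- regular (early binding) relations come from pg_catalog; a relkind of 'v' whose definition in information_schema.views contains "create materialized view" is reported as a materialized view -#}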
+ select
+ sch.nspname as table_schema,
+ tbl.relname as table_name,
+ case
+ when tbl.relkind = 'v' and mat_views.table_name is not null then 'MATERIALIZED VIEW'
+ when tbl.relkind = 'v' then 'VIEW'
+ else 'BASE TABLE'
+ end as table_type,
+ tbl_desc.description as table_comment,
+ col.attname as column_name,
+ col.attnum as column_index,
+ pg_catalog.format_type(col.atttypid, col.atttypmod) as column_type,
+ col_desc.description as column_comment
+ from pg_catalog.pg_namespace sch
+ join pg_catalog.pg_class tbl
+ on tbl.relnamespace = sch.oid
+ join pg_catalog.pg_attribute col
+ on col.attrelid = tbl.oid
+ left outer join pg_catalog.pg_description tbl_desc
+ on tbl_desc.objoid = tbl.oid
+ and tbl_desc.objsubid = 0
+ left outer join pg_catalog.pg_description col_desc
+ on col_desc.objoid = tbl.oid
+ and col_desc.objsubid = col.attnum
+ left outer join information_schema.views mat_views
+ on mat_views.table_schema = sch.nspname
+ and mat_views.table_name = tbl.relname
+ and mat_views.view_definition ilike '%create materialized view%'
+ and mat_views.table_catalog = '{{ database }}'
+ where tbl.relkind in ('r', 'v', 'f', 'p')
+ and col.attnum > 0
+ and not col.attisdropped
+{% endmacro %}
+
+
+{% macro redshift__get_table_owners_sql() %}
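+    {#- ownership is not available in the binding queries above, so it is joined in separately from pg_tables and pg_views -#}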
+ select
+ schemaname as table_schema,
+ tablename as table_name,
+ tableowner as table_owner
+ from pg_tables
+ union all
+ select
+ schemaname as table_schema,
+ viewname as table_name,
+ viewowner as table_owner
+ from pg_views
+{% endmacro %}
+
+
+{% macro redshift__get_extended_catalog_sql() %}
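+    {#- the stats-style columns surfaced in dbt docs come from svv_table_info; access to that table is checked first via redshift__can_select_from -#}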
+ select
+ "schema" as table_schema,
+ "table" as table_name,
+
+ 'Encoded'::text as "stats:encoded:label",
+ encoded as "stats:encoded:value",
+ 'Indicates whether any column in the table has compression encoding defined.'::text as "stats:encoded:description",
+ true as "stats:encoded:include",
+
+ 'Dist Style' as "stats:diststyle:label",
+ diststyle as "stats:diststyle:value",
+ 'Distribution style or distribution key column, if key distribution is defined.'::text as "stats:diststyle:description",
+ true as "stats:diststyle:include",
+
+ 'Sort Key 1' as "stats:sortkey1:label",
+ -- handle 0xFF byte in response for interleaved sort styles
+ case
+ when sortkey1 like 'INTERLEAVED%' then 'INTERLEAVED'::text
+ else sortkey1
+ end as "stats:sortkey1:value",
+ 'First column in the sort key.'::text as "stats:sortkey1:description",
+ (sortkey1 is not null) as "stats:sortkey1:include",
+
+ 'Max Varchar' as "stats:max_varchar:label",
+ max_varchar as "stats:max_varchar:value",
+ 'Size of the largest column that uses a VARCHAR data type.'::text as "stats:max_varchar:description",
+ true as "stats:max_varchar:include",
+
+ -- exclude this, as the data is strangely returned with null-byte characters
+ 'Sort Key 1 Encoding' as "stats:sortkey1_enc:label",
+ sortkey1_enc as "stats:sortkey1_enc:value",
+ 'Compression encoding of the first column in the sort key.' as "stats:sortkey1_enc:description",
+ false as "stats:sortkey1_enc:include",
+
+ '# Sort Keys' as "stats:sortkey_num:label",
+ sortkey_num as "stats:sortkey_num:value",
+ 'Number of columns defined as sort keys.' as "stats:sortkey_num:description",
+ (sortkey_num > 0) as "stats:sortkey_num:include",
+
+ 'Approximate Size' as "stats:size:label",
+ size * 1000000 as "stats:size:value",
+ 'Approximate size of the table, calculated from a count of 1MB blocks'::text as "stats:size:description",
+ true as "stats:size:include",
+
+ 'Disk Utilization' as "stats:pct_used:label",
+ pct_used / 100.0 as "stats:pct_used:value",
+ 'Percent of available space that is used by the table.'::text as "stats:pct_used:description",
+ true as "stats:pct_used:include",
+
+ 'Unsorted %' as "stats:unsorted:label",
+ unsorted / 100.0 as "stats:unsorted:value",
+ 'Percent of unsorted rows in the table.'::text as "stats:unsorted:description",
+ (unsorted is not null) as "stats:unsorted:include",
+
+ 'Stats Off' as "stats:stats_off:label",
+ stats_off as "stats:stats_off:value",
+ 'Number that indicates how stale the table statistics are; 0 is current, 100 is out of date.'::text as "stats:stats_off:description",
+ true as "stats:stats_off:include",
+
+ 'Approximate Row Count' as "stats:rows:label",
+ tbl_rows as "stats:rows:value",
+ 'Approximate number of rows in the table. This value includes rows marked for deletion, but not yet vacuumed.'::text as "stats:rows:description",
+ true as "stats:rows:include",
+
+ 'Sort Key Skew' as "stats:skew_sortkey1:label",
+ skew_sortkey1 as "stats:skew_sortkey1:value",
+ 'Ratio of the size of the largest non-sort key column to the size of the first column of the sort key.'::text as "stats:skew_sortkey1:description",
+ (skew_sortkey1 is not null) as "stats:skew_sortkey1:include",
+
+ 'Skew Rows' as "stats:skew_rows:label",
+ skew_rows as "stats:skew_rows:value",
+ 'Ratio of the number of rows in the slice with the most rows to the number of rows in the slice with the fewest rows.'::text as "stats:skew_rows:description",
+ (skew_rows is not null) as "stats:skew_rows:include"
+
+ from svv_table_info
+{% endmacro %}
+
+
+{% macro redshift__can_select_from(table_name) %}
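+    {#- returns true when the current user has SELECT on the given table, e.g. svv_table_info -#}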
+
+ {%- call statement('has_table_privilege', fetch_result=True) -%}
+ select has_table_privilege(current_user, '{{ table_name }}', 'SELECT') as can_select
+ {%- endcall -%}
+
+ {% set can_select = load_result('has_table_privilege').table[0]['can_select'] %}
+ {{ return(can_select) }}
+
+{% endmacro %}
+
+
+{% macro redshift__no_svv_table_info_warning() %}
+
+ {% set msg %}
+
+ Warning: The database user "{{ target.user }}" has insufficient permissions to
+ query the "svv_table_info" table. Please grant SELECT permissions on this table
+ to the "{{ target.user }}" user to fetch extended table details from Redshift.
+
+ {% endset %}
+
+ {{ log(msg, info=True) }}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/incremental_merge.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/incremental_merge.sql
new file mode 100644
index 000000000..c9f8b98f6
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/incremental_merge.sql
@@ -0,0 +1,114 @@
+{% macro redshift__get_merge_sql(target, source, unique_key, dest_columns, incremental_predicates=none) -%}
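+    {#- predicates referencing DBT_INTERNAL_DEST are rewritten to the target relation name because the merge target below is not aliased -#}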
+ {%- set predicates = [] -%}
+ {% if incremental_predicates is not none %}
+ {%- set incremental_predicates_list = [] + incremental_predicates -%}
+ {%- for pred in incremental_predicates_list -%}
+ {% if "DBT_INTERNAL_DEST." in pred %}
+ {%- set pred = pred | replace("DBT_INTERNAL_DEST.", target ~ "." ) -%}
+ {% endif %}
+ {% if "dbt_internal_dest." in pred %}
+ {%- set pred = pred | replace("dbt_internal_dest.", target ~ "." ) -%}
+ {% endif %}
+ {% do predicates.append(pred) %}
+ {% endfor %}
+ {% endif %}
+
+ {%- set merge_update_columns = config.get('merge_update_columns') -%}
+ {%- set merge_exclude_columns = config.get('merge_exclude_columns') -%}
+ {%- set update_columns = get_merge_update_columns(merge_update_columns, merge_exclude_columns, dest_columns) -%}
+ {%- set insert_columns = get_merge_update_columns(none, none, dest_columns) -%}
+ {%- set sql_header = config.get('sql_header', none) -%}
+
+ {% if unique_key %}
+ {% if unique_key is sequence and unique_key is not mapping and unique_key is not string %}
+ {% for key in unique_key %}
+ {% set this_key_match %}
+ DBT_INTERNAL_SOURCE.{{ key }} = {{ target }}.{{ key }}
+ {% endset %}
+ {% do predicates.append(this_key_match) %}
+ {% endfor %}
+ {% else %}
+ {% set unique_key_match %}
+ DBT_INTERNAL_SOURCE.{{ unique_key }} = {{ target }}.{{ unique_key }}
+ {% endset %}
+ {% do predicates.append(unique_key_match) %}
+ {% endif %}
+ {% else %}
+ {% do predicates.append('FALSE') %}
+ {% endif %}
+
+ {{ sql_header if sql_header is not none }}
+
+ merge into {{ target }}
+ using {{ source }} as DBT_INTERNAL_SOURCE
+ on {{"(" ~ predicates | join(") and (") ~ ")"}}
+
+ {% if unique_key %}
+ when matched then update set
+ {% for column_name in update_columns -%}
+ {{ column_name }} = DBT_INTERNAL_SOURCE.{{ column_name }}
+ {%- if not loop.last %}, {% endif %}
+ {% endfor %}
+ {% endif %}
+
+ when not matched then insert (
+ {% for column_name in insert_columns -%}
+ {{ column_name }}
+ {%- if not loop.last %}, {% endif %}
+ {% endfor %}
+ )
+ values (
+ {% for column_name in insert_columns -%}
+ DBT_INTERNAL_SOURCE.{{ column_name }}
+ {%- if not loop.last %}, {% endif %}
+ {% endfor %}
+ )
+
+{% endmacro %}
+
+{% macro redshift__get_incremental_microbatch_sql(arg_dict) %}
+ {#-
+ Technically this function could just call out to the default implementation of delete_insert.
+    However, the default implementation requires a unique_key, which we actually do not want or
+    need. Thus we re-implement delete+insert here without the unique_key requirement
+ -#}
+
+ {%- set target = arg_dict["target_relation"] -%}
+ {%- set source = arg_dict["temp_relation"] -%}
+ {%- set dest_columns = arg_dict["dest_columns"] -%}
+ {%- set predicates = [] -%}
+
+ {%- set incremental_predicates = [] if arg_dict.get('incremental_predicates') is none else arg_dict.get('incremental_predicates') -%}
+ {%- for pred in incremental_predicates -%}
+ {% if "DBT_INTERNAL_DEST." in pred %}
+ {%- set pred = pred | replace("DBT_INTERNAL_DEST.", target ~ "." ) -%}
+ {% endif %}
+ {% if "dbt_internal_dest." in pred %}
+ {%- set pred = pred | replace("dbt_internal_dest.", target ~ "." ) -%}
+ {% endif %}
+ {% do predicates.append(pred) %}
+ {% endfor %}
+
+ {% if not model.batch or (not model.batch.event_time_start or not model.batch.event_time_end) -%}
+ {% do exceptions.raise_compiler_error('dbt could not compute the start and end timestamps for the running batch') %}
+ {% endif %}
+
+ {#-- Add additional incremental_predicates to filter for batch --#}
+ {% do predicates.append(model.config.event_time ~ " >= TIMESTAMP '" ~ model.batch.event_time_start ~ "'") %}
+ {% do predicates.append(model.config.event_time ~ " < TIMESTAMP '" ~ model.batch.event_time_end ~ "'") %}
+ {% do arg_dict.update({'incremental_predicates': predicates}) %}
+
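+    {#-- delete target rows that match the batch window (and any incremental_predicates), then insert the new batch --#}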
+ delete from {{ target }}
+ where (
+ {% for predicate in predicates %}
+ {%- if not loop.first %}and {% endif -%} {{ predicate }}
+ {% endfor %}
+ );
+
+ {%- set dest_cols_csv = get_quoted_csv(dest_columns | map(attribute="name")) -%}
+ insert into {{ target }} ({{ dest_cols_csv }})
+ (
+ select {{ dest_cols_csv }}
+ from {{ source }}
+ )
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/materialized_view.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/materialized_view.sql
new file mode 100644
index 000000000..9b1ef2d50
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/materialized_view.sql
@@ -0,0 +1,5 @@
+{% macro redshift__get_materialized_view_configuration_changes(existing_relation, new_config) %}
+ {% set _existing_materialized_view = redshift__describe_materialized_view(existing_relation) %}
+ {% set _configuration_changes = existing_relation.materialized_view_config_changeset(_existing_materialized_view, new_config.model) %}
+ {% do return(_configuration_changes) %}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/seeds/helpers.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/seeds/helpers.sql
new file mode 100644
index 000000000..32afdae81
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/seeds/helpers.sql
@@ -0,0 +1,27 @@
+{% macro redshift__create_csv_table(model, agate_table) %}
+ {%- set column_override = model['config'].get('column_types', {}) -%}
+ {%- set quote_seed_column = model['config'].get('quote_columns', None) -%}
+ {%- set _dist = model['config'].get('dist', None) -%}
+
+ {% set sql %}
+ create table {{ this.render() }} (
+ {%- for col_name in agate_table.column_names -%}
+ {%- set inferred_type = adapter.convert_type(agate_table, loop.index0) -%}
+ {%- set type = column_override.get(col_name, inferred_type) -%}
+ {%- set column_name = (col_name | string) -%}
+ {{ adapter.quote_seed_column(column_name, quote_seed_column) }} {{ type }} {%- if not loop.last -%}, {%- endif -%}
+ {%- endfor -%}
+ )
+ {{ dist(_dist) }}
+ {% endset %}
+
+ {% call statement('_') -%}
+ {{ sql }}
+ {%- endcall %}
+
+ {{ return(sql) }}
+{% endmacro %}
+
+{% macro redshift__get_batch_size() %}
+ {{ return(500) }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/snapshot_merge.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/snapshot_merge.sql
new file mode 100644
index 000000000..eda314727
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/snapshot_merge.sql
@@ -0,0 +1,4 @@
+
+{% macro redshift__snapshot_merge_sql(target, source, insert_cols) -%}
+ {{ postgres__snapshot_merge_sql(target, source, insert_cols) }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/table.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/table.sql
new file mode 100644
index 000000000..907c83874
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/table.sql
@@ -0,0 +1,69 @@
+{% materialization table, adapter='redshift' %}
+
+ {%- set existing_relation = load_cached_relation(this) -%}
+ {%- set target_relation = this.incorporate(type='table') %}
+ {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}
+ -- the intermediate_relation should not already exist in the database; get_relation
+ -- will return None in that case. Otherwise, we get a relation that we can drop
+ -- later, before we try to use this name for the current operation
+ {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}
+ /*
+ See ../view/view.sql for more information about this relation.
+ */
+ {%- set backup_relation_type = 'table' if existing_relation is none else existing_relation.type -%}
+ {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}
+ -- as above, the backup_relation should not already exist
+ {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}
+  -- grab the current table's grants config for comparison later on
+ {% set grant_config = config.get('grants') %}
+
+ -- drop the temp relations if they exist already in the database
+ {{ drop_relation_if_exists(preexisting_intermediate_relation) }}
+ {{ drop_relation_if_exists(preexisting_backup_relation) }}
+
+ {{ run_hooks(pre_hooks, inside_transaction=False) }}
+
+ -- `BEGIN` happens here:
+ {{ run_hooks(pre_hooks, inside_transaction=True) }}
+
+ -- build model
+ {% call statement('main') -%}
+ {{ get_create_table_as_sql(False, intermediate_relation, sql) }}
+ {%- endcall %}
+
+ -- cleanup
+ {% if existing_relation is not none %}
+ /* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped
+ since the variable was first set. */
+ {% set existing_relation = load_cached_relation(existing_relation) %}
+ {% if existing_relation is not none %}
+ {% if existing_relation.can_be_renamed %}
+ {{ adapter.rename_relation(existing_relation, backup_relation) }}
+ {% else %}
+ {{ drop_relation_if_exists(existing_relation) }}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+
+
+ {{ adapter.rename_relation(intermediate_relation, target_relation) }}
+
+ {% do create_indexes(target_relation) %}
+
+ {{ run_hooks(post_hooks, inside_transaction=True) }}
+
+ {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}
+ {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}
+
+ {% do persist_docs(target_relation, model) %}
+
+ -- `COMMIT` happens here
+ {{ adapter.commit() }}
+
+ -- finally, drop the existing/backup relation after the commit
+ {{ drop_relation_if_exists(backup_relation) }}
+
+ {{ run_hooks(post_hooks, inside_transaction=False) }}
+
+ {{ return({'relations': [target_relation]}) }}
+{% endmaterialization %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/materializations/view.sql b/dbt-redshift/src/dbt/include/redshift/macros/materializations/view.sql
new file mode 100644
index 000000000..f353f913f
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/materializations/view.sql
@@ -0,0 +1,77 @@
+{%- materialization view, adapter='redshift' -%}
+
+ {%- set existing_relation = load_cached_relation(this) -%}
+ {%- set target_relation = this.incorporate(type='view') -%}
+ {%- set intermediate_relation = make_intermediate_relation(target_relation) -%}
+
+ -- the intermediate_relation should not already exist in the database; get_relation
+ -- will return None in that case. Otherwise, we get a relation that we can drop
+ -- later, before we try to use this name for the current operation
+ {%- set preexisting_intermediate_relation = load_cached_relation(intermediate_relation) -%}
+ /*
+ This relation (probably) doesn't exist yet. If it does exist, it's a leftover from
+ a previous run, and we're going to try to drop it immediately. At the end of this
+ materialization, we're going to rename the "existing_relation" to this identifier,
+ and then we're going to drop it. In order to make sure we run the correct one of:
+ - drop view ...
+ - drop table ...
+
+ We need to set the type of this relation to be the type of the existing_relation, if it exists,
+ or else "view" as a sane default if it does not. Note that if the existing_relation does not
+    exist, then there is nothing to move out of the way and subsequently drop. In that case,
+ this relation will be effectively unused.
+ */
+ {%- set backup_relation_type = 'view' if existing_relation is none else existing_relation.type -%}
+ {%- set backup_relation = make_backup_relation(target_relation, backup_relation_type) -%}
+ -- as above, the backup_relation should not already exist
+ {%- set preexisting_backup_relation = load_cached_relation(backup_relation) -%}
+  -- grab the current table's grants config for comparison later on
+ {% set grant_config = config.get('grants') %}
+
+ {{ run_hooks(pre_hooks, inside_transaction=False) }}
+
+ -- drop the temp relations if they exist already in the database
+ {{ drop_relation_if_exists(preexisting_intermediate_relation) }}
+ {{ drop_relation_if_exists(preexisting_backup_relation) }}
+
+ -- `BEGIN` happens here:
+ {{ run_hooks(pre_hooks, inside_transaction=True) }}
+
+ -- build model
+ {% call statement('main') -%}
+ {{ get_create_view_as_sql(intermediate_relation, sql) }}
+ {%- endcall %}
+
+ -- cleanup
+ -- move the existing view out of the way
+ {% if existing_relation is not none %}
+ /* Do the equivalent of rename_if_exists. 'existing_relation' could have been dropped
+ since the variable was first set. */
+ {% set existing_relation = load_cached_relation(existing_relation) %}
+ {% if existing_relation is not none %}
+ {% if existing_relation.can_be_renamed %}
+ {{ adapter.rename_relation(existing_relation, backup_relation) }}
+ {% else %}
+ {{ drop_relation_if_exists(existing_relation) }}
+ {% endif %}
+ {% endif %}
+ {% endif %}
+
+ {{ adapter.rename_relation(intermediate_relation, target_relation) }}
+
+ {% set should_revoke = should_revoke(existing_relation, full_refresh_mode=True) %}
+ {% do apply_grants(target_relation, grant_config, should_revoke=should_revoke) %}
+
+ {% do persist_docs(target_relation, model) %}
+
+ {{ run_hooks(post_hooks, inside_transaction=True) }}
+
+ {{ adapter.commit() }}
+
+ {{ drop_relation_if_exists(backup_relation) }}
+
+ {{ run_hooks(post_hooks, inside_transaction=False) }}
+
+ {{ return({'relations': [target_relation]}) }}
+
+{%- endmaterialization -%}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/metadata/relation_last_modified.sql b/dbt-redshift/src/dbt/include/redshift/macros/metadata/relation_last_modified.sql
new file mode 100644
index 000000000..f21299c72
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/metadata/relation_last_modified.sql
@@ -0,0 +1,29 @@
+{% macro redshift__get_relation_last_modified(information_schema, relations) -%}
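+    {#- approximates "last modified" as the most recent insert step recorded in sys_query_detail for each relation -#}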
+
+ {%- call statement('last_modified', fetch_result=True) -%}
+ select
+ ns.nspname as "schema",
+ c.relname as identifier,
+ max(qd.start_time) as last_modified,
+ {{ current_timestamp() }} as snapshotted_at
+ from pg_class c
+ join pg_namespace ns
+ on ns.oid = c.relnamespace
+ join sys_query_detail qd
+ on qd.table_id = c.oid
+ where qd.step_name = 'insert'
+ and (
+ {%- for relation in relations -%}
+ (
+ upper(ns.nspname) = upper('{{ relation.schema }}')
+ and upper(c.relname) = upper('{{ relation.identifier }}')
+ )
+ {%- if not loop.last %} or {% endif -%}
+ {%- endfor -%}
+ )
+ group by 1, 2, 4
+ {%- endcall -%}
+
+ {{ return(load_result('last_modified')) }}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations.sql
new file mode 100644
index 000000000..6d83c36b9
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations.sql
@@ -0,0 +1,45 @@
+{% macro redshift__get_relations() -%}
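+{#- returns one (dependent, referenced) row per relation dependency, derived from pg_depend; pg_rewrite maps view rewrite rules back to the dependent view itself -#}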
+
+{%- call statement('relations', fetch_result=True) -%}
+
+with
+ relation as (
+ select
+ pg_class.oid as relation_id,
+ pg_class.relname as relation_name,
+ pg_class.relnamespace as schema_id,
+ pg_namespace.nspname as schema_name,
+ pg_class.relkind as relation_type
+ from pg_class
+ join pg_namespace
+ on pg_class.relnamespace = pg_namespace.oid
+ where pg_namespace.nspname != 'information_schema'
+ and pg_namespace.nspname not like 'pg\_%'
+ ),
+ dependency as (
+ select distinct
+ coalesce(pg_rewrite.ev_class, pg_depend.objid) as dep_relation_id,
+ pg_depend.refobjid as ref_relation_id,
+ pg_depend.refclassid as ref_class_id
+ from pg_depend
+ left join pg_rewrite
+ on pg_depend.objid = pg_rewrite.oid
+ where coalesce(pg_rewrite.ev_class, pg_depend.objid) != pg_depend.refobjid
+ )
+
+select distinct
+ dep.schema_name as dependent_schema,
+ dep.relation_name as dependent_name,
+ ref.schema_name as referenced_schema,
+ ref.relation_name as referenced_name
+from dependency
+join relation ref
+ on dependency.ref_relation_id = ref.relation_id
+join relation dep
+ on dependency.dep_relation_id = dep.relation_id
+
+{%- endcall -%}
+
+{{ return(load_result('relations').table) }}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/alter.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/alter.sql
new file mode 100644
index 000000000..7f0379847
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/alter.sql
@@ -0,0 +1,26 @@
+{% macro redshift__get_alter_materialized_view_as_sql(
+ relation,
+ configuration_changes,
+ sql,
+ existing_relation,
+ backup_relation,
+ intermediate_relation
+) %}
+
+ -- apply a full refresh immediately if needed
+ {% if configuration_changes.requires_full_refresh %}
+
+ {{ get_replace_sql(existing_relation, relation, sql) }}
+
+ -- otherwise apply individual changes as needed
+ {% else %}
+
+ {%- set autorefresh = configuration_changes.autorefresh -%}
+ {%- if autorefresh -%}{{- log('Applying UPDATE AUTOREFRESH to: ' ~ relation) -}}{%- endif -%}
+
+ alter materialized view {{ relation }}
+ auto refresh {% if autorefresh.context %}yes{% else %}no{% endif %}
+
+ {%- endif -%}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/create.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/create.sql
new file mode 100644
index 000000000..1b81992e4
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/create.sql
@@ -0,0 +1,15 @@
+{% macro redshift__get_create_materialized_view_as_sql(relation, sql) %}
+
+ {%- set materialized_view = relation.from_config(config.model) -%}
+
+ create materialized view {{ materialized_view.path }}
+ backup {% if materialized_view.backup %}yes{% else %}no{% endif %}
+ diststyle {{ materialized_view.dist.diststyle }}
+ {% if materialized_view.dist.distkey %}distkey ({{ materialized_view.dist.distkey }}){% endif %}
+ {% if materialized_view.sort.sortkey %}sortkey ({{ ','.join(materialized_view.sort.sortkey) }}){% endif %}
+ auto refresh {% if materialized_view.autorefresh %}yes{% else %}no{% endif %}
+ as (
+ {{ materialized_view.query }}
+ )
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/describe.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/describe.sql
new file mode 100644
index 000000000..3f038b9ad
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/describe.sql
@@ -0,0 +1,57 @@
+{% macro redshift__describe_materialized_view(relation) %}
+ {#-
+        These need to be separate queries because Redshift will not let you query
+        svv_table_info and pg_views in the same statement. The same is true of svv_redshift_columns.
+ -#}
+
+ {%- set _materialized_view_sql -%}
+ select
+ tb.database,
+ tb.schema,
+ tb.table,
+ tb.diststyle,
+ tb.sortkey1,
+ mv.autorefresh
+ from svv_table_info tb
+ -- svv_mv_info is queryable by Redshift Serverless, but stv_mv_info is not
+ left join svv_mv_info mv
+ on mv.database_name = tb.database
+ and mv.schema_name = tb.schema
+ and mv.name = tb.table
+ where tb.table ilike '{{ relation.identifier }}'
+ and tb.schema ilike '{{ relation.schema }}'
+ and tb.database ilike '{{ relation.database }}'
+ {%- endset %}
+ {% set _materialized_view = run_query(_materialized_view_sql) %}
+
+ {%- set _column_descriptor_sql -%}
+ SELECT
+ a.attname as column,
+ a.attisdistkey as is_dist_key,
+ a.attsortkeyord as sort_key_position
+ FROM pg_class c
+ JOIN pg_namespace n ON n.oid = c.relnamespace
+ JOIN pg_attribute a ON a.attrelid = c.oid
+ WHERE
+ n.nspname ilike '{{ relation.schema }}'
+ AND c.relname LIKE 'mv_tbl__{{ relation.identifier }}__%'
+ {%- endset %}
+ {% set _column_descriptor = run_query(_column_descriptor_sql) %}
+
+ {%- set _query_sql -%}
+ select
+ vw.definition
+ from pg_views vw
+ where vw.viewname = '{{ relation.identifier }}'
+ and vw.schemaname = '{{ relation.schema }}'
+ and vw.definition ilike '%create materialized view%'
+ {%- endset %}
+ {% set _query = run_query(_query_sql) %}
+
+ {% do return({
+ 'materialized_view': _materialized_view,
+ 'query': _query,
+ 'columns': _column_descriptor,
+ })%}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/drop.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/drop.sql
new file mode 100644
index 000000000..0db283817
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/drop.sql
@@ -0,0 +1,3 @@
+{% macro redshift__drop_materialized_view(relation) -%}
+ drop materialized view if exists {{ relation }} cascade
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/refresh.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/refresh.sql
new file mode 100644
index 000000000..c53ed2786
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/materialized_view/refresh.sql
@@ -0,0 +1,3 @@
+{% macro redshift__refresh_materialized_view(relation) -%}
+ refresh materialized view {{ relation }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/table/drop.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/table/drop.sql
new file mode 100644
index 000000000..64ffc1f22
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/table/drop.sql
@@ -0,0 +1,3 @@
+{%- macro redshift__drop_table(relation) -%}
+ drop table if exists {{ relation }} cascade
+{%- endmacro -%}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/table/rename.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/table/rename.sql
new file mode 100644
index 000000000..08fd5a172
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/table/rename.sql
@@ -0,0 +1,3 @@
+{% macro redshift__get_rename_table_sql(relation, new_name) %}
+ alter table {{ relation }} rename to {{ new_name }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/view/drop.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/drop.sql
new file mode 100644
index 000000000..cba066a53
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/drop.sql
@@ -0,0 +1,3 @@
+{%- macro redshift__drop_view(relation) -%}
+ drop view if exists {{ relation }} cascade
+{%- endmacro -%}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/view/rename.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/rename.sql
new file mode 100644
index 000000000..a96b04451
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/rename.sql
@@ -0,0 +1,3 @@
+{% macro redshift__get_rename_view_sql(relation, new_name) %}
+ alter table {{ relation }} rename to {{ new_name }}
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/relations/view/replace.sql b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/replace.sql
new file mode 100644
index 000000000..7ae89ab45
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/relations/view/replace.sql
@@ -0,0 +1,18 @@
+{% macro redshift__get_replace_view_sql(relation, sql) -%}
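+    {#- bind=False appends "with no schema binding" so the view is created as a late binding view -#}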
+
+ {%- set binding = config.get('bind', default=True) -%}
+
+ {% set bind_qualifier = '' if binding else 'with no schema binding' %}
+ {%- set sql_header = config.get('sql_header', none) -%}
+
+ {{ sql_header if sql_header is not none }}
+
+ create or replace view {{ relation }}
+ {%- set contract_config = config.get('contract') -%}
+ {%- if contract_config.enforced -%}
+ {{ get_assert_columns_equivalent(sql) }}
+ {%- endif %} as (
+ {{ sql }}
+ ) {{ bind_qualifier }}
+
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/timestamps.sql b/dbt-redshift/src/dbt/include/redshift/macros/timestamps.sql
new file mode 100644
index 000000000..dee5f41ed
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/timestamps.sql
@@ -0,0 +1,20 @@
+{% macro redshift__current_timestamp() -%}
+ getdate()
+{%- endmacro %}
+
+{% macro redshift__snapshot_get_time() -%}
+ {{ current_timestamp() }}::timestamp
+{%- endmacro %}
+
+{% macro redshift__snapshot_string_as_time(timestamp) -%}
+ {%- set result = "'" ~ timestamp ~ "'::timestamp" -%}
+ {{ return(result) }}
+{%- endmacro %}
+
+{% macro redshift__current_timestamp_backcompat() -%}
+ getdate()
+{%- endmacro %}
+
+{% macro redshift__current_timestamp_in_utc_backcompat() -%}
+ getdate()
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/array_append.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_append.sql
new file mode 100644
index 000000000..83cf42d7f
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_append.sql
@@ -0,0 +1,3 @@
+{% macro redshift__array_append(array, new_element) -%}
+ {{ array_concat(array, array_construct([new_element])) }}
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/array_concat.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_concat.sql
new file mode 100644
index 000000000..02065bfbf
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_concat.sql
@@ -0,0 +1,3 @@
+{% macro redshift__array_concat(array_1, array_2) -%}
+ array_concat({{ array_1 }}, {{ array_2 }})
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/array_construct.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_construct.sql
new file mode 100644
index 000000000..1d5717a47
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/array_construct.sql
@@ -0,0 +1,3 @@
+{% macro redshift__array_construct(inputs, data_type) -%}
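+    {#- the data_type argument matches the cross-adapter signature but is not used; array() is built from the inputs alone -#}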
+ array( {{ inputs|join(' , ') }} )
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/cast_bool_to_text.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/cast_bool_to_text.sql
new file mode 100644
index 000000000..f88dee17b
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/cast_bool_to_text.sql
@@ -0,0 +1,6 @@
+{% macro redshift__cast_bool_to_text(field) %}
+ case
+ when {{ field }} is true then 'true'
+ when {{ field }} is false then 'false'
+ end::text
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/dateadd.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/dateadd.sql
new file mode 100644
index 000000000..ba3e666a3
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/dateadd.sql
@@ -0,0 +1,9 @@
+{% macro redshift__dateadd(datepart, interval, from_date_or_timestamp) %}
+
+ dateadd(
+ {{ datepart }},
+ {{ interval }},
+ {{ from_date_or_timestamp }}
+ )
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/datediff.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/datediff.sql
new file mode 100644
index 000000000..1d540b908
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/datediff.sql
@@ -0,0 +1,9 @@
+{% macro redshift__datediff(first_date, second_date, datepart) -%}
+
+ datediff(
+ {{ datepart }},
+ {{ first_date }},
+ {{ second_date }}
+ )
+
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/last_day.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/last_day.sql
new file mode 100644
index 000000000..8c643644b
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/last_day.sql
@@ -0,0 +1,7 @@
+{% macro redshift__last_day(date, datepart) %}
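+    {#- last day of the period: truncate to the datepart, add one datepart, then subtract one day -#}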
+ cast(
+ {{dbt.dateadd('day', '-1',
+ dbt.dateadd(datepart, '1', dbt.date_trunc(datepart, date))
+ )}}
+ as date)
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/length.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/length.sql
new file mode 100644
index 000000000..040874353
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/length.sql
@@ -0,0 +1,7 @@
+{% macro redshift__length(expression) %}
+
+ len(
+ {{ expression }}
+ )
+
+{%- endmacro -%}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/listagg.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/listagg.sql
new file mode 100644
index 000000000..3dc6092b4
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/listagg.sql
@@ -0,0 +1,34 @@
+{# if there are instances of delimiter_text within your measure, you cannot include a limit_num #}
+{% macro redshift__listagg(measure, delimiter_text, order_by_clause, limit_num) -%}
+
+ {% if limit_num -%}
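+    {#- Redshift's listagg has no native limit, so escape the delimiter for regex use and keep only the first limit_num elements via regexp_substr -#}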
+ {% set ns = namespace() %}
+ {% set ns.delimiter_text_regex = delimiter_text|trim("'") %}
+ {% set special_chars %}\,^,$,.,|,?,*,+,(,),[,],{,}{% endset %}
+ {%- for char in special_chars.split(',') -%}
+ {% set escape_char %}\\{{ char }}{% endset %}
+ {% set ns.delimiter_text_regex = ns.delimiter_text_regex|replace(char,escape_char) %}
+ {%- endfor -%}
+
+ {% set regex %}'([^{{ ns.delimiter_text_regex }}]+{{ ns.delimiter_text_regex }}){1,{{ limit_num - 1}}}[^{{ ns.delimiter_text_regex }}]+'{% endset %}
+ regexp_substr(
+ listagg(
+ {{ measure }},
+ {{ delimiter_text }}
+ )
+ {% if order_by_clause -%}
+ within group ({{ order_by_clause }})
+ {%- endif %}
+ ,{{ regex }}
+ )
+ {%- else %}
+ listagg(
+ {{ measure }},
+ {{ delimiter_text }}
+ )
+ {% if order_by_clause -%}
+ within group ({{ order_by_clause }})
+ {%- endif %}
+ {%- endif %}
+
+{%- endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/macros/utils/split_part.sql b/dbt-redshift/src/dbt/include/redshift/macros/utils/split_part.sql
new file mode 100644
index 000000000..e594d6fa3
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/macros/utils/split_part.sql
@@ -0,0 +1,9 @@
+{% macro redshift__split_part(string_text, delimiter_text, part_number) %}
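+    {#- non-negative part numbers use the default implementation; negative ones use dbt's helper that counts parts from the end -#}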
+
+ {% if part_number >= 0 %}
+ {{ dbt.default__split_part(string_text, delimiter_text, part_number) }}
+ {% else %}
+ {{ dbt._split_part_negative(string_text, delimiter_text, part_number) }}
+ {% endif %}
+
+{% endmacro %}
diff --git a/dbt-redshift/src/dbt/include/redshift/profile_template.yml b/dbt-redshift/src/dbt/include/redshift/profile_template.yml
new file mode 100644
index 000000000..d78356923
--- /dev/null
+++ b/dbt-redshift/src/dbt/include/redshift/profile_template.yml
@@ -0,0 +1,27 @@
+fixed:
+ type: redshift
+prompts:
+ host:
+ hint: 'hostname.region.redshift.amazonaws.com'
+ port:
+ default: 5439
+ type: 'int'
+ user:
+ hint: 'dev username'
+ _choose_authentication_method:
+ password:
+ password:
+ hint: 'dev password'
+ hide_input: true
+ iam:
+ _fixed_method: iam
+ iam_role:
+ _fixed_method: iam_role
+ dbname:
+ hint: 'default database that dbt will build objects in'
+ schema:
+ hint: 'default schema that dbt will build objects in'
+ threads:
+ hint: '1 or more'
+ type: 'int'
+ default: 1
diff --git a/dbt-redshift/test.env.example b/dbt-redshift/test.env.example
new file mode 100644
index 000000000..6816b4ec2
--- /dev/null
+++ b/dbt-redshift/test.env.example
@@ -0,0 +1,26 @@
+# Note: Make sure you have a Redshift account set up before filling in these values.
+# They all come from your account information or are created by you.
+
+# Database Authentication Method
+REDSHIFT_TEST_HOST=
+REDSHIFT_TEST_PORT=
+REDSHIFT_TEST_DBNAME=
+REDSHIFT_TEST_USER=
+REDSHIFT_TEST_PASS=
+REDSHIFT_TEST_REGION=
+
+# IAM Methods
+REDSHIFT_TEST_CLUSTER_ID=
+
+# IAM User Authentication Method
+REDSHIFT_TEST_IAM_USER_PROFILE=
+REDSHIFT_TEST_IAM_USER_ACCESS_KEY_ID=
+REDSHIFT_TEST_IAM_USER_SECRET_ACCESS_KEY=
+
+# IAM Role Authentication Method
+REDSHIFT_TEST_IAM_ROLE_PROFILE=
+
+# Database users for testing
+DBT_TEST_USER_1=dbt_test_user_1
+DBT_TEST_USER_2=dbt_test_user_2
+DBT_TEST_USER_3=dbt_test_user_3
diff --git a/dbt-redshift/tests/__init__.py b/dbt-redshift/tests/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/dbt-redshift/tests/boundary/__init__.py b/dbt-redshift/tests/boundary/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/dbt-redshift/tests/boundary/conftest.py b/dbt-redshift/tests/boundary/conftest.py
new file mode 100644
index 000000000..402fa2d66
--- /dev/null
+++ b/dbt-redshift/tests/boundary/conftest.py
@@ -0,0 +1,28 @@
+from datetime import datetime
+import os
+import random
+
+import pytest
+import redshift_connector
+
+
+@pytest.fixture
+def connection() -> redshift_connector.Connection:
+ return redshift_connector.connect(
+ user=os.getenv("REDSHIFT_TEST_USER"),
+ password=os.getenv("REDSHIFT_TEST_PASS"),
+ host=os.getenv("REDSHIFT_TEST_HOST"),
+ port=int(os.getenv("REDSHIFT_TEST_PORT")),
+ database=os.getenv("REDSHIFT_TEST_DBNAME"),
+ region=os.getenv("REDSHIFT_TEST_REGION"),
+ )
+
+
+@pytest.fixture
+def schema_name(request) -> str:
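+    """Build a unique schema name from the current UTC time, a random suffix, and the test module name."""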
+ runtime = datetime.utcnow() - datetime(1970, 1, 1, 0, 0, 0)
+ runtime_s = int(runtime.total_seconds())
+ runtime_ms = runtime.microseconds
+ random_int = random.randint(0, 9999)
+ file_name = request.module.__name__.split(".")[-1]
+ return f"test_{runtime_s}{runtime_ms}{random_int:04}_{file_name}"
diff --git a/dbt-redshift/tests/boundary/test_redshift_connector.py b/dbt-redshift/tests/boundary/test_redshift_connector.py
new file mode 100644
index 000000000..200d0cccf
--- /dev/null
+++ b/dbt-redshift/tests/boundary/test_redshift_connector.py
@@ -0,0 +1,43 @@
+import pytest
+
+
+@pytest.fixture
+def schema(connection, schema_name) -> str:
+ with connection.cursor() as cursor:
+ cursor.execute(f"CREATE SCHEMA IF NOT EXISTS {schema_name}")
+ yield schema_name
+ with connection.cursor() as cursor:
+ cursor.execute(f"DROP SCHEMA IF EXISTS {schema_name} CASCADE")
+
+
+def test_columns_in_relation(connection, schema):
+ table = "cross_db"
+ with connection.cursor() as cursor:
+ cursor.execute(f"CREATE TABLE {schema}.{table} as select 3.14 as id")
+ columns = cursor.get_columns(
+ schema_pattern=schema,
+ tablename_pattern=table,
+ )
+
+ assert len(columns) == 1
+ column = columns[0]
+
+ (
+ database_name,
+ schema_name,
+ table_name,
+ column_name,
+ type_code,
+ type_name,
+ precision,
+ _,
+ scale,
+ *_,
+ ) = column
+ assert schema_name == schema
+ assert table_name == table
+ assert column_name == "id"
+ assert type_code == 2
+ assert type_name == "numeric"
+ assert precision == 3
+ assert scale == 2
diff --git a/dbt-redshift/tests/conftest.py b/dbt-redshift/tests/conftest.py
new file mode 100644
index 000000000..712bf047a
--- /dev/null
+++ b/dbt-redshift/tests/conftest.py
@@ -0,0 +1,12 @@
+pytest_plugins = ["dbt.tests.fixtures.project"]
+
+
+def pytest_sessionfinish(session, exitstatus):
+ """
+ Configures pytest to treat a scenario with no tests as passing
+
+    pytest returns exit code 5 when it collects no tests, as a warning that tests were expected but none were collected.
+    We don't want this when running tox because some combinations of markers and test segments select nothing.
+ """
+ if exitstatus == 5:
+ session.exitstatus = 0
diff --git a/dbt-redshift/tests/functional/__init__.py b/dbt-redshift/tests/functional/__init__.py
new file mode 100644
index 000000000..5cfdf5d2d
--- /dev/null
+++ b/dbt-redshift/tests/functional/__init__.py
@@ -0,0 +1 @@
+# supports namespacing during test discovery
diff --git a/dbt-redshift/tests/functional/adapter/__init__.py b/dbt-redshift/tests/functional/adapter/__init__.py
new file mode 100644
index 000000000..30e204d08
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/__init__.py
@@ -0,0 +1 @@
+# provides namespacing for test discovery
diff --git a/dbt-redshift/tests/functional/adapter/backup_tests/models.py b/dbt-redshift/tests/functional/adapter/backup_tests/models.py
new file mode 100644
index 000000000..6432e7319
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/backup_tests/models.py
@@ -0,0 +1,61 @@
+BACKUP_IS_FALSE = """
+{{ config(
+ materialized='table',
+ backup=False
+) }}
+select 1 as my_col
+"""
+
+
+BACKUP_IS_TRUE = """
+{{ config(
+ materialized='table',
+ backup=True
+) }}
+select 1 as my_col
+"""
+
+
+BACKUP_IS_UNDEFINED = """
+{{ config(
+ materialized='table'
+) }}
+select 1 as my_col
+"""
+
+
+BACKUP_IS_TRUE_VIEW = """
+{{ config(
+ materialized='view',
+ backup=True
+) }}
+select 1 as my_col
+"""
+
+
+SYNTAX_WITH_DISTKEY = """
+{{ config(
+ materialized='table',
+ backup=False,
+ dist='my_col'
+) }}
+select 1 as my_col
+"""
+
+
+SYNTAX_WITH_SORTKEY = """
+{{ config(
+ materialized='table',
+ backup=False,
+ sort='my_col'
+) }}
+select 1 as my_col
+"""
+
+
+BACKUP_IS_UNDEFINED_DEPENDENT_VIEW = """
+{{ config(
+ materialized='view',
+) }}
+select * from {{ ref('backup_is_undefined') }}
+"""
diff --git a/dbt-redshift/tests/functional/adapter/backup_tests/test_backup_table.py b/dbt-redshift/tests/functional/adapter/backup_tests/test_backup_table.py
new file mode 100644
index 000000000..6871b70a7
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/backup_tests/test_backup_table.py
@@ -0,0 +1,108 @@
+import pytest
+
+from dbt.tests.util import run_dbt
+
+from tests.functional.adapter.backup_tests import models
+
+
+class BackupTableBase:
+ @pytest.fixture(scope="class", autouse=True)
+ def _run_dbt(self, project):
+ run_dbt(["run"])
+
+
+class TestBackupTableOption(BackupTableBase):
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "backup_is_false.sql": models.BACKUP_IS_FALSE,
+ "backup_is_true.sql": models.BACKUP_IS_TRUE,
+ "backup_is_undefined.sql": models.BACKUP_IS_UNDEFINED,
+ "backup_is_true_view.sql": models.BACKUP_IS_TRUE_VIEW,
+ }
+
+ @pytest.mark.parametrize(
+ "model_ddl,backup_expected",
+ [
+ ("backup_is_false", False),
+ ("backup_is_true", True),
+ ("backup_is_undefined", True),
+ ("backup_is_true_view", True),
+ ],
+ indirect=["model_ddl"],
+ )
+ def test_setting_reflects_config_option(self, model_ddl: str, backup_expected: bool):
+ """
+ Test different scenarios of configuration at the MODEL level and verify the expected setting for backup
+
+ This test looks for whether `backup no` appears in the DDL file. If it does, then the table will not be backed
+ up. If it does not appear, the table will be backed up.
+
+ Args:
+ model_ddl: the DDL for each model as a string
+ backup_expected: whether backup is expected for this model
+ """
+ backup_will_occur = "backup no" not in model_ddl.lower()
+ assert backup_will_occur == backup_expected
+
+
+class TestBackupTableSyntax(BackupTableBase):
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "syntax_with_distkey.sql": models.SYNTAX_WITH_DISTKEY,
+ "syntax_with_sortkey.sql": models.SYNTAX_WITH_SORTKEY,
+ }
+
+ @pytest.mark.parametrize(
+ "model_ddl,search_phrase",
+ [
+ ("syntax_with_distkey", "diststyle key distkey"),
+ ("syntax_with_sortkey", "compound sortkey"),
+ ],
+ indirect=["model_ddl"],
+ )
+ def test_backup_predicate_precedes_secondary_predicates(self, model_ddl, search_phrase):
+ """
+ Test whether `backup no` appears roughly in the correct spot in the DDL
+
+ This test verifies that the backup predicate comes before the secondary predicates.
+ This test does not guarantee that the resulting DDL is properly formed.
+
+ Args:
+ model_ddl: the DDL for each model as a string
+ search_phrase: the string within the DDL that indicates the distkey or sortkey
+ """
+ assert model_ddl.find("backup no") < model_ddl.find(search_phrase)
+
+
+class TestBackupTableProjectDefault(BackupTableBase):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"models": {"backup": False}}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "backup_is_true.sql": models.BACKUP_IS_TRUE,
+ "backup_is_undefined.sql": models.BACKUP_IS_UNDEFINED,
+ }
+
+ @pytest.mark.parametrize(
+ "model_ddl,backup_expected",
+ [("backup_is_true", True), ("backup_is_undefined", False)],
+ indirect=["model_ddl"],
+ )
+ def test_setting_defaults_to_project_option(self, model_ddl: str, backup_expected: bool):
+ """
+ Test different scenarios of configuration at the PROJECT level and verify the expected setting for backup
+
+ This test looks for whether `backup no` appears in the DDL file. If it does, then the table will not be backed
+ up. If it does not appear, the table will be backed up.
+
+ Args:
+ model_ddl: the DDL for each model as a string
+ backup_expected: whether backup is expected for this model
+ """
+ backup_will_occur = "backup no" not in model_ddl.lower()
+ assert backup_will_occur == backup_expected
diff --git a/dbt-redshift/tests/functional/adapter/catalog_tests/files.py b/dbt-redshift/tests/functional/adapter/catalog_tests/files.py
new file mode 100644
index 000000000..9c19522e7
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/catalog_tests/files.py
@@ -0,0 +1,33 @@
+MY_SEED = """
+id,value,record_valid_date
+1,100,2023-01-01 00:00:00
+2,200,2023-01-02 00:00:00
+3,300,2023-01-02 00:00:00
+""".strip()
+
+
+MY_TABLE = """
+{{ config(
+ materialized='table',
+) }}
+select *
+from {{ ref('my_seed') }}
+"""
+
+
+MY_VIEW = """
+{{ config(
+ materialized='view',
+) }}
+select *
+from {{ ref('my_seed') }}
+"""
+
+
+MY_MATERIALIZED_VIEW = """
+{{ config(
+ materialized='materialized_view',
+) }}
+select *
+from {{ ref('my_seed') }}
+"""
diff --git a/dbt-redshift/tests/functional/adapter/catalog_tests/test_get_catalog.py b/dbt-redshift/tests/functional/adapter/catalog_tests/test_get_catalog.py
new file mode 100644
index 000000000..e0b512896
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/catalog_tests/test_get_catalog.py
@@ -0,0 +1,144 @@
+from dbt.adapters.contracts.relation import RelationType
+from dbt.tests.util import get_connection
+import pytest
+
+
+class TestGetCatalog:
+ @pytest.fixture(scope="class")
+ def my_schema(self, project, adapter):
+ schema = adapter.Relation.create(
+ database=project.database,
+ schema=project.test_schema,
+ identifier="",
+ )
+ yield schema
+
+ @pytest.fixture(scope="class")
+ def my_seed(self, adapter, my_schema):
+ relation = adapter.Relation.create(
+ database=my_schema.database,
+ schema=my_schema.schema,
+ identifier="my_seed",
+ type=RelationType.Table,
+ )
+ with get_connection(adapter):
+ sql = f"""
+ create table {relation.database}.{relation.schema}.{relation.identifier} (
+ id integer,
+ value integer,
+ record_valid_date timestamp
+ );
+ insert into {relation.database}.{relation.schema}.{relation.identifier}
+ (id, value, record_valid_date) values
+ (1,100,'2023-01-01 00:00:00'),
+ (2,200,'2023-01-02 00:00:00'),
+ (3,300,'2023-01-02 00:00:00')
+ ;
+ """
+ adapter.execute(sql)
+ yield relation
+
+ @pytest.fixture(scope="class")
+ def my_table(self, adapter, my_schema, my_seed):
+ relation = adapter.Relation.create(
+ database=my_schema.database,
+ schema=my_schema.schema,
+ identifier="my_table",
+ type=RelationType.Table,
+ )
+ with get_connection(adapter):
+ sql = f"""
+ create table {relation.database}.{relation.schema}.{relation.identifier} as
+ select * from {my_seed.database}.{my_seed.schema}.{my_seed.identifier}
+ ;
+ """
+ adapter.execute(sql)
+ yield relation
+
+ @pytest.fixture(scope="class")
+ def my_view(self, adapter, my_schema, my_seed):
+ relation = adapter.Relation.create(
+ database=my_schema.database,
+ schema=my_schema.schema,
+ identifier="my_view",
+ type=RelationType.View,
+ )
+ with get_connection(adapter):
+ sql = f"""
+ create view {relation.database}.{relation.schema}.{relation.identifier} as
+ select * from {my_seed.database}.{my_seed.schema}.{my_seed.identifier}
+ ;
+ """
+ adapter.execute(sql)
+ yield relation
+
+ @pytest.fixture(scope="class")
+ def my_materialized_view(self, adapter, my_schema, my_seed):
+ relation = adapter.Relation.create(
+ database=my_schema.database,
+ schema=my_schema.schema,
+ identifier="my_materialized_view",
+ type=RelationType.MaterializedView,
+ )
+ with get_connection(adapter):
+ sql = f"""
+ create materialized view {relation.database}.{relation.schema}.{relation.identifier} as
+ select * from {my_seed.database}.{my_seed.schema}.{my_seed.identifier}
+ ;
+ """
+ adapter.execute(sql)
+ yield relation
+
+ @pytest.fixture(scope="class")
+ def my_information_schema(self, adapter, my_schema):
+ yield adapter.Relation.create(
+ database=my_schema.database,
+ schema=my_schema.schema,
+ identifier="INFORMATION_SCHEMA",
+ ).information_schema()
+
+ @pytest.mark.flaky
+ def test_get_one_catalog_by_relations(
+ self,
+ adapter,
+ my_schema,
+ my_seed,
+ my_table,
+ my_view,
+ my_materialized_view,
+ my_information_schema,
+ ):
+ my_schemas = frozenset({(my_schema.database, my_schema.schema)})
+ my_relations = [my_seed, my_table, my_view, my_materialized_view]
+ with get_connection(adapter):
+ catalog = adapter._get_one_catalog_by_relations(
+ information_schema=my_information_schema,
+ relations=my_relations,
+ used_schemas=my_schemas,
+ )
+ # my_seed, my_table, my_view, my_materialized_view each have 3 cols = 12 cols
+ # my_materialized_view creates an underlying table with 2 additional = 5 cols
+ # note the underlying table is missing as it's not in `my_relations`
+ assert len(catalog) == 12
+
+ @pytest.mark.flaky
+ def test_get_one_catalog_by_schemas(
+ self,
+ adapter,
+ my_schema,
+ my_seed,
+ my_table,
+ my_view,
+ my_materialized_view,
+ my_information_schema,
+ ):
+ my_schemas = frozenset({(my_schema.database, my_schema.schema)})
+ with get_connection(adapter):
+ catalog = adapter._get_one_catalog(
+ information_schema=my_information_schema,
+ schemas={my_schema.schema},
+ used_schemas=my_schemas,
+ )
+ # my_seed, my_table, my_view, my_materialized_view each have 3 cols = 12 cols
+ # my_materialized_view creates an underlying table with 2 additional = 5 cols
+ assert len(catalog) == 17
diff --git a/dbt-redshift/tests/functional/adapter/catalog_tests/test_relation_types.py b/dbt-redshift/tests/functional/adapter/catalog_tests/test_relation_types.py
new file mode 100644
index 000000000..657bf215b
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/catalog_tests/test_relation_types.py
@@ -0,0 +1,45 @@
+from dbt.contracts.results import CatalogArtifact
+from dbt.tests.util import run_dbt
+import pytest
+
+from tests.functional.adapter.catalog_tests import files
+
+
+class TestCatalogRelationTypes:
+ @pytest.fixture(scope="class", autouse=True)
+ def seeds(self):
+ return {"my_seed.csv": files.MY_SEED}
+
+ @pytest.fixture(scope="class", autouse=True)
+ def models(self):
+ yield {
+ "my_table.sql": files.MY_TABLE,
+ "my_view.sql": files.MY_VIEW,
+ "my_materialized_view.sql": files.MY_MATERIALIZED_VIEW,
+ }
+
+ @pytest.fixture(scope="class", autouse=True)
+ def docs(self, project):
+ run_dbt(["seed"])
+ run_dbt(["run"])
+ yield run_dbt(["docs", "generate"])
+
+ @pytest.mark.flaky
+ @pytest.mark.parametrize(
+ "node_name,relation_type",
+ [
+ ("seed.test.my_seed", "BASE TABLE"),
+ ("model.test.my_table", "BASE TABLE"),
+ ("model.test.my_view", "VIEW"),
+ ("model.test.my_materialized_view", "MATERIALIZED VIEW"),
+ ],
+ )
+ def test_relation_types_populate_correctly(
+ self, docs: CatalogArtifact, node_name: str, relation_type: str
+ ):
+ """
+ This test addresses: https://github.com/dbt-labs/dbt-redshift/issues/652
+ """
+ assert node_name in docs.nodes
+ node = docs.nodes[node_name]
+ assert node.metadata.type == relation_type
diff --git a/dbt-redshift/tests/functional/adapter/conftest.py b/dbt-redshift/tests/functional/adapter/conftest.py
new file mode 100644
index 000000000..c5c980154
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/conftest.py
@@ -0,0 +1,25 @@
+import pytest
+
+
+@pytest.fixture
+def model_ddl(request) -> str:
+ """
+ Returns the contents of the DDL file for the model provided. Use with pytest parameterization.
+
+ Example:
+ ===
+ @pytest.mark.parametrize(
+ "model_ddl,backup_expected",
+ [("backup_is_false", False)],
+ indirect=["model_ddl"]
+ )
+ def test_setting_reflects_config_option(self, model_ddl: str, backup_expected: bool):
+ backup_will_occur = "backup no" not in model_ddl.lower()
+ assert backup_will_occur == backup_expected
+ ===
+
+ In this example, the fixture returns the contents of the backup_is_false DDL file as a string.
+ This string is then referenced in the test as model_ddl.
+ """
+ with open(f"target/run/test/models/{request.param}.sql", "r") as ddl_file:
+ yield "\n".join(ddl_file.readlines())
diff --git a/dbt-redshift/tests/functional/adapter/dbt_clone/test_dbt_clone.py b/dbt-redshift/tests/functional/adapter/dbt_clone/test_dbt_clone.py
new file mode 100644
index 000000000..2a1d96a8c
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/dbt_clone/test_dbt_clone.py
@@ -0,0 +1,20 @@
+import pytest
+from dbt.tests.adapter.dbt_clone.test_dbt_clone import BaseCloneNotPossible
+
+
+class TestRedshiftCloneNotPossible(BaseCloneNotPossible):
+ @pytest.fixture(autouse=True)
+ def clean_up(self, project):
+ yield
+ with project.adapter.connection_named("__test"):
+ relation = project.adapter.Relation.create(
+ database=project.database, schema=f"{project.test_schema}_seeds"
+ )
+ project.adapter.drop_schema(relation)
+
+ relation = project.adapter.Relation.create(
+ database=project.database, schema=project.test_schema
+ )
+ project.adapter.drop_schema(relation)
+
diff --git a/dbt-redshift/tests/functional/adapter/dbt_show/test_dbt_show.py b/dbt-redshift/tests/functional/adapter/dbt_show/test_dbt_show.py
new file mode 100644
index 000000000..83cb399ca
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/dbt_show/test_dbt_show.py
@@ -0,0 +1,17 @@
+from dbt.tests.adapter.dbt_show.test_dbt_show import (
+ BaseShowSqlHeader,
+ BaseShowLimit,
+ BaseShowDoesNotHandleDoubleLimit,
+)
+
+
+class TestRedshiftShowLimit(BaseShowLimit):
+ pass
+
+
+class TestRedshiftShowSqlHeader(BaseShowSqlHeader):
+ pass
+
+
+class TestShowDoesNotHandleDoubleLimit(BaseShowDoesNotHandleDoubleLimit):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/empty/test_empty.py b/dbt-redshift/tests/functional/adapter/empty/test_empty.py
new file mode 100644
index 000000000..27f36f1df
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/empty/test_empty.py
@@ -0,0 +1,9 @@
+from dbt.tests.adapter.empty.test_empty import BaseTestEmpty, BaseTestEmptyInlineSourceRef
+
+
+class TestRedshiftEmpty(BaseTestEmpty):
+ pass
+
+
+class TestRedshiftEmptyInlineSourceRef(BaseTestEmptyInlineSourceRef):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/expected_stats.py b/dbt-redshift/tests/functional/adapter/expected_stats.py
new file mode 100644
index 000000000..265ae225a
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/expected_stats.py
@@ -0,0 +1,100 @@
+from dbt.tests.util import AnyStringWith, AnyInteger, AnyString, AnyFloat
+
+
+def redshift_stats():
+ return {
+ "has_stats": {
+ "id": "has_stats",
+ "label": "Has Stats?",
+ "value": True,
+ "description": "Indicates whether there are statistics for this table",
+ "include": False,
+ },
+ "encoded": {
+ "id": "encoded",
+ "label": "Encoded",
+ "value": AnyStringWith("Y"),
+ "description": "Indicates whether any column in the table has compression encoding defined.",
+ "include": True,
+ },
+ "diststyle": {
+ "id": "diststyle",
+ "label": "Dist Style",
+ "value": AnyStringWith("AUTO"),
+ "description": "Distribution style or distribution key column, if key distribution is defined.",
+ "include": True,
+ },
+ "max_varchar": {
+ "id": "max_varchar",
+ "label": "Max Varchar",
+ "value": AnyInteger(),
+ "description": "Size of the largest column that uses a VARCHAR data type.",
+ "include": True,
+ },
+ "size": {
+ "id": "size",
+ "label": "Approximate Size",
+ "value": AnyInteger(),
+ "description": "Approximate size of the table, calculated from a count of 1MB blocks",
+ "include": True,
+ },
+ "sortkey1": {
+ "id": "sortkey1",
+ "label": "Sort Key 1",
+ "value": AnyString(),
+ "description": "First column in the sort key.",
+ "include": True,
+ },
+ "pct_used": {
+ "id": "pct_used",
+ "label": "Disk Utilization",
+ "value": AnyFloat(),
+ "description": "Percent of available space that is used by the table.",
+ "include": True,
+ },
+ "stats_off": {
+ "id": "stats_off",
+ "label": "Stats Off",
+ "value": AnyFloat(),
+ "description": "Number that indicates how stale the table statistics are; 0 is current, 100 is out of date.",
+ "include": True,
+ },
+ "rows": {
+ "id": "rows",
+ "label": "Approximate Row Count",
+ "value": AnyFloat(),
+ "description": "Approximate number of rows in the table. This value includes rows marked for deletion, but not yet vacuumed.",
+ "include": True,
+ },
+ }
+
+
+def redshift_ephemeral_summary_stats():
+ additional = {
+ "skew_sortkey1": {
+ "description": "Ratio of the size of the largest non-sort "
+ "key column to the size of the first column "
+ "of the sort key.",
+ "id": "skew_sortkey1",
+ "include": True,
+ "label": "Sort Key Skew",
+ "value": 1.0,
+ },
+ "sortkey_num": {
+ "description": "Number of columns defined as sort keys.",
+ "id": "sortkey_num",
+ "include": True,
+ "label": "# Sort Keys",
+ "value": 1.0,
+ },
+ "unsorted": {
+ "description": "Percent of unsorted rows in the table.",
+ "id": "unsorted",
+ "include": True,
+ "label": "Unsorted %",
+ "value": 0.0,
+ },
+ }
+ stats = redshift_stats()
+ stats.update(additional)
+ return stats
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_merge_exclude_columns.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_merge_exclude_columns.py
new file mode 100644
index 000000000..022ebca07
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_merge_exclude_columns.py
@@ -0,0 +1,7 @@
+from dbt.tests.adapter.incremental.test_incremental_merge_exclude_columns import (
+ BaseMergeExcludeColumns,
+)
+
+
+class TestMergeExcludeColumns(BaseMergeExcludeColumns):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_microbatch.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_microbatch.py
new file mode 100644
index 000000000..1bd196bf8
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_microbatch.py
@@ -0,0 +1,24 @@
+import pytest
+from dbt.tests.adapter.incremental.test_incremental_microbatch import (
+ BaseMicrobatch,
+)
+
+
+# No unique_id is required for Redshift microbatch models.
+_microbatch_model_no_unique_id_sql = """
+{{ config(materialized='incremental', incremental_strategy='microbatch', event_time='event_time', batch_size='day', begin=modules.datetime.datetime(2020, 1, 1, 0, 0, 0)) }}
+select * from {{ ref('input_model') }}
+"""
+
+
+class TestRedshiftMicrobatch(BaseMicrobatch):
+ @pytest.fixture(scope="class")
+ def microbatch_model_sql(self) -> str:
+ return _microbatch_model_no_unique_id_sql
+
+ @pytest.fixture(scope="class")
+ def insert_two_rows_sql(self, project) -> str:
+ test_schema_relation = project.adapter.Relation.create(
+ database=project.database, schema=project.test_schema
+ )
+ return f"insert into {test_schema_relation}.input_model (id, event_time) values (4, '2020-01-04 00:00:00-0'), (5, '2020-01-05 00:00:00-0')"
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_on_schema_change.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_on_schema_change.py
new file mode 100644
index 000000000..7b73d212b
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_on_schema_change.py
@@ -0,0 +1,7 @@
+from dbt.tests.adapter.incremental.test_incremental_on_schema_change import (
+ BaseIncrementalOnSchemaChange,
+)
+
+
+class TestIncrementalOnSchemaChange(BaseIncrementalOnSchemaChange):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_predicates.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_predicates.py
new file mode 100644
index 000000000..3478079eb
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_predicates.py
@@ -0,0 +1,34 @@
+import pytest
+from dbt.tests.adapter.incremental.test_incremental_predicates import BaseIncrementalPredicates
+
+
+class TestIncrementalPredicatesDeleteInsertRedshift(BaseIncrementalPredicates):
+ pass
+
+
+class TestPredicatesDeleteInsertRedshift(BaseIncrementalPredicates):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"models": {"+predicates": ["id != 2"], "+incremental_strategy": "delete+insert"}}
+
+
+class TestIncrementalPredicatesMergeRedshift(BaseIncrementalPredicates):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "models": {
+ "+incremental_predicates": ["dbt_internal_dest.id != 2"],
+ "+incremental_strategy": "merge",
+ }
+ }
+
+
+class TestPredicatesMergeRedshift(BaseIncrementalPredicates):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "models": {
+ "+predicates": ["dbt_internal_dest.id != 2"],
+ "+incremental_strategy": "merge",
+ }
+ }
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_run_result.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_run_result.py
new file mode 100644
index 000000000..56cb51b2e
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_run_result.py
@@ -0,0 +1,29 @@
+from dbt.tests.util import run_dbt
+from dbt.tests.adapter.basic.test_incremental import (
+ BaseIncremental,
+ BaseIncrementalNotSchemaChange,
+)
+
+
+class TestBaseIncrementalNotSchemaChange(BaseIncrementalNotSchemaChange):
+ pass
+
+
+class TestIncrementalRunResultRedshift(BaseIncremental):
+ """Bonus test to verify that incremental models return the number of rows affected"""
+
+ def test_incremental(self, project):
+ # seed command
+ results = run_dbt(["seed"])
+ assert len(results) == 2
+
+ # run with initial seed
+ results = run_dbt(["run", "--vars", "seed_name: base"])
+ assert len(results) == 1
+
+ # run with additions
+ results = run_dbt(["run", "--vars", "seed_name: added"])
+ assert len(results) == 1
+ # verify that run_result is correct
+ rows_affected = results[0].adapter_response["rows_affected"]
+ assert rows_affected == 10, f"Expected 10 rows changed, found {rows_affected}"
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_strategies.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_strategies.py
new file mode 100644
index 000000000..da5898bd6
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_strategies.py
@@ -0,0 +1,52 @@
+import pytest
+from dbt.tests.util import run_dbt, get_manifest
+from dbt_common.exceptions import DbtRuntimeError
+from dbt.context.providers import generate_runtime_model_context
+
+
+my_model_sql = """
+ select 1 as fun
+"""
+
+
+@pytest.fixture(scope="class")
+def models():
+ return {"my_model.sql": my_model_sql}
+
+
+def test_basic(project):
+ results = run_dbt(["run"])
+ assert len(results) == 1
+
+ manifest = get_manifest(project.project_root)
+ model = manifest.nodes["model.test.my_model"]
+
+    # Normally the context is provided by the macro that calls the
+    # get_incremental_strategy_macro method, but for testing purposes
+    # we create a runtime_model_context directly.
+ context = generate_runtime_model_context(
+ model,
+ project.adapter.config,
+ manifest,
+ )
+
+ macro_func = project.adapter.get_incremental_strategy_macro(context, "default")
+ assert macro_func
+ assert type(macro_func).__name__ == "MacroGenerator"
+
+ macro_func = project.adapter.get_incremental_strategy_macro(context, "append")
+ assert macro_func
+ assert type(macro_func).__name__ == "MacroGenerator"
+
+ macro_func = project.adapter.get_incremental_strategy_macro(context, "delete+insert")
+ assert macro_func
+ assert type(macro_func).__name__ == "MacroGenerator"
+
+ macro_func = project.adapter.get_incremental_strategy_macro(context, "merge")
+ assert macro_func
+ assert type(macro_func).__name__ == "MacroGenerator"
+
+ # This incremental strategy is not valid for Redshift
+ with pytest.raises(DbtRuntimeError) as excinfo:
+ macro_func = project.adapter.get_incremental_strategy_macro(context, "insert_overwrite")
+ assert "insert_overwrite" in str(excinfo.value)
diff --git a/dbt-redshift/tests/functional/adapter/incremental/test_incremental_unique_id.py b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_unique_id.py
new file mode 100644
index 000000000..db53dce67
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/incremental/test_incremental_unique_id.py
@@ -0,0 +1,12 @@
+import pytest
+from dbt.tests.adapter.incremental.test_incremental_unique_id import BaseIncrementalUniqueKey
+
+
+class TestUniqueKeyRedshift(BaseIncrementalUniqueKey):
+ pass
+
+
+class TestUniqueKeyDeleteInsertRedshift(BaseIncrementalUniqueKey):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"models": {"+incremental_strategy": "delete+insert"}}
diff --git a/dbt-redshift/tests/functional/adapter/materialized_view_tests/__init__.py b/dbt-redshift/tests/functional/adapter/materialized_view_tests/__init__.py
new file mode 100644
index 000000000..30e204d08
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/materialized_view_tests/__init__.py
@@ -0,0 +1 @@
+# provides namespacing for test discovery
diff --git a/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_drop_cascade.py b/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_drop_cascade.py
new file mode 100644
index 000000000..3d226a01e
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_drop_cascade.py
@@ -0,0 +1,56 @@
+"""
+This test addresses this bug: https://github.com/dbt-labs/dbt-redshift/issues/642
+
+Redshift did not initially support DROP CASCADE for materialized views,
+or at least did not document it. Now that it does, we use DROP CASCADE
+instead of a plain DROP.
+"""
+
+from dbt.tests.util import run_dbt
+import pytest
+
+
+SEED = """
+id
+1
+""".strip()
+
+
+PARENT_MATERIALIZED_VIEW = """
+{{ config(
+ materialized='materialized_view',
+ on_configuration_change='apply',
+) }}
+
+select * from {{ ref('my_seed') }}
+"""
+
+
+CHILD_MATERIALIZED_VIEW = """
+{{ config(
+ materialized='materialized_view',
+ on_configuration_change='apply',
+) }}
+
+select * from {{ ref('parent_mv') }}
+"""
+
+
+@pytest.fixture(scope="class")
+def seeds():
+ return {"my_seed.csv": SEED}
+
+
+@pytest.fixture(scope="class")
+def models():
+ return {
+ "parent_mv.sql": PARENT_MATERIALIZED_VIEW,
+ "child_mv.sql": CHILD_MATERIALIZED_VIEW,
+ }
+
+
+def test_drop_cascade(project):
+ run_dbt(["seed"])
+ run_dbt(["run"])
+ # this originally raised an error when it should not have
+ run_dbt(["run", "--full-refresh"])
diff --git a/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_materialized_views.py b/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_materialized_views.py
new file mode 100644
index 000000000..f201196b4
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/materialized_view_tests/test_materialized_views.py
@@ -0,0 +1,289 @@
+from typing import Optional, Tuple
+
+import pytest
+
+from dbt.adapters.base.relation import BaseRelation
+from dbt.adapters.contracts.relation import OnConfigurationChangeOption
+
+from dbt.tests.adapter.materialized_view.basic import MaterializedViewBasic
+from dbt.tests.adapter.materialized_view.changes import (
+ MaterializedViewChanges,
+ MaterializedViewChangesApplyMixin,
+ MaterializedViewChangesContinueMixin,
+ MaterializedViewChangesFailMixin,
+)
+from dbt.tests.adapter.materialized_view.files import MY_TABLE, MY_VIEW, MY_SEED
+from dbt.tests.util import (
+ assert_message_in_logs,
+ get_model_file,
+ set_model_file,
+ run_dbt,
+)
+
+from tests.functional.adapter.materialized_view_tests.utils import (
+ query_autorefresh,
+ query_dist,
+ query_relation_type,
+ query_sort,
+ run_dbt_and_capture_with_retries_redshift_mv,
+)
+
+MY_MATERIALIZED_VIEW = """
+{{ config(
+ materialized='materialized_view',
+ sort_type='compound',
+ sort=['id'],
+ dist='id',
+) }}
+select * from {{ ref('my_seed') }}
+"""
+
+
+class TestRedshiftMaterializedViewsBasic(MaterializedViewBasic):
+ @pytest.fixture(scope="class", autouse=True)
+ def models(self):
+ yield {
+ "my_table.sql": MY_TABLE,
+ "my_view.sql": MY_VIEW,
+ "my_materialized_view.sql": MY_MATERIALIZED_VIEW,
+ }
+
+ @staticmethod
+ def insert_record(project, table: BaseRelation, record: Tuple[int, int]):
+ my_id, value = record
+ project.run_sql(f"insert into {table} (id, value) values ({my_id}, {value})")
+
+ @staticmethod
+ def refresh_materialized_view(project, materialized_view: BaseRelation):
+ sql = f"refresh materialized view {materialized_view}"
+ project.run_sql(sql)
+
+ @staticmethod
+ def query_row_count(project, relation: BaseRelation) -> int:
+ sql = f"select count(*) from {relation}"
+ return project.run_sql(sql, fetch="one")[0]
+
+ @staticmethod
+ def query_relation_type(project, relation: BaseRelation) -> Optional[str]:
+ return query_relation_type(project, relation)
+
+ def test_materialized_view_create_idempotent(self, project, my_materialized_view):
+ # setup creates it once; verify it's there and run once
+ assert self.query_relation_type(project, my_materialized_view) == "materialized_view"
+ run_dbt_and_capture_with_retries_redshift_mv(
+ ["run", "--models", my_materialized_view.identifier]
+ )
+ assert self.query_relation_type(project, my_materialized_view) == "materialized_view"
+
+ @pytest.mark.flaky
+ def test_table_replaces_materialized_view(self, project, my_materialized_view):
+ super().test_table_replaces_materialized_view(project, my_materialized_view)
+
+ @pytest.mark.flaky
+ def test_view_replaces_materialized_view(self, project, my_materialized_view):
+ super().test_view_replaces_materialized_view(project, my_materialized_view)
+
+
+class RedshiftMaterializedViewChanges(MaterializedViewChanges):
+ @pytest.fixture(scope="class", autouse=True)
+ def models(self):
+ yield {
+ "my_table.sql": MY_TABLE,
+ "my_view.sql": MY_VIEW,
+ "my_materialized_view.sql": MY_MATERIALIZED_VIEW,
+ }
+
+ @staticmethod
+ def query_relation_type(project, relation: BaseRelation) -> Optional[str]:
+ return query_relation_type(project, relation)
+
+ @staticmethod
+ def check_start_state(project, materialized_view):
+ assert query_autorefresh(project, materialized_view) is False
+ assert query_sort(project, materialized_view) == "id"
+ assert query_dist(project, materialized_view) == "KEY(id)"
+
+ @staticmethod
+ def change_config_via_alter(project, materialized_view):
+ initial_model = get_model_file(project, materialized_view)
+ new_model = initial_model.replace("dist='id',", "dist='id', auto_refresh=True")
+ set_model_file(project, materialized_view, new_model)
+
+ @staticmethod
+ def change_config_via_alter_str_true(project, materialized_view):
+ initial_model = get_model_file(project, materialized_view)
+ new_model = initial_model.replace("dist='id',", "dist='id', auto_refresh='true'")
+ set_model_file(project, materialized_view, new_model)
+
+ @staticmethod
+ def change_config_via_alter_str_false(project, materialized_view):
+ initial_model = get_model_file(project, materialized_view)
+ new_model = initial_model.replace("dist='id',", "dist='id', auto_refresh='False'")
+ set_model_file(project, materialized_view, new_model)
+
+ @staticmethod
+ def check_state_alter_change_is_applied(project, materialized_view):
+ assert query_autorefresh(project, materialized_view) is True
+
+ @staticmethod
+ def check_state_alter_change_is_applied_str_false(project, materialized_view):
+ assert query_autorefresh(project, materialized_view) is False
+
+ @staticmethod
+ def change_config_via_replace(project, materialized_view):
+ initial_model = get_model_file(project, materialized_view)
+ new_model = initial_model.replace("dist='id',", "").replace(
+ "sort=['id']", "sort=['value']"
+ )
+ set_model_file(project, materialized_view, new_model)
+
+ @staticmethod
+ def check_state_replace_change_is_applied(project, materialized_view):
+ assert query_sort(project, materialized_view) == "value"
+ assert query_dist(project, materialized_view) == "EVEN"
+
+
+class TestRedshiftMaterializedViewChangesApply(
+ RedshiftMaterializedViewChanges, MaterializedViewChangesApplyMixin
+):
+ def test_change_is_applied_via_alter(self, project, my_materialized_view):
+ self.check_start_state(project, my_materialized_view)
+
+ self.change_config_via_alter(project, my_materialized_view)
+ _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+ ["--debug", "run", "--models", my_materialized_view.name]
+ )
+
+ self.check_state_alter_change_is_applied(project, my_materialized_view)
+
+ assert_message_in_logs(f"Applying ALTER to: {my_materialized_view}", logs)
+ assert_message_in_logs(f"Applying REPLACE to: {my_materialized_view}", logs, False)
+
+ def test_change_is_applied_via_alter_str_true(self, project, my_materialized_view):
+ self.check_start_state(project, my_materialized_view)
+
+ self.change_config_via_alter_str_true(project, my_materialized_view)
+ _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+ ["--debug", "run", "--models", my_materialized_view.name]
+ )
+
+ self.check_state_alter_change_is_applied(project, my_materialized_view)
+
+ assert_message_in_logs(f"Applying ALTER to: {my_materialized_view}", logs)
+ assert_message_in_logs(f"Applying REPLACE to: {my_materialized_view}", logs, False)
+
+ def test_change_is_applied_via_replace(self, project, my_materialized_view):
+ self.check_start_state(project, my_materialized_view)
+
+ self.change_config_via_alter(project, my_materialized_view)
+ self.change_config_via_replace(project, my_materialized_view)
+ _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+ ["--debug", "run", "--models", my_materialized_view.name]
+ )
+
+ self.check_state_alter_change_is_applied(project, my_materialized_view)
+ self.check_state_replace_change_is_applied(project, my_materialized_view)
+
+ assert_message_in_logs(f"Applying REPLACE to: {my_materialized_view}", logs)
+
+
+class TestRedshiftMaterializedViewChangesContinue(
+ RedshiftMaterializedViewChanges, MaterializedViewChangesContinueMixin
+):
+ def test_change_is_not_applied_via_alter(self, project, my_materialized_view):
+ self.check_start_state(project, my_materialized_view)
+
+ self.change_config_via_alter(project, my_materialized_view)
+ _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+ ["--debug", "run", "--models", my_materialized_view.name]
+ )
+
+ self.check_start_state(project, my_materialized_view)
+
+ assert_message_in_logs(
+ f"Configuration changes were identified and `on_configuration_change` was set"
+ f" to `continue` for `{my_materialized_view}`",
+ logs,
+ )
+ assert_message_in_logs(f"Applying ALTER to: {my_materialized_view}", logs, False)
+ assert_message_in_logs(f"Applying REPLACE to: {my_materialized_view}", logs, False)
+
+ @pytest.mark.flaky
+ def test_change_is_not_applied_via_replace(self, project, my_materialized_view):
+ self.check_start_state(project, my_materialized_view)
+
+ self.change_config_via_alter(project, my_materialized_view)
+ self.change_config_via_replace(project, my_materialized_view)
+ _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+ ["--debug", "run", "--models", my_materialized_view.name]
+ )
+
+ self.check_start_state(project, my_materialized_view)
+
+ assert_message_in_logs(
+ f"Configuration changes were identified and `on_configuration_change` was set"
+ f" to `continue` for `{my_materialized_view}`",
+ logs,
+ )
+ assert_message_in_logs(f"Applying ALTER to: {my_materialized_view}", logs, False)
+ assert_message_in_logs(f"Applying REPLACE to: {my_materialized_view}", logs, False)
+
+
+class TestRedshiftMaterializedViewChangesFail(
+ RedshiftMaterializedViewChanges, MaterializedViewChangesFailMixin
+):
+    # Note: the retry wrapper can't be used here because we expect the dbt run to fail
+
+ @pytest.mark.flaky
+ def test_change_is_not_applied_via_replace(self, project, my_materialized_view):
+ super().test_change_is_not_applied_via_replace(project, my_materialized_view)
+
+
+NO_BACKUP_MATERIALIZED_VIEW = """
+{{ config(
+ materialized='materialized_view',
+ backup=False
+) }}
+select * from {{ ref('my_seed') }}
+"""
+
+
+class TestRedshiftMaterializedViewWithBackupConfig:
+ @pytest.fixture(scope="class", autouse=True)
+ def models(self):
+ yield {
+ "my_materialized_view.sql": NO_BACKUP_MATERIALIZED_VIEW,
+ }
+
+ @pytest.fixture(scope="class", autouse=True)
+ def seeds(self):
+ return {"my_seed.csv": MY_SEED}
+
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"models": {"on_configuration_change": OnConfigurationChangeOption.Fail.value}}
+
+ @pytest.fixture(scope="function")
+ def dbt_run_results(self, project):
+ run_dbt(["seed"])
+ yield run_dbt(["run", "--full-refresh"])
+
+ def test_running_mv_with_backup_false_succeeds(self, dbt_run_results):
+ assert dbt_run_results[0].node.config_call_dict["backup"] is False
+
+ @pytest.mark.flaky
+ def test_running_mv_with_backup_false_is_idempotent(self, project, dbt_run_results):
+ """
+ Addresses: https://github.com/dbt-labs/dbt-redshift/issues/621
+ Context:
+ - The default for `backup` is `True`
+ - We cannot query `backup` for a materialized view in Redshift at the moment
+ Premise:
+ - Set `on_configuration_change` = 'fail' (via `project_config_update`)
+ - Set `backup` = False (via `NO_BACKUP_MATERIALIZED_VIEW`)
+ - Create the materialized view (via `dbt_run_results`)
+ - Run a second time forcing the configuration change monitoring
+ - There should be no changes monitored, hence the run should be successful
+ """
+ results = run_dbt(["run"])
+ assert results[0].node.config_call_dict["backup"] is False
diff --git a/dbt-redshift/tests/functional/adapter/materialized_view_tests/utils.py b/dbt-redshift/tests/functional/adapter/materialized_view_tests/utils.py
new file mode 100644
index 000000000..112ae3057
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/materialized_view_tests/utils.py
@@ -0,0 +1,88 @@
+from typing import List, Optional
+
+from dbt.adapters.base.relation import BaseRelation
+from dbt.tests.util import run_dbt_and_capture
+
+from dbt.adapters.redshift.relation import RedshiftRelation
+
+
+def query_relation_type(project, relation: BaseRelation) -> Optional[str]:
+ assert isinstance(relation, RedshiftRelation)
+ sql = f"""
+ select
+ 'table' as relation_type
+ from pg_tables
+ where schemaname = '{relation.schema}'
+ and tablename = '{relation.identifier}'
+ union all
+ select
+ case
+ when definition ilike '%create materialized view%'
+ then 'materialized_view'
+ else 'view'
+ end as relation_type
+ from pg_views
+ where schemaname = '{relation.schema}'
+ and viewname = '{relation.identifier}'
+ """
+ results = project.run_sql(sql, fetch="all")
+ if len(results) == 0:
+ return None
+ elif len(results) > 1:
+ raise ValueError(f"More than one instance of {relation.identifier} found!")
+ else:
+ return results[0][0]
+
+
+def query_sort(project, relation: RedshiftRelation) -> str:
+ sql = f"""
+ select
+ tb.sortkey1 as sortkey
+ from svv_table_info tb
+ where tb.table ilike '{ relation.identifier }'
+ and tb.schema ilike '{ relation.schema }'
+ and tb.database ilike '{ relation.database }'
+ """
+ return project.run_sql(sql, fetch="one")[0]
+
+
+def query_dist(project, relation: RedshiftRelation) -> str:
+ sql = f"""
+ select
+ tb.diststyle
+ from svv_table_info tb
+ where tb.table ilike '{ relation.identifier }'
+ and tb.schema ilike '{ relation.schema }'
+ and tb.database ilike '{ relation.database }'
+ """
+ return project.run_sql(sql, fetch="one")[0]
+
+
+def query_autorefresh(project, relation: RedshiftRelation) -> bool:
+ sql = f"""
+ select
+ case mv.autorefresh when 't' then True when 'f' then False end as autorefresh
+ from stv_mv_info mv
+ where trim(mv.name) ilike '{ relation.identifier }'
+ and trim(mv.schema) ilike '{ relation.schema }'
+ and trim(mv.db_name) ilike '{ relation.database }'
+ """
+ return project.run_sql(sql, fetch="one")[0]
+
+
+def run_dbt_and_capture_with_retries_redshift_mv(args: List[str], max_retries: int = 10):
+ """
+ We need to retry `run_dbt` calls on Redshift because we get sporadic failures of the form:
+
+ Database Error in model my_materialized_view (models/my_materialized_view.sql)
+ could not open relation with OID 14957412
+ """
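+    # Typical call (as used in test_materialized_views.py), shown here for reference:
+    #   _, logs = run_dbt_and_capture_with_retries_redshift_mv(
+    #       ["--debug", "run", "--models", my_materialized_view.name]
+    #   )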
+ retries = 0
+ while retries < max_retries:
+ try:
+            # there's no point in using this helper with expect_pass=False
+ return run_dbt_and_capture(args, expect_pass=True)
+ except AssertionError as e:
+ retries += 1
+ if retries == max_retries:
+ raise e
diff --git a/dbt-redshift/tests/functional/adapter/sources_freshness_tests/files.py b/dbt-redshift/tests/functional/adapter/sources_freshness_tests/files.py
new file mode 100644
index 000000000..f2dd6fbbe
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/sources_freshness_tests/files.py
@@ -0,0 +1,38 @@
+SCHEMA_YML = """version: 2
+sources:
+ - name: test_source
+ freshness:
+ warn_after: {count: 10, period: hour}
+ error_after: {count: 1, period: day}
+ schema: "{{ env_var('DBT_GET_LAST_RELATION_TEST_SCHEMA') }}"
+ tables:
+ - name: test_source_no_last_modified
+ - name: test_source_last_modified
+ loaded_at_field: last_modified
+"""
+
+SEED_TEST_SOURCE_NO_LAST_MODIFIED_CSV = """
+id,name
+1,Martin
+2,Jeter
+3,Ruth
+4,Gehrig
+5,DiMaggio
+6,Torre
+7,Mantle
+8,Berra
+9,Maris
+""".strip()
+
+SEED_TEST_SOURCE_LAST_MODIFIED_CSV = """
+id,name,last_modified
+1,Martin,2023-01-01 00:00:00
+2,Jeter,2023-02-01 00:00:00
+3,Ruth,2023-03-01 00:00:00
+4,Gehrig,2023-04-01 00:00:00
+5,DiMaggio,2023-05-01 00:00:00
+6,Torre,2023-06-01 00:00:00
+7,Mantle,2023-07-01 00:00:00
+8,Berra,2023-08-01 00:00:00
+9,Maris,2023-09-01 00:00:00
+""".strip()
diff --git a/dbt-redshift/tests/functional/adapter/sources_freshness_tests/test_get_relation_last_modified.py b/dbt-redshift/tests/functional/adapter/sources_freshness_tests/test_get_relation_last_modified.py
new file mode 100644
index 000000000..c31e9ac61
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/sources_freshness_tests/test_get_relation_last_modified.py
@@ -0,0 +1,150 @@
+import os
+import pytest
+from unittest import mock
+
+from dbt.adapters.redshift.impl import RedshiftAdapter
+from dbt.adapters.capability import Capability, CapabilityDict
+from dbt.cli.main import dbtRunner
+from dbt.tests.util import run_dbt
+
+from tests.functional.adapter.sources_freshness_tests import files
+
+
+class SetupGetLastRelationModified:
+ @pytest.fixture(scope="class", autouse=True)
+ def set_env_vars(self, project):
+ os.environ["DBT_GET_LAST_RELATION_TEST_SCHEMA"] = project.test_schema
+ yield
+ del os.environ["DBT_GET_LAST_RELATION_TEST_SCHEMA"]
+
+
+class TestGetLastRelationModified(SetupGetLastRelationModified):
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ return {
+ "test_source_no_last_modified.csv": files.SEED_TEST_SOURCE_NO_LAST_MODIFIED_CSV,
+ "test_source_last_modified.csv": files.SEED_TEST_SOURCE_LAST_MODIFIED_CSV,
+ }
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"schema.yml": files.SCHEMA_YML}
+
+ @pytest.mark.parametrize(
+ "source,status,expect_pass",
+ [
+ ("test_source.test_source_no_last_modified", "pass", True),
+ ("test_source.test_source_last_modified", "error", False), # stale
+ ],
+ )
+ def test_get_last_relation_modified(self, project, source, status, expect_pass):
+ run_dbt(["seed"])
+
+ results = run_dbt(
+ ["source", "freshness", "--select", f"source:{source}"], expect_pass=expect_pass
+ )
+ assert len(results) == 1
+ result = results[0]
+ assert result.status == status
+
+
+freshness_metadata_schema_batch_yml = """
+sources:
+ - name: test_source
+ freshness:
+ warn_after: {count: 10, period: hour}
+ error_after: {count: 1, period: day}
+ schema: "{{ env_var('DBT_GET_LAST_RELATION_TEST_SCHEMA') }}"
+ tables:
+ - name: test_table
+ - name: test_table2
+ - name: test_table_with_loaded_at_field
+ loaded_at_field: my_loaded_at_field
+"""
+
+
+class TestGetLastRelationModifiedBatch(SetupGetLastRelationModified):
+ @pytest.fixture(scope="class")
+ def custom_schema(self, project, set_env_vars):
+ with project.adapter.connection_named("__test"):
+ relation = project.adapter.Relation.create(
+ database=project.database, schema=os.environ["DBT_GET_LAST_RELATION_TEST_SCHEMA"]
+ )
+ project.adapter.drop_schema(relation)
+ project.adapter.create_schema(relation)
+
+ yield relation.schema
+
+ with project.adapter.connection_named("__test"):
+ project.adapter.drop_schema(relation)
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"schema.yml": freshness_metadata_schema_batch_yml}
+
+ def get_freshness_result_for_table(self, table_name, results):
+ for result in results:
+ if result.node.name == table_name:
+ return result
+ return None
+
+ def test_get_last_relation_modified_batch(self, project, custom_schema):
+ project.run_sql(
+ f"create table {custom_schema}.test_table as (select 1 as id, 'test' as name);"
+ )
+ project.run_sql(
+ f"create table {custom_schema}.test_table2 as (select 1 as id, 'test' as name);"
+ )
+ project.run_sql(
+ f"create table {custom_schema}.test_table_with_loaded_at_field as (select 1 as id, timestamp '2009-09-15 10:59:43' as my_loaded_at_field);"
+ )
+
+ runner = dbtRunner()
+ freshness_results_batch = runner.invoke(["source", "freshness"]).result
+
+ assert len(freshness_results_batch) == 3
+ test_table_batch_result = self.get_freshness_result_for_table(
+ "test_table", freshness_results_batch
+ )
+ test_table2_batch_result = self.get_freshness_result_for_table(
+ "test_table2", freshness_results_batch
+ )
+ test_table_with_loaded_at_field_batch_result = self.get_freshness_result_for_table(
+ "test_table_with_loaded_at_field", freshness_results_batch
+ )
+
+ # Remove TableLastModifiedMetadataBatch and run freshness on same input without batch strategy
+ capabilities_no_batch = CapabilityDict(
+ {
+ capability: support
+ for capability, support in RedshiftAdapter.capabilities().items()
+ if capability != Capability.TableLastModifiedMetadataBatch
+ }
+ )
+ with mock.patch.object(
+ RedshiftAdapter, "capabilities", return_value=capabilities_no_batch
+ ):
+ freshness_results = runner.invoke(["source", "freshness"]).result
+
+ assert len(freshness_results) == 3
+ test_table_result = self.get_freshness_result_for_table("test_table", freshness_results)
+ test_table2_result = self.get_freshness_result_for_table("test_table2", freshness_results)
+ test_table_with_loaded_at_field_result = self.get_freshness_result_for_table(
+ "test_table_with_loaded_at_field", freshness_results
+ )
+
+ # assert results between batch vs non-batch freshness strategy are equivalent
+ assert test_table_result.status == test_table_batch_result.status
+ assert test_table_result.max_loaded_at == test_table_batch_result.max_loaded_at
+
+ assert test_table2_result.status == test_table2_batch_result.status
+ assert test_table2_result.max_loaded_at == test_table2_batch_result.max_loaded_at
+
+ assert (
+ test_table_with_loaded_at_field_batch_result.status
+ == test_table_with_loaded_at_field_result.status
+ )
+ assert (
+ test_table_with_loaded_at_field_batch_result.max_loaded_at
+ == test_table_with_loaded_at_field_result.max_loaded_at
+ )
diff --git a/dbt-redshift/tests/functional/adapter/test_adapter_methods.py b/dbt-redshift/tests/functional/adapter/test_adapter_methods.py
new file mode 100644
index 000000000..9907ccb72
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_adapter_methods.py
@@ -0,0 +1,115 @@
+import pytest
+
+from dbt.tests.util import run_dbt, check_relations_equal
+from dbt.tests.fixtures.project import write_project_files
+
+
+tests__get_relation_invalid = """
+{% set upstream = ref('upstream') %}
+{% set relations = adapter.get_relation(database=upstream.database, schema=upstream.schema, identifier="doesnotexist") %}
+{% set limit_query = 0 %}
+{% if relations.identifier %}
+ {% set limit_query = 1 %}
+{% endif %}
+
+select 1 as id limit {{ limit_query }}
+
+"""
+
+models__upstream_sql = """
+select 1 as id
+
+"""
+
+models__expected_sql = """
+select 1 as valid_relation
+
+"""
+
+models__model_sql = """
+
+{% set upstream = ref('upstream') %}
+
+select * from {{ upstream }}
+
+"""
+
+models__call_get_relation = """
+
+{% set model = ref('model') %}
+
+{% set relation = adapter.get_relation(database=model.database, schema=model.schema, identifier=model.identifier) %}
+{% if relation.identifier == model.identifier %}
+
+select 1 as valid_relation
+
+{% else %}
+
+select 0 as valid_relation
+
+{% endif %}
+
+"""
+
+models__get_relation_type = """
+
+{% set base_view = ref('base_view') %}
+
+{% set relation = adapter.get_relation(database=base_view.database, schema=base_view.schema, identifier=base_view.identifier) %}
+{% if relation.type == 'view' %}
+
+select 1 as valid_type
+
+{% else %}
+
+select 0 as valid_type
+
+{% endif %}
+
+"""
+
+
+class RedshiftAdapterMethod:
+ @pytest.fixture(scope="class")
+ def tests(self):
+ return {"get_relation_invalid.sql": tests__get_relation_invalid}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "upstream.sql": models__upstream_sql,
+ "expected.sql": models__expected_sql,
+ "model.sql": models__model_sql,
+ "call_get_relation.sql": models__call_get_relation,
+ "base_view.sql": "{{ config(bind=True) }} select * from {{ ref('model') }}",
+ "get_relation_type.sql": models__get_relation_type,
+ "expected_type.sql": "select 1 as valid_type",
+ }
+
+ def project_files(
+ self,
+ project_root,
+ tests,
+ models,
+ ):
+ write_project_files(project_root, "tests", tests)
+ write_project_files(project_root, "models", models)
+
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "name": "adapter_methods",
+ }
+
+ def test_adapter_methods(self, project):
+ run_dbt(["compile"]) # trigger any compile-time issues
+ result = run_dbt()
+ assert len(result) == 7
+
+ run_dbt(["test"])
+ check_relations_equal(project.adapter, ["call_get_relation", "expected"])
+ check_relations_equal(project.adapter, ["get_relation_type", "expected_type"])
+
+
+class TestRedshiftAdapterMethod(RedshiftAdapterMethod):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_basic.py b/dbt-redshift/tests/functional/adapter/test_basic.py
new file mode 100644
index 000000000..cd04bb1ba
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_basic.py
@@ -0,0 +1,174 @@
+import pytest
+
+from dbt.tests.util import AnyStringWith, run_dbt
+from dbt.tests.adapter.basic.test_base import BaseSimpleMaterializations
+from dbt.tests.adapter.basic.test_singular_tests import BaseSingularTests
+from dbt.tests.adapter.basic.test_singular_tests_ephemeral import BaseSingularTestsEphemeral
+from dbt.tests.adapter.basic.test_empty import BaseEmpty
+from dbt.tests.adapter.basic.test_ephemeral import BaseEphemeral
+from dbt.tests.adapter.basic.test_incremental import BaseIncremental
+from dbt.tests.adapter.basic.test_generic_tests import BaseGenericTests
+from dbt.tests.adapter.basic.test_snapshot_check_cols import BaseSnapshotCheckCols
+from dbt.tests.adapter.basic.test_snapshot_timestamp import BaseSnapshotTimestamp
+from dbt.tests.adapter.basic.test_adapter_methods import BaseAdapterMethod
+from dbt.tests.adapter.basic.test_docs_generate import BaseDocsGenerate, BaseDocsGenReferences
+from dbt.tests.adapter.basic.expected_catalog import (
+ base_expected_catalog,
+ no_stats,
+ expected_references_catalog,
+)
+from dbt.tests.adapter.basic.files import seeds_base_csv, seeds_added_csv, seeds_newcolumns_csv
+
+from tests.functional.adapter.expected_stats import (
+ redshift_stats,
+ redshift_ephemeral_summary_stats,
+)
+
+
+# set the datatype of the name column in the 'added' seed so that it can hold the '_update' that's added
+schema_seed_added_yml = """
+version: 2
+seeds:
+ - name: added
+ config:
+ column_types:
+ name: varchar(64)
+"""
+
+
+# TODO: update these with test cases or remove them if not needed
+class TestSimpleMaterializationsRedshift(BaseSimpleMaterializations):
+ @pytest.mark.flaky
+ def test_base(self, project):
+ super().test_base(project)
+
+
+class TestSingularTestsRedshift(BaseSingularTests):
+ pass
+
+
+class TestSingularTestsEphemeralRedshift(BaseSingularTestsEphemeral):
+ pass
+
+
+class TestEmptyRedshift(BaseEmpty):
+ pass
+
+
+class TestEphemeralRedshift(BaseEphemeral):
+ @pytest.mark.flaky
+ def test_ephemeral(self, project):
+ super().test_ephemeral(project)
+
+
+class TestIncrementalRedshift(BaseIncremental):
+ @pytest.mark.flaky
+ def test_incremental(self, project):
+ super().test_incremental(project)
+
+
+class TestGenericTestsRedshift(BaseGenericTests):
+ pass
+
+
+class TestSnapshotCheckColsRedshift(BaseSnapshotCheckCols):
+ # Redshift defines the 'name' column such that it's not big enough to hold the '_update' added in the test.
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "base.csv": seeds_base_csv,
+ "added.csv": seeds_added_csv,
+ "seeds.yml": schema_seed_added_yml,
+ }
+
+
+class TestSnapshotTimestampRedshift(BaseSnapshotTimestamp):
+ # Redshift defines the 'name' column such that it's not big enough to hold the '_update' added in the test.
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "base.csv": seeds_base_csv,
+ "added.csv": seeds_added_csv,
+ "newcolumns.csv": seeds_newcolumns_csv,
+ "seeds.yml": schema_seed_added_yml,
+ }
+
+
+class TestBaseAdapterMethod(BaseAdapterMethod):
+ pass
+
+
+@pytest.mark.skip(reason="Known flaky test to be reviewed")
+class TestDocsGenerateRedshift(BaseDocsGenerate):
+ @pytest.fixture(scope="class")
+ def expected_catalog(self, project, profile_user):
+ return base_expected_catalog(
+ project,
+ role=profile_user,
+ id_type="integer",
+ text_type=AnyStringWith("character varying"),
+ time_type="timestamp without time zone",
+ view_type="VIEW",
+ table_type="BASE TABLE",
+ model_stats=no_stats(),
+ seed_stats=redshift_stats(),
+ )
+
+
+# TODO: update this or delete it
+@pytest.mark.skip(reason="Needs updated dbt-core code")
+class TestDocsGenReferencesRedshift(BaseDocsGenReferences):
+ @pytest.fixture(scope="class")
+ def expected_catalog(self, project, profile_user):
+ return expected_references_catalog(
+ project,
+ role=profile_user,
+ id_type="integer",
+ text_type=AnyStringWith("character varying"),
+ time_type="timestamp without time zone",
+ bigint_type="bigint",
+ view_type="VIEW",
+ table_type="BASE TABLE",
+ model_stats=redshift_stats(),
+ seed_stats=redshift_stats(),
+ view_summary_stats=no_stats(),
+ ephemeral_summary_stats=redshift_ephemeral_summary_stats(),
+ )
+
+
+class TestViewRerun:
+ """
+ This test addresses: https://github.com/dbt-labs/dbt-redshift/issues/365
+ """
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "base_table.sql": "{{ config(materialized='table') }} select 1 as id",
+ "base_view.sql": "{{ config(bind=True) }} select * from {{ ref('base_table') }}",
+ }
+
+ def test_rerunning_dependent_view_refreshes(self, project):
+ """
+ Assert that subsequent runs of `dbt run` will correctly recreate a view.
+ """
+
+ def db_objects():
+ check_objects_exist_sql = f"""
+ select tablename
+ from pg_tables
+ where schemaname ilike '{project.test_schema}'
+ union all
+ select viewname
+ from pg_views
+ where schemaname ilike '{project.test_schema}'
+ order by 1
+ """
+ return project.run_sql(check_objects_exist_sql, fetch="all")
+
+ results = run_dbt(["run"])
+ assert len(results) == 2
+ assert db_objects() == (["base_table"], ["base_view"])
+ results = run_dbt(["run"])
+ assert len(results) == 2
+ assert db_objects() == (["base_table"], ["base_view"])
diff --git a/dbt-redshift/tests/functional/adapter/test_changing_relation_type.py b/dbt-redshift/tests/functional/adapter/test_changing_relation_type.py
new file mode 100644
index 000000000..81ba99918
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_changing_relation_type.py
@@ -0,0 +1,5 @@
+from dbt.tests.adapter.relations.test_changing_relation_type import BaseChangeRelationTypeValidator
+
+
+class TestRedshiftChangeRelationTypes(BaseChangeRelationTypeValidator):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_column_types.py b/dbt-redshift/tests/functional/adapter/test_column_types.py
new file mode 100644
index 000000000..e24167456
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_column_types.py
@@ -0,0 +1,56 @@
+import pytest
+from dbt.tests.adapter.column_types.test_column_types import BaseColumnTypes
+
+_MODEL_SQL = """
+select
+ 1::smallint as smallint_col,
+ 2::int as int_col,
+ 3::bigint as bigint_col,
+ 4::int2 as int2_col,
+ 5::int4 as int4_col,
+ 6::int8 as int8_col,
+ 7::integer as integer_col,
+ 8.0::real as real_col,
+ 9.0::float4 as float4_col,
+ 10.0::float8 as float8_col,
+ 11.0::float as float_col,
+ 12.0::double precision as double_col,
+ 13.0::numeric as numeric_col,
+ 14.0::decimal as decimal_col,
+ '15'::varchar(20) as varchar_col,
+ '16'::text as text_col
+"""
+
+_SCHEMA_YML = """
+version: 2
+models:
+ - name: model
+ tests:
+ - is_type:
+ column_map:
+ smallint_col: ['integer', 'number']
+ int_col: ['integer', 'number']
+ bigint_col: ['integer', 'number']
+ int2_col: ['integer', 'number']
+ int4_col: ['integer', 'number']
+ int8_col: ['integer', 'number']
+ integer_col: ['integer', 'number']
+ real_col: ['float', 'number']
+ double_col: ['float', 'number']
+ float4_col: ['float', 'number']
+ float8_col: ['float', 'number']
+ float_col: ['float', 'number']
+ numeric_col: ['numeric', 'number']
+ decimal_col: ['numeric', 'number']
+ varchar_col: ['string', 'not number']
+ text_col: ['string', 'not number']
+"""
+
+
+class TestRedshiftColumnTypes(BaseColumnTypes):
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"model.sql": _MODEL_SQL, "schema.yml": _SCHEMA_YML}
+
+ def test_run_and_test(self, project):
+ self.run_and_test()
diff --git a/dbt-redshift/tests/functional/adapter/test_constraints.py b/dbt-redshift/tests/functional/adapter/test_constraints.py
new file mode 100644
index 000000000..a97c66bbd
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_constraints.py
@@ -0,0 +1,149 @@
+import pytest
+from dbt.tests.adapter.constraints.test_constraints import (
+ BaseTableConstraintsColumnsEqual,
+ BaseViewConstraintsColumnsEqual,
+ BaseIncrementalConstraintsColumnsEqual,
+ BaseConstraintsRuntimeDdlEnforcement,
+ BaseConstraintsRollback,
+ BaseIncrementalConstraintsRuntimeDdlEnforcement,
+ BaseIncrementalConstraintsRollback,
+ BaseModelConstraintsRuntimeEnforcement,
+ BaseConstraintQuotedColumn,
+)
+
+_expected_sql_redshift = """
+create table (
+ id integer not null primary key references (id) unique,
+ color text,
+ date_day text
+) ;
+insert into
+(
+ select
+ id,
+ color,
+ date_day from
+ (
+ -- depends_on:
+ select
+ 'blue' as color,
+ 1 as id,
+ '2019-01-01' as date_day
+ ) as model_subq
+)
+;
+"""
+
+
+class RedshiftColumnEqualSetup:
+ @pytest.fixture
+ def data_types(self, schema_int_type, int_type, string_type):
+ # NOTE: Unlike some other adapters, we don't test array or JSON types here, because
+ # Redshift does not support them as materialized table column types.
+
+ # sql_column_value, schema_data_type, error_data_type
+ return [
+ ["1", schema_int_type, int_type],
+ ["'1'", string_type, string_type],
+ ["cast('2019-01-01' as date)", "date", "DATE"],
+ ["true", "bool", "BOOL"],
+ ["'2013-11-03 00:00:00-07'::timestamptz", "timestamptz", "TIMESTAMPTZ"],
+ ["'2013-11-03 00:00:00-07'::timestamp", "timestamp", "TIMESTAMP"],
+ ["'1'::numeric", "numeric", "NUMERIC"],
+ ]
+
+
+class TestRedshiftTableConstraintsColumnsEqual(
+ RedshiftColumnEqualSetup, BaseTableConstraintsColumnsEqual
+):
+ pass
+
+
+class TestRedshiftViewConstraintsColumnsEqual(
+ RedshiftColumnEqualSetup, BaseViewConstraintsColumnsEqual
+):
+ pass
+
+
+class TestRedshiftIncrementalConstraintsColumnsEqual(
+ RedshiftColumnEqualSetup, BaseIncrementalConstraintsColumnsEqual
+):
+ pass
+
+
+class TestRedshiftTableConstraintsRuntimeDdlEnforcement(BaseConstraintsRuntimeDdlEnforcement):
+ @pytest.fixture(scope="class")
+ def expected_sql(self):
+ return _expected_sql_redshift
+
+
+class TestRedshiftTableConstraintsRollback(BaseConstraintsRollback):
+ @pytest.fixture(scope="class")
+ def expected_error_messages(self):
+ return ["Cannot insert a NULL value into column id"]
+
+
+class TestRedshiftIncrementalConstraintsRuntimeDdlEnforcement(
+ BaseIncrementalConstraintsRuntimeDdlEnforcement
+):
+ @pytest.fixture(scope="class")
+ def expected_sql(self):
+ return _expected_sql_redshift
+
+
+class TestRedshiftIncrementalConstraintsRollback(BaseIncrementalConstraintsRollback):
+ @pytest.fixture(scope="class")
+ def expected_error_messages(self):
+ return ["Cannot insert a NULL value into column id"]
+
+
+class TestRedshiftModelConstraintsRuntimeEnforcement(BaseModelConstraintsRuntimeEnforcement):
+ @pytest.fixture(scope="class")
+ def expected_sql(self):
+ return """
+create table (
+ id integer not null,
+ color text,
+ date_day text,
+ primary key (id),
+ constraint strange_uniqueness_requirement unique (color, date_day),
+ foreign key (id) references (id)
+) ;
+insert into
+(
+ select
+ id,
+ color,
+ date_day from
+ (
+ -- depends_on:
+ select
+ 'blue' as color,
+ 1 as id,
+ '2019-01-01' as date_day
+ ) as model_subq
+)
+;
+"""
+
+
+class TestRedshiftConstraintQuotedColumn(BaseConstraintQuotedColumn):
+ @pytest.fixture(scope="class")
+ def expected_sql(self):
+ return """
+create table (
+ id integer not null,
+ "from" text not null,
+ date_day text
+) ;
+insert into
+(
+ select id, "from", date_day
+ from (
+ select
+ 'blue' as "from",
+ 1 as id,
+ '2019-01-01' as date_day
+ ) as model_subq
+);
+"""
diff --git a/dbt-redshift/tests/functional/adapter/test_grants.py b/dbt-redshift/tests/functional/adapter/test_grants.py
new file mode 100644
index 000000000..b627e450a
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_grants.py
@@ -0,0 +1,24 @@
+from dbt.tests.adapter.grants.test_model_grants import BaseModelGrants
+from dbt.tests.adapter.grants.test_incremental_grants import BaseIncrementalGrants
+from dbt.tests.adapter.grants.test_seed_grants import BaseSeedGrants
+from dbt.tests.adapter.grants.test_snapshot_grants import BaseSnapshotGrants
+
+
+class TestModelGrantsRedshift(BaseModelGrants):
+ pass
+
+
+class TestIncrementalGrantsRedshift(BaseIncrementalGrants):
+ pass
+
+
+class TestSeedGrantsRedshift(BaseSeedGrants):
+ pass
+
+
+class TestSnapshotGrantsRedshift(BaseSnapshotGrants):
+ pass
+
+
+class TestInvalidGrantsRedshift(BaseModelGrants):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_late_binding_view.py b/dbt-redshift/tests/functional/adapter/test_late_binding_view.py
new file mode 100644
index 000000000..013bf06be
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_late_binding_view.py
@@ -0,0 +1,49 @@
+import pytest
+
+from dbt.tests.util import run_dbt, run_sql_with_adapter
+
+_MODEL_SQL = """{{
+ config(
+ materialized='view',
+ bind=False
+ )
+}}
+select * from {{ ref('seed') }}
+"""
+
+_SEED_CSV = """
+id,first_name,email,ip_address,updated_at
+1,Larry,lking0@miitbeian.gov.cn,69.135.206.194,2008-09-12 19:08:31
+""".lstrip()
+
+
+class TestLateBindingView:
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "model.sql": _MODEL_SQL,
+ }
+
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ return {"seed.csv": _SEED_CSV}
+
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "seeds": {
+ "quote_columns": False,
+ }
+ }
+
+ def test_late_binding_view_query(self, project):
+ seed_run_result = run_dbt(["seed"])
+ assert len(seed_run_result) == 1
+ run_result = run_dbt()
+ assert len(run_result) == 1
+ # drop the table. Use 'cascade' here so that if late-binding views
+ # didn't work as advertised, the following dbt run will fail.
+ drop_query = """drop table if exists {}.seed cascade""".format(project.test_schema)
+ run_sql_with_adapter(project.adapter, drop_query)
+ run_result = run_dbt()
+ assert len(run_result) == 1
diff --git a/dbt-redshift/tests/functional/adapter/test_macros.py b/dbt-redshift/tests/functional/adapter/test_macros.py
new file mode 100644
index 000000000..0596ab549
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_macros.py
@@ -0,0 +1,50 @@
+import pytest
+from dbt.tests.util import run_dbt
+
+_MODEL_SQL = """
+{{ dispatch_to_parent() }}
+select 1 as id
+"""
+
+_MACRO_SQL = """
+{% macro do_something2(foo2, bar2) %}
+
+ select
+ '{{ foo2 }}' as foo2,
+ '{{ bar2 }}' as bar2
+
+{% endmacro %}
+
+{% macro with_ref() %}
+
+ {{ ref('table_model') }}
+
+{% endmacro %}
+
+{% macro dispatch_to_parent() %}
+ {% set macro = adapter.dispatch('dispatch_to_parent') %}
+ {{ macro() }}
+{% endmacro %}
+
+{% macro default__dispatch_to_parent() %}
+ {% set msg = 'No default implementation of dispatch_to_parent' %}
+ {{ exceptions.raise_compiler_error(msg) }}
+{% endmacro %}
+
+{% macro postgres__dispatch_to_parent() %}
+ {{ return('') }}
+{% endmacro %}
+"""
+
+
+class TestRedshift:
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACRO_SQL}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"model.sql": _MODEL_SQL}
+
+ def test_inherited_macro(self, project):
+ run_dbt()
diff --git a/dbt-redshift/tests/functional/adapter/test_persist_docs.py b/dbt-redshift/tests/functional/adapter/test_persist_docs.py
new file mode 100644
index 000000000..cb5e632b6
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_persist_docs.py
@@ -0,0 +1,108 @@
+import json
+import pytest
+
+from dbt.tests.adapter.materialized_view import files
+from dbt.tests.util import run_dbt
+
+from dbt.tests.adapter.persist_docs.test_persist_docs import (
+ BasePersistDocsBase,
+ BasePersistDocs,
+ BasePersistDocsColumnMissing,
+ BasePersistDocsCommentOnQuotedColumn,
+)
+
+_MATERIALIZED_VIEW_PROPERTIES__SCHEMA_YML = """
+version: 2
+models:
+ - name: my_materialized_view
+ description: |
+ Materialized view model description "with double quotes"
+      and with 'single quotes' as well as other;
+ '''abc123'''
+ reserved -- characters
+ 80% of statistics are made up on the spot
+ --
+ /* comment */
+ Some $lbl$ labeled $lbl$ and $$ unlabeled $$ dollar-quoting
+"""
+
+
+class TestPersistDocs(BasePersistDocs):
+ @pytest.mark.flaky
+ def test_has_comments_pglike(self, project):
+ super().test_has_comments_pglike(project)
+
+
+class TestPersistDocsColumnMissing(BasePersistDocsColumnMissing):
+ @pytest.mark.flaky
+ def test_missing_column(self, project):
+ super().test_missing_column(project)
+
+
+class TestPersistDocsCommentOnQuotedColumn(BasePersistDocsCommentOnQuotedColumn):
+ @pytest.mark.flaky
+ def test_quoted_column_comments(self, run_has_comments):
+ super().test_quoted_column_comments(run_has_comments)
+
+
+class TestPersistDocsLateBinding(BasePersistDocsBase):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "models": {
+ "test": {
+ "+persist_docs": {
+ "relation": True,
+ "columns": True,
+ },
+ "view_model": {
+ "bind": False,
+ },
+ }
+ }
+ }
+
+ @pytest.mark.flaky
+ def test_comment_on_late_binding_view(self, project):
+ run_dbt()
+ run_dbt(["docs", "generate"])
+ with open("target/catalog.json") as fp:
+ catalog_data = json.load(fp)
+ assert "nodes" in catalog_data
+ assert len(catalog_data["nodes"]) == 4
+ table_node = catalog_data["nodes"]["model.test.table_model"]
+        self._assert_has_table_comments(table_node)
+
+ view_node = catalog_data["nodes"]["model.test.view_model"]
+ self._assert_has_view_comments(view_node, False, False)
+
+ no_docs_node = catalog_data["nodes"]["model.test.no_docs_model"]
+ self._assert_has_view_comments(no_docs_node, False, False)
+
+
+class TestPersistDocsWithMaterializedView(BasePersistDocs):
+ @pytest.fixture(scope="class", autouse=True)
+ def seeds(self):
+ return {"my_seed.csv": files.MY_SEED}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "my_materialized_view.sql": files.MY_MATERIALIZED_VIEW,
+ }
+
+ @pytest.fixture(scope="class")
+ def properties(self):
+ return {
+ "schema.yml": _MATERIALIZED_VIEW_PROPERTIES__SCHEMA_YML,
+ }
+
+ @pytest.mark.flaky
+ def test_has_comments_pglike(self, project):
+ run_dbt(["docs", "generate"])
+ with open("target/catalog.json") as fp:
+ catalog_data = json.load(fp)
+ assert "nodes" in catalog_data
+ assert len(catalog_data["nodes"]) == 2
+ view_node = catalog_data["nodes"]["model.test.my_materialized_view"]
+ assert view_node["metadata"]["comment"].startswith("Materialized view model description")
diff --git a/dbt-redshift/tests/functional/adapter/test_query_comment.py b/dbt-redshift/tests/functional/adapter/test_query_comment.py
new file mode 100644
index 000000000..75c87ee38
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_query_comment.py
@@ -0,0 +1,38 @@
+from dbt.tests.adapter.query_comment.test_query_comment import (
+ BaseQueryComments,
+ BaseMacroQueryComments,
+ BaseMacroArgsQueryComments,
+ BaseMacroInvalidQueryComments,
+ BaseNullQueryComments,
+ BaseEmptyQueryComments,
+)
+import pytest
+
+
+class TestQueryCommentsRedshift(BaseQueryComments):
+ pass
+
+
+class TestMacroQueryCommentsRedshift(BaseMacroQueryComments):
+ pass
+
+
+class TestMacroArgsQueryCommentsRedshift(BaseMacroArgsQueryComments):
+ @pytest.mark.skip(
+        "This test is incorrectly comparing the version of `dbt-core` "
+ "to the version of `dbt-postgres`, which is not always the same."
+ )
+ def test_matches_comment(self, project, get_package_version):
+ pass
+
+
+class TestMacroInvalidQueryCommentsRedshift(BaseMacroInvalidQueryComments):
+ pass
+
+
+class TestNullQueryCommentsRedshift(BaseNullQueryComments):
+ pass
+
+
+class TestEmptyQueryCommentsRedshift(BaseEmptyQueryComments):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_relation_name.py b/dbt-redshift/tests/functional/adapter/test_relation_name.py
new file mode 100644
index 000000000..f17bbda63
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_relation_name.py
@@ -0,0 +1,128 @@
+import pytest
+
+from dbt.tests.util import run_dbt
+
+models__inc_relationname_51_chars_long = """
+{{
+ config({
+ "unique_key": "col_A",
+ "materialized": "incremental"
+ })
+}}
+
+select * from {{ this.schema }}.seed
+"""
+
+models__relationname_52_chars_long = """
+{{
+ config({
+ "materialized": "table"
+ })
+}}
+
+select * from {{ this.schema }}.seed
+"""
+
+models__relationname_63_chars_long = """
+{{
+ config({
+ "materialized": "table"
+ })
+}}
+
+select * from {{ this.schema }}.seed
+"""
+
+models__relationname_64_chars_long = """
+{{
+ config({
+ "materialized": "table"
+ })
+}}
+
+select * from {{ this.schema }}.seed
+"""
+
+models__relationname_127_chars_long = """
+{{
+ config({
+ "materialized": "table"
+ })
+}}
+
+select * from {{ this.schema }}.seed
+"""
+
+
+seeds__seed = """col_A,col_B
+1,2
+3,4
+5,6
+"""
+
+
+class TestAdapterDDLBase(object):
+ @pytest.fixture(scope="class", autouse=True)
+ def setUp(self, project):
+ run_dbt(["seed"])
+
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ return {"seed.csv": seeds__seed}
+
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {
+ "seeds": {
+ "quote_columns": False,
+ },
+ }
+
+
+class TestAdapterDDL(TestAdapterDDLBase):
+ @pytest.fixture(scope="class")
+ def models(self):
+ relname_51_chars_long = "incremental_table_whose_name_is_51_characters_abcde.sql"
+ relname_52_chars_long = "relation_whose_name_is_52_chars_long_abcdefghijklmno.sql"
+ relname_63_chars_long = (
+ "relation_whose_name_is_63_chars_long_abcdefghijklmnopqrstuvwxyz.sql"
+ )
+ relname_63_chars_long_b = (
+ "relation_whose_name_is_63_chars_long_abcdefghijklmnopqrstuvwxya.sql"
+ )
+ relname_64_chars_long = (
+ "relation_whose_name_is_64_chars_long_abcdefghijklmnopqrstuvwxyz0.sql"
+ )
+ relname_127_chars_long = (
+ "relation_whose_name_is_127_characters89012345678901234567890123456"
+ "7890123456789012345678901234567890123456789012345678901234567.sql"
+ )
+
+ return {
+ relname_51_chars_long: models__inc_relationname_51_chars_long,
+ relname_52_chars_long: models__relationname_52_chars_long,
+ relname_63_chars_long: models__relationname_63_chars_long,
+ relname_63_chars_long_b: models__relationname_63_chars_long,
+ relname_64_chars_long: models__relationname_64_chars_long,
+ relname_127_chars_long: models__relationname_127_chars_long,
+ }
+
+ def test_long_name_succeeds(self, project):
+ run_dbt(["run", "--threads", "2"], expect_pass=True)
+        # warn: the second run will trigger the name collision at Redshift's
+        # maximum relation name length
+ run_dbt(["run", "--threads", "2"], expect_pass=True)
+
+
+class TestAdapterDDLExceptions(TestAdapterDDLBase):
+ @pytest.fixture(scope="class")
+ def models(self):
+ relname_128_chars_long = (
+ "relation_whose_name_is_127_characters89012345678901234567890123456"
+ "78901234567890123456789012345678901234567890123456789012345678.sql"
+ )
+ return {relname_128_chars_long: models__relationname_127_chars_long}
+
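+    # dbt-redshift errors when a relation name exceeds Redshift's 127-character identifier limit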
+ def test_too_long_of_name_fails(self, project):
+ results = run_dbt(["run"], expect_pass=False)
+ assert "is longer than 127 characters" in results[0].message
diff --git a/dbt-redshift/tests/functional/adapter/test_simple_seed.py b/dbt-redshift/tests/functional/adapter/test_simple_seed.py
new file mode 100644
index 000000000..3e5ea5b23
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_simple_seed.py
@@ -0,0 +1,122 @@
+import pytest
+from dbt.tests.adapter.simple_seed.test_seed_type_override import BaseSimpleSeedColumnOverride
+from dbt.tests.adapter.utils.base_utils import run_dbt
+
+_SCHEMA_YML = """
+version: 2
+seeds:
+- name: seed_enabled
+ columns:
+ - name: birthday
+ tests:
+ - column_type:
+ type: date
+ - name: seed_id
+ tests:
+ - column_type:
+ type: character varying(256)
+
+- name: seed_tricky
+ columns:
+ - name: seed_id
+ tests:
+ - column_type:
+ type: integer
+ - name: seed_id_str
+ tests:
+ - column_type:
+ type: character varying(256)
+ - name: a_bool
+ tests:
+ - column_type:
+ type: boolean
+ - name: looks_like_a_bool
+ tests:
+ - column_type:
+ type: character varying(256)
+ - name: a_date
+ tests:
+ - column_type:
+ type: timestamp without time zone
+ - name: looks_like_a_date
+ tests:
+ - column_type:
+ type: character varying(256)
+ - name: relative
+ tests:
+ - column_type:
+ type: character varying(9)
+ - name: weekday
+ tests:
+ - column_type:
+ type: character varying(8)
+""".lstrip()
+
+
+properties__schema_yml = """
+version: 2
+
+seeds:
+ - name: seed_dist_all
+ config:
+ dist: all
+"""
+
+
+seeds__dist_all_csv = """
+seed_id,weekday
+1,Saturday
+2,Sunday
+3,Monday
+""".lstrip()
+
+
+class TestSimpleSeedColumnOverride(BaseSimpleSeedColumnOverride):
+ @pytest.fixture(scope="class")
+ def schema(self):
+ return "simple_seed"
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"models-rs.yml": _SCHEMA_YML}
+
+ @staticmethod
+ def seed_enabled_types():
+ return {
+ "seed_id": "text",
+ "birthday": "date",
+ }
+
+ @staticmethod
+ def seed_tricky_types():
+ return {
+ "seed_id_str": "text",
+ "looks_like_a_bool": "text",
+ "looks_like_a_date": "text",
+ }
+
+ def test_redshift_simple_seed_with_column_override_redshift(self, project):
+ seed_results = run_dbt(["seed"])
+ assert len(seed_results) == 2
+ test_results = run_dbt(["test"])
+ assert len(test_results) == 10
+
+
+class BaseSimpleSeedDiststyleAll:
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "schema.yml": properties__schema_yml,
+ }
+
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ return {"seed_dist_all.csv": seeds__dist_all_csv}
+
+ def test_simple_seed_with_diststyle_all(self, project):
+ seed_results = run_dbt(["seed", "--show"])
+ assert len(seed_results) == 1
+
+
+class TestSimpleSeedDiststyleAll(BaseSimpleSeedDiststyleAll):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_simple_snapshot.py b/dbt-redshift/tests/functional/adapter/test_simple_snapshot.py
new file mode 100644
index 000000000..4db5b2330
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_simple_snapshot.py
@@ -0,0 +1,9 @@
+from dbt.tests.adapter.simple_snapshot.test_snapshot import BaseSnapshotCheck, BaseSimpleSnapshot
+
+
+class TestSnapshot(BaseSimpleSnapshot):
+ pass
+
+
+class TestSnapshotCheck(BaseSnapshotCheck):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/test_store_test_failures.py b/dbt-redshift/tests/functional/adapter/test_store_test_failures.py
new file mode 100644
index 000000000..7f591654e
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/test_store_test_failures.py
@@ -0,0 +1,32 @@
+from dbt.tests.adapter.store_test_failures_tests import basic
+from dbt.tests.adapter.store_test_failures_tests.test_store_test_failures import (
+ TestStoreTestFailures,
+)
+
+
+class TestRedshiftTestStoreTestFailures(TestStoreTestFailures):
+ pass
+
+
+class TestStoreTestFailuresAsInteractions(basic.StoreTestFailuresAsInteractions):
+ pass
+
+
+class TestStoreTestFailuresAsProjectLevelOff(basic.StoreTestFailuresAsProjectLevelOff):
+ pass
+
+
+class TestStoreTestFailuresAsProjectLevelView(basic.StoreTestFailuresAsProjectLevelView):
+ pass
+
+
+class TestStoreTestFailuresAsGeneric(basic.StoreTestFailuresAsGeneric):
+ pass
+
+
+class TestStoreTestFailuresAsProjectLevelEphemeral(basic.StoreTestFailuresAsProjectLevelEphemeral):
+ pass
+
+
+class TestStoreTestFailuresAsExceptions(basic.StoreTestFailuresAsExceptions):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/unit_testing/fixtures.py b/dbt-redshift/tests/functional/adapter/unit_testing/fixtures.py
new file mode 100644
index 000000000..36212dff3
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/unit_testing/fixtures.py
@@ -0,0 +1,73 @@
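+# Fixtures exercising how None (null) values in unit test given/expect rows are handled.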
+model_none_value_base = """
+{{ config(materialized="table") }}
+
+select 1 as id, 'a' as col1
+"""
+
+model_none_value_model = """
+{{config(materialized="table")}}
+
+select * from {{ ref('none_value_base') }}
+"""
+
+
+test_none_column_value_doesnt_throw_error_csv = """
+unit_tests:
+ - name: test_simple
+
+ model: none_value_model
+ given:
+ - input: ref('none_value_base')
+ format: csv
+ rows: |
+ id,col1
+ ,d
+ ,e
+ 6,f
+
+ expect:
+ format: csv
+ rows: |
+ id,col1
+ ,d
+ ,e
+ 6,f
+"""
+
+test_none_column_value_doesnt_throw_error_dct = """
+unit_tests:
+ - name: test_simple
+
+ model: none_value_model
+ given:
+ - input: ref('none_value_base')
+ rows:
+ - { "id": , "col1": "d"}
+ - { "id": , "col1": "e"}
+ - { "id": 6, "col1": "f"}
+
+ expect:
+ rows:
+ - {id: , "col1": "d"}
+ - {id: , "col1": "e"}
+ - {id: 6, "col1": "f"}
+"""
+
+test_none_column_value_will_throw_error = """
+unit_tests:
+ - name: test_simple
+
+ model: none_value_model
+ given:
+ - input: ref('none_value_base')
+ rows:
+ - { "id": , "col1": "d"}
+ - { "id": , "col1": "e"}
+ - { "id": 6, "col1": }
+
+ expect:
+ rows:
+ - {id: , "col1": "d"}
+ - {id: , "col1": "e"}
+ - {id: 6, "col1": }
+"""
diff --git a/dbt-redshift/tests/functional/adapter/unit_testing/test_unit_testing.py b/dbt-redshift/tests/functional/adapter/unit_testing/test_unit_testing.py
new file mode 100644
index 000000000..27ed54cb6
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/unit_testing/test_unit_testing.py
@@ -0,0 +1,106 @@
+import pytest
+
+from dbt.artifacts.schemas.results import RunStatus
+from dbt.tests.fixtures.project import write_project_files
+from dbt.tests.util import run_dbt
+
+from dbt.tests.adapter.unit_testing.test_types import BaseUnitTestingTypes
+from dbt.tests.adapter.unit_testing.test_case_insensitivity import BaseUnitTestCaseInsensivity
+from dbt.tests.adapter.unit_testing.test_invalid_input import BaseUnitTestInvalidInput
+from tests.functional.adapter.unit_testing.fixtures import (
+ model_none_value_base,
+ model_none_value_model,
+ test_none_column_value_doesnt_throw_error_csv,
+ test_none_column_value_doesnt_throw_error_dct,
+ test_none_column_value_will_throw_error,
+)
+
+from dbt_common.exceptions import CompilationError
+
+
+class TestRedshiftUnitTestingTypes(BaseUnitTestingTypes):
+ @pytest.fixture
+ def data_types(self):
+ # sql_value, yaml_value
+ return [
+ ["1", "1"],
+ ["1.0", "1.0"],
+ ["'1'", "1"],
+ ["'1'::numeric", "1"],
+ ["'string'", "string"],
+ ["true", "true"],
+ ["DATE '2020-01-02'", "2020-01-02"],
+ ["TIMESTAMP '2013-11-03 00:00:00-0'", "2013-11-03 00:00:00-0"],
+ ["TIMESTAMPTZ '2013-11-03 00:00:00-0'", "2013-11-03 00:00:00-0"],
+ [
+ """JSON_PARSE('{"bar": "baz", "balance": 7.77, "active": false}')""",
+ """'{"bar": "baz", "balance": 7.77, "active": false}'""",
+ ],
+ # TODO: array types
+ # ["ARRAY[1,2,3]", """'{1, 2, 3}'"""],
+ # ["ARRAY[1.0,2.0,3.0]", """'{1.0, 2.0, 3.0}'"""],
+ # ["ARRAY[1::numeric,2::numeric,3::numeric]", """'{1.0, 2.0, 3.0}'"""],
+ # ["ARRAY['a','b','c']", """'{"a", "b", "c"}'"""],
+ # ["ARRAY[true,true,false]", """'{true, true, false}'"""],
+ # ["ARRAY[DATE '2020-01-02']", """'{"2020-01-02"}'"""],
+ # ["ARRAY[TIMESTAMP '2013-11-03 00:00:00-0']", """'{"2013-11-03 00:00:00-0"}'"""],
+ # ["ARRAY[TIMESTAMPTZ '2013-11-03 00:00:00-0']", """'{"2013-11-03 00:00:00-0"}'"""],
+ ]
+
+
+class RedshiftUnitTestingNone:
+ def test_nones_handled_dict(self, project):
+ run_dbt(["build"])
+
+
+class TestRedshiftUnitTestCsvNone(RedshiftUnitTestingNone):
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "none_value_base.sql": model_none_value_base,
+ "none_value_model.sql": model_none_value_model,
+ "__properties.yml": test_none_column_value_doesnt_throw_error_csv,
+ }
+
+
+class TestRedshiftUnitTestDictNone(RedshiftUnitTestingNone):
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "none_value_base.sql": model_none_value_base,
+ "none_value_model.sql": model_none_value_model,
+ "__properties.yml": test_none_column_value_doesnt_throw_error_dct,
+ }
+
+
+class TestRedshiftUnitTestingTooManyNonesFails:
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {
+ "__properties.yml": test_none_column_value_will_throw_error,
+ "none_value_base.sql": model_none_value_base,
+ "none_value_model.sql": model_none_value_model,
+ }
+
+ def test_invalid_input(self, project):
+ """This is a user-facing exception, so we can't pytest.raise(CompilationError)"""
+
+ def _find_first_error(items):
+ return next((item for item in items if item.status == RunStatus.Error), None)
+
+ run_result = run_dbt(["build"], expect_pass=False)
+ first_item = _find_first_error(run_result)
+
+ assert first_item is not None
+ assert (
+ "does not have any row free of null values, which may cause type mismatch errors during unit test execution"
+ in str(first_item.message)
+ )
+
+
+class TestRedshiftUnitTestCaseInsensitivity(BaseUnitTestCaseInsensivity):
+ pass
+
+
+class TestRedshiftUnitTestInvalidInput(BaseUnitTestInvalidInput):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/utils/test_data_types.py b/dbt-redshift/tests/functional/adapter/utils/test_data_types.py
new file mode 100644
index 000000000..3201afcfb
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/utils/test_data_types.py
@@ -0,0 +1,35 @@
+from dbt.tests.adapter.utils.data_types.test_type_bigint import BaseTypeBigInt
+from dbt.tests.adapter.utils.data_types.test_type_float import BaseTypeFloat
+from dbt.tests.adapter.utils.data_types.test_type_int import BaseTypeInt
+from dbt.tests.adapter.utils.data_types.test_type_numeric import BaseTypeNumeric
+from dbt.tests.adapter.utils.data_types.test_type_string import BaseTypeString
+from dbt.tests.adapter.utils.data_types.test_type_timestamp import BaseTypeTimestamp
+from dbt.tests.adapter.utils.data_types.test_type_boolean import BaseTypeBoolean
+
+
+class TestTypeBigInt(BaseTypeBigInt):
+ pass
+
+
+class TestTypeFloat(BaseTypeFloat):
+ pass
+
+
+class TestTypeInt(BaseTypeInt):
+ pass
+
+
+class TestTypeNumeric(BaseTypeNumeric):
+ pass
+
+
+class TestTypeString(BaseTypeString):
+ pass
+
+
+class TestTypeTimestamp(BaseTypeTimestamp):
+ pass
+
+
+class TestTypeBoolean(BaseTypeBoolean):
+ pass
diff --git a/dbt-redshift/tests/functional/adapter/utils/test_timestamps.py b/dbt-redshift/tests/functional/adapter/utils/test_timestamps.py
new file mode 100644
index 000000000..6c525be44
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/utils/test_timestamps.py
@@ -0,0 +1,20 @@
+import pytest
+from dbt.tests.adapter.utils.test_timestamps import BaseCurrentTimestamps
+
+
+class TestCurrentTimestampRedshift(BaseCurrentTimestamps):
+ @pytest.fixture(scope="class")
+ def expected_schema(self):
+ return {
+ "current_timestamp": "timestamp without time zone",
+ "current_timestamp_in_utc_backcompat": "timestamp without time zone",
+ "current_timestamp_backcompat": "timestamp without time zone",
+ }
+
+ @pytest.fixture(scope="class")
+ def expected_sql(self):
+ return """
+ select getdate() as current_timestamp,
+ getdate() as current_timestamp_in_utc_backcompat,
+ getdate() as current_timestamp_backcompat
+ """
diff --git a/dbt-redshift/tests/functional/adapter/utils/test_utils.py b/dbt-redshift/tests/functional/adapter/utils/test_utils.py
new file mode 100644
index 000000000..3a085712a
--- /dev/null
+++ b/dbt-redshift/tests/functional/adapter/utils/test_utils.py
@@ -0,0 +1,151 @@
+from dbt.tests.adapter.utils.test_array_append import BaseArrayAppend
+from dbt.tests.adapter.utils.test_array_concat import BaseArrayConcat
+from dbt.tests.adapter.utils.test_array_construct import BaseArrayConstruct
+from dbt.tests.adapter.utils.test_any_value import BaseAnyValue
+from dbt.tests.adapter.utils.test_bool_or import BaseBoolOr
+from dbt.tests.adapter.utils.test_cast import BaseCast
+from dbt.tests.adapter.utils.test_cast_bool_to_text import BaseCastBoolToText
+from dbt.tests.adapter.utils.test_concat import BaseConcat
+from dbt.tests.adapter.utils.test_current_timestamp import BaseCurrentTimestampNaive
+from dbt.tests.adapter.utils.test_date import BaseDate
+from dbt.tests.adapter.utils.test_dateadd import BaseDateAdd
+from dbt.tests.adapter.utils.test_datediff import BaseDateDiff
+from dbt.tests.adapter.utils.test_date_spine import BaseDateSpine
+from dbt.tests.adapter.utils.test_date_trunc import BaseDateTrunc
+from dbt.tests.adapter.utils.test_escape_single_quotes import BaseEscapeSingleQuotesQuote
+from dbt.tests.adapter.utils.test_except import BaseExcept
+from dbt.tests.adapter.utils.test_generate_series import BaseGenerateSeries
+from dbt.tests.adapter.utils.test_get_intervals_between import BaseGetIntervalsBetween
+from dbt.tests.adapter.utils.test_get_powers_of_two import BaseGetPowersOfTwo
+from dbt.tests.adapter.utils.test_hash import BaseHash
+from dbt.tests.adapter.utils.test_intersect import BaseIntersect
+from dbt.tests.adapter.utils.test_last_day import BaseLastDay
+from dbt.tests.adapter.utils.test_length import BaseLength
+from dbt.tests.adapter.utils.test_listagg import BaseListagg
+from dbt.tests.adapter.utils.test_position import BasePosition
+from dbt.tests.adapter.utils.test_replace import BaseReplace
+from dbt.tests.adapter.utils.test_right import BaseRight
+from dbt.tests.adapter.utils.test_safe_cast import BaseSafeCast
+from dbt.tests.adapter.utils.test_split_part import BaseSplitPart
+from dbt.tests.adapter.utils.test_string_literal import BaseStringLiteral
+
+
+class TestAnyValue(BaseAnyValue):
+ pass
+
+
+class TestArrayAppend(BaseArrayAppend):
+ pass
+
+
+class TestArrayConcat(BaseArrayConcat):
+ pass
+
+
+class TestArrayConstruct(BaseArrayConstruct):
+ pass
+
+
+class TestBoolOr(BaseBoolOr):
+ pass
+
+
+class TestCast(BaseCast):
+ pass
+
+
+class TestCastBoolToText(BaseCastBoolToText):
+ pass
+
+
+class TestConcat(BaseConcat):
+ pass
+
+
+# Use either BaseCurrentTimestampAware or BaseCurrentTimestampNaive but not both
+class TestCurrentTimestamp(BaseCurrentTimestampNaive):
+ pass
+
+
+class TestDate(BaseDate):
+ pass
+
+
+class TestDateAdd(BaseDateAdd):
+ pass
+
+
+class TestDateDiff(BaseDateDiff):
+ pass
+
+
+class TestDateSpine(BaseDateSpine):
+ pass
+
+
+class TestDateTrunc(BaseDateTrunc):
+ pass
+
+
+class TestEscapeSingleQuotes(BaseEscapeSingleQuotesQuote):
+ pass
+
+
+class TestExcept(BaseExcept):
+ pass
+
+
+class TestGenerateSeries(BaseGenerateSeries):
+ pass
+
+
+class TestGetIntervalsBetween(BaseGetIntervalsBetween):
+ pass
+
+
+class TestGetPowersOfTwo(BaseGetPowersOfTwo):
+ pass
+
+
+class TestHash(BaseHash):
+ pass
+
+
+class TestIntersect(BaseIntersect):
+ pass
+
+
+class TestLastDay(BaseLastDay):
+ pass
+
+
+class TestLength(BaseLength):
+ pass
+
+
+class TestListagg(BaseListagg):
+ pass
+
+
+class TestPosition(BasePosition):
+ pass
+
+
+class TestReplace(BaseReplace):
+ pass
+
+
+class TestRight(BaseRight):
+ pass
+
+
+class TestSafeCast(BaseSafeCast):
+ pass
+
+
+class TestSplitPart(BaseSplitPart):
+ pass
+
+
+class TestStringLiteral(BaseStringLiteral):
+ pass
diff --git a/dbt-redshift/tests/functional/conftest.py b/dbt-redshift/tests/functional/conftest.py
new file mode 100644
index 000000000..73329936d
--- /dev/null
+++ b/dbt-redshift/tests/functional/conftest.py
@@ -0,0 +1,19 @@
+import os
+
+import pytest
+
+
+# The profile dictionary, used to write out profiles.yml
+@pytest.fixture(scope="class")
+def dbt_profile_target():
+ return {
+ "type": "redshift",
+ "host": os.getenv("REDSHIFT_TEST_HOST"),
+ "port": int(os.getenv("REDSHIFT_TEST_PORT")),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "pass": os.getenv("REDSHIFT_TEST_PASS"),
+ "region": os.getenv("REDSHIFT_TEST_REGION"),
+ "threads": 1,
+ "retries": 6,
+ }
diff --git a/dbt-redshift/tests/functional/test_auth_method.py b/dbt-redshift/tests/functional/test_auth_method.py
new file mode 100644
index 000000000..493bc5af6
--- /dev/null
+++ b/dbt-redshift/tests/functional/test_auth_method.py
@@ -0,0 +1,125 @@
+import os
+
+import pytest
+
+from dbt.adapters.redshift.connections import RedshiftConnectionMethod
+from dbt.tests.util import run_dbt
+
+
+MY_SEED = """
+id,name
+1,apple
+2,banana
+3,cherry
+""".strip()
+
+
+MY_VIEW = """
+select * from {{ ref("my_seed") }}
+"""
+
+
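+# Each subclass supplies a dbt_profile_target for one auth method; the shared test seeds and runs a model to prove the connection works.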
+class AuthMethod:
+
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ yield {"my_seed.csv": MY_SEED}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ yield {"my_view.sql": MY_VIEW}
+
+ def test_connection(self, project):
+ run_dbt(["seed"])
+ results = run_dbt(["run"])
+ assert len(results) == 1
+
+
+class TestDatabaseMethod(AuthMethod):
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "method": RedshiftConnectionMethod.DATABASE.value,
+ "host": os.getenv("REDSHIFT_TEST_HOST"),
+ "port": int(os.getenv("REDSHIFT_TEST_PORT")),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "pass": os.getenv("REDSHIFT_TEST_PASS"),
+ "threads": 1,
+ "retries": 6,
+ }
+
+
+class TestIAMUserMethodProfile(AuthMethod):
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "method": RedshiftConnectionMethod.IAM.value,
+ "cluster_id": os.getenv("REDSHIFT_TEST_CLUSTER_ID"),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "iam_profile": os.getenv("REDSHIFT_TEST_IAM_USER_PROFILE"),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "threads": 1,
+ "retries": 6,
+ "host": "", # host is a required field in dbt-core
+ "port": 0, # port is a required field in dbt-core
+ }
+
+
+class TestIAMUserMethodExplicit(AuthMethod):
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "method": RedshiftConnectionMethod.IAM.value,
+ "cluster_id": os.getenv("REDSHIFT_TEST_CLUSTER_ID"),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "access_key_id": os.getenv("REDSHIFT_TEST_IAM_USER_ACCESS_KEY_ID"),
+ "secret_access_key": os.getenv("REDSHIFT_TEST_IAM_USER_SECRET_ACCESS_KEY"),
+ "region": os.getenv("REDSHIFT_TEST_REGION"),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "threads": 1,
+ "retries": 6,
+ "host": "", # host is a required field in dbt-core
+ "port": 0, # port is a required field in dbt-core
+ }
+
+
+class TestIAMRoleAuthProfile(AuthMethod):
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "method": RedshiftConnectionMethod.IAM_ROLE.value,
+ "cluster_id": os.getenv("REDSHIFT_TEST_CLUSTER_ID"),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "iam_profile": os.getenv("REDSHIFT_TEST_IAM_ROLE_PROFILE"),
+ "threads": 1,
+ "retries": 6,
+ "host": "", # host is a required field in dbt-core
+ "port": 0, # port is a required field in dbt-core
+ }
+
+
+@pytest.mark.skip(
+ reason="We need to cut over to new adapters team AWS account which has infra to support this as an automated test. This will include a GHA step that renders a refresh token and loading secrets into Github secrets for the <> delimited placeholder values below"
+)
+class TestIamIdcAuthProfileOktaIdp(AuthMethod):
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "method": "oauth_token_identity_center",
+ "host": os.getenv("REDSHIFT_TEST_HOST"),
+ "port": 5439,
+ "dbname": "dev",
+ "threads": 1,
+ "token_endpoint": {
+ "type": "okta",
+ "request_url": "https://.oktapreview.com/oauth2/default/v1/token",
+ "idp_auth_credentials": "",
+ "request_data": "grant_type=refresh_token&redirect_uri=&refresh_token=",
+ },
+ }
diff --git a/dbt-redshift/tests/functional/test_autocommit.py b/dbt-redshift/tests/functional/test_autocommit.py
new file mode 100644
index 000000000..e5e54a34f
--- /dev/null
+++ b/dbt-redshift/tests/functional/test_autocommit.py
@@ -0,0 +1,171 @@
+import os
+import pytest
+
+from dbt.tests.util import run_dbt, run_dbt_and_capture
+
+_MACROS__CREATE_DB = """
+{% macro create_db_fake() %}
+
+{% set database = "db_for_test__do_delete_if_you_see_this" %}
+
+{# IF NOT EXISTS is not available, but Redshift merely returns an error when trying to overwrite #}
+{% set create_command %}
+ CREATE DATABASE {{ database }}
+{% endset %}
+
+{{ log(create_command, info=True) }}
+
+{% do run_query(create_command) %}
+
+{{ log("Created redshift database " ~ database, info=True) }}
+
+{% endmacro %}
+"""
+
+_MACROS__UPDATE_MY_MODEL = """
+{% macro update_some_model(alert_ids, sent_at, table_name) %}
+ {% set update_query %}
+ UPDATE {{ ref('my_model') }} set status = 'sent'
+ {% endset %}
+ {% do run_query(update_query) %}
+{% endmacro %}
+"""
+
+_MACROS__UPDATE_MY_SEED = """
+{% macro update_my_seed() %}
+update {{ ref("my_seed") }} set status = 'done'
+{% endmacro %}
+"""
+
+_MODELS__MY_MODEL = """
+{{ config(materialized="table") }}
+
+select 1 as id, 'pending' as status
+"""
+
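+# after_commit() wraps the post-hook so it runs only after the model's transaction has committed.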
+_MODELS__AFTER_COMMIT = """
+{{
+ config(
+ post_hook=after_commit("{{ update_my_seed() }}")
+ )
+}}
+
+select 1 as id
+"""
+
+_SEEDS_MY_SEED = """
+id,status
+1,pending
+""".lstrip()
+
+
+class TestTransactionBlocksPreventCertainCommands:
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACROS__CREATE_DB}
+
+ def test_autocommit_deactivated_prevents_DDL(self, project):
+ """Scenario: user has autocommit=True in their target to run macros with normally
+ forbidden commands like CREATE DATABASE and VACUUM"""
+ result, out = run_dbt_and_capture(["run-operation", "create_db_fake"], expect_pass=False)
+ assert "CREATE DATABASE cannot run inside a transaction block" not in out
+
+
+class TestAutocommitUnblocksDDLInTransactions:
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "threads": 1,
+ "retries": 6,
+ "host": os.getenv("REDSHIFT_TEST_HOST"),
+ "port": int(os.getenv("REDSHIFT_TEST_PORT")),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "pass": os.getenv("REDSHIFT_TEST_PASS"),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "autocommit": False,
+ }
+
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACROS__CREATE_DB}
+
+ def test_default_setting_allows_DDL(self, project):
+ """Monitor if status quo in Redshift connector changes"""
+ result, out = run_dbt_and_capture(["run-operation", "create_db_fake"], expect_pass=False)
+ assert "CREATE DATABASE cannot run inside a transaction block" in out
+
+
+class TestUpdateDDLCommits:
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACROS__UPDATE_MY_MODEL}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"my_model.sql": _MODELS__MY_MODEL}
+
+ def test_update_will_go_through(self, project):
+ run_dbt()
+ run_dbt(["run-operation", "update_some_model"])
+ _, out = run_dbt_and_capture(
+ ["show", "--inline", "select * from {}.my_model".format(project.test_schema)]
+ )
+ assert "1 | sent" in out
+
+
+class TestUpdateDDLDoesNotCommitWithoutAutocommit:
+ @pytest.fixture(scope="class")
+ def dbt_profile_target(self):
+ return {
+ "type": "redshift",
+ "host": os.getenv("REDSHIFT_TEST_HOST"),
+ "port": int(os.getenv("REDSHIFT_TEST_PORT")),
+ "user": os.getenv("REDSHIFT_TEST_USER"),
+ "pass": os.getenv("REDSHIFT_TEST_PASS"),
+ "dbname": os.getenv("REDSHIFT_TEST_DBNAME"),
+ "autocommit": False,
+ }
+
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACROS__UPDATE_MY_MODEL}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"my_model.sql": _MODELS__MY_MODEL}
+
+ def test_update_will_not_go_through(self, project):
+ run_dbt()
+ run_dbt(["run-operation", "update_some_model"])
+ _, out = run_dbt_and_capture(
+ ["show", "--inline", "select * from {}.my_model".format(project.test_schema)]
+ )
+ assert "1 | pending" in out
+
+
+class TestAfterCommitMacroTakesEffect:
+ @pytest.fixture(scope="class")
+ def macros(self):
+ return {"macro.sql": _MACROS__UPDATE_MY_SEED}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"my_model.sql": _MODELS__AFTER_COMMIT}
+
+ @pytest.fixture(scope="class")
+ def seeds(self):
+ return {"my_seed.csv": _SEEDS_MY_SEED}
+
+ def test_update_happens_via_macro_in_config(self, project):
+ run_dbt(["seed"])
+ _, out = run_dbt_and_capture(
+ ["show", "--inline", "select * from {}.my_seed".format(project.test_schema)]
+ )
+ assert "1 | pending" in out
+
+ run_dbt()
+ _, out = run_dbt_and_capture(
+ ["show", "--inline", "select * from {}.my_seed".format(project.test_schema)]
+ )
+ assert "1 | done" in out
diff --git a/dbt-redshift/tests/functional/test_columns_in_relation.py b/dbt-redshift/tests/functional/test_columns_in_relation.py
new file mode 100644
index 000000000..ca6de67c3
--- /dev/null
+++ b/dbt-redshift/tests/functional/test_columns_in_relation.py
@@ -0,0 +1,111 @@
+from dbt.adapters.base import Column
+from dbt.tests.util import run_dbt, run_dbt_and_capture
+import pytest
+
+from dbt.adapters.redshift import RedshiftRelation
+
+
+class ColumnsInRelation:
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"my_model.sql": "select 1.23 as my_num, 'a' as my_char"}
+
+ @pytest.fixture(scope="class", autouse=True)
+ def setup(self, project):
+ run_dbt(["run"])
+
+ @pytest.fixture(scope="class")
+ def expected_columns(self):
+ return []
+
+ def test_columns_in_relation(self, project, expected_columns):
+ my_relation = RedshiftRelation.create(
+ database=project.database,
+ schema=project.test_schema,
+ identifier="my_model",
+ type=RedshiftRelation.View,
+ )
+ with project.adapter.connection_named("_test"):
+ actual_columns = project.adapter.get_columns_in_relation(my_relation)
+ assert actual_columns == expected_columns
+
+
+class TestColumnsInRelationBehaviorFlagOff(ColumnsInRelation):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"flags": {}}
+
+ @pytest.fixture(scope="class")
+ def expected_columns(self):
+ # the SDK query returns "varchar" whereas our custom query returns "character varying"
+ return [
+ Column(column="my_num", dtype="numeric", numeric_precision=3, numeric_scale=2),
+ Column(column="my_char", dtype="character varying", char_size=1),
+ ]
+
+
+@pytest.mark.skip(
+ """
+ There is a discrepancy between our custom query and the get_columns SDK call.
+ This test should be skipped for now, but re-enabled once get_columns is implemented.
+"""
+)
+class TestColumnsInRelationBehaviorFlagOn(ColumnsInRelation):
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"flags": {"restrict_direct_pg_catalog_access": True}}
+
+ @pytest.fixture(scope="class")
+ def expected_columns(self):
+ # the SDK query returns "varchar" whereas our custom query returns "character varying"
+ return [
+ Column(column="my_num", dtype="numeric", numeric_precision=3, numeric_scale=2),
+ Column(column="my_char", dtype="varchar", char_size=1),
+ ]
+
+
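+# These models invoke adapter.get_columns_in_relation inside a SQL comment so that running them evaluates the behavior flag once or twice per model.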
+ONE_CHECK = """
+select 1 as id
+-- {{ adapter.get_columns_in_relation(this) }}
+"""
+
+
+TWO_CHECK = """
+select 1 as id
+-- {{ adapter.get_columns_in_relation(this) }}
+-- {{ adapter.get_columns_in_relation(this) }}
+"""
+
+
+@pytest.mark.skip(
+ """
+ There is a discrepancy between our custom query and the get_columns SDK call.
+ This test should be skipped for now, but re-enabled once get_columns is implemented.
+"""
+)
+class TestBehaviorFlagFiresOnce:
+ @pytest.fixture(scope="class")
+ def project_config_update(self):
+ return {"flags": {"restrict_direct_pg_catalog_access": False}}
+
+ @pytest.fixture(scope="class")
+ def models(self):
+ return {"one_check.sql": ONE_CHECK, "two_check.sql": TWO_CHECK}
+
+ def test_warning_fires_once(self, project):
+ msg = "https://docs.getdbt.com/reference/global-configs/behavior-changes#redshift-restrict_direct_pg_catalog_access"
+
+ # trigger the evaluation once, we get one warning
+ _, logs = run_dbt_and_capture(["--debug", "run", "--models", "one_check"])
+ assert logs.count(msg) == 1
+
+ # trigger the evaluation twice, we still get one warning
+ _, logs = run_dbt_and_capture(["--debug", "run", "--models", "one_check"])
+ assert logs.count(msg) == 1
+
+ # trigger the evaluation three times, across two models, we still get one warning
+ _, logs = run_dbt_and_capture(["--debug", "run", "--full-refresh"])
+ assert logs.count(msg) == 1
+
+ # note, we still got a warning in the second call, so it's once per invocation
diff --git a/dbt-redshift/tests/unit/__init__.py b/dbt-redshift/tests/unit/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/dbt-redshift/tests/unit/mock_adapter.py b/dbt-redshift/tests/unit/mock_adapter.py
new file mode 100644
index 000000000..6e4143b9c
--- /dev/null
+++ b/dbt-redshift/tests/unit/mock_adapter.py
@@ -0,0 +1,81 @@
+from contextlib import contextmanager
+from unittest import mock
+
+from dbt.adapters.base import BaseAdapter
+
+
+def adapter_factory():
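+    """Return a BaseAdapter subclass whose methods delegate to a shared MagicMock responder so tests can assert calls."""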
+ class MockAdapter(BaseAdapter):
+ ConnectionManager = mock.MagicMock(TYPE="mock")
+ responder = mock.MagicMock()
+ # some convenient defaults
+ responder.quote.side_effect = lambda identifier: '"{}"'.format(identifier)
+ responder.date_function.side_effect = lambda: "unitdate()"
+ responder.is_cancelable.side_effect = lambda: False
+
+ @contextmanager
+ def exception_handler(self, *args, **kwargs):
+ self.responder.exception_handler(*args, **kwargs)
+ yield
+
+ def execute(self, *args, **kwargs):
+ return self.responder.execute(*args, **kwargs)
+
+ def drop_relation(self, *args, **kwargs):
+ return self.responder.drop_relation(*args, **kwargs)
+
+ def truncate_relation(self, *args, **kwargs):
+ return self.responder.truncate_relation(*args, **kwargs)
+
+ def rename_relation(self, *args, **kwargs):
+ return self.responder.rename_relation(*args, **kwargs)
+
+ def get_columns_in_relation(self, *args, **kwargs):
+ return self.responder.get_columns_in_relation(*args, **kwargs)
+
+ def expand_column_types(self, *args, **kwargs):
+ return self.responder.expand_column_types(*args, **kwargs)
+
+ def list_relations_without_caching(self, *args, **kwargs):
+ return self.responder.list_relations_without_caching(*args, **kwargs)
+
+ def create_schema(self, *args, **kwargs):
+ return self.responder.create_schema(*args, **kwargs)
+
+ def drop_schema(self, *args, **kwargs):
+ return self.responder.drop_schema(*args, **kwargs)
+
+ @classmethod
+ def quote(cls, identifier):
+ return cls.responder.quote(identifier)
+
+ def convert_text_type(self, *args, **kwargs):
+ return self.responder.convert_text_type(*args, **kwargs)
+
+ def convert_number_type(self, *args, **kwargs):
+ return self.responder.convert_number_type(*args, **kwargs)
+
+ def convert_boolean_type(self, *args, **kwargs):
+ return self.responder.convert_boolean_type(*args, **kwargs)
+
+ def convert_datetime_type(self, *args, **kwargs):
+ return self.responder.convert_datetime_type(*args, **kwargs)
+
+ def convert_date_type(self, *args, **kwargs):
+ return self.responder.convert_date_type(*args, **kwargs)
+
+ def convert_time_type(self, *args, **kwargs):
+ return self.responder.convert_time_type(*args, **kwargs)
+
+ def list_schemas(self, *args, **kwargs):
+ return self.responder.list_schemas(*args, **kwargs)
+
+ @classmethod
+ def date_function(cls):
+ return cls.responder.date_function()
+
+ @classmethod
+ def is_cancelable(cls):
+ return cls.responder.is_cancelable()
+
+ return MockAdapter
diff --git a/dbt-redshift/tests/unit/test_auth_method.py b/dbt-redshift/tests/unit/test_auth_method.py
new file mode 100644
index 000000000..46412e9d2
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_auth_method.py
@@ -0,0 +1,787 @@
+import requests
+
+from multiprocessing import get_context
+from unittest import TestCase, mock
+from unittest.mock import MagicMock
+
+from dbt.adapters.exceptions import FailedToConnectError
+import redshift_connector
+
+from dbt.adapters.redshift import (
+ Plugin as RedshiftPlugin,
+ RedshiftAdapter,
+)
+from dbt.adapters.redshift.connections import get_connection_method, RedshiftSSLConfig
+from tests.unit.utils import config_from_parts_or_dicts, inject_adapter
+
+
+DEFAULT_SSL_CONFIG = RedshiftSSLConfig().to_dict()
+
+
+class AuthMethod(TestCase):
+ def setUp(self):
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist.test.us-east-1",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ }
+ },
+ "target": "test",
+ }
+
+ project_cfg = {
+ "name": "X",
+ "version": "0.1",
+ "profile": "test",
+ "project-root": "/tmp/dbt/does-not-exist",
+ "quoting": {
+ "identifier": False,
+ "schema": True,
+ },
+ "config-version": 2,
+ }
+
+ self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
+ self._adapter = None
+
+ @property
+ def adapter(self):
+ if self._adapter is None:
+ self._adapter = RedshiftAdapter(self.config, get_context("spawn"))
+ inject_adapter(self._adapter, RedshiftPlugin)
+ return self._adapter
+
+
+class TestInvalidMethod(AuthMethod):
+ def test_invalid_auth_method(self):
+ # we have to set method this way, otherwise it won't validate
+ self.config.credentials.method = "badmethod"
+ with self.assertRaises(FailedToConnectError) as context:
+ connect_method_factory = get_connection_method(self.config.credentials)
+ connect_method_factory.get_connect_method()
+ self.assertTrue("badmethod" in context.exception.msg)
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_missing_region_failure(self):
+ # Failure test with no region
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ iam_profile="test",
+ host="doesnotexist.1233_no_region",
+ region=None,
+ )
+
+ with self.assertRaises(FailedToConnectError):
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233_no_region",
+ database="redshift",
+ cluster_identifier=None,
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_invalid_region_failure(self):
+ # Invalid region test
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ iam_profile="test",
+ host="doesnotexist.1233_no_region.us-not-a-region-1",
+ region=None,
+ )
+
+ with self.assertRaises(FailedToConnectError):
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233_no_region",
+ database="redshift",
+ cluster_identifier=None,
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+
+class TestDatabaseMethod(AuthMethod):
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_default(self):
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ timeout=None,
+ region=None,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_explicit_auth_method(self):
+ self.config.method = "database"
+
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ def test_database_verification_is_case_insensitive(self):
+ # Override adapter settings from setUp()
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "Redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ }
+ },
+ "target": "test",
+ }
+
+ project_cfg = {
+ "name": "X",
+ "version": "0.1",
+ "profile": "test",
+ "project-root": "/tmp/dbt/does-not-exist",
+ "quoting": {
+ "identifier": False,
+ "schema": True,
+ },
+ "config-version": 2,
+ }
+ self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
+ self.adapter.cleanup_connections()
+ self._adapter = RedshiftAdapter(self.config, get_context("spawn"))
+ self.adapter.verify_database("redshift")
+
+
+class TestIAMUserMethod(AuthMethod):
+
+ def test_iam_optionals(self):
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist",
+ "port": 5439,
+ "schema": "public",
+ "method": "iam",
+ "cluster_id": "my_redshift",
+ "db_groups": ["my_dbgroup"],
+ "autocreate": True,
+ }
+ },
+ "target": "test",
+ }
+
+ config_from_parts_or_dicts(self.config, profile_cfg)
+
+ def test_no_cluster_id(self):
+ self.config.credentials = self.config.credentials.replace(method="iam")
+ with self.assertRaises(FailedToConnectError) as context:
+ connect_method_factory = get_connection_method(self.config.credentials)
+ connect_method_factory.get_connect_method()
+
+ self.assertTrue("'cluster_id' must be provided" in context.exception.msg)
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_default(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ cluster_id="my_redshift",
+ host="thishostshouldnotexist.test.us-east-1",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ db_user="root",
+ password="",
+ user="",
+ cluster_identifier="my_redshift",
+ region=None,
+ timeout=None,
+ auto_create=False,
+ db_groups=[],
+ profile=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ cluster_id="my_redshift",
+ iam_profile="test",
+ host="thishostshouldnotexist.test.us-east-1",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ cluster_identifier="my_redshift",
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_explicit(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ cluster_id="my_redshift",
+ host="thishostshouldnotexist.test.us-east-1",
+ access_key_id="my_access_key_id",
+ secret_access_key="my_secret_access_key",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="thishostshouldnotexist.test.us-east-1",
+ access_key_id="my_access_key_id",
+ secret_access_key="my_secret_access_key",
+ database="redshift",
+ db_user="root",
+ password="",
+ user="",
+ cluster_identifier="my_redshift",
+ region=None,
+ timeout=None,
+ auto_create=False,
+ db_groups=[],
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+
+class TestIAMUserMethodServerless(AuthMethod):
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_default_region(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ iam_profile="test",
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_explicit_region(self):
+ # Successful test
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ iam_profile="test",
+ host="doesnotexist.1233.redshift-serverless.amazonaws.com",
+ region="us-east-2",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region="us-east-2",
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_invalid_serverless(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam",
+ iam_profile="test",
+ host="doesnotexist.1233.us-east-2.redshift-srvrlss.amazonaws.com",
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.us-east-2.redshift-srvrlss.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user="root",
+ password="",
+ user="",
+ profile="test",
+ port=5439,
+ timeout=None,
+ **DEFAULT_SSL_CONFIG,
+ )
+ self.assertTrue("'host' must be provided" in context.exception.msg)
+
+
+class TestIAMRoleMethod(AuthMethod):
+
+ def test_no_cluster_id(self):
+ self.config.credentials = self.config.credentials.replace(method="iam_role")
+ with self.assertRaises(FailedToConnectError) as context:
+ connect_method_factory = get_connection_method(self.config.credentials)
+ connect_method_factory.get_connect_method()
+
+ self.assertTrue("'cluster_id' must be provided" in context.exception.msg)
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_default(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ cluster_id="my_redshift",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ cluster_identifier="my_redshift",
+ db_user=None,
+ password="",
+ user="",
+ region=None,
+ timeout=None,
+ auto_create=False,
+ db_groups=[],
+ port=5439,
+ group_federation=True,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ cluster_id="my_redshift",
+ iam_profile="test",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ cluster_identifier="my_redshift",
+ db_user=None,
+ password="",
+ user="",
+ region=None,
+ timeout=None,
+ auto_create=False,
+ db_groups=[],
+ profile="test",
+ port=5439,
+ group_federation=True,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+
+class TestIAMRoleMethodServerless(AuthMethod):
+    # Should behave like IAM role auth against a provisioned cluster, except group_federation is False
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_default_region(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ iam_profile="iam_profile_test",
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user=None,
+ password="",
+ user="",
+ profile="iam_profile_test",
+ timeout=None,
+ port=5439,
+ group_federation=False,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_ignore_cluster(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ iam_profile="iam_profile_test",
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ cluster_id="my_redshift",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user=None,
+ password="",
+ user="",
+ profile="iam_profile_test",
+ timeout=None,
+ port=5439,
+ group_federation=False,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_explicit_region(self):
+ # Successful test
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ iam_profile="iam_profile_test",
+ host="doesnotexist.1233.redshift-serverless.amazonaws.com",
+ region="us-east-2",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region="us-east-2",
+ auto_create=False,
+ db_groups=[],
+ db_user=None,
+ password="",
+ user="",
+ profile="iam_profile_test",
+ timeout=None,
+ port=5439,
+ group_federation=False,
+ **DEFAULT_SSL_CONFIG,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_invalid_serverless(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="iam_role",
+ iam_profile="iam_profile_test",
+ host="doesnotexist.1233.us-east-2.redshift-srvrlss.amazonaws.com",
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=True,
+ host="doesnotexist.1233.us-east-2.redshift-srvrlss.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ db_user=None,
+ password="",
+ user="",
+ profile="iam_profile_test",
+ port=5439,
+ timeout=None,
+ group_federation=False,
+ **DEFAULT_SSL_CONFIG,
+ )
+ self.assertTrue("'host' must be provided" in context.exception.msg)
+
+
+class TestIAMIdcBrowser(AuthMethod):
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_idc_browser_all_fields(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="browser_identity_center",
+ idc_region="us-east-1",
+ issuer_url="https://identitycenter.amazonaws.com/ssoins-randomchars",
+ idc_client_display_name="display name",
+ idp_response_timeout=0,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ idp_listen_port=1111,
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=False,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ password="",
+ user="",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ idp_response_timeout=0,
+ idc_client_display_name="display name",
+ credentials_provider="BrowserIdcAuthPlugin",
+ idc_region="us-east-1",
+ issuer_url="https://identitycenter.amazonaws.com/ssoins-randomchars",
+ listen_port=1111,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_idc_browser_required_fields_only(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="browser_identity_center",
+ idc_region="us-east-1",
+ issuer_url="https://identitycenter.amazonaws.com/ssoins-randomchars",
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ )
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=False,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ password="",
+ user="",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ credentials_provider="BrowserIdcAuthPlugin",
+ listen_port=7890,
+ idp_response_timeout=60,
+ idc_client_display_name="Amazon Redshift driver",
+ idc_region="us-east-1",
+ issuer_url="https://identitycenter.amazonaws.com/ssoins-randomchars",
+ )
+
+ def test_invalid_adapter_missing_fields(self):
+ self.config.credentials = self.config.credentials.replace(
+ method="browser_identity_center",
+ idp_listen_port=1111,
+ idc_client_display_name="my display",
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ iam=False,
+ host="doesnotexist.1233.us-east-2.redshift-serverless.amazonaws.com",
+ database="redshift",
+ cluster_identifier=None,
+ region=None,
+ auto_create=False,
+ db_groups=[],
+ password="",
+ user="",
+ timeout=None,
+ port=5439,
+ **DEFAULT_SSL_CONFIG,
+ credentials_provider="BrowserIdcAuthPlugin",
+ listen_port=1111,
+ idp_response_timeout=60,
+ idc_client_display_name="my display",
+ )
+
+ assert (
+ "'idc_region', 'issuer_url' field(s) are required for 'browser_identity_center' credentials method"
+ in context.exception.msg
+ )
+
+
+class TestIAMIdcToken(AuthMethod):
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_idc_token_all_required_fields_okta(self):
+ """This test doesn't follow the idiom elsewhere in this file because we
+ a real test would need a valid refresh token which would require a valid
+ authorization request, neither of which are possible in automated testing at
+ merge. This is a surrogate test.
+ """
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ token_endpoint={
+ "type": "okta",
+ "request_url": "https://dbtcs.oktapreview.com/oauth2/default/v1/token",
+ "idp_auth_credentials": "my_auth_creds",
+ "request_data": "grant_type=refresh_token&redirect_uri=http%3A%2F%2Flocalhost%3A8080%2Flogin%2Foauth2%2Fcode%2Fokta&refresh_token=my_token",
+ },
+ )
+ with self.assertRaises(requests.exceptions.HTTPError) as context:
+ """
+ An http says we've made it in operation to call the token request which fails
+ due to invalid refresh token and auth creds
+ """
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+
+ assert "401 Client Error: Unauthorized for url" in str(context.exception)
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_profile_idc_token_all_required_fields_entra(self):
+ """This test doesn't follow the idiom elsewhere in this file because we
+ a real test would need a valid refresh token which would require a valid
+ authorization request, neither of which are possible in automated testing at
+ merge. This is a surrogate test.
+ """
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ token_endpoint={
+ "type": "entra",
+ "request_url": "https://login.microsoftonline.com/my_tenant/oauth2/v2.0/token",
+ "request_data": "my_data",
+ },
+ )
+ with self.assertRaises(requests.exceptions.HTTPError) as context:
+ """
+ An http says we've made it in operation to call the token request which fails
+ due to invalid refresh token and auth creds
+ """
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+
+ assert "400 Client Error: Bad Request for url" in str(context.exception)
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_invalid_idc_token_missing_field(self):
+        # Missing token_endpoint should raise FailedToConnectError
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ assert (
+ "'token_endpoint' field(s) are required for 'oauth_token_identity_center' credentials method"
+ in context.exception.msg
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_invalid_idc_token_missing_token_endpoint_subfield_okta(self):
+        # Missing 'request_url' in token_endpoint should raise FailedToConnectError
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ token_endpoint={
+ "type": "okta",
+ "request_data": "my_data",
+ "idp_auth_credentials": "my_auth_creds",
+ },
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ assert "Missing required key in token_endpoint: 'request_url'" in context.exception.msg
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_invalid_idc_token_missing_token_endpoint_subfield_entra(self):
+ # Expect failure: token_endpoint is missing the required 'request_data' subfield
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ token_endpoint={
+ "type": "entra",
+ "request_url": "https://dbtcs.oktapreview.com/oauth2/default/v1/token",
+ },
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ assert "Missing required key in token_endpoint: 'request_data'" in context.exception.msg
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_invalid_idc_token_missing_token_endpoint_type(self):
+ # Expect failure: the empty token_endpoint is missing the required 'type' key
+ self.config.credentials = self.config.credentials.replace(
+ method="oauth_token_identity_center",
+ token_endpoint={},
+ )
+ with self.assertRaises(FailedToConnectError) as context:
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ assert "Missing required key in token_endpoint: 'type'" in context.exception.msg
diff --git a/dbt-redshift/tests/unit/test_connection.py b/dbt-redshift/tests/unit/test_connection.py
new file mode 100644
index 000000000..61df7a47e
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_connection.py
@@ -0,0 +1,163 @@
+from multiprocessing import get_context
+from unittest import TestCase, mock
+
+import pytest
+from dbt.adapters.exceptions import FailedToConnectError
+from unittest.mock import MagicMock, call
+
+import redshift_connector
+
+from dbt.adapters.redshift import (
+ Plugin as RedshiftPlugin,
+ RedshiftAdapter,
+ RedshiftCredentials,
+)
+from tests.unit.utils import (
+ config_from_parts_or_dicts,
+ inject_adapter,
+ mock_connection,
+)
+
+
+class TestConnection(TestCase):
+
+ def setUp(self):
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist.test.us-east-1",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ }
+ },
+ "target": "test",
+ }
+
+ project_cfg = {
+ "name": "X",
+ "version": "0.1",
+ "profile": "test",
+ "project-root": "/tmp/dbt/does-not-exist",
+ "quoting": {
+ "identifier": False,
+ "schema": True,
+ },
+ "config-version": 2,
+ }
+
+ self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
+ self._adapter = None
+
+ @property
+ def adapter(self):
+ if self._adapter is None:
+ self._adapter = RedshiftAdapter(self.config, get_context("spawn"))
+ inject_adapter(self._adapter, RedshiftPlugin)
+ return self._adapter
+
+ def test_cancel_open_connections_empty(self):
+ self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
+
+ def test_cancel_open_connections_master(self):
+ key = self.adapter.connections.get_thread_identifier()
+ self.adapter.connections.thread_connections[key] = mock_connection("master")
+ self.assertEqual(len(list(self.adapter.cancel_open_connections())), 0)
+
+ def test_cancel_open_connections_single(self):
+ master = mock_connection("master")
+ model = mock_connection("model")
+
+ key = self.adapter.connections.get_thread_identifier()
+ self.adapter.connections.thread_connections.update(
+ {
+ key: master,
+ 1: model,
+ }
+ )
+ with mock.patch.object(self.adapter.connections, "add_query") as add_query:
+ query_result = mock.MagicMock()
+ cursor = mock.Mock()
+ cursor.fetchone.return_value = (42,)
+ add_query.side_effect = [(None, cursor), (None, query_result)]
+
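+ # Only the "model" connection should be cancelled via pg_terminate_backend;
+ # the master connection is expected to be left untouched (asserted below).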
+ self.assertEqual(len(list(self.adapter.cancel_open_connections())), 1)
+ add_query.assert_has_calls(
+ [
+ call(f"select pg_terminate_backend({model.backend_pid})"),
+ ]
+ )
+
+ master.handle.backend_pid.assert_not_called()
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_connection_has_backend_pid(self):
+ backend_pid = 42
+
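+ # Opening the handle runs "select pg_backend_pid()" through a cursor used as a
+ # context manager, so wire the mocked cursor chain to hand back our fake pid.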
+ cursor = mock.MagicMock()
+ execute = cursor().__enter__().execute
+ execute().fetchone.return_value = (backend_pid,)
+ redshift_connector.connect().cursor = cursor
+
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ assert connection.backend_pid == backend_pid
+
+ execute.assert_has_calls(
+ [
+ call("select pg_backend_pid()"),
+ ]
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_backend_pid_used_in_pg_terminate_backend(self):
+ with mock.patch.object(self.adapter.connections, "add_query") as add_query:
+ backend_pid = 42
+ query_result = (backend_pid,)
+
+ cursor = mock.MagicMock()
+ cursor().__enter__().execute().fetchone.return_value = query_result
+ redshift_connector.connect().cursor = cursor
+
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+
+ self.adapter.connections.cancel(connection)
+
+ add_query.assert_has_calls(
+ [
+ call(f"select pg_terminate_backend({backend_pid})"),
+ ]
+ )
+
+ def test_retry_able_exceptions_trigger_retry(self):
+ with mock.patch.object(self.adapter.connections, "add_query") as add_query:
+ connection_mock = mock_connection("model", state="closed")
+ connection_mock.credentials = RedshiftCredentials.from_dict(
+ {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist.test.us-east-1",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ "retries": 2,
+ }
+ )
+
+ connect_mock = MagicMock()
+ connect_mock.side_effect = [
+ redshift_connector.InterfaceError("retryable interface error<1>"),
+ redshift_connector.InterfaceError("retryable interface error<2>"),
+ redshift_connector.InterfaceError("retryable interface error<3>"),
+ ]
+
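+ # With retries=2 the adapter should make the initial attempt plus two retries,
+ # consuming all three InterfaceErrors before raising FailedToConnectError.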
+ with mock.patch("redshift_connector.connect", connect_mock):
+ with pytest.raises(FailedToConnectError) as e:
+ connection = self.adapter.connections.open(connection_mock)
+ assert str(e.value) == "Database Error\n retryable interface error<3>"
+ assert connect_mock.call_count == 3
diff --git a/dbt-redshift/tests/unit/test_conversion.py b/dbt-redshift/tests/unit/test_conversion.py
new file mode 100644
index 000000000..a375c4f6a
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_conversion.py
@@ -0,0 +1,80 @@
+import agate
+from dbt_common.clients import agate_helper
+
+from dbt.adapters.redshift import RedshiftAdapter
+from tests.unit.utils import TestAdapterConversions
+
+
+class TestConversion(TestAdapterConversions):
+ def test_convert_text_type(self):
+ rows = [
+ ["", "a1", "stringval1"],
+ ["", "a2", "stringvalasdfasdfasdfa"],
+ ["", "a3", "stringval3"],
+ ]
+ agate_table = self._make_table_of(rows, agate.Text)
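+ # The first column contains only empty strings and is expected to fall back to
+ # the default varchar(64); the others size to the longest observed value.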
+ expected = ["varchar(64)", "varchar(2)", "varchar(22)"]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_text_type(agate_table, col_idx) == expect
+
+ def test_convert_number_type(self):
+ rows = [
+ ["", "23.98", "-1"],
+ ["", "12.78", "-2"],
+ ["", "79.41", "-3"],
+ ]
+ agate_table = self._make_table_of(rows, agate.Number)
+ expected = ["integer", "float8", "integer"]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_number_type(agate_table, col_idx) == expect
+
+ def test_convert_boolean_type(self):
+ rows = [
+ ["", "false", "true"],
+ ["", "false", "false"],
+ ["", "false", "true"],
+ ]
+ agate_table = self._make_table_of(rows, agate.Boolean)
+ expected = ["boolean", "boolean", "boolean"]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_boolean_type(agate_table, col_idx) == expect
+
+ def test_convert_datetime_type(self):
+ rows = [
+ ["", "20190101T01:01:01Z", "2019-01-01 01:01:01"],
+ ["", "20190102T01:01:01Z", "2019-01-01 01:01:01"],
+ ["", "20190103T01:01:01Z", "2019-01-01 01:01:01"],
+ ]
+ agate_table = self._make_table_of(
+ rows, [agate.DateTime, agate_helper.ISODateTime, agate.DateTime]
+ )
+ expected = [
+ "timestamp without time zone",
+ "timestamp without time zone",
+ "timestamp without time zone",
+ ]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_datetime_type(agate_table, col_idx) == expect
+
+ def test_convert_date_type(self):
+ rows = [
+ ["", "2019-01-01", "2019-01-04"],
+ ["", "2019-01-02", "2019-01-04"],
+ ["", "2019-01-03", "2019-01-04"],
+ ]
+ agate_table = self._make_table_of(rows, agate.Date)
+ expected = ["date", "date", "date"]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_date_type(agate_table, col_idx) == expect
+
+ def test_convert_time_type(self):
+ # dbt's default type testers actually don't have a TimeDelta at all.
+ rows = [
+ ["", "120s", "10s"],
+ ["", "3m", "11s"],
+ ["", "1h", "12s"],
+ ]
+ agate_table = self._make_table_of(rows, agate.TimeDelta)
+ expected = ["varchar(24)", "varchar(24)", "varchar(24)"]
+ for col_idx, expect in enumerate(expected):
+ assert RedshiftAdapter.convert_time_type(agate_table, col_idx) == expect
diff --git a/dbt-redshift/tests/unit/test_materialized_view.py b/dbt-redshift/tests/unit/test_materialized_view.py
new file mode 100644
index 000000000..322b134f8
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_materialized_view.py
@@ -0,0 +1,110 @@
+from unittest.mock import Mock
+
+import agate
+import pytest
+
+from dbt.adapters.redshift.relation_configs import RedshiftMaterializedViewConfig
+
+
+@pytest.mark.parametrize("bool_value", [True, False, "True", "False", "true", "false"])
+def test_redshift_materialized_view_config_handles_all_valid_bools(bool_value):
+ config = RedshiftMaterializedViewConfig(
+ database_name="somedb",
+ schema_name="public",
+ mv_name="someview",
+ query="select * from sometable",
+ )
+ model_node = Mock()
+ model_node.config.extra.get = lambda x, y=None: (
+ bool_value if x in ["auto_refresh", "backup"] else "someDistValue"
+ )
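+ # parse_relation_config is expected to coerce string values such as "true" and
+ # "False" into real booleans for the autorefresh and backup settings.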
+ config_dict = config.parse_relation_config(model_node)
+ assert isinstance(config_dict["autorefresh"], bool)
+ assert isinstance(config_dict["backup"], bool)
+
+
+@pytest.mark.parametrize("bool_value", [1])
+def test_redshift_materialized_view_config_throws_expected_exception_with_invalid_types(
+ bool_value,
+):
+ config = RedshiftMaterializedViewConfig(
+ database_name="somedb",
+ schema_name="public",
+ mv_name="someview",
+ query="select * from sometable",
+ )
+ model_node = Mock()
+ model_node.config.extra.get = lambda x, y=None: (
+ bool_value if x in ["auto_refresh", "backup"] else "someDistValue"
+ )
+ with pytest.raises(TypeError):
+ config.parse_relation_config(model_node)
+
+
+def test_redshift_materialized_view_config_throws_expected_exception_with_invalid_str():
+ config = RedshiftMaterializedViewConfig(
+ database_name="somedb",
+ schema_name="public",
+ mv_name="someview",
+ query="select * from sometable",
+ )
+ model_node = Mock()
+ model_node.config.extra.get = lambda x, y=None: (
+ "notABool" if x in ["auto_refresh", "backup"] else "someDistValue"
+ )
+ with pytest.raises(ValueError):
+ config.parse_relation_config(model_node)
+
+
+def test_redshift_materialized_view_parse_relation_results_handles_multiples_sort_key():
+ materialized_view = agate.Table.from_object(
+ [],
+ [
+ "database",
+ "schema",
+ "table",
+ "diststyle",
+ "sortkey1",
+ "autorefresh",
+ ],
+ )
+
+ column_descriptor = agate.Table.from_object(
+ [
+ {
+ "column": "my_column",
+ "is_dist_key": True,
+ "sort_key_position": 1,
+ },
+ {
+ "column": "my_column2",
+ "is_dist_key": True,
+ "sort_key_position": 2,
+ },
+ {
+ "column": "my_column5",
+ "is_dist_key": False,
+ "sort_key_position": 0,
+ },
+ ],
+ )
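+ # Only columns with a non-zero sort_key_position should become sort keys,
+ # ordered by position; my_column5 (position 0) is expected to be ignored.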
+
+ query = agate.Table.from_object(
+ [
+ {
+ "definition": "create materialized view my_view as (select 1 as my_column, 'value' as my_column2)"
+ }
+ ]
+ )
+
+ relation_results = {
+ "materialized_view": materialized_view,
+ "columns": column_descriptor,
+ "query": query,
+ }
+
+ config_dict = RedshiftMaterializedViewConfig.parse_relation_results(relation_results)
+
+ assert isinstance(config_dict["sort"], dict)
+ assert config_dict["sort"]["sortkey"][0] == "my_column"
+ assert config_dict["sort"]["sortkey"][1] == "my_column2"
diff --git a/dbt-redshift/tests/unit/test_query.py b/dbt-redshift/tests/unit/test_query.py
new file mode 100644
index 000000000..c625e9a7f
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_query.py
@@ -0,0 +1,126 @@
+import redshift_connector
+
+from multiprocessing import get_context
+from unittest import TestCase, mock
+
+from dbt.adapters.sql.connections import SQLConnectionManager
+from dbt_common.clients import agate_helper
+from dbt_common.exceptions import DbtRuntimeError
+
+from dbt.adapters.redshift import (
+ Plugin as RedshiftPlugin,
+ RedshiftAdapter,
+)
+from tests.unit.utils import config_from_parts_or_dicts, inject_adapter
+
+
+class TestQuery(TestCase):
+ def setUp(self):
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist.test.us-east-1",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ }
+ },
+ "target": "test",
+ }
+
+ project_cfg = {
+ "name": "X",
+ "version": "0.1",
+ "profile": "test",
+ "project-root": "/tmp/dbt/does-not-exist",
+ "quoting": {
+ "identifier": False,
+ "schema": True,
+ },
+ "config-version": 2,
+ }
+
+ self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
+ self._adapter = None
+
+ @property
+ def adapter(self):
+ if self._adapter is None:
+ self._adapter = RedshiftAdapter(self.config, get_context("spawn"))
+ inject_adapter(self._adapter, RedshiftPlugin)
+ return self._adapter
+
+ @mock.patch.object(SQLConnectionManager, "get_thread_connection")
+ def mock_cursor(self, mock_get_thread_conn):
+ conn = mock.MagicMock()
+ mock_get_thread_conn.return_value = conn
+ mock_handle = mock.MagicMock()
+ conn.return_value = mock_handle
+ mock_cursor = mock.MagicMock()
+ mock_handle.return_value = mock_cursor
+ return mock_cursor
+
+ def test_execute_with_fetch(self):
+ cursor = mock.Mock()
+ table = agate_helper.empty_table()
+ with mock.patch.object(self.adapter.connections, "add_query") as mock_add_query:
+ mock_add_query.return_value = (
+ None,
+ cursor,
+ ) # mock_add_query always returns (None, cursor)
+ with mock.patch.object(self.adapter.connections, "get_response") as mock_get_response:
+ mock_get_response.return_value = None
+ with mock.patch.object(
+ self.adapter.connections, "get_result_from_cursor"
+ ) as mock_get_result_from_cursor:
+ mock_get_result_from_cursor.return_value = table
+ self.adapter.connections.execute(sql="select * from test", fetch=True)
+ mock_add_query.assert_called_once_with("select * from test", False)
+ mock_get_result_from_cursor.assert_called_once_with(cursor, None)
+ mock_get_response.assert_called_once_with(cursor)
+
+ def test_execute_without_fetch(self):
+ cursor = mock.Mock()
+ with mock.patch.object(self.adapter.connections, "add_query") as mock_add_query:
+ mock_add_query.return_value = (
+ None,
+ cursor,
+ ) # mock_add_query always returns (None, cursor)
+ with mock.patch.object(self.adapter.connections, "get_response") as mock_get_response:
+ mock_get_response.return_value = None
+ with mock.patch.object(
+ self.adapter.connections, "get_result_from_cursor"
+ ) as mock_get_result_from_cursor:
+ self.adapter.connections.execute(sql="select * from test2", fetch=False)
+ mock_add_query.assert_called_once_with("select * from test2", False)
+ mock_get_result_from_cursor.assert_not_called()
+ mock_get_response.assert_called_once_with(cursor)
+
+ def test_add_query_success(self):
+ cursor = mock.Mock()
+ with mock.patch.object(SQLConnectionManager, "add_query") as mock_add_query:
+ mock_add_query.return_value = None, cursor
+ self.adapter.connections.add_query("select * from test3")
+ mock_add_query.assert_called_once_with(
+ "select * from test3",
+ True,
+ bindings=None,
+ abridge_sql_log=False,
+ retryable_exceptions=(
+ redshift_connector.InterfaceError,
+ redshift_connector.InternalError,
+ ),
+ retry_limit=1,
+ )
+
+ def test_add_query_with_no_cursor(self):
+ with mock.patch.object(
+ self.adapter.connections, "get_thread_connection"
+ ) as mock_get_thread_connection:
+ mock_get_thread_connection.return_value = None
+ with self.assertRaisesRegex(DbtRuntimeError, "Tried to run invalid SQL: on "):
+ self.adapter.connections.add_query(sql="")
+ mock_get_thread_connection.assert_called_once()
diff --git a/dbt-redshift/tests/unit/test_relation.py b/dbt-redshift/tests/unit/test_relation.py
new file mode 100644
index 000000000..0985c62c8
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_relation.py
@@ -0,0 +1,157 @@
+from unittest.mock import Mock
+
+import agate
+import pytest
+
+from dbt.adapters.redshift.relation import RedshiftRelation
+from dbt.adapters.contracts.relation import (
+ RelationType,
+ RelationConfig,
+)
+
+from dbt.adapters.redshift.relation_configs.sort import RedshiftSortStyle
+
+
+def test_renameable_relation():
+ relation = RedshiftRelation.create(
+ database="my_db",
+ schema="my_schema",
+ identifier="my_table",
+ type=RelationType.Table,
+ )
+ assert relation.renameable_relations == frozenset(
+ {
+ RelationType.View,
+ RelationType.Table,
+ }
+ )
+
+
+@pytest.fixture
+def materialized_view_without_sort_key_from_db():
+ materialized_view = agate.Table.from_object(
+ [
+ {
+ "database": "my_db",
+ "schema": "my_schema",
+ "table": "my_table",
+ }
+ ],
+ )
+
+ column_descriptor = agate.Table.from_object([])
+
+ query = agate.Table.from_object(
+ [
+ {
+ "definition": "create materialized view my_view as (select 1 as my_column, 'value' as my_column2)"
+ }
+ ]
+ )
+
+ relation_results = {
+ "materialized_view": materialized_view,
+ "columns": column_descriptor,
+ "query": query,
+ }
+ return relation_results
+
+
+@pytest.fixture
+def materialized_view_without_sort_key_config():
+ relation_config = Mock(spec=RelationConfig)
+
+ relation_config.database = "my_db"
+ relation_config.identifier = "my_table"
+ relation_config.schema = "my_schema"
+ relation_config.config = Mock()
+ relation_config.config.extra = {}
+ relation_config.config.sort = ""
+ relation_config.compiled_code = (
+ "create materialized view my_view as (select 1 as my_column, 'value' as my_column2)"
+ )
+ return relation_config
+
+
+@pytest.fixture
+def materialized_view_multiple_sort_key_from_db(materialized_view_without_sort_key_from_db):
+ materialized_view_without_sort_key_from_db["columns"] = agate.Table.from_object(
+ [
+ {
+ "column": "my_column",
+ "is_dist_key": True,
+ "sort_key_position": 1,
+ },
+ {
+ "column": "my_column2",
+ "is_dist_key": True,
+ "sort_key_position": 2,
+ },
+ ],
+ )
+ return materialized_view_without_sort_key_from_db
+
+
+@pytest.fixture
+def materialized_view_multiple_sort_key_config(materialized_view_without_sort_key_config):
+ materialized_view_without_sort_key_config.config.extra = {
+ "sort_type": RedshiftSortStyle.compound,
+ "sort": ["my_column", "my_column2"],
+ }
+
+ return materialized_view_without_sort_key_config
+
+
+def test_materialized_view_config_changeset_without_sort_key_empty_changes(
+ materialized_view_without_sort_key_from_db,
+ materialized_view_without_sort_key_config,
+):
+ change_set = RedshiftRelation.materialized_view_config_changeset(
+ materialized_view_without_sort_key_from_db,
+ materialized_view_without_sort_key_config,
+ )
+
+ assert change_set is None
+
+
+def test_materialized_view_config_changeset_multiple_sort_key_without_changes(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+):
+
+ change_set = RedshiftRelation.materialized_view_config_changeset(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+ )
+
+ assert change_set is None
+
+
+def test_materialized_view_config_changeset_multiple_sort_key_with_changes(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+):
+ materialized_view_multiple_sort_key_config.config.extra["sort"].append("my_column3")
+
+ change_set = RedshiftRelation.materialized_view_config_changeset(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+ )
+
+ assert change_set is not None
+ assert change_set.sort.context.sortkey == ("my_column", "my_column2", "my_column3")
+
+
+def test_materialized_view_config_changeset_multiple_sort_key_with_changes_in_order_column(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+):
+ materialized_view_multiple_sort_key_config.config.extra["sort"] = ["my_column2", "my_column"]
+
+ change_set = RedshiftRelation.materialized_view_config_changeset(
+ materialized_view_multiple_sort_key_from_db,
+ materialized_view_multiple_sort_key_config,
+ )
+
+ assert change_set is not None
+ assert change_set.sort.context.sortkey == ("my_column2", "my_column")
diff --git a/dbt-redshift/tests/unit/test_ssl_mode.py b/dbt-redshift/tests/unit/test_ssl_mode.py
new file mode 100644
index 000000000..832e0718b
--- /dev/null
+++ b/dbt-redshift/tests/unit/test_ssl_mode.py
@@ -0,0 +1,168 @@
+from multiprocessing import get_context
+from unittest import TestCase, mock
+from unittest.mock import MagicMock
+
+import redshift_connector
+
+from dbt.adapters.redshift import (
+ Plugin as RedshiftPlugin,
+ RedshiftAdapter,
+)
+from dbt.adapters.redshift.connections import RedshiftSSLConfig
+from tests.unit.utils import config_from_parts_or_dicts, inject_adapter
+
+
+DEFAULT_SSL_CONFIG = RedshiftSSLConfig().to_dict()
+
+
+class TestSSLMode(TestCase):
+ def setUp(self):
+ profile_cfg = {
+ "outputs": {
+ "test": {
+ "type": "redshift",
+ "dbname": "redshift",
+ "user": "root",
+ "host": "thishostshouldnotexist.test.us-east-1",
+ "pass": "password",
+ "port": 5439,
+ "schema": "public",
+ }
+ },
+ "target": "test",
+ }
+
+ project_cfg = {
+ "name": "X",
+ "version": "0.1",
+ "profile": "test",
+ "project-root": "/tmp/dbt/does-not-exist",
+ "quoting": {
+ "identifier": False,
+ "schema": True,
+ },
+ "config-version": 2,
+ }
+
+ self.config = config_from_parts_or_dicts(project_cfg, profile_cfg)
+ self._adapter = None
+
+ @property
+ def adapter(self):
+ if self._adapter is None:
+ self._adapter = RedshiftAdapter(self.config, get_context("spawn"))
+ inject_adapter(self._adapter, RedshiftPlugin)
+ return self._adapter
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_disable(self):
+ self.config.credentials.sslmode = "disable"
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ ssl=False,
+ sslmode=None,
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_allow(self):
+ self.config.credentials.sslmode = "allow"
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ ssl=True,
+ sslmode="verify-ca",
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_verify_full(self):
+ self.config.credentials.sslmode = "verify-full"
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ ssl=True,
+ sslmode="verify-full",
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_verify_ca(self):
+ self.config.credentials.sslmode = "verify-ca"
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ ssl=True,
+ sslmode="verify-ca",
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_prefer(self):
+ self.config.credentials.sslmode = "prefer"
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=None,
+ ssl=True,
+ sslmode="verify-ca",
+ )
+
+ @mock.patch("redshift_connector.connect", MagicMock())
+ def test_connection_timeout(self):
+ self.config.credentials = self.config.credentials.replace(connect_timeout=30)
+ connection = self.adapter.acquire_connection("dummy")
+ connection.handle
+ redshift_connector.connect.assert_called_once_with(
+ host="thishostshouldnotexist.test.us-east-1",
+ database="redshift",
+ user="root",
+ password="password",
+ port=5439,
+ auto_create=False,
+ db_groups=[],
+ region=None,
+ timeout=30,
+ **DEFAULT_SSL_CONFIG,
+ )
diff --git a/dbt-redshift/tests/unit/utils.py b/dbt-redshift/tests/unit/utils.py
new file mode 100644
index 000000000..ee580eb9a
--- /dev/null
+++ b/dbt-redshift/tests/unit/utils.py
@@ -0,0 +1,273 @@
+"""Unit test utility functions.
+Note that all imports should be inside the functions to avoid import/mocking
+issues.
+"""
+
+import string
+import os
+from unittest import TestCase, mock
+
+import agate
+from dbt.config.project import PartialProject
+from dbt_common.dataclass_schema import ValidationError
+import pytest
+
+
+def normalize(path):
+ """On windows, neither is enough on its own:
+ >>> normcase('C:\\documents/ALL CAPS/subdir\\..')
+ 'c:\\documents\\all caps\\subdir\\..'
+ >>> normpath('C:\\documents/ALL CAPS/subdir\\..')
+ 'C:\\documents\\ALL CAPS'
+ >>> normpath(normcase('C:\\documents/ALL CAPS/subdir\\..'))
+ 'c:\\documents\\all caps'
+ """
+ return os.path.normcase(os.path.normpath(path))
+
+
+class Obj:
+ which = "blah"
+ single_threaded = False
+
+
+def mock_connection(name, state="open"):
+ conn = mock.MagicMock()
+ conn.name = name
+ conn.state = state
+ return conn
+
+
+def profile_from_dict(profile, profile_name, cli_vars="{}"):
+ from dbt.config import Profile
+ from dbt.config.renderer import ProfileRenderer
+ from dbt.config.utils import parse_cli_vars
+
+ if not isinstance(cli_vars, dict):
+ cli_vars = parse_cli_vars(cli_vars)
+
+ renderer = ProfileRenderer(cli_vars)
+
+ # in order to call dbt's internal profile rendering, we need to set the
+ # flags global. This is a bit of a hack, but it's the best way to do it.
+ from dbt.flags import set_from_args
+ from argparse import Namespace
+
+ set_from_args(Namespace(), None)
+ return Profile.from_raw_profile_info(
+ profile,
+ profile_name,
+ renderer,
+ )
+
+
+def project_from_dict(project, profile, packages=None, selectors=None, cli_vars="{}"):
+ from dbt.config.renderer import DbtProjectYamlRenderer
+ from dbt.config.utils import parse_cli_vars
+
+ if not isinstance(cli_vars, dict):
+ cli_vars = parse_cli_vars(cli_vars)
+
+ renderer = DbtProjectYamlRenderer(profile, cli_vars)
+
+ project_root = project.pop("project-root", os.getcwd())
+
+ partial = PartialProject.from_dicts(
+ project_root=project_root,
+ project_dict=project,
+ packages_dict=packages,
+ selectors_dict=selectors,
+ )
+ return partial.render(renderer)
+
+
+def config_from_parts_or_dicts(project, profile, packages=None, selectors=None, cli_vars="{}"):
+ from dbt.config import Project, Profile, RuntimeConfig
+ from dbt.config.utils import parse_cli_vars
+ from copy import deepcopy
+
+ if not isinstance(cli_vars, dict):
+ cli_vars = parse_cli_vars(cli_vars)
+
+ if isinstance(project, Project):
+ profile_name = project.profile_name
+ else:
+ profile_name = project.get("profile")
+
+ if not isinstance(profile, Profile):
+ profile = profile_from_dict(
+ deepcopy(profile),
+ profile_name,
+ cli_vars,
+ )
+
+ if not isinstance(project, Project):
+ project = project_from_dict(
+ deepcopy(project),
+ profile,
+ packages,
+ selectors,
+ cli_vars,
+ )
+
+ args = Obj()
+ args.vars = cli_vars
+ args.profile_dir = "/dev/null"
+ return RuntimeConfig.from_parts(project=project, profile=profile, args=args)
+
+
+def inject_plugin(plugin):
+ from dbt.adapters.factory import FACTORY
+
+ key = plugin.adapter.type()
+ FACTORY.plugins[key] = plugin
+
+
+def inject_plugin_for(config):
+ # from dbt.adapters.postgres import Plugin, PostgresAdapter
+ from dbt.adapters.factory import FACTORY
+
+ FACTORY.load_plugin(config.credentials.type)
+ adapter = FACTORY.get_adapter(
+ config
+ ) # TODO: there's a get_adapter function in factory.py, but no method on AdapterContainer
+ return adapter
+
+
+def inject_adapter(value, plugin):
+ """Inject the given adapter into the adapter factory, so your hand-crafted
+ artisanal adapter will be available from get_adapter() as if dbt loaded it.
+ """
+ inject_plugin(plugin)
+ from dbt.adapters.factory import FACTORY
+
+ key = value.type()
+ FACTORY.adapters[key] = value
+
+
+def clear_plugin(plugin):
+ from dbt.adapters.factory import FACTORY
+
+ key = plugin.adapter.type()
+ FACTORY.plugins.pop(key, None)
+ FACTORY.adapters.pop(key, None)
+
+
+class ContractTestCase(TestCase):
+ ContractType = None
+
+ def setUp(self):
+ self.maxDiff = None
+ super().setUp()
+
+ def assert_to_dict(self, obj, dct):
+ self.assertEqual(obj.to_dict(omit_none=True), dct)
+
+ def assert_from_dict(self, obj, dct, cls=None):
+ if cls is None:
+ cls = self.ContractType
+ cls.validate(dct)
+ self.assertEqual(cls.from_dict(dct), obj)
+
+ def assert_symmetric(self, obj, dct, cls=None):
+ self.assert_to_dict(obj, dct)
+ self.assert_from_dict(obj, dct, cls)
+
+ def assert_fails_validation(self, dct, cls=None):
+ if cls is None:
+ cls = self.ContractType
+
+ with self.assertRaises(ValidationError):
+ cls.validate(dct)
+ cls.from_dict(dct)
+
+
+def compare_dicts(dict1, dict2):
+ first_set = set(dict1.keys())
+ second_set = set(dict2.keys())
+ print(f"--- Difference between first and second keys: {first_set.difference(second_set)}")
+ print(f"--- Difference between second and first keys: {second_set.difference(first_set)}")
+ common_keys = set(first_set).intersection(set(second_set))
+ found_differences = False
+ for key in common_keys:
+ if dict1[key] != dict2[key]:
+ print(f"--- --- first dict: {key}: {str(dict1[key])}")
+ print(f"--- --- second dict: {key}: {str(dict2[key])}")
+ found_differences = True
+ if found_differences:
+ print("--- Found differences in dictionaries")
+ else:
+ print("--- Found no differences in dictionaries")
+
+
+def assert_from_dict(obj, dct, cls=None):
+ if cls is None:
+ cls = obj.__class__
+ cls.validate(dct)
+ obj_from_dict = cls.from_dict(dct)
+ if hasattr(obj, "created_at"):
+ obj_from_dict.created_at = 1
+ obj.created_at = 1
+ assert obj_from_dict == obj
+
+
+def assert_to_dict(obj, dct):
+ obj_to_dict = obj.to_dict(omit_none=True)
+ if "created_at" in obj_to_dict:
+ obj_to_dict["created_at"] = 1
+ if "created_at" in dct:
+ dct["created_at"] = 1
+ assert obj_to_dict == dct
+
+
+def assert_symmetric(obj, dct, cls=None):
+ assert_to_dict(obj, dct)
+ assert_from_dict(obj, dct, cls)
+
+
+def assert_fails_validation(dct, cls):
+ with pytest.raises(ValidationError):
+ cls.validate(dct)
+ cls.from_dict(dct)
+
+
+class TestAdapterConversions(TestCase):
+ @staticmethod
+ def _get_tester_for(column_type):
+ from dbt_common.clients import agate_helper
+
+ if column_type is agate.TimeDelta: # dbt never makes this!
+ return agate.TimeDelta()
+
+ for instance in agate_helper.DEFAULT_TYPE_TESTER._possible_types:
+ if isinstance(instance, column_type):
+ return instance
+
+ raise ValueError(f"no tester for {column_type}")
+
+ def _make_table_of(self, rows, column_types):
+ column_names = list(string.ascii_letters[: len(rows[0])])
+ if isinstance(column_types, type):
+ column_types = [self._get_tester_for(column_types) for _ in column_names]
+ else:
+ column_types = [self._get_tester_for(typ) for typ in column_types]
+ table = agate.Table(rows, column_names=column_names, column_types=column_types)
+ return table
+
+
+def load_internal_manifest_macros(config, macro_hook=lambda m: None):
+ from dbt.parser.manifest import ManifestLoader
+
+ return ManifestLoader.load_macros(config, macro_hook)
+
+
+def dict_replace(dct, **kwargs):
+ dct = dct.copy()
+ dct.update(kwargs)
+ return dct
+
+
+def replace_config(n, **kwargs):
+ return n.replace(
+ config=n.config.replace(**kwargs),
+ unrendered_config=dict_replace(n.unrendered_config, **kwargs),
+ )