diff --git a/.github/workflows/publish-nix-pgupgrade-scripts.yml b/.github/workflows/publish-nix-pgupgrade-scripts.yml index 5d373ad8f..eb5f7a755 100644 --- a/.github/workflows/publish-nix-pgupgrade-scripts.yml +++ b/.github/workflows/publish-nix-pgupgrade-scripts.yml @@ -72,6 +72,9 @@ jobs: id: process_release_version run: | VERSION=$(grep 'postgres-version' common-nix.vars.pkr.hcl | sed -e 's/postgres-version = "\(.*\)"/\1/g') + if [[ "${{ inputs.postgresVersion }}" != "" ]]; then + VERSION=${{ inputs.postgresVersion }} + fi echo "version=$VERSION" >> "$GITHUB_OUTPUT" - name: Create a tarball containing pg_upgrade scripts diff --git a/.github/workflows/testinfra-nix.yml b/.github/workflows/testinfra-nix.yml index 3835a9a00..fee2fd791 100644 --- a/.github/workflows/testinfra-nix.yml +++ b/.github/workflows/testinfra-nix.yml @@ -65,12 +65,12 @@ jobs: - name: Cleanup resources on build cancellation if: ${{ cancelled() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} - - - name: Cleanup resources on build cancellation + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:packerExecutionId,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids + + - name: Cleanup resources after build if: ${{ always() }} run: | - aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -n 1 -I {} aws ec2 terminate-instances --region ap-southeast-1 --instance-ids {} || true + aws ec2 --region ap-southeast-1 describe-instances --filters "Name=tag:testinfra-run-id,Values=${GITHUB_RUN_ID}" --query "Reservations[].Instances[].InstanceId" --output text | xargs -r aws ec2 terminate-instances --region ap-southeast-1 --instance-ids || true - name: Cleanup AMIs if: always() diff --git a/README.md b/README.md index ca794fc22..074a42ec7 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ Aside from having [ufw](https://help.ubuntu.com/community/UFW),[fail2ban](https: | Goodie | Version | Description | | ------------- | :-------------: | ------------- | | [PgBouncer](https://www.pgbouncer.org/) | [1.16.1](http://www.pgbouncer.org/changelog.html#pgbouncer-116x) | Set up Connection Pooling. | -| [PostgREST](https://postgrest.org/en/stable/) | [v10.1.1](https://github.com/PostgREST/postgrest/releases/tag/v10.1.1) | Instantly transform your database into an RESTful API. | +| [PostgREST](https://postgrest.org/en/stable/) | [v12.2.3](https://github.com/PostgREST/postgrest/releases/tag/v12.2.3) | Instantly transform your database into a RESTful API. | | [WAL-G](https://github.com/wal-g/wal-g#wal-g) | [v2.0.1](https://github.com/wal-g/wal-g/releases/tag/v2.0.1) | Tool for physical database backup and recovery. 
| ## Install diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh index e978b2a54..a11f8ff32 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/common.sh @@ -356,7 +356,8 @@ begin end , case when rec.grantee = 'postgres'::regrole then 'supabase_admin' when rec.grantee = 'supabase_admin'::regrole then 'postgres' - else rec.grantee::regrole + when rec.grantee = 0 then 'public' + else rec.grantee::regrole::text end )); end if; @@ -382,7 +383,7 @@ begin when obj->>'objtype' = 'T' then 'types' when obj->>'objtype' = 'n' then 'schemas' end - , rec.grantee::regrole + , case when rec.grantee = 0 then 'public' else rec.grantee::regrole::text end , case when rec.is_grantable then 'with grant option' else '' end )); end if; @@ -529,7 +530,14 @@ $$; alter database postgres connection limit -1; -- #incident-2024-09-12-project-upgrades-are-temporarily-disabled -grant pg_read_all_data, pg_signal_backend to postgres; +do $$ +begin + if exists (select from pg_authid where rolname = 'pg_read_all_data') then + execute('grant pg_read_all_data to postgres'); + end if; +end +$$; +grant pg_signal_backend to postgres; set session authorization supabase_admin; drop role supabase_tmp; diff --git a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh index 9443d2acb..235b4f4c1 100755 --- a/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh +++ b/ansible/files/admin_api_scripts/pg_upgrade_scripts/initiate.sh @@ -11,6 +11,7 @@ # them depending on regtypes referencing system OIDs or outdated library files. EXTENSIONS_TO_DISABLE=( "pg_graphql" + "pg_stat_monitor" ) PG14_EXTENSIONS_TO_DISABLE=( @@ -119,20 +120,22 @@ cleanup() { CI_start_postgres fi + retry 8 pg_isready -h localhost -U supabase_admin + echo "Re-enabling extensions" if [ -f $POST_UPGRADE_EXTENSION_SCRIPT ]; then - run_sql -f $POST_UPGRADE_EXTENSION_SCRIPT + retry 5 run_sql -f $POST_UPGRADE_EXTENSION_SCRIPT fi echo "Removing SUPERUSER grant from postgres" - run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" + retry 5 run_sql -c "ALTER USER postgres WITH NOSUPERUSER;" echo "Resetting postgres database connection limit" - run_sql -c "ALTER DATABASE postgres CONNECTION LIMIT -1;" + retry 5 run_sql -c "ALTER DATABASE postgres CONNECTION LIMIT -1;" if [ -z "$IS_CI" ] && [ -z "$IS_LOCAL_UPGRADE" ]; then echo "Unmounting data disk from ${MOUNT_POINT}" - umount $MOUNT_POINT + retry 3 umount $MOUNT_POINT fi echo "$UPGRADE_STATUS" > /tmp/pg-upgrade-status @@ -208,7 +211,7 @@ function patch_wrappers { WRAPPERS_LIB_PATH_DIR=$(dirname "$WRAPPERS_LIB_PATH") if [ "$WRAPPERS_LIB_PATH" != "$WRAPPERS_LIB_PATH_DIR/${OLD_LIB_FILE_NAME}" ]; then echo "Copying $WRAPPERS_LIB_PATH to $WRAPPERS_LIB_PATH_DIR/${OLD_LIB_FILE_NAME}" - cp "$WRAPPERS_LIB_PATH" "$WRAPPERS_LIB_PATH_DIR/${OLD_LIB_FILE_NAME}" + cp "$WRAPPERS_LIB_PATH" "$WRAPPERS_LIB_PATH_DIR/${OLD_LIB_FILE_NAME}" || true fi fi done @@ -222,7 +225,7 @@ function patch_wrappers { LIB_FILE_NAME=$(basename "$OLD_WRAPPER_LIB_PATH") if [ "$WRAPPERS_LIB_PATH" != "$PGLIBNEW/${LIB_FILE_NAME}" ]; then echo "Copying $WRAPPERS_LIB_PATH to $PGLIBNEW/${LIB_FILE_NAME}" - cp "$WRAPPERS_LIB_PATH" "$PGLIBNEW/${LIB_FILE_NAME}" + cp "$WRAPPERS_LIB_PATH" "$PGLIBNEW/${LIB_FILE_NAME}" || true fi fi fi diff --git a/ansible/files/adminapi.sudoers.conf b/ansible/files/adminapi.sudoers.conf index 
eada0a94b..ae5537715 100644 --- a/ansible/files/adminapi.sudoers.conf +++ b/ansible/files/adminapi.sudoers.conf @@ -17,6 +17,8 @@ Cmnd_Alias PGBOUNCER = /bin/systemctl start pgbouncer.service, /bin/systemctl st %adminapi ALL= NOPASSWD: /usr/bin/systemctl restart postgresql.service %adminapi ALL= NOPASSWD: /usr/bin/systemctl show -p NRestarts postgresql.service %adminapi ALL= NOPASSWD: /usr/bin/systemctl restart adminapi.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl is-active commence-backup.service +%adminapi ALL= NOPASSWD: /usr/bin/systemctl start commence-backup.service %adminapi ALL= NOPASSWD: /bin/systemctl daemon-reload %adminapi ALL= NOPASSWD: /bin/systemctl restart services.slice %adminapi ALL= NOPASSWD: /usr/sbin/nft -f /etc/nftables/supabase_managed.conf diff --git a/ansible/files/commence-backup.service.j2 b/ansible/files/commence-backup.service.j2 new file mode 100644 index 000000000..9d4ad0c68 --- /dev/null +++ b/ansible/files/commence-backup.service.j2 @@ -0,0 +1,12 @@ +[Unit] +Description=Async commence physical backup + +[Service] +Type=simple +User=adminapi +ExecStart=/usr/bin/admin-mgr commence-backup --run-as-service true +Restart=no +OOMScoreAdjust=-1000 + +[Install] +WantedBy=multi-user.target diff --git a/ansible/files/envoy_config/lds.yaml b/ansible/files/envoy_config/lds.yaml index 60a38f65d..f2c719d20 100644 --- a/ansible/files/envoy_config/lds.yaml +++ b/ansible/files/envoy_config/lds.yaml @@ -254,8 +254,10 @@ resources: type.googleapis.com/envoy.extensions.filters.http.rbac.v3.RBACPerRoute - match: safe_regex: + google_re2: + max_program_size: 150 regex: >- - /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)) + /auth/v1/(verify|callback|authorize|sso/saml/(acs|metadata|slo)|\.well-known/(openid-configuration|jwks\.json)) route: cluster: gotrue regex_rewrite: diff --git a/ansible/files/postgresql_config/supautils.conf.j2 b/ansible/files/postgresql_config/supautils.conf.j2 index 2ab4257fd..aa01cd8aa 100644 --- a/ansible/files/postgresql_config/supautils.conf.j2 +++ b/ansible/files/postgresql_config/supautils.conf.j2 @@ -3,7 +3,7 @@ supautils.policy_grants = '{"postgres":["auth.audit_log_entries","auth.identitie # full list: address_standardizer, address_standardizer_data_us, adminpack, amcheck, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, file_fdw, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intagg, intarray, isn, lo, ltree, moddatetime, old_snapshot, orioledb, pageinspect, pg_buffercache, pg_cron, pg_freespacemap, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_prewarm, pg_repack, pg_stat_monitor, pg_stat_statements, pg_surgery, pg_tle, pg_trgm, pg_visibility, pg_walinspect, pgaudit, pgcrypto, pgjwt, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgsodium, pgstattuple, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, supabase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers, xml2 # omitted because may be unsafe: adminpack, amcheck, file_fdw, lo, old_snapshot, pageinspect, pg_buffercache, pg_freespacemap, pg_surgery, pg_visibility # omitted because deprecated: intagg, xml2 -supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, 
fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pg_prewarm, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgstattuple, pgsodium, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, supabase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' +supautils.privileged_extensions = 'address_standardizer, address_standardizer_data_us, autoinc, bloom, btree_gin, btree_gist, citext, cube, dblink, dict_int, dict_xsyn, earthdistance, fuzzystrmatch, hstore, http, hypopg, index_advisor, insert_username, intarray, isn, ltree, moddatetime, orioledb, pg_cron, pg_graphql, pg_hashids, pg_jsonschema, pg_net, pg_partman, pg_repack, pg_stat_monitor, pg_stat_statements, pg_tle, pg_trgm, pg_walinspect, pgaudit, pgcrypto, pgjwt, pg_prewarm, pgmq, pgroonga, pgroonga_database, pgrouting, pgrowlocks, pgstattuple, pgsodium, pgtap, plcoffee, pljava, plls, plpgsql, plpgsql_check, plv8, postgis, postgis_raster, postgis_sfcgal, postgis_tiger_geocoder, postgis_topology, postgres_fdw, refint, rum, seg, sslinfo, supabase_vault, supautils, tablefunc, tcn, timescaledb, tsm_system_rows, tsm_system_time, unaccent, uuid-ossp, vector, wrappers' supautils.privileged_extensions_custom_scripts_path = '/etc/postgresql-custom/extension-custom-scripts' supautils.privileged_extensions_superuser = 'supabase_admin' supautils.privileged_role = 'postgres' diff --git a/ansible/tasks/internal/admin-api.yml b/ansible/tasks/internal/admin-api.yml index cea0109fd..da93fef62 100644 --- a/ansible/tasks/internal/admin-api.yml +++ b/ansible/tasks/internal/admin-api.yml @@ -79,6 +79,11 @@ src: files/adminapi.service.j2 dest: /etc/systemd/system/adminapi.service +- name: adminapi - create service file for commence backup process + template: + src: files/commence-backup.service.j2 + dest: /etc/systemd/system/commence-backup.service + - name: UFW - Allow connections to adminapi ports ufw: rule: allow diff --git a/ansible/vars.yml b/ansible/vars.yml index 02014e66a..3c5ef2172 100644 --- a/ansible/vars.yml +++ b/ansible/vars.yml @@ -17,8 +17,8 @@ postgrest_release: "12.2.3" postgrest_arm_release_checksum: sha1:fbfd6613d711ce1afa25c42d5df8f1b017f396f9 postgrest_x86_release_checksum: sha1:61c513f91a8931be4062587b9d4a18b42acf5c05 -gotrue_release: 2.160.0 -gotrue_release_checksum: sha1:391b3f174e3d82cc806b2ba8d65253b7b2c874a6 +gotrue_release: 2.162.0 +gotrue_release_checksum: sha1:855b23bd002577290c7d42d7042ac0f5316984b1 aws_cli_release: "2.2.7" @@ -50,8 +50,8 @@ postgres_exporter_release_checksum: arm64: sha256:29ba62d538b92d39952afe12ee2e1f4401250d678ff4b354ff2752f4321c87a0 amd64: sha256:cb89fc5bf4485fb554e0d640d9684fae143a4b2d5fa443009bd29c59f9129e84 -adminapi_release: 0.68.0 -adminmgr_release: 0.22.1 +adminapi_release: 0.71.1 +adminmgr_release: 0.24.0 # Postgres Extensions postgis_release: "3.3.2" diff --git a/common-nix.vars.pkr.hcl b/common-nix.vars.pkr.hcl index e80a744d9..8419992a8 100644 --- a/common-nix.vars.pkr.hcl +++ b/common-nix.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.6.1.123" +postgres-version = "15.8.1.003" diff --git a/common.vars.pkr.hcl b/common.vars.pkr.hcl index 3c999dccd..fa1387690 100644 --- 
a/common.vars.pkr.hcl +++ b/common.vars.pkr.hcl @@ -1 +1 @@ -postgres-version = "15.1.1.93" +postgres-version = "15.1.1.94" diff --git a/docker/all-in-one/entrypoint.sh b/docker/all-in-one/entrypoint.sh index 97e68a6aa..0aff0b1ec 100755 --- a/docker/all-in-one/entrypoint.sh +++ b/docker/all-in-one/entrypoint.sh @@ -101,7 +101,7 @@ function setup_postgres { mv /etc/postgresql.schema.sql /docker-entrypoint-initdb.d/migrations/99-schema.sql tar -xzvf "$INIT_PAYLOAD_PATH" -C / ./etc/postgresql-custom/pgsodium_root.key - echo "include = '/etc/postgresql-custom/postgresql-platform-defaults.conf'" >>$PG_CONF + sed -i "/# Automatically generated optimizations/i # Supabase Platform Defaults\ninclude = '/etc/postgresql-custom/platform-defaults.conf'\n" $PG_CONF # TODO (darora): walg enablement is temporarily performed here until changes from https://github.com/supabase/postgres/pull/639 get picked up # other things will still be needed in the future (auth_delay config) diff --git a/docker/all-in-one/etc/adminapi/adminapi.yaml b/docker/all-in-one/etc/adminapi/adminapi.yaml index 6eaf643e1..7784fd7d4 100644 --- a/docker/all-in-one/etc/adminapi/adminapi.yaml +++ b/docker/all-in-one/etc/adminapi/adminapi.yaml @@ -42,6 +42,13 @@ upstream_metrics_sources: value: {{ .ProjectRef }} - name: service_type value: gotrue + - name: postgrest + url: "http://localhost:3001/metrics" + labels_to_attach: + - name: supabase_project_ref + value: {{ .ProjectRef }} + - name: service_type + value: postgrest monitoring: disk_usage: enabled: true diff --git a/docker/all-in-one/etc/kong/kong.yml b/docker/all-in-one/etc/kong/kong.yml index 53ad4baef..9952f9861 100644 --- a/docker/all-in-one/etc/kong/kong.yml +++ b/docker/all-in-one/etc/kong/kong.yml @@ -27,6 +27,12 @@ services: routes: [{ name: auth-v1-open-saml, strip_path: true, paths: [/auth/v1/sso/saml/] }], plugins: [{ name: cors }], } + - { + name: auth-v1-open-well-known, + url: 'http://localhost:9999/.well-known/', + routes: [{ name: auth-v1-open-well-known, strip_path: true, paths: [/auth/v1/.well-known/] }], + plugins: [{ name: cors }], + } - { name: auth-v1, url: 'http://localhost:9999/', diff --git a/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf b/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf index e62a1de83..51d34a13a 100644 --- a/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf +++ b/docker/all-in-one/etc/postgresql-custom/postgresql-platform-defaults.conf @@ -1,8 +1,6 @@ # these get imported _after_ the user specified overrides row_security = on wal_level = logical -max_wal_senders = 10 -max_replication_slots = 5 log_connections = on statement_timeout = 120000 jit = off diff --git a/flake.nix b/flake.nix index 1f9d5f64d..82409b185 100644 --- a/flake.nix +++ b/flake.nix @@ -38,7 +38,6 @@ # want to have an arbitrary order, since it might matter. being # explicit is better. 
(import ./nix/overlays/cargo-pgrx.nix) - (import ./nix/overlays/gdal-small.nix) (import ./nix/overlays/psql_16-oriole.nix) ]; @@ -68,15 +67,9 @@ }; }) (import ./nix/overlays/cargo-pgrx-0-11-3.nix) - # (import ./nix/overlays/postgis.nix) - #(import ./nix/overlays/gdal-small.nix) - ]; }; - postgresql_15 = pkgs.postgresql.postgresql_15; - postgresql = pkgs.postgresql.postgresql_15; sfcgal = pkgs.callPackage ./nix/ext/sfcgal/sfcgal.nix { }; - pg_regress = pkgs.callPackage ./nix/ext/pg_regress.nix { inherit postgresql; }; supabase-groonga = pkgs.callPackage ./nix/supabase-groonga.nix { }; mecab-naist-jdic = pkgs.callPackage ./nix/ext/mecab-naist-jdic/default.nix { }; # Our list of PostgreSQL extensions which come from upstream Nixpkgs. @@ -110,6 +103,7 @@ ./nix/ext/pgroonga.nix ./nix/ext/index_advisor.nix ./nix/ext/wal2json.nix + ./nix/ext/pgmq.nix ./nix/ext/pg_repack.nix ./nix/ext/pg-safeupdate.nix ./nix/ext/plpgsql-check.nix @@ -126,6 +120,7 @@ ./nix/ext/pg_hashids.nix ./nix/ext/pgsodium.nix ./nix/ext/pg_graphql.nix + ./nix/ext/pg_partman.nix ./nix/ext/pg_stat_monitor.nix ./nix/ext/pg_jsonschema.nix ./nix/ext/pgvector.nix @@ -284,22 +279,49 @@ # be used with 'nix build'. Don't use the names listed below; check the # name in 'nix flake show' in order to make sure exactly what name you # want. - basePackages = { + basePackages = let + # Function to get the PostgreSQL version from the attribute name + getVersion = name: + let + match = builtins.match "psql_([0-9]+)" name; + in + if match == null then null else builtins.head match; + + # Define the available PostgreSQL versions + postgresVersions = { + psql_15 = makePostgres "15"; + # Uncomment the line below to enable PostgreSQL 16 + # psql_16 = makePostgres "16"; + # psql_orioledb_16 = makeOrioleDbPostgres "16_23" postgresql_orioledb_16; + }; + + # Find the active PostgreSQL version + activeVersion = getVersion (builtins.head (builtins.attrNames postgresVersions)); + + # Function to create the pg_regress package + makePgRegress = version: + let + postgresqlPackage = pkgs."postgresql_${version}"; + in + pkgs.callPackage ./nix/ext/pg_regress.nix { + postgresql = postgresqlPackage; + }; + postgresql_15 = getPostgresqlPackage "15"; + in + postgresVersions //{ supabase-groonga = supabase-groonga; # PostgreSQL versions. psql_15 = makePostgres "15"; - #psql_16 = makePostgres "16"; #psql_orioledb_16 = makeOrioleDbPostgres "16_23" postgresql_orioledb_16; sfcgal = sfcgal; - pg_regress = pg_regress; pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; - postgresql_15 = pkgs.postgresql_15; - + inherit postgresql_15; + postgresql_15_debug = if pkgs.stdenv.isLinux then postgresql_15.debug else null; postgresql_15_src = pkgs.stdenv.mkDerivation { pname = "postgresql-15-src"; - version = pkgs.postgresql_15.version; + version = postgresql_15.version; - src = pkgs.postgresql_15.src; + src = postgresql_15.src; nativeBuildInputs = [ pkgs.bzip2 ]; @@ -319,6 +341,7 @@ }; mecab_naist_jdic = mecab-naist-jdic; supabase_groonga = supabase-groonga; + pg_regress = makePgRegress activeVersion; # Start a version of the server. 
start-server = let @@ -455,6 +478,7 @@ sqlTests = ./nix/tests/smoke; pg_prove = pkgs.perlPackages.TAPParserSourceHandlerpgTAP; supabase-groonga = pkgs.callPackage ./nix/supabase-groonga.nix { }; + pg_regress = basePackages.pg_regress; in pkgs.runCommand "postgres-${pgpkg.version}-check-harness" { diff --git a/migrations/tests/extensions/01-postgis.sql b/migrations/tests/extensions/01-postgis.sql index e843a7bfd..23fab501c 100644 --- a/migrations/tests/extensions/01-postgis.sql +++ b/migrations/tests/extensions/01-postgis.sql @@ -19,7 +19,7 @@ grant all privileges on all sequences in schema tiger, tiger_data to postgres wi alter default privileges in schema tiger, tiger_data grant all on tables to postgres with grant option; alter default privileges in schema tiger, tiger_data grant all on routines to postgres with grant option; alter default privileges in schema tiger, tiger_data grant all on sequences to postgres with grant option; - +SET search_path TO extensions, public, tiger, tiger_data; -- postgres role should have access set local role postgres; select tiger.pprint_addy(tiger.pagc_normalize_address('710 E Ben White Blvd, Austin, TX 78704')); diff --git a/nix/ext/0001-build-Allow-using-V8-from-system.patch b/nix/ext/0001-build-Allow-using-V8-from-system.patch index eb5e2964f..ab2c6f06c 100644 --- a/nix/ext/0001-build-Allow-using-V8-from-system.patch +++ b/nix/ext/0001-build-Allow-using-V8-from-system.patch @@ -5,7 +5,7 @@ index 38879cc..6e78eeb 100644 @@ -20,6 +20,7 @@ OBJS = $(SRCS:.cc=.o) MODULE_big = plv8-$(PLV8_VERSION) EXTENSION = plv8 - PLV8_DATA = plv8.control plv8--$(PLV8_VERSION).sql $(wildcard upgrade/*.sql) + PLV8_DATA = plv8.control plv8--$(PLV8_VERSION).sql +USE_SYSTEM_V8 = 0 diff --git a/nix/ext/gdal.nix b/nix/ext/gdal.nix new file mode 100644 index 000000000..83924d9ff --- /dev/null +++ b/nix/ext/gdal.nix @@ -0,0 +1,69 @@ +{ lib +, stdenv +, fetchFromGitHub +, cmake +, pkg-config +, curl +, expat +, libgeotiff +, geos +, json_c +, libxml2 +, postgresql +, proj +, sqlite +, libtiff +, zlib +}: + +stdenv.mkDerivation rec { + pname = "gdal"; + version = "3.8.5"; + + src = fetchFromGitHub { + owner = "OSGeo"; + repo = "gdal"; + rev = "v${version}"; + hash = "sha256-Z+mYlyOX9vJ772qwZMQfCbD/V7RL6+9JLHTzoZ55ot0="; + }; + + nativeBuildInputs = [ + cmake + pkg-config + ]; + + buildInputs = [ + curl + expat + libgeotiff + geos + json_c + libxml2 + postgresql + proj + sqlite + libtiff + zlib + ]; + + cmakeFlags = [ + "-DGDAL_USE_INTERNAL_LIBS=OFF" + "-DGEOTIFF_INCLUDE_DIR=${lib.getDev libgeotiff}/include" + "-DGEOTIFF_LIBRARY_RELEASE=${lib.getLib libgeotiff}/lib/libgeotiff${stdenv.hostPlatform.extensions.sharedLibrary}" + "-DBUILD_PYTHON_BINDINGS=OFF" + ] ++ lib.optionals (!stdenv.isDarwin) [ + "-DCMAKE_SKIP_BUILD_RPATH=ON" + ] ++ lib.optionals stdenv.isDarwin [ + "-DCMAKE_BUILD_WITH_INSTALL_NAME_DIR=ON" + ]; + + enableParallelBuilding = true; + + meta = with lib; { + description = "Translator library for raster geospatial data formats (PostGIS-focused build)"; + homepage = "https://www.gdal.org/"; + license = licenses.mit; + maintainers = with maintainers; teams.geospatial.members ++ [ marcweber dotlambda ]; + platforms = platforms.unix; + }; +} diff --git a/nix/ext/pg_partman.nix b/nix/ext/pg_partman.nix new file mode 100644 index 000000000..1ece63386 --- /dev/null +++ b/nix/ext/pg_partman.nix @@ -0,0 +1,34 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pg_partman"; + version = "5.1.0"; + + buildInputs = [ postgresql ]; + + src = 
fetchFromGitHub { + owner = "pgpartman"; + repo = pname; + rev = "refs/tags/v${version}"; + sha256 = "sha256-GrVOJ5ywZMyqyDroYDLdKkXDdIJSDGhDfveO/ZvrmYs="; + }; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + cp src/*${postgresql.dlSuffix} $out/lib + cp updates/* $out/share/postgresql/extension + cp -r sql/* $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "Partition management extension for PostgreSQL"; + homepage = "https://github.com/pgpartman/pg_partman"; + changelog = "https://github.com/pgpartman/pg_partman/blob/v${version}/CHANGELOG.md"; + maintainers = with maintainers; [ samrose ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + broken = versionOlder postgresql.version "14"; + }; +} diff --git a/nix/ext/pgaudit.nix b/nix/ext/pgaudit.nix index c3df061ff..f6c5d8bb0 100644 --- a/nix/ext/pgaudit.nix +++ b/nix/ext/pgaudit.nix @@ -1,14 +1,26 @@ { lib, stdenv, fetchFromGitHub, libkrb5, openssl, postgresql }: - -stdenv.mkDerivation rec { +#adapted from https://github.com/NixOS/nixpkgs/blob/master/pkgs/servers/sql/postgresql/ext/pgaudit.nix +let + source = { + "16" = { + version = "16.0"; + hash = "sha256-8+tGOl1U5y9Zgu+9O5UDDE4bec4B0JC/BQ6GLhHzQzc="; + }; + "15" = { + version = "1.7.0"; + hash = "sha256-8pShPr4HJaJQPjW1iPJIpj3CutTx8Tgr+rOqoXtgCcw="; + }; + }.${lib.versions.major postgresql.version} or (throw "Source for pgaudit is not available for ${postgresql.version}"); +in +stdenv.mkDerivation { pname = "pgaudit"; - version = "1.7.0"; + inherit (source) version; src = fetchFromGitHub { owner = "pgaudit"; repo = "pgaudit"; - rev = "${version}"; - hash = "sha256-8pShPr4HJaJQPjW1iPJIpj3CutTx8Tgr+rOqoXtgCcw="; + rev = source.version; + hash = source.hash; }; buildInputs = [ libkrb5 openssl postgresql ]; @@ -25,7 +37,7 @@ stdenv.mkDerivation rec { description = "Open Source PostgreSQL Audit Logging"; homepage = "https://github.com/pgaudit/pgaudit"; changelog = "https://github.com/pgaudit/pgaudit/releases/tag/${source.version}"; - maintainers = with maintainers; [ idontgetoutmuch ]; + maintainers = with maintainers; [ samrose ]; platforms = postgresql.meta.platforms; license = licenses.postgresql; }; diff --git a/nix/ext/pgmq.nix b/nix/ext/pgmq.nix new file mode 100644 index 000000000..97a3c27e3 --- /dev/null +++ b/nix/ext/pgmq.nix @@ -0,0 +1,33 @@ +{ lib, stdenv, fetchFromGitHub, postgresql }: + +stdenv.mkDerivation rec { + pname = "pgmq"; + version = "1.4.4"; + buildInputs = [ postgresql ]; + src = fetchFromGitHub { + owner = "tembo-io"; + repo = pname; + rev = "v${version}"; + hash = "sha256-z+8/BqIlHwlMnuIzMz6eylmYbSmhtsNt7TJf/CxbdVw="; + }; + + buildPhase = '' + cd pgmq-extension + ''; + + installPhase = '' + mkdir -p $out/{lib,share/postgresql/extension} + + mv sql/pgmq.sql $out/share/postgresql/extension/pgmq--${version}.sql + cp sql/*.sql $out/share/postgresql/extension + cp *.control $out/share/postgresql/extension + ''; + + meta = with lib; { + description = "A lightweight message queue. 
Like AWS SQS and RSMQ but on Postgres."; + homepage = "https://github.com/tembo-io/pgmq"; + maintainers = with maintainers; [ olirice ]; + platforms = postgresql.meta.platforms; + license = licenses.postgresql; + }; +} diff --git a/nix/ext/plv8.nix b/nix/ext/plv8.nix index ef476f0ab..5cb4fb67e 100644 --- a/nix/ext/plv8.nix +++ b/nix/ext/plv8.nix @@ -4,28 +4,32 @@ , v8 , perl , postgresql - # For test +# For passthru test on various systems, and local development on macOS +# note we are not currently using passthru tests but retaining for possible contrib +# to nixpkgs , runCommand , coreutils , gnugrep , clang -, patchelf , xcbuild , darwin +, patchelf }: stdenv.mkDerivation (finalAttrs: { pname = "plv8"; - version = "3.1.5"; + version = "3.1.10"; src = fetchFromGitHub { owner = "plv8"; repo = "plv8"; rev = "v${finalAttrs.version}"; - hash = "sha256-LodC2eQJSm5fLckrjm2RuejZhmOyQMJTv9b0iPCnzKQ="; + hash = "sha256-g1A/XPC0dX2360Gzvmo9/FSQnM6Wt2K4eR0pH0p9fz4="; }; patches = [ + # Allow building with system v8. + # https://github.com/plv8/plv8/pull/505 (rejected) ./0001-build-Allow-using-V8-from-system.patch ]; @@ -37,9 +41,7 @@ stdenv.mkDerivation (finalAttrs: { ]; buildInputs = [ - (v8.overrideAttrs (oldAttrs: { - version = "9.7.106.18"; - })) + v8 postgresql ] ++ lib.optionals stdenv.isDarwin [ darwin.apple_sdk.frameworks.CoreFoundation @@ -49,15 +51,16 @@ stdenv.mkDerivation (finalAttrs: { buildFlags = [ "all" ]; makeFlags = [ + # Nixpkgs builds a v8 monolith instead of separate v8_libplatform. "USE_SYSTEM_V8=1" "V8_OUTDIR=${v8}/lib" - "PG_CONFIG=${postgresql}/bin/pg_config" + "PG_CONFIG=${postgresql}/bin/pg_config" ] ++ lib.optionals stdenv.isDarwin [ "CC=${clang}/bin/clang" "CXX=${clang}/bin/clang++" "SHLIB_LINK=-L${v8}/lib -lv8_monolith -Wl,-rpath,${v8}/lib" ] ++ lib.optionals (!stdenv.isDarwin) [ - "SHLIB_LINK=-L${v8}/lib -lv8_monolith -Wl,-rpath,${v8}/lib" + "SHLIB_LINK=-lv8" ]; NIX_LDFLAGS = (lib.optionals stdenv.isDarwin [ @@ -73,45 +76,31 @@ stdenv.mkDerivation (finalAttrs: { "-framework" "Kerberos" "-undefined" "dynamic_lookup" "-flat_namespace" - ]) ++ (lib.optionals (!stdenv.isDarwin) [ - "-L${postgresql}/lib" - "-L${v8}/lib" - "-lv8_monolith" - "-lpq" - "-lpgcommon" - "-lpgport" - ]); - - NIX_CFLAGS_COMPILE = [ - "-I${v8}/include" - "-I${postgresql}/include" - "-I${postgresql}/include/server" - "-I${postgresql}/include/internal" - ]; + ]); installFlags = [ + # PGXS only supports installing to the postgresql prefix, so we need to redirect this "DESTDIR=${placeholder "out"}" ]; + # No configure script. dontConfigure = true; postPatch = '' patchShebangs ./generate_upgrade.sh substituteInPlace generate_upgrade.sh \ --replace " 2.3.10 " " 2.3.10 2.3.11 2.3.12 2.3.13 2.3.14 2.3.15 " - + ${lib.optionalString stdenv.isDarwin '' # Replace g++ with clang++ in Makefile sed -i 's/g++/clang++/g' Makefile ''} ''; - preBuild = lib.optionalString stdenv.isDarwin '' - export CC=${clang}/bin/clang - export CXX=${clang}/bin/clang++ - ''; - postInstall = '' + # Move the redirected install to the proper directory. + # There appear to be no references to the install directories + # so changing them does not cause issues. 
mv "$out/nix/store"/*/* "$out" rmdir "$out/nix/store"/* "$out/nix/store" "$out/nix" @@ -133,9 +122,8 @@ stdenv.mkDerivation (finalAttrs: { postgresqlWithSelf = postgresql.withPackages (_: [ finalAttrs.finalPackage ]); - in - { - smoke = runCommand "plv8-smoke-test" { } '' + in { + smoke = runCommand "plv8-smoke-test" {} '' export PATH=${lib.makeBinPath [ postgresqlWithSelf coreutils @@ -198,6 +186,5 @@ stdenv.mkDerivation (finalAttrs: { maintainers = with maintainers; [ samrose ]; platforms = [ "x86_64-linux" "aarch64-linux" "aarch64-darwin" ]; license = licenses.postgresql; - #broken = postgresql.jitSupport; }; -}) +}) \ No newline at end of file diff --git a/nix/ext/postgis.nix b/nix/ext/postgis.nix index e0b6dfbeb..bb9a6162a 100644 --- a/nix/ext/postgis.nix +++ b/nix/ext/postgis.nix @@ -5,7 +5,6 @@ , postgresql , geos , proj -, gdalMinimal , json_c , pkg-config , file @@ -17,18 +16,18 @@ }: let - gdal = gdalMinimal; sfcgal = callPackage ./sfcgal/sfcgal.nix { }; + gdal = callPackage ./gdal.nix { inherit postgresql; }; in stdenv.mkDerivation rec { pname = "postgis"; - version = "3.3.2"; + version = "3.3.7"; outputs = [ "out" "doc" ]; src = fetchurl { url = "https://download.osgeo.org/postgis/source/postgis-${version}.tar.gz"; - sha256 = "sha256-miohnaAFoXMKOdGVmhx87GGbHvsAm2W+gP/CW60pkGg="; + sha256 = "sha256-UHJKDd5JrcJT5Z4CTYsY/va+ToU0GUPG1eHhuXTkP84="; }; buildInputs = [ libxml2 postgresql geos proj gdal json_c protobufc pcre2.dev sfcgal ] diff --git a/nix/ext/timescaledb.nix b/nix/ext/timescaledb.nix index b4c1563bf..1c87916be 100644 --- a/nix/ext/timescaledb.nix +++ b/nix/ext/timescaledb.nix @@ -1,7 +1,7 @@ -{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5, enableUnfree ? true }: +{ lib, stdenv, fetchFromGitHub, cmake, postgresql, openssl, libkrb5 }: stdenv.mkDerivation rec { - pname = "timescaledb${lib.optionalString (!enableUnfree) "-apache"}"; + pname = "timescaledb-apache"; version = "2.16.1"; nativeBuildInputs = [ cmake ]; @@ -14,8 +14,7 @@ stdenv.mkDerivation rec { hash = "sha256-sLxWdBmih9mgiO51zLLxn9uwJVYc5JVHJjSWoADoJ+w="; }; - cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" ] - ++ lib.optionals (!enableUnfree) [ "-DAPACHE_ONLY=ON" ] + cmakeFlags = [ "-DSEND_TELEMETRY_DEFAULT=OFF" "-DREGRESS_CHECKS=OFF" "-DTAP_CHECKS=OFF" "-DAPACHE_ONLY=1" ] ++ lib.optionals stdenv.isDarwin [ "-DLINTER=OFF" ]; # Fix the install phase which tries to install into the pgsql extension dir, @@ -38,7 +37,7 @@ stdenv.mkDerivation rec { changelog = "https://github.com/timescale/timescaledb/blob/${version}/CHANGELOG.md"; maintainers = with maintainers; [ samrose ]; platforms = postgresql.meta.platforms; - license = with licenses; if enableUnfree then tsl else asl20; + license = licenses.asl20; broken = versionOlder postgresql.version "13"; }; } diff --git a/nix/overlays/gdal-small.nix b/nix/overlays/gdal-small.nix deleted file mode 100644 index 18be8a526..000000000 --- a/nix/overlays/gdal-small.nix +++ /dev/null @@ -1,14 +0,0 @@ -final: prev: { - # override the version of gdal used with postgis with the small version. - # significantly reduces overall closure size - gdal = prev.gdalMinimal.override { - /* other features can be enabled, reference: - https://github.com/NixOS/nixpkgs/blob/master/pkgs/development/libraries/gdal/default.nix - */ - - # useHDF = true; - # useArrow = true; - # useLibHEIF = true; - # ... 
- }; -} diff --git a/nix/postgresql/15.nix b/nix/postgresql/15.nix index 00dfc0cbb..63f492840 100644 --- a/nix/postgresql/15.nix +++ b/nix/postgresql/15.nix @@ -1,4 +1,4 @@ import ./generic.nix { - version = "15.6"; - hash = "sha256-hFUUbtnGnJOlfelUrq0DAsr60DXCskIXXWqh4X68svs="; + version = "15.8"; + hash = "sha256-RANRX5pp7rPv68mPMLjGlhIr/fiV6Ss7I/W452nty2o="; } diff --git a/nix/tests/expected/extensions_sql_interface.out b/nix/tests/expected/extensions_sql_interface.out index ab43f54e6..5714fbcec 100644 --- a/nix/tests/expected/extensions_sql_interface.out +++ b/nix/tests/expected/extensions_sql_interface.out @@ -76,6 +76,7 @@ order by pg_hashids | t pg_jsonschema | f pg_net | f + pg_partman | f pg_prewarm | t pg_repack | f pg_stat_monitor | t @@ -88,6 +89,7 @@ order by pgaudit | t pgcrypto | t pgjwt | f + pgmq | f pgroonga | f pgroonga_database | f pgrouting | t @@ -121,7 +123,7 @@ order by vector | t wrappers | f xml2 | f -(81 rows) +(83 rows) /* @@ -149,8 +151,8 @@ order by n.nspname, p.proname, pg_catalog.pg_get_function_identity_arguments(p.oid); - extension_name | schema_name | function_name | argument_types | return_type -------------------------+--------------------------+--------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + extension_name | schema_name | function_name | argument_types | return_type +------------------------+--------------------------+--------------------------------------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ address_standardizer | public | parse_address | text, OUT num text, OUT street text, OUT street2 text, OUT address1 text, OUT city text, OUT state text, OUT zip text, OUT zipplus text, OUT country text | record address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, address text | stdaddr address_standardizer | public | standardize_address | lextab text, gaztab text, rultab text, micro text, macro text | stdaddr @@ -1147,6 
+1149,45 @@ order by pg_net | net | http_get | url text, params jsonb, headers jsonb, timeout_milliseconds integer | bigint pg_net | net | http_post | url text, body jsonb, params jsonb, headers jsonb, timeout_milliseconds integer | bigint pg_net | net | worker_restart | | boolean + pg_partman | public | apply_cluster | p_parent_schema text, p_parent_tablename text, p_child_schema text, p_child_tablename text | void + pg_partman | public | apply_constraints | p_parent_table text, p_child_table text, p_analyze boolean, p_job_id bigint | void + pg_partman | public | apply_privileges | p_parent_schema text, p_parent_tablename text, p_child_schema text, p_child_tablename text, p_job_id bigint | void + pg_partman | public | autovacuum_off | p_parent_schema text, p_parent_tablename text, p_source_schema text, p_source_tablename text | boolean + pg_partman | public | autovacuum_reset | p_parent_schema text, p_parent_tablename text, p_source_schema text, p_source_tablename text | boolean + pg_partman | public | calculate_time_partition_info | p_time_interval interval, p_start_time timestamp with time zone, p_date_trunc_interval text, OUT base_timestamp timestamp with time zone, OUT datetime_string text | record + pg_partman | public | check_automatic_maintenance_value | p_automatic_maintenance text | boolean + pg_partman | public | check_control_type | p_parent_schema text, p_parent_tablename text, p_control text | TABLE(general_type text, exact_type text) + pg_partman | public | check_default | p_exact_count boolean | SETOF check_default_table + pg_partman | public | check_epoch_type | p_type text | boolean + pg_partman | public | check_name_length | p_object_name text, p_suffix text, p_table_partition boolean | text + pg_partman | public | check_partition_type | p_type text | boolean + pg_partman | public | check_subpart_sameconfig | p_parent_table text | TABLE(sub_control text, sub_partition_interval text, sub_partition_type text, sub_premake integer, sub_automatic_maintenance text, sub_template_table text, sub_retention text, sub_retention_schema text, sub_retention_keep_index boolean, sub_retention_keep_table boolean, sub_epoch text, sub_constraint_cols text[], sub_optimize_constraint integer, sub_infinite_time_partitions boolean, sub_jobmon boolean, sub_inherit_privileges boolean, sub_constraint_valid boolean, sub_date_trunc_interval text, sub_ignore_default_data boolean, sub_default_table boolean, sub_maintenance_order integer, sub_retention_keep_publication boolean) + pg_partman | public | check_subpartition_limits | p_parent_table text, p_type text, OUT sub_min text, OUT sub_max text | record + pg_partman | public | create_parent | p_parent_table text, p_control text, p_interval text, p_type text, p_epoch text, p_premake integer, p_start_partition text, p_default_table boolean, p_automatic_maintenance text, p_constraint_cols text[], p_template_table text, p_jobmon boolean, p_date_trunc_interval text | boolean + pg_partman | public | create_partition_id | p_parent_table text, p_partition_ids bigint[], p_start_partition text | boolean + pg_partman | public | create_partition_time | p_parent_table text, p_partition_times timestamp with time zone[], p_start_partition text | boolean + pg_partman | public | create_sub_parent | p_top_parent text, p_control text, p_interval text, p_type text, p_default_table boolean, p_declarative_check text, p_constraint_cols text[], p_premake integer, p_start_partition text, p_epoch text, p_jobmon boolean, p_date_trunc_interval text | boolean + pg_partman | 
public | drop_constraints | p_parent_table text, p_child_table text, p_debug boolean | void + pg_partman | public | drop_partition_id | p_parent_table text, p_retention bigint, p_keep_table boolean, p_keep_index boolean, p_retention_schema text | integer + pg_partman | public | drop_partition_time | p_parent_table text, p_retention interval, p_keep_table boolean, p_keep_index boolean, p_retention_schema text, p_reference_timestamp timestamp with time zone | integer + pg_partman | public | dump_partitioned_table_definition | p_parent_table text, p_ignore_template_table boolean | text + pg_partman | public | inherit_replica_identity | p_parent_schemaname text, p_parent_tablename text, p_child_tablename text | void + pg_partman | public | inherit_template_properties | p_parent_table text, p_child_schema text, p_child_tablename text | boolean + pg_partman | public | partition_data_id | p_parent_table text, p_batch_count integer, p_batch_interval bigint, p_lock_wait numeric, p_order text, p_analyze boolean, p_source_table text, p_ignored_columns text[] | bigint + pg_partman | public | partition_data_proc | IN p_parent_table text, IN p_loop_count integer, IN p_interval text, IN p_lock_wait integer, IN p_lock_wait_tries integer, IN p_wait integer, IN p_order text, IN p_source_table text, IN p_ignored_columns text[], IN p_quiet boolean | + pg_partman | public | partition_data_time | p_parent_table text, p_batch_count integer, p_batch_interval interval, p_lock_wait numeric, p_order text, p_analyze boolean, p_source_table text, p_ignored_columns text[] | bigint + pg_partman | public | partition_gap_fill | p_parent_table text | integer + pg_partman | public | reapply_constraints_proc | IN p_parent_table text, IN p_drop_constraints boolean, IN p_apply_constraints boolean, IN p_wait integer, IN p_dryrun boolean | + pg_partman | public | reapply_privileges | p_parent_table text | void + pg_partman | public | run_analyze | IN p_skip_locked boolean, IN p_quiet boolean, IN p_parent_table text | + pg_partman | public | run_maintenance | p_parent_table text, p_analyze boolean, p_jobmon boolean | void + pg_partman | public | run_maintenance_proc | IN p_wait integer, IN p_analyze boolean, IN p_jobmon boolean | + pg_partman | public | show_partition_info | p_child_table text, p_partition_interval text, p_parent_table text, OUT child_start_time timestamp with time zone, OUT child_end_time timestamp with time zone, OUT child_start_id bigint, OUT child_end_id bigint, OUT suffix text | record + pg_partman | public | show_partition_name | p_parent_table text, p_value text, OUT partition_schema text, OUT partition_table text, OUT suffix_timestamp timestamp with time zone, OUT suffix_id bigint, OUT table_exists boolean | record + pg_partman | public | show_partitions | p_parent_table text, p_order text, p_include_default boolean | TABLE(partition_schemaname text, partition_tablename text) + pg_partman | public | stop_sub_partition | p_parent_table text, p_jobmon boolean | boolean + pg_partman | public | undo_partition | p_parent_table text, p_target_table text, p_loop_count integer, p_batch_interval text, p_keep_table boolean, p_lock_wait numeric, p_ignored_columns text[], p_drop_cascade boolean, OUT partitions_undone integer, OUT rows_undone bigint | record + pg_partman | public | undo_partition_proc | IN p_parent_table text, IN p_target_table text, IN p_loop_count integer, IN p_interval text, IN p_keep_table boolean, IN p_lock_wait integer, IN p_lock_wait_tries integer, IN p_wait integer, IN p_ignored_columns 
text[], IN p_drop_cascade boolean, IN p_quiet boolean | pg_prewarm | public | autoprewarm_dump_now | | bigint pg_prewarm | public | autoprewarm_start_worker | | void pg_prewarm | public | pg_prewarm | regclass, mode text, fork text, first_block bigint, last_block bigint | bigint @@ -1306,6 +1347,34 @@ order by pgjwt | public | url_decode | data text | bytea pgjwt | public | url_encode | data bytea | text pgjwt | public | verify | token text, secret text, algorithm text | TABLE(header json, payload json, valid boolean) + pgmq | pgmq | _belongs_to_pgmq | table_name text | boolean + pgmq | pgmq | _ensure_pg_partman_installed | | void + pgmq | pgmq | _get_partition_col | partition_interval text | text + pgmq | pgmq | _get_pg_partman_major_version | | integer + pgmq | pgmq | _get_pg_partman_schema | | text + pgmq | pgmq | archive | queue_name text, msg_id bigint | boolean + pgmq | pgmq | archive | queue_name text, msg_ids bigint[] | SETOF bigint + pgmq | pgmq | convert_archive_partitioned | table_name text, partition_interval text, retention_interval text, leading_partition integer | void + pgmq | pgmq | create | queue_name text | void + pgmq | pgmq | create_non_partitioned | queue_name text | void + pgmq | pgmq | create_partitioned | queue_name text, partition_interval text, retention_interval text | void + pgmq | pgmq | create_unlogged | queue_name text | void + pgmq | pgmq | delete | queue_name text, msg_id bigint | boolean + pgmq | pgmq | delete | queue_name text, msg_ids bigint[] | SETOF bigint + pgmq | pgmq | detach_archive | queue_name text | void + pgmq | pgmq | drop_queue | queue_name text, partitioned boolean | boolean + pgmq | pgmq | format_table_name | queue_name text, prefix text | text + pgmq | pgmq | list_queues | | SETOF pgmq.queue_record + pgmq | pgmq | metrics | queue_name text | pgmq.metrics_result + pgmq | pgmq | metrics_all | | SETOF pgmq.metrics_result + pgmq | pgmq | pop | queue_name text | SETOF pgmq.message_record + pgmq | pgmq | purge_queue | queue_name text | bigint + pgmq | pgmq | read | queue_name text, vt integer, qty integer | SETOF pgmq.message_record + pgmq | pgmq | read_with_poll | queue_name text, vt integer, qty integer, max_poll_seconds integer, poll_interval_ms integer | SETOF pgmq.message_record + pgmq | pgmq | send | queue_name text, msg jsonb, delay integer | SETOF bigint + pgmq | pgmq | send_batch | queue_name text, msgs jsonb[], delay integer | SETOF bigint + pgmq | pgmq | set_vt | queue_name text, msg_id bigint, vt integer | SETOF pgmq.message_record + pgmq | pgmq | validate_queue_name | queue_name text | void pgroonga | pgroonga | command | groongacommand text | text pgroonga | pgroonga | command | groongacommand text, arguments text[] | text pgroonga | pgroonga | command_escape_value | value text | text @@ -4345,11 +4414,11 @@ order by postgis_tiger_geocoder | tiger | drop_nation_tables_generate_script | param_schema text | text postgis_tiger_geocoder | tiger | drop_state_tables_generate_script | param_state text, param_schema text | text postgis_tiger_geocoder | tiger | end_soundex | character varying | character varying - postgis_tiger_geocoder | tiger | geocode | in_addy tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode | input character varying, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | 
geocode_address | parsed tiger.norm_addy, max_results integer, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode_intersection | roadway1 text, roadway2 text, in_state text, in_city text, in_zip text, num_results integer, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record - postgis_tiger_geocoder | tiger | geocode_location | parsed tiger.norm_addy, restrict_geom geometry, OUT addy tiger.norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode | in_addy norm_addy, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode | input character varying, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_address | parsed norm_addy, max_results integer, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_intersection | roadway1 text, roadway2 text, in_state text, in_city text, in_zip text, num_results integer, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record + postgis_tiger_geocoder | tiger | geocode_location | parsed norm_addy, restrict_geom geometry, OUT addy norm_addy, OUT geomout geometry, OUT rating integer | SETOF record postgis_tiger_geocoder | tiger | get_geocode_setting | setting_name text | text postgis_tiger_geocoder | tiger | get_last_words | inputstring character varying, count integer | character varying postgis_tiger_geocoder | tiger | get_tract | loc_geom geometry, output_field text | text @@ -4374,14 +4443,14 @@ order by postgis_tiger_geocoder | tiger | location_extract_place_exact | fullstreet character varying, stateabbrev character varying | character varying postgis_tiger_geocoder | tiger | location_extract_place_fuzzy | fullstreet character varying, stateabbrev character varying | character varying postgis_tiger_geocoder | tiger | missing_indexes_generate_script | | text - postgis_tiger_geocoder | tiger | normalize_address | in_rawinput character varying | tiger.norm_addy + postgis_tiger_geocoder | tiger | normalize_address | in_rawinput character varying | norm_addy postgis_tiger_geocoder | tiger | nullable_levenshtein | character varying, character varying | integer postgis_tiger_geocoder | tiger | numeric_streets_equal | input_street character varying, output_street character varying | boolean - postgis_tiger_geocoder | tiger | pagc_normalize_address | in_rawinput character varying | tiger.norm_addy - postgis_tiger_geocoder | tiger | pprint_addy | input tiger.norm_addy | character varying + postgis_tiger_geocoder | tiger | pagc_normalize_address | in_rawinput character varying | norm_addy + postgis_tiger_geocoder | tiger | pprint_addy | input norm_addy | character varying postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, locationa character varying, locationb character varying, prequalabr character varying | integer postgis_tiger_geocoder | tiger | rate_attributes | dirpa character varying, dirpb character varying, streetnamea character 
varying, streetnameb character varying, streettypea character varying, streettypeb character varying, dirsa character varying, dirsb character varying, prequalabr character varying | integer - postgis_tiger_geocoder | tiger | reverse_geocode | pt geometry, include_strnum_range boolean, OUT intpt geometry[], OUT addy tiger.norm_addy[], OUT street character varying[] | record + postgis_tiger_geocoder | tiger | reverse_geocode | pt geometry, include_strnum_range boolean, OUT intpt geometry[], OUT addy norm_addy[], OUT street character varying[] | record postgis_tiger_geocoder | tiger | set_geocode_setting | setting_name text, setting_value text | text postgis_tiger_geocoder | tiger | setsearchpathforinstall | a_schema_name text | text postgis_tiger_geocoder | tiger | state_extract | rawinput character varying | character varying @@ -5156,7 +5225,7 @@ order by xml2 | public | xpath_table | text, text, text, text, text | SETOF record xml2 | public | xslt_process | text, text | text xml2 | public | xslt_process | text, text, text | text -(5005 rows) +(5072 rows) /* @@ -5185,6 +5254,7 @@ where and pc.relkind in ('r', 'v', 'm', 'i') order by e.extname, + n.nspname, pc.relname, pa.attname; extension_name | schema_name | entity_name | attname @@ -5238,6 +5308,61 @@ order by pg_net | net | http_request_queue | method pg_net | net | http_request_queue | timeout_milliseconds pg_net | net | http_request_queue | url + pg_partman | public | part_config | automatic_maintenance + pg_partman | public | part_config | constraint_cols + pg_partman | public | part_config | constraint_valid + pg_partman | public | part_config | control + pg_partman | public | part_config | date_trunc_interval + pg_partman | public | part_config | datetime_string + pg_partman | public | part_config | default_table + pg_partman | public | part_config | epoch + pg_partman | public | part_config | ignore_default_data + pg_partman | public | part_config | infinite_time_partitions + pg_partman | public | part_config | inherit_privileges + pg_partman | public | part_config | jobmon + pg_partman | public | part_config | maintenance_last_run + pg_partman | public | part_config | maintenance_order + pg_partman | public | part_config | optimize_constraint + pg_partman | public | part_config | parent_table + pg_partman | public | part_config | partition_interval + pg_partman | public | part_config | partition_type + pg_partman | public | part_config | premake + pg_partman | public | part_config | retention + pg_partman | public | part_config | retention_keep_index + pg_partman | public | part_config | retention_keep_publication + pg_partman | public | part_config | retention_keep_table + pg_partman | public | part_config | retention_schema + pg_partman | public | part_config | sub_partition_set_full + pg_partman | public | part_config | template_table + pg_partman | public | part_config | undo_in_progress + pg_partman | public | part_config_sub | sub_automatic_maintenance + pg_partman | public | part_config_sub | sub_constraint_cols + pg_partman | public | part_config_sub | sub_constraint_valid + pg_partman | public | part_config_sub | sub_control + pg_partman | public | part_config_sub | sub_date_trunc_interval + pg_partman | public | part_config_sub | sub_default_table + pg_partman | public | part_config_sub | sub_epoch + pg_partman | public | part_config_sub | sub_ignore_default_data + pg_partman | public | part_config_sub | sub_infinite_time_partitions + pg_partman | public | part_config_sub | sub_inherit_privileges + pg_partman | public 
| part_config_sub | sub_jobmon + pg_partman | public | part_config_sub | sub_maintenance_order + pg_partman | public | part_config_sub | sub_optimize_constraint + pg_partman | public | part_config_sub | sub_parent + pg_partman | public | part_config_sub | sub_partition_interval + pg_partman | public | part_config_sub | sub_partition_type + pg_partman | public | part_config_sub | sub_premake + pg_partman | public | part_config_sub | sub_retention + pg_partman | public | part_config_sub | sub_retention_keep_index + pg_partman | public | part_config_sub | sub_retention_keep_publication + pg_partman | public | part_config_sub | sub_retention_keep_table + pg_partman | public | part_config_sub | sub_retention_schema + pg_partman | public | part_config_sub | sub_template_table + pg_partman | public | table_privs | grantee + pg_partman | public | table_privs | grantor + pg_partman | public | table_privs | privilege_type + pg_partman | public | table_privs | table_name + pg_partman | public | table_privs | table_schema pg_repack | repack | primary_keys | indexrelid pg_repack | repack | primary_keys | indrelid pg_repack | repack | tables | alter_col_storage @@ -5378,6 +5503,10 @@ order by pg_tle | pgtle | feature_info | obj_identity pg_tle | pgtle | feature_info | proname pg_tle | pgtle | feature_info | schema_name + pgmq | pgmq | meta | created_at + pgmq | pgmq | meta | is_partitioned + pgmq | pgmq | meta | is_unlogged + pgmq | pgmq | meta | queue_name pgsodium | pgsodium | decrypted_key | associated_data pgsodium | pgsodium | decrypted_key | comment pgsodium | pgsodium | decrypted_key | created @@ -5956,49 +6085,6 @@ order by supabase_vault | vault | secrets | nonce supabase_vault | vault | secrets | secret supabase_vault | vault | secrets | updated_at - timescaledb | _timescaledb_config | bgw_job | application_name - timescaledb | _timescaledb_config | bgw_job | check_name - timescaledb | _timescaledb_config | bgw_job | check_schema - timescaledb | _timescaledb_config | bgw_job | config - timescaledb | _timescaledb_config | bgw_job | fixed_schedule - timescaledb | _timescaledb_config | bgw_job | hypertable_id - timescaledb | _timescaledb_config | bgw_job | id - timescaledb | _timescaledb_config | bgw_job | initial_start - timescaledb | _timescaledb_config | bgw_job | max_retries - timescaledb | _timescaledb_config | bgw_job | max_runtime - timescaledb | _timescaledb_config | bgw_job | owner - timescaledb | _timescaledb_config | bgw_job | proc_name - timescaledb | _timescaledb_config | bgw_job | proc_schema - timescaledb | _timescaledb_config | bgw_job | retry_period - timescaledb | _timescaledb_config | bgw_job | schedule_interval - timescaledb | _timescaledb_config | bgw_job | scheduled - timescaledb | _timescaledb_config | bgw_job | timezone - timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_crashes - timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_failures - timescaledb | _timescaledb_internal | bgw_job_stat | flags - timescaledb | _timescaledb_internal | bgw_job_stat | job_id - timescaledb | _timescaledb_internal | bgw_job_stat | last_finish - timescaledb | _timescaledb_internal | bgw_job_stat | last_run_success - timescaledb | _timescaledb_internal | bgw_job_stat | last_start - timescaledb | _timescaledb_internal | bgw_job_stat | last_successful_finish - timescaledb | _timescaledb_internal | bgw_job_stat | next_start - timescaledb | _timescaledb_internal | bgw_job_stat | total_crashes - timescaledb | _timescaledb_internal | bgw_job_stat | total_duration - 
timescaledb | _timescaledb_internal | bgw_job_stat | total_duration_failures - timescaledb | _timescaledb_internal | bgw_job_stat | total_failures - timescaledb | _timescaledb_internal | bgw_job_stat | total_runs - timescaledb | _timescaledb_internal | bgw_job_stat | total_successes - timescaledb | _timescaledb_internal | bgw_job_stat_history | data - timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_finish - timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_start - timescaledb | _timescaledb_internal | bgw_job_stat_history | id - timescaledb | _timescaledb_internal | bgw_job_stat_history | job_id - timescaledb | _timescaledb_internal | bgw_job_stat_history | pid - timescaledb | _timescaledb_internal | bgw_job_stat_history | succeeded - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | chunk_id - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | job_id - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | last_time_job_run - timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | num_times_job_run timescaledb | _timescaledb_cache | cache_inval_bgw_job | timescaledb | _timescaledb_cache | cache_inval_extension | timescaledb | _timescaledb_cache | cache_inval_hypertable | @@ -6018,10 +6104,6 @@ order by timescaledb | _timescaledb_catalog | chunk_column_stats | range_end timescaledb | _timescaledb_catalog | chunk_column_stats | range_start timescaledb | _timescaledb_catalog | chunk_column_stats | valid - timescaledb | timescaledb_information | chunk_compression_settings | chunk - timescaledb | timescaledb_information | chunk_compression_settings | hypertable - timescaledb | timescaledb_information | chunk_compression_settings | orderby - timescaledb | timescaledb_information | chunk_compression_settings | segmentby timescaledb | _timescaledb_catalog | chunk_constraint | chunk_id timescaledb | _timescaledb_catalog | chunk_constraint | constraint_name timescaledb | _timescaledb_catalog | chunk_constraint | dimension_slice_id @@ -6030,32 +6112,6 @@ order by timescaledb | _timescaledb_catalog | chunk_index | hypertable_id timescaledb | _timescaledb_catalog | chunk_index | hypertable_index_name timescaledb | _timescaledb_catalog | chunk_index | index_name - timescaledb | timescaledb_information | chunks | chunk_creation_time - timescaledb | timescaledb_information | chunks | chunk_name - timescaledb | timescaledb_information | chunks | chunk_schema - timescaledb | timescaledb_information | chunks | chunk_tablespace - timescaledb | timescaledb_information | chunks | hypertable_name - timescaledb | timescaledb_information | chunks | hypertable_schema - timescaledb | timescaledb_information | chunks | is_compressed - timescaledb | timescaledb_information | chunks | primary_dimension - timescaledb | timescaledb_information | chunks | primary_dimension_type - timescaledb | timescaledb_information | chunks | range_end - timescaledb | timescaledb_information | chunks | range_end_integer - timescaledb | timescaledb_information | chunks | range_start - timescaledb | timescaledb_information | chunks | range_start_integer - timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_name - timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_schema - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_heap_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_index_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | 
compressed_toast_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_total_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | compression_status - timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_name - timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_schema - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_heap_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_index_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_toast_size - timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_total_size timescaledb | _timescaledb_catalog | compression_algorithm | description timescaledb | _timescaledb_catalog | compression_algorithm | id timescaledb | _timescaledb_catalog | compression_algorithm | name @@ -6071,18 +6127,11 @@ order by timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_heap_size timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_index_size timescaledb | _timescaledb_catalog | compression_chunk_size | uncompressed_toast_size - timescaledb | timescaledb_information | compression_settings | attname - timescaledb | timescaledb_information | compression_settings | hypertable_name - timescaledb | timescaledb_information | compression_settings | hypertable_schema timescaledb | _timescaledb_catalog | compression_settings | orderby - timescaledb | timescaledb_information | compression_settings | orderby_asc - timescaledb | timescaledb_information | compression_settings | orderby_column_index timescaledb | _timescaledb_catalog | compression_settings | orderby_desc timescaledb | _timescaledb_catalog | compression_settings | orderby_nullsfirst - timescaledb | timescaledb_information | compression_settings | orderby_nullsfirst timescaledb | _timescaledb_catalog | compression_settings | relid timescaledb | _timescaledb_catalog | compression_settings | segmentby - timescaledb | timescaledb_information | compression_settings | segmentby_column_index timescaledb | _timescaledb_catalog | continuous_agg | direct_view_name timescaledb | _timescaledb_catalog | continuous_agg | direct_view_schema timescaledb | _timescaledb_catalog | continuous_agg | finalized @@ -6105,17 +6154,6 @@ order by timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | status timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | step_id timescaledb | _timescaledb_catalog | continuous_agg_migrate_plan_step | type - timescaledb | timescaledb_information | continuous_aggregates | compression_enabled - timescaledb | timescaledb_information | continuous_aggregates | finalized - timescaledb | timescaledb_information | continuous_aggregates | hypertable_name - timescaledb | timescaledb_information | continuous_aggregates | hypertable_schema - timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_name - timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_schema - timescaledb | timescaledb_information | continuous_aggregates | materialized_only - timescaledb | timescaledb_information | continuous_aggregates | view_definition - timescaledb | timescaledb_information | continuous_aggregates | view_name - timescaledb | timescaledb_information | continuous_aggregates | view_owner - timescaledb | timescaledb_information | continuous_aggregates | view_schema timescaledb | 
_timescaledb_catalog | continuous_aggs_bucket_function | bucket_fixed_width timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_func timescaledb | _timescaledb_catalog | continuous_aggs_bucket_function | bucket_offset @@ -6149,16 +6187,6 @@ order by timescaledb | _timescaledb_catalog | dimension_slice | id timescaledb | _timescaledb_catalog | dimension_slice | range_end timescaledb | _timescaledb_catalog | dimension_slice | range_start - timescaledb | timescaledb_information | dimensions | column_name - timescaledb | timescaledb_information | dimensions | column_type - timescaledb | timescaledb_information | dimensions | dimension_number - timescaledb | timescaledb_information | dimensions | dimension_type - timescaledb | timescaledb_information | dimensions | hypertable_name - timescaledb | timescaledb_information | dimensions | hypertable_schema - timescaledb | timescaledb_information | dimensions | integer_interval - timescaledb | timescaledb_information | dimensions | integer_now_func - timescaledb | timescaledb_information | dimensions | num_partitions - timescaledb | timescaledb_information | dimensions | time_interval timescaledb | _timescaledb_catalog | hypertable | associated_schema_name timescaledb | _timescaledb_catalog | hypertable | associated_table_prefix timescaledb | _timescaledb_catalog | hypertable | chunk_sizing_func_name @@ -6171,6 +6199,71 @@ order by timescaledb | _timescaledb_catalog | hypertable | schema_name timescaledb | _timescaledb_catalog | hypertable | status timescaledb | _timescaledb_catalog | hypertable | table_name + timescaledb | _timescaledb_catalog | metadata | include_in_telemetry + timescaledb | _timescaledb_catalog | metadata | key + timescaledb | _timescaledb_catalog | metadata | value + timescaledb | _timescaledb_catalog | tablespace | hypertable_id + timescaledb | _timescaledb_catalog | tablespace | id + timescaledb | _timescaledb_catalog | tablespace | tablespace_name + timescaledb | _timescaledb_catalog | telemetry_event | body + timescaledb | _timescaledb_catalog | telemetry_event | created + timescaledb | _timescaledb_catalog | telemetry_event | tag + timescaledb | _timescaledb_config | bgw_job | application_name + timescaledb | _timescaledb_config | bgw_job | check_name + timescaledb | _timescaledb_config | bgw_job | check_schema + timescaledb | _timescaledb_config | bgw_job | config + timescaledb | _timescaledb_config | bgw_job | fixed_schedule + timescaledb | _timescaledb_config | bgw_job | hypertable_id + timescaledb | _timescaledb_config | bgw_job | id + timescaledb | _timescaledb_config | bgw_job | initial_start + timescaledb | _timescaledb_config | bgw_job | max_retries + timescaledb | _timescaledb_config | bgw_job | max_runtime + timescaledb | _timescaledb_config | bgw_job | owner + timescaledb | _timescaledb_config | bgw_job | proc_name + timescaledb | _timescaledb_config | bgw_job | proc_schema + timescaledb | _timescaledb_config | bgw_job | retry_period + timescaledb | _timescaledb_config | bgw_job | schedule_interval + timescaledb | _timescaledb_config | bgw_job | scheduled + timescaledb | _timescaledb_config | bgw_job | timezone + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | consecutive_failures + timescaledb | _timescaledb_internal | bgw_job_stat | flags + timescaledb | _timescaledb_internal | bgw_job_stat | job_id + timescaledb | _timescaledb_internal | bgw_job_stat | last_finish + timescaledb | _timescaledb_internal | 
bgw_job_stat | last_run_success + timescaledb | _timescaledb_internal | bgw_job_stat | last_start + timescaledb | _timescaledb_internal | bgw_job_stat | last_successful_finish + timescaledb | _timescaledb_internal | bgw_job_stat | next_start + timescaledb | _timescaledb_internal | bgw_job_stat | total_crashes + timescaledb | _timescaledb_internal | bgw_job_stat | total_duration + timescaledb | _timescaledb_internal | bgw_job_stat | total_duration_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_failures + timescaledb | _timescaledb_internal | bgw_job_stat | total_runs + timescaledb | _timescaledb_internal | bgw_job_stat | total_successes + timescaledb | _timescaledb_internal | bgw_job_stat_history | data + timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_finish + timescaledb | _timescaledb_internal | bgw_job_stat_history | execution_start + timescaledb | _timescaledb_internal | bgw_job_stat_history | id + timescaledb | _timescaledb_internal | bgw_job_stat_history | job_id + timescaledb | _timescaledb_internal | bgw_job_stat_history | pid + timescaledb | _timescaledb_internal | bgw_job_stat_history | succeeded + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | chunk_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | job_id + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | last_time_job_run + timescaledb | _timescaledb_internal | bgw_policy_chunk_stats | num_times_job_run + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | chunk_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_index_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compressed_total_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | compression_status + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_name + timescaledb | _timescaledb_internal | compressed_chunk_stats | hypertable_schema + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_heap_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_index_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_toast_size + timescaledb | _timescaledb_internal | compressed_chunk_stats | uncompressed_total_size timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_id timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_name timescaledb | _timescaledb_internal | hypertable_chunk_local_size | chunk_schema @@ -6185,6 +6278,59 @@ order by timescaledb | _timescaledb_internal | hypertable_chunk_local_size | index_bytes timescaledb | _timescaledb_internal | hypertable_chunk_local_size | toast_bytes timescaledb | _timescaledb_internal | hypertable_chunk_local_size | total_bytes + timescaledb | timescaledb_experimental | policies | config + timescaledb | timescaledb_experimental | policies | hypertable_name + timescaledb | timescaledb_experimental | policies | hypertable_schema + timescaledb | timescaledb_experimental | policies | proc_name + timescaledb | timescaledb_experimental | policies | proc_schema + timescaledb | timescaledb_experimental | policies | relation_name + timescaledb | timescaledb_experimental | policies | 
relation_schema + timescaledb | timescaledb_experimental | policies | schedule_interval + timescaledb | timescaledb_information | chunk_compression_settings | chunk + timescaledb | timescaledb_information | chunk_compression_settings | hypertable + timescaledb | timescaledb_information | chunk_compression_settings | orderby + timescaledb | timescaledb_information | chunk_compression_settings | segmentby + timescaledb | timescaledb_information | chunks | chunk_creation_time + timescaledb | timescaledb_information | chunks | chunk_name + timescaledb | timescaledb_information | chunks | chunk_schema + timescaledb | timescaledb_information | chunks | chunk_tablespace + timescaledb | timescaledb_information | chunks | hypertable_name + timescaledb | timescaledb_information | chunks | hypertable_schema + timescaledb | timescaledb_information | chunks | is_compressed + timescaledb | timescaledb_information | chunks | primary_dimension + timescaledb | timescaledb_information | chunks | primary_dimension_type + timescaledb | timescaledb_information | chunks | range_end + timescaledb | timescaledb_information | chunks | range_end_integer + timescaledb | timescaledb_information | chunks | range_start + timescaledb | timescaledb_information | chunks | range_start_integer + timescaledb | timescaledb_information | compression_settings | attname + timescaledb | timescaledb_information | compression_settings | hypertable_name + timescaledb | timescaledb_information | compression_settings | hypertable_schema + timescaledb | timescaledb_information | compression_settings | orderby_asc + timescaledb | timescaledb_information | compression_settings | orderby_column_index + timescaledb | timescaledb_information | compression_settings | orderby_nullsfirst + timescaledb | timescaledb_information | compression_settings | segmentby_column_index + timescaledb | timescaledb_information | continuous_aggregates | compression_enabled + timescaledb | timescaledb_information | continuous_aggregates | finalized + timescaledb | timescaledb_information | continuous_aggregates | hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_name + timescaledb | timescaledb_information | continuous_aggregates | materialization_hypertable_schema + timescaledb | timescaledb_information | continuous_aggregates | materialized_only + timescaledb | timescaledb_information | continuous_aggregates | view_definition + timescaledb | timescaledb_information | continuous_aggregates | view_name + timescaledb | timescaledb_information | continuous_aggregates | view_owner + timescaledb | timescaledb_information | continuous_aggregates | view_schema + timescaledb | timescaledb_information | dimensions | column_name + timescaledb | timescaledb_information | dimensions | column_type + timescaledb | timescaledb_information | dimensions | dimension_number + timescaledb | timescaledb_information | dimensions | dimension_type + timescaledb | timescaledb_information | dimensions | hypertable_name + timescaledb | timescaledb_information | dimensions | hypertable_schema + timescaledb | timescaledb_information | dimensions | integer_interval + timescaledb | timescaledb_information | dimensions | integer_now_func + timescaledb | timescaledb_information | dimensions | num_partitions + timescaledb | timescaledb_information | dimensions | time_interval timescaledb | timescaledb_information | hypertable_compression_settings | 
compress_interval_length timescaledb | timescaledb_information | hypertable_compression_settings | hypertable timescaledb | timescaledb_information | hypertable_compression_settings | orderby @@ -6245,23 +6391,6 @@ order by timescaledb | timescaledb_information | jobs | retry_period timescaledb | timescaledb_information | jobs | schedule_interval timescaledb | timescaledb_information | jobs | scheduled - timescaledb | _timescaledb_catalog | metadata | include_in_telemetry - timescaledb | _timescaledb_catalog | metadata | key - timescaledb | _timescaledb_catalog | metadata | value - timescaledb | timescaledb_experimental | policies | config - timescaledb | timescaledb_experimental | policies | hypertable_name - timescaledb | timescaledb_experimental | policies | hypertable_schema - timescaledb | timescaledb_experimental | policies | proc_name - timescaledb | timescaledb_experimental | policies | proc_schema - timescaledb | timescaledb_experimental | policies | relation_name - timescaledb | timescaledb_experimental | policies | relation_schema - timescaledb | timescaledb_experimental | policies | schedule_interval - timescaledb | _timescaledb_catalog | tablespace | hypertable_id - timescaledb | _timescaledb_catalog | tablespace | id - timescaledb | _timescaledb_catalog | tablespace | tablespace_name - timescaledb | _timescaledb_catalog | telemetry_event | body - timescaledb | _timescaledb_catalog | telemetry_event | created - timescaledb | _timescaledb_catalog | telemetry_event | tag wrappers | public | wrappers_fdw_stats | bytes_in wrappers | public | wrappers_fdw_stats | bytes_out wrappers | public | wrappers_fdw_stats | create_times @@ -6271,5 +6400,5 @@ order by wrappers | public | wrappers_fdw_stats | rows_in wrappers | public | wrappers_fdw_stats | rows_out wrappers | public | wrappers_fdw_stats | updated_at -(1082 rows) +(1141 rows) diff --git a/nix/tests/expected/pg_partman.out b/nix/tests/expected/pg_partman.out new file mode 100644 index 000000000..527d58b12 --- /dev/null +++ b/nix/tests/expected/pg_partman.out @@ -0,0 +1,101 @@ +create schema if not exists partman_test; +/* +Simple Time Based: 1 Partition Per Day + +For native partitioning, you must start with a parent table that has already been set up to be partitioned in the desired type. Currently pg_partman only supports the RANGE type of partitioning (both for time & id). You cannot turn a non-partitioned table into the parent table of a partitioned set, which can make migration a challenge. This document will show you some techniques for how to manage this later. For now, we will start with a brand new table in this example. Any non-unique indexes can also be added to the parent table in PG11+ and they will automatically be created on all child tables. +*/ +create table partman_test.time_taptest_table( + col1 int, + col2 text default 'stuff', + col3 timestamptz not null default now() +) + partition by range (col3); +create index on partman_test.time_tap (col3); +ERROR: relation "partman_test.time_tap" does not exist +/* +Unique indexes (including primary keys) cannot be created on a natively partitioned parent unless they include the partition key. For time-based partitioning that generally doesn't work out since that would limit only a single timestamp value in each child table. pg_partman helps to manage this by using a template table to manage properties that currently are not supported by native partitioning. Note that this does not solve the issue of the constraint not being enforced across the entire partition set. 
See the main documentation to see which properties are managed by the template. + +Manually create the template table first so that when we run create_parent() the initial child tables that are created will have a primary key. If you do not supply a template table to pg_partman, it will create one for you in the schema that you installed the extension to. However properties you add to that template are only then applied to newly created child tables after that point. You will have to retroactively apply those properties manually to any child tables that already existed. +*/ +create table partman_test.time_taptest_table_template (like partman_test.time_taptest_table); +alter table partman_test.time_taptest_table_template add primary key (col1); +/* +Review tables in the partman_test schema +*/ +select + table_name, + table_type +from + information_schema.tables +where + table_schema = 'partman_test'; + table_name | table_type +-----------------------------+------------ + time_taptest_table | BASE TABLE + time_taptest_table_template | BASE TABLE +(2 rows) + +select public.create_parent( + p_parent_table := 'partman_test.time_taptest_table', + p_control := 'col3', + p_interval := '1 day', + p_template_table := 'partman_test.time_taptest_table_template' +); + create_parent +--------------- + t +(1 row) + +/* +Review tables in the partman_test schema, which should now include daily partitions +*/ +select + -- dates in partition names are variable, so reduced to the prefix + substring(table_name, 1, 21) as table_prefix, + table_type +from + information_schema.tables +where + table_schema = 'partman_test' +order by + table_name; + table_prefix | table_type +-----------------------+------------ + time_taptest_table | BASE TABLE + time_taptest_table_de | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_p2 | BASE TABLE + time_taptest_table_te | BASE TABLE +(12 rows) + +/* +Confirm maintenance proc runs without issue +*/ +call public.run_maintenance_proc(); +/* +Make sure the background worker is NOT enabled. +This is intentional. We document using pg_cron to schedule calls to +public.run_maintenance_proc(). That is consistent with other providers. 
+*/ +select + application_name +from + pg_stat_activity +where + application_name = 'pg_partman_bgw'; + application_name +------------------ +(0 rows) + +-- Cleanup +drop schema partman_test cascade; +NOTICE: drop cascades to 2 other objects +DETAIL: drop cascades to table partman_test.time_taptest_table +drop cascades to table partman_test.time_taptest_table_template diff --git a/nix/tests/expected/pgmq.out b/nix/tests/expected/pgmq.out new file mode 100644 index 000000000..b2886d9fa --- /dev/null +++ b/nix/tests/expected/pgmq.out @@ -0,0 +1,143 @@ +-- Test the standard flow +select + pgmq.create('Foo'); + create +-------- + +(1 row) + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + send +------ + 1 +(1 row) + +-- Test queue is not case sensitive +select + msg_id, + read_ct, + message +from + pgmq.send( + queue_name:='foo', -- note: lowercase useage + msg:='{"foo": "bar2"}', + delay:=5 + ); +ERROR: column "msg_id" does not exist +LINE 2: msg_id, + ^ +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + msg_id | read_ct | message +--------+---------+----------------- + 1 | 1 | {"foo": "bar1"} +(1 row) + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + msg_id | read_ct | message +--------+---------+--------- +(0 rows) + +-- Archive message with msg_id=2. +select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + archive +--------- + f +(1 row) + +select + pgmq.create('my_queue'); + create +-------- + +(1 row) + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + send_batch +------------ + 1 + 2 + 3 +(3 rows) + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + archive +--------- + 3 +(1 row) + +select + pgmq.delete('my_queue', 6); + delete +-------- + f +(1 row) + +select + pgmq.drop_queue('my_queue'); + drop_queue +------------ + t +(1 row) + +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); + create_partitioned +-------------------- + +(1 row) + +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create('F$oo'); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM +select pgmq.create($$F'oo$$); +ERROR: queue name contains invalid characters: $, ;, --, or \' +CONTEXT: PL/pgSQL function pgmq.format_table_name(text,text) line 5 at RAISE +PL/pgSQL function pgmq.create_non_partitioned(text) line 3 during statement block local variable initialization +SQL statement "SELECT pgmq.create_non_partitioned(queue_name)" +PL/pgSQL function pgmq."create"(text) line 3 at PERFORM diff --git a/nix/tests/expected/timescale.out b/nix/tests/expected/timescale.out index d32b8e9f3..0812954ec 100644 --- 
a/nix/tests/expected/timescale.out +++ b/nix/tests/expected/timescale.out @@ -1,3 +1,10 @@ +-- Confirm we're running the apache version +show timescaledb.license; + timescaledb.license +--------------------- + apache +(1 row) + -- Create schema v create schema v; -- Create a table in the v schema diff --git a/nix/tests/migrations/pgmq.sql b/nix/tests/migrations/pgmq.sql new file mode 100644 index 000000000..d0121a269 --- /dev/null +++ b/nix/tests/migrations/pgmq.sql @@ -0,0 +1,12 @@ +select + pgmq.create('Foo'); + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + + diff --git a/nix/tests/prime.sql b/nix/tests/prime.sql index f31558531..3891817e1 100644 --- a/nix/tests/prime.sql +++ b/nix/tests/prime.sql @@ -28,6 +28,7 @@ create extension ltree; create extension moddatetime; create extension old_snapshot; create extension pageinspect; +create extension pg_backtrace; create extension pg_buffercache; /* @@ -42,6 +43,7 @@ create extension pg_graphql; create extension pg_freespacemap; create extension pg_hashids; create extension pg_prewarm; +create extension pg_partman; create extension pg_jsonschema; create extension pg_repack; create extension pg_stat_monitor; @@ -53,6 +55,7 @@ create extension pg_visibility; create extension pg_walinspect; create extension pgaudit; create extension pgcrypto; +create extension pgmq; create extension pgtap; create extension pgjwt; create extension pgroonga; @@ -61,11 +64,9 @@ create extension pgsodium; create extension pgrowlocks; create extension pgstattuple; create extension plpgsql_check; - create extension plv8; create extension plcoffee; create extension plls; - create extension postgis; create extension postgis_raster; create extension postgis_sfcgal; @@ -88,10 +89,3 @@ create extension "uuid-ossp"; create extension vector; create extension wrappers; create extension xml2; - - - - - - -CREATE EXTENSION IF NOT EXISTS pg_backtrace; diff --git a/nix/tests/sql/extensions_sql_interface.sql b/nix/tests/sql/extensions_sql_interface.sql index ce7580248..947a4a612 100644 --- a/nix/tests/sql/extensions_sql_interface.sql +++ b/nix/tests/sql/extensions_sql_interface.sql @@ -97,5 +97,6 @@ where and pc.relkind in ('r', 'v', 'm', 'i') order by e.extname, + n.nspname, pc.relname, pa.attname; diff --git a/nix/tests/sql/pg_partman.sql b/nix/tests/sql/pg_partman.sql new file mode 100644 index 000000000..621e1a7c9 --- /dev/null +++ b/nix/tests/sql/pg_partman.sql @@ -0,0 +1,82 @@ +create schema if not exists partman_test; + +/* +Simple Time Based: 1 Partition Per Day + +For native partitioning, you must start with a parent table that has already been set up to be partitioned in the desired type. Currently pg_partman only supports the RANGE type of partitioning (both for time & id). You cannot turn a non-partitioned table into the parent table of a partitioned set, which can make migration a challenge. This document will show you some techniques for how to manage this later. For now, we will start with a brand new table in this example. Any non-unique indexes can also be added to the parent table in PG11+ and they will automatically be created on all child tables. +*/ + +create table partman_test.time_taptest_table( + col1 int, + col2 text default 'stuff', + col3 timestamptz not null default now() +) + partition by range (col3); + +create index on partman_test.time_tap (col3); + +/* +Unique indexes (including primary keys) cannot be created on a natively partitioned parent unless they include the partition key. 
For time-based partitioning that generally doesn't work out since that would limit only a single timestamp value in each child table. pg_partman helps to manage this by using a template table to manage properties that currently are not supported by native partitioning. Note that this does not solve the issue of the constraint not being enforced across the entire partition set. See the main documentation to see which properties are managed by the template. + +Manually create the template table first so that when we run create_parent() the initial child tables that are created will have a primary key. If you do not supply a template table to pg_partman, it will create one for you in the schema that you installed the extension to. However properties you add to that template are only then applied to newly created child tables after that point. You will have to retroactively apply those properties manually to any child tables that already existed. +*/ + +create table partman_test.time_taptest_table_template (like partman_test.time_taptest_table); + +alter table partman_test.time_taptest_table_template add primary key (col1); + +/* +Review tables in the partman_test schema +*/ + +select + table_name, + table_type +from + information_schema.tables +where + table_schema = 'partman_test'; + + +select public.create_parent( + p_parent_table := 'partman_test.time_taptest_table', + p_control := 'col3', + p_interval := '1 day', + p_template_table := 'partman_test.time_taptest_table_template' +); + +/* +Review tables in the partman_test schema, which should now include daily partitions +*/ + +select + -- dates in partition names are variable, so reduced to the prefix + substring(table_name, 1, 21) as table_prefix, + table_type +from + information_schema.tables +where + table_schema = 'partman_test' +order by + table_name; + + +/* +Confirm maintenance proc runs without issue +*/ +call public.run_maintenance_proc(); + +/* +Make sure the background worker is NOT enabled. +This is intentional. We document using pg_cron to schedule calls to +public.run_maintenance_proc(). That is consistent with other providers. +*/ +select + application_name +from + pg_stat_activity +where + application_name = 'pg_partman_bgw'; + +-- Cleanup +drop schema partman_test cascade; diff --git a/nix/tests/sql/pgmq.sql b/nix/tests/sql/pgmq.sql new file mode 100644 index 000000000..4d4045484 --- /dev/null +++ b/nix/tests/sql/pgmq.sql @@ -0,0 +1,89 @@ +-- Test the standard flow +select + pgmq.create('Foo'); + +select + * +from + pgmq.send( + queue_name:='Foo', + msg:='{"foo": "bar1"}' + ); + +-- Test queue is not case sensitive +select + msg_id, + read_ct, + message +from + pgmq.send( + queue_name:='foo', -- note: lowercase useage + msg:='{"foo": "bar2"}', + delay:=5 + ); + +select + msg_id, + read_ct, + message +from + pgmq.read( + queue_name:='Foo', + vt:=30, + qty:=2 + ); + +select + msg_id, + read_ct, + message +from + pgmq.pop('Foo'); + + +-- Archive message with msg_id=2. 
+select + pgmq.archive( + queue_name:='Foo', + msg_id:=2 + ); + + +select + pgmq.create('my_queue'); + +select + pgmq.send_batch( + queue_name:='my_queue', + msgs:=array['{"foo": "bar3"}','{"foo": "bar4"}','{"foo": "bar5"}']::jsonb[] +); + +select + pgmq.archive( + queue_name:='my_queue', + msg_ids:=array[3, 4, 5] + ); + +select + pgmq.delete('my_queue', 6); + + +select + pgmq.drop_queue('my_queue'); + +select + pgmq.create_partitioned( + 'my_partitioned_queue', + '5 seconds', + '10 seconds' +); + + +-- Make sure SQLI enabling characters are blocked +select pgmq.create('F--oo'); +select pgmq.create('F$oo'); +select pgmq.create($$F'oo$$); + + + + diff --git a/nix/tests/sql/timescale.sql b/nix/tests/sql/timescale.sql index 939612a4a..baa96f48b 100644 --- a/nix/tests/sql/timescale.sql +++ b/nix/tests/sql/timescale.sql @@ -1,3 +1,6 @@ +-- Confirm we're running the apache version +show timescaledb.license; + -- Create schema v create schema v; diff --git a/nix/tools/run-client.sh.in b/nix/tools/run-client.sh.in index 18a8f21bb..5f3bf9bbb 100644 --- a/nix/tools/run-client.sh.in +++ b/nix/tools/run-client.sh.in @@ -3,16 +3,84 @@ [ ! -z "$DEBUG" ] && set -x -# first argument should be '15' or '16' for the version -if [ "$1" == "15" ]; then +# Default values +PSQL_VERSION="15" +MIGRATION_FILE="" +PORTNO="@PGSQL_DEFAULT_PORT@" + +# Function to display help +print_help() { + echo "Usage: nix run .#start-client -- [options]" + echo + echo "Options:" + echo " -v, --version [15|16|orioledb-16] Specify the PostgreSQL version to use (required)" + echo " -f, --file FILE Provide a custom migration script" + echo " -h, --help Show this help message" + echo + echo "Description:" + echo " Starts an interactive 'psql' session connecting to a Postgres database started with the" + echo " 'nix run .#start-server' command. If a migration file is not provided, the client" + echo " initializes the database with the default migrations for a new Supabase project." + echo " If a migrations file is provided, default migrations are skipped" + echo " If no migration file is provided, it runs the default Supabase migrations." + echo + echo "Examples:" + echo " nix run .#start-client" + echo " nix run .#start-client -- --version 15" + echo " nix run .#start-client -- --version 16 --file custom_migration.sql" + echo " nix run .#start-client -- --version 16 --port 5433" +} + +# Parse arguments +while [[ "$#" -gt 0 ]]; do + case "$1" in + -v|--version) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + PSQL_VERSION="$2" + shift 2 + else + echo "Error: --version requires an argument (15, 16, or orioledb-16)" + exit 1 + fi + ;; + -f|--file) + if [[ -n "$2" && ! "$2" =~ ^- ]]; then + MIGRATION_FILE="$2" + shift 2 + else + echo "Error: --file requires a filename" + exit 1 + fi + ;; + -h|--help) + print_help + exit 0 + ;; + *) + echo "Unknown option: $1" + print_help + exit 1 + ;; + esac +done + +# Check if version is provided +if [[ -z "$PSQL_VERSION" ]]; then + echo "Error: PostgreSQL version is required." 
+ print_help + exit 1 +fi + +# Determine PostgreSQL version +if [ "$PSQL_VERSION" == "15" ]; then echo "Starting client for PSQL 15" PSQL15=@PSQL15_BINDIR@ BINDIR="$PSQL15" -elif [ "$1" == "16" ]; then +elif [ "$PSQL_VERSION" == "16" ]; then echo "Starting client for PSQL 16" PSQL16=@PSQL16_BINDIR@ BINDIR="$PSQL16" -elif [ "$1" == "orioledb-16" ]; then +elif [ "$PSQL_VERSION" == "orioledb-16" ]; then echo "Starting client for PSQL ORIOLEDB 16" PSQLORIOLEDB16=@PSQLORIOLEDB16_BINDIR@ BINDIR="$PSQLORIOLEDB16" @@ -20,11 +88,12 @@ else echo "Please provide a valid Postgres version (15, 16, or orioledb-16)" exit 1 fi + #vars for migration.sh export PATH=$BINDIR/bin:$PATH export POSTGRES_DB=postgres export POSTGRES_HOST=localhost -export POSTGRES_PORT=@PGSQL_DEFAULT_PORT@ + PORTNO="${2:-@PGSQL_DEFAULT_PORT@}" PGSQL_SUPERUSER=@PGSQL_SUPERUSER@ MIGRATIONS_DIR=@MIGRATIONS_DIR@ @@ -35,20 +104,37 @@ psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$POR create role postgres superuser login password '$PGPASSWORD'; alter database postgres owner to postgres; EOSQL -for sql in "$MIGRATIONS_DIR"/init-scripts/*.sql; do - echo "$0: running $sql" - psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -f "$sql" postgres -done -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -c "ALTER USER supabase_admin WITH PASSWORD '$PGPASSWORD'" -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL" -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL" -# run migrations as super user - postgres user demoted in post-setup -for sql in "$MIGRATIONS_DIR"/migrations/*.sql; do - echo "$0: running $sql" - psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -f "$sql" postgres -done -psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -f "$POSTGRESQL_SCHEMA_SQL" postgres -# TODO Do we need to reset stats when running migrations locally? 
-#psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -c 'SELECT extensions.pg_stat_statements_reset(); SELECT pg_stat_reset();' postgres || true +# Use custom migration script if provided +if [ -n "$MIGRATION_FILE" ]; then + echo "$0: running user-provided migration file $MIGRATION_FILE" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U "$PGSQL_SUPERUSER" -p "$PORTNO" -h localhost -f "$MIGRATION_FILE" postgres +else + # Run default init scripts + for sql in "$MIGRATIONS_DIR"/init-scripts/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -f "$sql" postgres + done + + # Alter user password + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -c "ALTER USER supabase_admin WITH PASSWORD '$PGPASSWORD'" + + # Run additional schema files + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$PGBOUNCER_AUTH_SCHEMA_SQL" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U postgres -p "$PORTNO" -h localhost -d postgres -f "$STAT_EXTENSION_SQL" + + # Run migrations as superuser + for sql in "$MIGRATIONS_DIR"/migrations/*.sql; do + echo "$0: running $sql" + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -f "$sql" postgres + done + + # Run PostgreSQL schema + psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -f "$POSTGRESQL_SCHEMA_SQL" postgres +fi + +# Optional: Reset stats if needed +# psql -v ON_ERROR_STOP=1 --no-password --no-psqlrc -U supabase_admin -p "$PORTNO" -h localhost -c 'SELECT extensions.pg_stat_statements_reset(); SELECT pg_stat_reset();' postgres || true + +# Start interactive psql session exec psql -U postgres -p "$PORTNO" -h localhost postgres diff --git a/testinfra/test_ami.py b/testinfra/test_ami.py index 05ec773b8..403641dac 100644 --- a/testinfra/test_ami.py +++ b/testinfra/test_ami.py @@ -65,6 +65,10 @@ name: gotrue url: 'http://localhost:9122/metrics' labels_to_attach: [{name: supabase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] + - + name: postgrest + url: 'http://localhost:3001/metrics' + labels_to_attach: [{name: supabase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgrest}] monitoring: disk_usage: enabled: true diff --git a/testinfra/test_ami_nix.py b/testinfra/test_ami_nix.py index 18a366b34..cd58859fc 100644 --- a/testinfra/test_ami_nix.py +++ b/testinfra/test_ami_nix.py @@ -65,6 +65,10 @@ name: gotrue url: 'http://localhost:9122/metrics' labels_to_attach: [{name: supabase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: gotrue}] + - + name: postgrest + url: 'http://localhost:3001/metrics' + labels_to_attach: [{name: supabase_project_ref, value: aaaaaaaaaaaaaaaaaaaa}, {name: service_type, value: postgrest}] monitoring: disk_usage: enabled: true