From 08bb21f7e69b636a736635e081bf16b1083eacb1 Mon Sep 17 00:00:00 2001 From: Sven Klemm Date: Thu, 15 Dec 2022 16:42:46 +0100 Subject: [PATCH] 2.9.0 Post-release adjustments Add 2.9.0 to update test scripts and adjust downgrade scripts for 2.9.0. Additionally adjust CHANGELOG to sync with the actual release CHANGELOG and add PG15 to CI. --- .github/ci_settings.py | 6 +- .github/workflows/apt-arm-packages.yaml | 2 +- .github/workflows/apt-packages.yaml | 2 +- .github/workflows/rpm-packages.yaml | 2 +- CHANGELOG.md | 15 +- scripts/docker-build.sh | 2 +- scripts/test_updates_pg12.sh | 8 +- scripts/test_updates_pg13.sh | 2 +- scripts/test_updates_pg14.sh | 3 +- scripts/test_updates_pg15.sh | 11 + sql/CMakeLists.txt | 3 +- sql/updates/2.9.0--2.8.1.sql | 432 ++++++++++++++++++ sql/updates/reverse-dev.sql | 432 ------------------ test/sql/updates/post.catalog.sql | 2 +- test/sql/updates/setup.continuous_aggs.v2.sql | 42 +- version.config | 4 +- 16 files changed, 497 insertions(+), 471 deletions(-) create mode 100755 scripts/test_updates_pg15.sh create mode 100644 sql/updates/2.9.0--2.8.1.sql diff --git a/.github/ci_settings.py b/.github/ci_settings.py index 960e51915..33da31e71 100644 --- a/.github/ci_settings.py +++ b/.github/ci_settings.py @@ -22,4 +22,8 @@ PG14_EARLIEST = "14.0" PG14_LATEST = "14.6" PG14_ABI_MIN = "14.0" -PG_LATEST = [PG12_LATEST, PG13_LATEST, PG14_LATEST] +PG15_EARLIEST = "15.0" +PG15_LATEST = "15.1" +PG15_ABI_MIN = "15.0" + +PG_LATEST = [PG12_LATEST, PG13_LATEST, PG14_LATEST, PG15_LATEST] diff --git a/.github/workflows/apt-arm-packages.yaml b/.github/workflows/apt-arm-packages.yaml index 42ee7ef2b..cc410ff0a 100644 --- a/.github/workflows/apt-arm-packages.yaml +++ b/.github/workflows/apt-arm-packages.yaml @@ -19,7 +19,7 @@ jobs: # Debian images: 10 (buster), 11 (bullseye) # Ubuntu images: 20.04 LTS (focal), 22.04 (jammy) image: [ "debian:10-slim","debian:11-slim","ubuntu:focal", "ubuntu:jammy"] - pg: [ 12, 13, 14 ] + pg: [ 12, 13, 14, 15 ] steps: - name: Setup emulation diff --git a/.github/workflows/apt-packages.yaml b/.github/workflows/apt-packages.yaml index 3262c0fb1..ed000ed54 100644 --- a/.github/workflows/apt-packages.yaml +++ b/.github/workflows/apt-packages.yaml @@ -23,7 +23,7 @@ jobs: # Debian images: 10 (buster), 11 (bullseye) # Ubuntu images: 18.04 LTS (bionic), 20.04 LTS (focal), 21.10 (impish), 22.04 (jammy) image: [ "debian:10-slim", "debian:11-slim", "ubuntu:bionic", "ubuntu:focal", "ubuntu:jammy"] - pg: [ 12, 13, 14 ] + pg: [ 12, 13, 14, 15 ] license: [ "TSL", "Apache"] include: - license: Apache diff --git a/.github/workflows/rpm-packages.yaml b/.github/workflows/rpm-packages.yaml index 6f59c4487..f367809a5 100644 --- a/.github/workflows/rpm-packages.yaml +++ b/.github/workflows/rpm-packages.yaml @@ -19,7 +19,7 @@ jobs: fail-fast: false matrix: image: [ "centos:centos7", "rockylinux:8", "rockylinux:9" ] - pg: [ 12, 13, 14 ] + pg: [ 12, 13, 14, 15 ] license: [ "TSL", "Apache"] include: - license: Apache diff --git a/CHANGELOG.md b/CHANGELOG.md index d670bbc32..09f9ad84e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,13 +4,13 @@ `psql` with the `-X` flag to prevent any `.psqlrc` commands from accidentally triggering the load of a previous DB version.** +## Unreleased + **Bugfixes** -* #5054 Fix segfault after second ANALYZE +* #5101 Fix enabling compression on caggs with renamed columns -**Thanks** -* @kyrias for reporting a crash when ANALYZE is executed on extended query protocol mode with extension loaded. 
+## 2.9.0 (2022-12-15) -## 2.9.0 This release adds major new features since the 2.8.1 release. We deem it moderate priority for upgrading. @@ -60,15 +60,18 @@ This release also includes several bug fixes. * #4955 Fix cagg migration for hypertables using timestamp without timezone * #4968 Check for interrupts in gapfill main loop * #4988 Fix cagg migration crash when refreshing the newly created cagg +* #5054 Fix segfault after second ANALYZE +* #5086 Reset baserel cache on invalid hypertable cache **Thanks** +* @byazici for reporting a problem with segmentby on compressed caggs * @jflambert for reporting a crash with nested user-defined functions. * @jvanns for reporting hypertable FK reference to vanilla PostgreSQL partitioned table doesn't seem to work * @kou for fixing a typo in process_compressed_data_out -* @xvaara for helping reproduce a bug with bitmap scans in transparent decompression -* @byazici for reporting a problem with segmentby on compressed caggs +* @kyrias for reporting a crash when ANALYZE is executed on extended query protocol mode with extension loaded. * @tobiasdirksen for requesting Continuous aggregate on top of another continuous aggregate * @xima for reporting a bug in Cagg migration +* @xvaara for helping reproduce a bug with bitmap scans in transparent decompression ## 2.8.1 (2022-10-06) diff --git a/scripts/docker-build.sh b/scripts/docker-build.sh index 9de09e962..432e628a0 100755 --- a/scripts/docker-build.sh +++ b/scripts/docker-build.sh @@ -53,7 +53,7 @@ create_postgres_build_image() { docker run -d --name ${BUILD_CONTAINER_NAME} --env POSTGRES_HOST_AUTH_METHOD=trust -v ${BASE_DIR}:/src postgres:${PG_IMAGE_TAG} # Install build dependencies - docker exec -u root ${BUILD_CONTAINER_NAME} /bin/bash -c "apk add --no-cache --virtual .build-deps postgresql-dev gdb coreutils dpkg-dev gcc git libc-dev make cmake util-linux-dev diffutils openssl-dev krb5-dev && mkdir -p /build/debug" + docker exec -u root ${BUILD_CONTAINER_NAME} /bin/bash -c "apk add --no-cache --virtual .build-deps postgresql-dev gdb coreutils dpkg-dev gcc git libc-dev make cmake util-linux-dev diffutils libssl3 openssl-dev krb5-dev && mkdir -p /build/debug" docker commit -a $USER -m "TimescaleDB build base image version $PG_IMAGE_TAG" ${BUILD_CONTAINER_NAME} ${image} remove_build_container ${BUILD_CONTAINER_NAME} diff --git a/scripts/test_updates_pg12.sh b/scripts/test_updates_pg12.sh index d8bd292c1..c2d64926e 100755 --- a/scripts/test_updates_pg12.sh +++ b/scripts/test_updates_pg12.sh @@ -12,13 +12,11 @@ source ${SCRIPT_DIR}/test_functions.inc # 2.0.0-rc1 and 2.0.0-rc2 # # Please extend this list if repairs are needed between more steps. 
-run_tests "$@" -r -v6 \ - 1.7.0-pg12 1.7.1-pg12 1.7.2-pg12 1.7.3-pg12 1.7.4-pg12 1.7.5-pg12 run_tests "$@" -r -v7 \ - 2.0.0-rc1-pg12 + 2.0.0-rc1-pg12 run_tests "$@" -v7 \ 2.0.0-rc2-pg12 2.0.0-rc3-pg12 2.0.0-rc4-pg12 2.0.0-pg12 2.0.1-pg12 2.0.2-pg12 2.1.0-pg12 \ - 2.1.1-pg12 2.2.0-pg12 2.2.1-pg12 2.3.0-pg12 2.3.1-pg12 2.4.0-pg12 2.4.1-pg12 2.4.2-pg12 + 2.1.1-pg12 2.2.0-pg12 2.2.1-pg12 2.3.0-pg12 2.3.1-pg12 2.4.0-pg12 2.4.1-pg12 2.4.2-pg12 run_tests "$@" -v8 \ 2.5.0-pg12 2.5.1-pg12 2.5.2-pg12 2.6.0-pg12 2.6.1-pg12 2.7.0-pg12 2.7.1-pg12 2.7.2-pg12 \ - 2.8.0-pg12 2.8.1-pg12 + 2.8.0-pg12 2.8.1-pg12 2.9.0-pg12 diff --git a/scripts/test_updates_pg13.sh b/scripts/test_updates_pg13.sh index 35abfa985..fc86aa08a 100755 --- a/scripts/test_updates_pg13.sh +++ b/scripts/test_updates_pg13.sh @@ -12,5 +12,5 @@ run_tests "$@" -v7 \ 2.4.0-pg13 2.4.1-pg13 2.4.2-pg13 run_tests "$@" -v8 \ 2.5.0-pg13 2.5.1-pg13 2.5.2-pg13 2.6.0-pg13 2.6.1-pg13 2.7.0-pg13 2.7.1-pg13 2.7.2-pg13 \ - 2.8.0-pg13 2.8.1-pg13 + 2.8.0-pg13 2.8.1-pg13 2.9.0-pg13 diff --git a/scripts/test_updates_pg14.sh b/scripts/test_updates_pg14.sh index 3e558e7bc..8334489d8 100755 --- a/scripts/test_updates_pg14.sh +++ b/scripts/test_updates_pg14.sh @@ -11,4 +11,5 @@ run_tests "$@" -v7 \ 2.5.0-pg14 2.5.1-pg14 run_tests "$@" -v8 \ 2.5.0-pg14 2.5.1-pg14 2.5.2-pg14 2.6.0-pg14 2.6.1-pg14 2.7.0-pg14 2.7.1-pg14 2.7.2-pg14 \ - 2.8.0-pg14 2.8.1-pg14 + 2.8.0-pg14 2.8.1-pg14 2.9.0-pg14 + diff --git a/scripts/test_updates_pg15.sh b/scripts/test_updates_pg15.sh new file mode 100755 index 000000000..9e082de25 --- /dev/null +++ b/scripts/test_updates_pg15.sh @@ -0,0 +1,11 @@ +#!/usr/bin/env bash + +set -e + +SCRIPT_DIR=$(dirname $0) + +# shellcheck source=scripts/test_functions.inc +source ${SCRIPT_DIR}/test_functions.inc + +run_tests "$@" -v8 \ + 2.9.0-pg15 diff --git a/sql/CMakeLists.txt b/sql/CMakeLists.txt index 3cde01d75..c124db13d 100644 --- a/sql/CMakeLists.txt +++ b/sql/CMakeLists.txt @@ -62,7 +62,8 @@ set(OLD_REV_FILES 2.7.1--2.7.0.sql 2.7.2--2.7.1.sql 2.8.0--2.7.2.sql - 2.8.1--2.8.0.sql) + 2.8.1--2.8.0.sql + 2.9.0--2.8.1.sql) set(MODULE_PATHNAME "$libdir/timescaledb-${PROJECT_VERSION_MOD}") set(LOADER_PATHNAME "$libdir/timescaledb") diff --git a/sql/updates/2.9.0--2.8.1.sql b/sql/updates/2.9.0--2.8.1.sql new file mode 100644 index 000000000..a036bc4ff --- /dev/null +++ b/sql/updates/2.9.0--2.8.1.sql @@ -0,0 +1,432 @@ +-- gapfill with timezone support +DROP FUNCTION @extschema@.time_bucket_gapfill(INTERVAL,TIMESTAMPTZ,TEXT,TIMESTAMPTZ,TIMESTAMPTZ); + +ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT compression_chunk_size_pkey; +ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT compression_chunk_size_pkey PRIMARY KEY(chunk_id,compressed_chunk_id); + +DROP FUNCTION _timescaledb_internal.policy_job_error_retention(integer, JSONB); +DROP FUNCTION _timescaledb_internal.policy_job_error_retention_check(JSONB); +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; + +ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_errors; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_internal.job_errors; + +DROP VIEW timescaledb_information.job_errors; +DROP TABLE _timescaledb_internal.job_errors; + +-- drop dependent views +DROP VIEW IF EXISTS timescaledb_information.job_stats; +DROP VIEW IF EXISTS timescaledb_information.jobs; + +CREATE TABLE _timescaledb_internal._tmp_bgw_job_stat AS SELECT * FROM _timescaledb_internal.bgw_job_stat; +DROP TABLE _timescaledb_internal.bgw_job_stat; + +CREATE TABLE 
_timescaledb_internal.bgw_job_stat ( + job_id integer NOT NULL, + last_start timestamptz NOT NULL DEFAULT NOW(), + last_finish timestamptz NOT NULL, + next_start timestamptz NOT NULL, + last_successful_finish timestamptz NOT NULL, + last_run_success bool NOT NULL, + total_runs bigint NOT NULL, + total_duration interval NOT NULL, + total_successes bigint NOT NULL, + total_failures bigint NOT NULL, + total_crashes bigint NOT NULL, + consecutive_failures int NOT NULL, + consecutive_crashes int NOT NULL, + -- table constraints + CONSTRAINT bgw_job_stat_pkey PRIMARY KEY (job_id), + CONSTRAINT bgw_job_stat_job_id_fkey FOREIGN KEY (job_id) REFERENCES _timescaledb_config.bgw_job (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_internal.bgw_job_stat SELECT + job_id, last_start, last_finish, next_start, last_successful_finish, last_run_success, total_runs, total_duration, total_successes, total_failures, total_crashes, consecutive_failures, consecutive_crashes +FROM _timescaledb_internal._tmp_bgw_job_stat; +DROP TABLE _timescaledb_internal._tmp_bgw_job_stat; + +GRANT SELECT ON TABLE _timescaledb_internal.bgw_job_stat TO PUBLIC; + +DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; +DROP FUNCTION _timescaledb_internal.hypertable_local_size(name, name); + +CREATE FUNCTION _timescaledb_internal.hypertable_local_size( + schema_name_in name, + table_name_in name) +RETURNS TABLE ( + table_bytes BIGINT, + index_bytes BIGINT, + toast_bytes BIGINT, + total_bytes BIGINT) +LANGUAGE SQL VOLATILE STRICT AS +$BODY$ + /* get the main hypertable id and sizes */ + WITH _hypertable AS ( + SELECT + id, + _timescaledb_internal.relation_size(format('%I.%I', schema_name, table_name)::regclass) AS relsize + FROM + _timescaledb_catalog.hypertable + WHERE + schema_name = schema_name_in + AND table_name = table_name_in + ), + /* project the size of the parent hypertable */ + _hypertable_sizes AS ( + SELECT + id, + COALESCE((relsize).total_size, 0) AS total_bytes, + COALESCE((relsize).heap_size, 0) AS heap_bytes, + COALESCE((relsize).index_size, 0) AS index_bytes, + COALESCE((relsize).toast_size, 0) AS toast_bytes, + 0::BIGINT AS compressed_total_size, + 0::BIGINT AS compressed_index_size, + 0::BIGINT AS compressed_toast_size, + 0::BIGINT AS compressed_heap_size + FROM + _hypertable + ), + /* calculate the size of the hypertable chunks */ + _chunk_sizes AS ( + SELECT + chunk_id, + COALESCE(ch.total_bytes, 0) AS total_bytes, + COALESCE(ch.heap_bytes, 0) AS heap_bytes, + COALESCE(ch.index_bytes, 0) AS index_bytes, + COALESCE(ch.toast_bytes, 0) AS toast_bytes, + COALESCE(ch.compressed_total_size, 0) AS compressed_total_size, + COALESCE(ch.compressed_index_size, 0) AS compressed_index_size, + COALESCE(ch.compressed_toast_size, 0) AS compressed_toast_size, + COALESCE(ch.compressed_heap_size, 0) AS compressed_heap_size + FROM + _timescaledb_internal.hypertable_chunk_local_size ch + JOIN _hypertable_sizes ht ON ht.id = ch.hypertable_id + ) + /* calculate the SUM of the hypertable and chunk sizes */ + SELECT + (SUM(heap_bytes) + SUM(compressed_heap_size))::BIGINT AS heap_bytes, + (SUM(index_bytes) + SUM(compressed_index_size))::BIGINT AS index_bytes, + (SUM(toast_bytes) + SUM(compressed_toast_size))::BIGINT AS toast_bytes, + (SUM(total_bytes) + SUM(compressed_total_size))::BIGINT AS total_bytes + FROM + (SELECT * FROM _hypertable_sizes + UNION ALL + SELECT * FROM _chunk_sizes) AS sizes; +$BODY$ SET search_path TO pg_catalog, pg_temp; + + +DROP VIEW IF EXISTS timescaledb_information.job_stats; +DROP VIEW IF EXISTS 
timescaledb_information.jobs; +DROP VIEW IF EXISTS timescaledb_experimental.policies; +-- fixed schedule +DROP FUNCTION IF EXISTS @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, BOOL); + +DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL); + +-- fixed schedule changes +-- drop and recreate functions with modified signatures, modified views, modified tables +DROP FUNCTION IF EXISTS @extschema@.add_job(REGPROC, INTERVAL, JSONB, TIMESTAMPTZ, BOOL, REGPROC, BOOL, TEXT); +DROP FUNCTION IF EXISTS @extschema@.add_continuous_aggregate_policy(REGCLASS, "any", "any", INTERVAL, BOOL, TIMESTAMPTZ, TEXT); +DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); +DROP FUNCTION IF EXISTS @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); +DROP FUNCTION IF EXISTS @extschema@.add_reorder_policy(REGCLASS, NAME, BOOL, TIMESTAMPTZ, TEXT); +-- recreate functions with the previous signature +CREATE FUNCTION @extschema@.add_job( + proc REGPROC, + schedule_interval INTERVAL, + config JSONB DEFAULT NULL, + initial_start TIMESTAMPTZ DEFAULT NULL, + scheduled BOOL DEFAULT true, + check_config REGPROC DEFAULT NULL +) RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_job_add' LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL = false, schedule_interval INTERVAL = NULL) +RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION @extschema@.add_retention_policy( + relation REGCLASS, + drop_after "any", + if_not_exists BOOL = false, + schedule_interval INTERVAL = NULL +) +RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' +LANGUAGE C VOLATILE STRICT; + +CREATE FUNCTION @extschema@.add_continuous_aggregate_policy(continuous_aggregate REGCLASS, start_offset "any", end_offset "any", schedule_interval INTERVAL, if_not_exists BOOL = false) +RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_add' +LANGUAGE C VOLATILE; + +CREATE FUNCTION @extschema@.add_reorder_policy( + hypertable REGCLASS, + index_name NAME, + if_not_exists BOOL = false +) RETURNS INTEGER +AS '@MODULE_PATHNAME@', 'ts_policy_reorder_add' +LANGUAGE C VOLATILE STRICT; + +DROP VIEW IF EXISTS timescaledb_information.jobs; +DROP VIEW IF EXISTS timescaledb_information.job_stats; + +-- now need to rebuild the table +ALTER TABLE _timescaledb_internal.bgw_job_stat + DROP CONSTRAINT bgw_job_stat_job_id_fkey; +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats + DROP CONSTRAINT bgw_policy_chunk_stats_chunk_id_fkey, + DROP CONSTRAINT bgw_policy_chunk_stats_job_id_fkey; + +CREATE TABLE _timescaledb_config.bgw_job_tmp AS SELECT * FROM _timescaledb_config.bgw_job; +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_config.bgw_job; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_config.bgw_job_id_seq; + +CREATE TABLE _timescaledb_internal.tmp_bgw_job_seq_value AS SELECT last_value, is_called FROM _timescaledb_config.bgw_job_id_seq; +DROP TABLE _timescaledb_config.bgw_job; + +CREATE SEQUENCE _timescaledb_config.bgw_job_id_seq MINVALUE 1000; +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job_id_seq', ''); +SELECT setval('_timescaledb_config.bgw_job_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_bgw_job_seq_value; +DROP TABLE _timescaledb_internal.tmp_bgw_job_seq_value; + +CREATE TABLE 
_timescaledb_config.bgw_job ( + id integer PRIMARY KEY DEFAULT nextval('_timescaledb_config.bgw_job_id_seq'), + application_name name NOT NULL, + schedule_interval interval NOT NULL, + max_runtime interval NOT NULL, + max_retries integer NOT NULL, + retry_period interval NOT NULL, + proc_schema name NOT NULL, + proc_name name NOT NULL, + owner name NOT NULL DEFAULT CURRENT_ROLE, + scheduled bool NOT NULL DEFAULT TRUE, + hypertable_id integer REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + config jsonb, + check_schema NAME, + check_name NAME +); + +INSERT INTO _timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name, owner, scheduled, hypertable_id, config) +SELECT id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name, owner, scheduled, hypertable_id, config FROM _timescaledb_config.bgw_job_tmp ORDER BY id; + +ALTER SEQUENCE _timescaledb_config.bgw_job_id_seq OWNED BY _timescaledb_config.bgw_job.id; +CREATE INDEX bgw_job_proc_hypertable_id_idx ON _timescaledb_config.bgw_job(proc_schema,proc_name,hypertable_id); +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job', 'WHERE id >= 1000'); +GRANT SELECT ON _timescaledb_config.bgw_job TO PUBLIC; +GRANT SELECT ON _timescaledb_config.bgw_job_id_seq TO PUBLIC; + +DROP TABLE _timescaledb_config.bgw_job_tmp; +ALTER TABLE _timescaledb_internal.bgw_job_stat ADD CONSTRAINT bgw_job_stat_job_id_fkey FOREIGN KEY(job_id) REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE; +ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats + ADD CONSTRAINT bgw_policy_chunk_stats_chunk_id_fkey + FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id) + ON DELETE CASCADE, + ADD CONSTRAINT bgw_policy_chunk_stats_job_id_fkey + FOREIGN KEY(job_id) REFERENCES _timescaledb_config.bgw_job(id) + ON DELETE CASCADE; + +DROP FUNCTION _timescaledb_internal.health; + +-- Recreate _timescaledb_catalog.dimension table without the compress_interval_length column -- +CREATE TABLE _timescaledb_internal.dimension_tmp +AS SELECT * from _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_internal.tmp_dimension_seq_value AS +SELECT last_value, is_called FROM _timescaledb_catalog.dimension_id_seq; + +--drop foreign keys on dimension table +ALTER TABLE _timescaledb_catalog.dimension_partition DROP CONSTRAINT +dimension_partition_dimension_id_fkey; +ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT +dimension_slice_dimension_id_fkey; + +--drop dependent views +DROP VIEW IF EXISTS timescaledb_information.chunks; +DROP VIEW IF EXISTS timescaledb_information.dimensions; + +ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension; +ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.dimension_id_seq; +DROP TABLE _timescaledb_catalog.dimension; + +CREATE TABLE _timescaledb_catalog.dimension ( + id serial NOT NULL , + hypertable_id integer NOT NULL, + column_name name NOT NULL, + column_type REGTYPE NOT NULL, + aligned boolean NOT NULL, + -- closed dimensions + num_slices smallint NULL, + partitioning_func_schema name NULL, + partitioning_func name NULL, + -- open dimensions (e.g., time) + interval_length bigint NULL, + integer_now_func_schema name NULL, + integer_now_func name NULL, + -- table constraints + CONSTRAINT dimension_pkey PRIMARY KEY (id), + CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name), + CONSTRAINT dimension_check 
CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)),
+  CONSTRAINT dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)),
+  CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)),
+  CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0),
+  CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
+);
+
+INSERT INTO _timescaledb_catalog.dimension
+( id, hypertable_id, column_name, column_type,
+  aligned, num_slices, partitioning_func_schema,
+  partitioning_func, interval_length,
+  integer_now_func_schema, integer_now_func)
+SELECT id, hypertable_id, column_name, column_type,
+  aligned, num_slices, partitioning_func_schema,
+  partitioning_func, interval_length,
+  integer_now_func_schema, integer_now_func
+FROM _timescaledb_internal.dimension_tmp;
+
+ALTER SEQUENCE _timescaledb_catalog.dimension_id_seq OWNED BY _timescaledb_catalog.dimension.id;
+SELECT setval('_timescaledb_catalog.dimension_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_dimension_seq_value;
+
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', '');
+SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), '');
+
+--add the foreign key constraints
+ALTER TABLE _timescaledb_catalog.dimension_partition ADD CONSTRAINT
+dimension_partition_dimension_id_fkey FOREIGN KEY (dimension_id)
+REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE;
+ALTER TABLE _timescaledb_catalog.dimension_slice ADD CONSTRAINT
+dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id)
+REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE;
+
+--cleanup
+DROP TABLE _timescaledb_internal.dimension_tmp;
+DROP TABLE _timescaledb_internal.tmp_dimension_seq_value;
+
+GRANT SELECT ON _timescaledb_catalog.dimension_id_seq TO PUBLIC;
+GRANT SELECT ON _timescaledb_catalog.dimension TO PUBLIC;
+
+-- end recreate _timescaledb_catalog.dimension table --
+
+-- changes related to alter_data_node()
+DROP INDEX _timescaledb_catalog.chunk_data_node_node_name_idx;
+DROP FUNCTION @extschema@.alter_data_node;
+
+--
+-- Prevent downgrading if there are hierarchical continuous aggregates
+--
+DO
+$$
+DECLARE
+  caggs_hierarchical TEXT;
+  caggs_count INTEGER;
+BEGIN
+  SELECT
+    string_agg(format('%I.%I', user_view_schema, user_view_name), ', '),
+    count(*)
+  INTO
+    caggs_hierarchical,
+    caggs_count
+  FROM
+    _timescaledb_catalog.continuous_agg
+  WHERE
+    parent_mat_hypertable_id IS NOT NULL;
+
+  IF caggs_count > 0 THEN
+    RAISE EXCEPTION 'Downgrade is not possible because there are % hierarchical continuous aggregates: %', caggs_count, caggs_hierarchical
+    USING HINT = 'Remove the corresponding continuous aggregates manually before downgrading';
+  END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.continuous_agg`
+--
+DROP VIEW IF EXISTS timescaledb_information.hypertables;
+DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates;
+DROP PROCEDURE IF EXISTS @extschema@.cagg_migrate (REGCLASS, BOOLEAN, BOOLEAN);
+DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_pre_validation (TEXT, TEXT, TEXT);
+DROP 
PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_create_plan (_timescaledb_catalog.continuous_agg, TEXT, BOOLEAN, BOOLEAN); +DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_plan_exists (INTEGER); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_plan (_timescaledb_catalog.continuous_agg); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_create_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_disable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_enable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_refresh_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_data (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_override_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_drop_old_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); + +ALTER EXTENSION timescaledb + DROP TABLE _timescaledb_catalog.continuous_agg; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + DROP CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + DROP CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey; + +CREATE TABLE _timescaledb_catalog._tmp_continuous_agg AS + SELECT + mat_hypertable_id, + raw_hypertable_id, + user_view_schema, + user_view_name, + partial_view_schema, + partial_view_name, + bucket_width, + direct_view_schema, + direct_view_name, + materialized_only, + finalized + FROM + _timescaledb_catalog.continuous_agg + ORDER BY + mat_hypertable_id; + +DROP TABLE _timescaledb_catalog.continuous_agg; + +CREATE TABLE _timescaledb_catalog.continuous_agg ( + mat_hypertable_id integer NOT NULL, + raw_hypertable_id integer NOT NULL, + user_view_schema name NOT NULL, + user_view_name name NOT NULL, + partial_view_schema name NOT NULL, + partial_view_name name NOT NULL, + bucket_width bigint NOT NULL, + direct_view_schema name NOT NULL, + direct_view_name name NOT NULL, + materialized_only bool NOT NULL DEFAULT FALSE, + finalized bool NOT NULL DEFAULT TRUE, + -- table constraints + CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id), + CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name), + CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name), + CONSTRAINT continuous_agg_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + CONSTRAINT continuous_agg_raw_hypertable_id_fkey + FOREIGN KEY (raw_hypertable_id) 
REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.continuous_agg +SELECT * FROM _timescaledb_catalog._tmp_continuous_agg; +DROP TABLE _timescaledb_catalog._tmp_continuous_agg; + +CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', ''); + +GRANT SELECT ON TABLE _timescaledb_catalog.continuous_agg TO PUBLIC; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + ADD CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey + FOREIGN KEY (materialization_id) + REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id) ON DELETE CASCADE; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + ADD CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) + REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id); + +ANALYZE _timescaledb_catalog.continuous_agg; + +-- changes related to drop_stale_chunks() +DROP FUNCTION _timescaledb_internal.drop_stale_chunks; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index a036bc4ff..e69de29bb 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -1,432 +0,0 @@ --- gapfill with timezone support -DROP FUNCTION @extschema@.time_bucket_gapfill(INTERVAL,TIMESTAMPTZ,TEXT,TIMESTAMPTZ,TIMESTAMPTZ); - -ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT compression_chunk_size_pkey; -ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT compression_chunk_size_pkey PRIMARY KEY(chunk_id,compressed_chunk_id); - -DROP FUNCTION _timescaledb_internal.policy_job_error_retention(integer, JSONB); -DROP FUNCTION _timescaledb_internal.policy_job_error_retention_check(JSONB); -DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; - -ALTER EXTENSION timescaledb DROP VIEW timescaledb_information.job_errors; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_internal.job_errors; - -DROP VIEW timescaledb_information.job_errors; -DROP TABLE _timescaledb_internal.job_errors; - --- drop dependent views -DROP VIEW IF EXISTS timescaledb_information.job_stats; -DROP VIEW IF EXISTS timescaledb_information.jobs; - -CREATE TABLE _timescaledb_internal._tmp_bgw_job_stat AS SELECT * FROM _timescaledb_internal.bgw_job_stat; -DROP TABLE _timescaledb_internal.bgw_job_stat; - -CREATE TABLE _timescaledb_internal.bgw_job_stat ( - job_id integer NOT NULL, - last_start timestamptz NOT NULL DEFAULT NOW(), - last_finish timestamptz NOT NULL, - next_start timestamptz NOT NULL, - last_successful_finish timestamptz NOT NULL, - last_run_success bool NOT NULL, - total_runs bigint NOT NULL, - total_duration interval NOT NULL, - total_successes bigint NOT NULL, - total_failures bigint NOT NULL, - total_crashes bigint NOT NULL, - consecutive_failures int NOT NULL, - consecutive_crashes int NOT NULL, - -- table constraints - CONSTRAINT bgw_job_stat_pkey PRIMARY KEY (job_id), - CONSTRAINT bgw_job_stat_job_id_fkey FOREIGN KEY (job_id) REFERENCES _timescaledb_config.bgw_job (id) ON DELETE CASCADE -); - -INSERT INTO _timescaledb_internal.bgw_job_stat SELECT - job_id, last_start, last_finish, next_start, last_successful_finish, last_run_success, total_runs, total_duration, total_successes, total_failures, total_crashes, consecutive_failures, consecutive_crashes -FROM _timescaledb_internal._tmp_bgw_job_stat; -DROP TABLE 
_timescaledb_internal._tmp_bgw_job_stat; - -GRANT SELECT ON TABLE _timescaledb_internal.bgw_job_stat TO PUBLIC; - -DROP VIEW _timescaledb_internal.hypertable_chunk_local_size; -DROP FUNCTION _timescaledb_internal.hypertable_local_size(name, name); - -CREATE FUNCTION _timescaledb_internal.hypertable_local_size( - schema_name_in name, - table_name_in name) -RETURNS TABLE ( - table_bytes BIGINT, - index_bytes BIGINT, - toast_bytes BIGINT, - total_bytes BIGINT) -LANGUAGE SQL VOLATILE STRICT AS -$BODY$ - /* get the main hypertable id and sizes */ - WITH _hypertable AS ( - SELECT - id, - _timescaledb_internal.relation_size(format('%I.%I', schema_name, table_name)::regclass) AS relsize - FROM - _timescaledb_catalog.hypertable - WHERE - schema_name = schema_name_in - AND table_name = table_name_in - ), - /* project the size of the parent hypertable */ - _hypertable_sizes AS ( - SELECT - id, - COALESCE((relsize).total_size, 0) AS total_bytes, - COALESCE((relsize).heap_size, 0) AS heap_bytes, - COALESCE((relsize).index_size, 0) AS index_bytes, - COALESCE((relsize).toast_size, 0) AS toast_bytes, - 0::BIGINT AS compressed_total_size, - 0::BIGINT AS compressed_index_size, - 0::BIGINT AS compressed_toast_size, - 0::BIGINT AS compressed_heap_size - FROM - _hypertable - ), - /* calculate the size of the hypertable chunks */ - _chunk_sizes AS ( - SELECT - chunk_id, - COALESCE(ch.total_bytes, 0) AS total_bytes, - COALESCE(ch.heap_bytes, 0) AS heap_bytes, - COALESCE(ch.index_bytes, 0) AS index_bytes, - COALESCE(ch.toast_bytes, 0) AS toast_bytes, - COALESCE(ch.compressed_total_size, 0) AS compressed_total_size, - COALESCE(ch.compressed_index_size, 0) AS compressed_index_size, - COALESCE(ch.compressed_toast_size, 0) AS compressed_toast_size, - COALESCE(ch.compressed_heap_size, 0) AS compressed_heap_size - FROM - _timescaledb_internal.hypertable_chunk_local_size ch - JOIN _hypertable_sizes ht ON ht.id = ch.hypertable_id - ) - /* calculate the SUM of the hypertable and chunk sizes */ - SELECT - (SUM(heap_bytes) + SUM(compressed_heap_size))::BIGINT AS heap_bytes, - (SUM(index_bytes) + SUM(compressed_index_size))::BIGINT AS index_bytes, - (SUM(toast_bytes) + SUM(compressed_toast_size))::BIGINT AS toast_bytes, - (SUM(total_bytes) + SUM(compressed_total_size))::BIGINT AS total_bytes - FROM - (SELECT * FROM _hypertable_sizes - UNION ALL - SELECT * FROM _chunk_sizes) AS sizes; -$BODY$ SET search_path TO pg_catalog, pg_temp; - - -DROP VIEW IF EXISTS timescaledb_information.job_stats; -DROP VIEW IF EXISTS timescaledb_information.jobs; -DROP VIEW IF EXISTS timescaledb_experimental.policies; --- fixed schedule -DROP FUNCTION IF EXISTS @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, BOOL); - -DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL); - --- fixed schedule changes --- drop and recreate functions with modified signatures, modified views, modified tables -DROP FUNCTION IF EXISTS @extschema@.add_job(REGPROC, INTERVAL, JSONB, TIMESTAMPTZ, BOOL, REGPROC, BOOL, TEXT); -DROP FUNCTION IF EXISTS @extschema@.add_continuous_aggregate_policy(REGCLASS, "any", "any", INTERVAL, BOOL, TIMESTAMPTZ, TEXT); -DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); -DROP FUNCTION IF EXISTS @extschema@.add_retention_policy(REGCLASS, "any", BOOL, INTERVAL, TIMESTAMPTZ, TEXT); -DROP FUNCTION IF EXISTS @extschema@.add_reorder_policy(REGCLASS, NAME, BOOL, TIMESTAMPTZ, TEXT); --- recreate functions with the previous 
signature -CREATE FUNCTION @extschema@.add_job( - proc REGPROC, - schedule_interval INTERVAL, - config JSONB DEFAULT NULL, - initial_start TIMESTAMPTZ DEFAULT NULL, - scheduled BOOL DEFAULT true, - check_config REGPROC DEFAULT NULL -) RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_job_add' LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL = false, schedule_interval INTERVAL = NULL) -RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_compression_add' LANGUAGE C VOLATILE STRICT; - -CREATE FUNCTION @extschema@.add_retention_policy( - relation REGCLASS, - drop_after "any", - if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL -) -RETURNS INTEGER AS '@MODULE_PATHNAME@', 'ts_policy_retention_add' -LANGUAGE C VOLATILE STRICT; - -CREATE FUNCTION @extschema@.add_continuous_aggregate_policy(continuous_aggregate REGCLASS, start_offset "any", end_offset "any", schedule_interval INTERVAL, if_not_exists BOOL = false) -RETURNS INTEGER -AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_add' -LANGUAGE C VOLATILE; - -CREATE FUNCTION @extschema@.add_reorder_policy( - hypertable REGCLASS, - index_name NAME, - if_not_exists BOOL = false -) RETURNS INTEGER -AS '@MODULE_PATHNAME@', 'ts_policy_reorder_add' -LANGUAGE C VOLATILE STRICT; - -DROP VIEW IF EXISTS timescaledb_information.jobs; -DROP VIEW IF EXISTS timescaledb_information.job_stats; - --- now need to rebuild the table -ALTER TABLE _timescaledb_internal.bgw_job_stat - DROP CONSTRAINT bgw_job_stat_job_id_fkey; -ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats - DROP CONSTRAINT bgw_policy_chunk_stats_chunk_id_fkey, - DROP CONSTRAINT bgw_policy_chunk_stats_job_id_fkey; - -CREATE TABLE _timescaledb_config.bgw_job_tmp AS SELECT * FROM _timescaledb_config.bgw_job; -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_config.bgw_job; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_config.bgw_job_id_seq; - -CREATE TABLE _timescaledb_internal.tmp_bgw_job_seq_value AS SELECT last_value, is_called FROM _timescaledb_config.bgw_job_id_seq; -DROP TABLE _timescaledb_config.bgw_job; - -CREATE SEQUENCE _timescaledb_config.bgw_job_id_seq MINVALUE 1000; -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job_id_seq', ''); -SELECT setval('_timescaledb_config.bgw_job_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_bgw_job_seq_value; -DROP TABLE _timescaledb_internal.tmp_bgw_job_seq_value; - -CREATE TABLE _timescaledb_config.bgw_job ( - id integer PRIMARY KEY DEFAULT nextval('_timescaledb_config.bgw_job_id_seq'), - application_name name NOT NULL, - schedule_interval interval NOT NULL, - max_runtime interval NOT NULL, - max_retries integer NOT NULL, - retry_period interval NOT NULL, - proc_schema name NOT NULL, - proc_name name NOT NULL, - owner name NOT NULL DEFAULT CURRENT_ROLE, - scheduled bool NOT NULL DEFAULT TRUE, - hypertable_id integer REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, - config jsonb, - check_schema NAME, - check_name NAME -); - -INSERT INTO _timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name, owner, scheduled, hypertable_id, config) -SELECT id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name, owner, scheduled, hypertable_id, config FROM _timescaledb_config.bgw_job_tmp ORDER BY id; - -ALTER SEQUENCE _timescaledb_config.bgw_job_id_seq OWNED BY 
_timescaledb_config.bgw_job.id; -CREATE INDEX bgw_job_proc_hypertable_id_idx ON _timescaledb_config.bgw_job(proc_schema,proc_name,hypertable_id); -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job', 'WHERE id >= 1000'); -GRANT SELECT ON _timescaledb_config.bgw_job TO PUBLIC; -GRANT SELECT ON _timescaledb_config.bgw_job_id_seq TO PUBLIC; - -DROP TABLE _timescaledb_config.bgw_job_tmp; -ALTER TABLE _timescaledb_internal.bgw_job_stat ADD CONSTRAINT bgw_job_stat_job_id_fkey FOREIGN KEY(job_id) REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats - ADD CONSTRAINT bgw_policy_chunk_stats_chunk_id_fkey - FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id) - ON DELETE CASCADE, - ADD CONSTRAINT bgw_policy_chunk_stats_job_id_fkey - FOREIGN KEY(job_id) REFERENCES _timescaledb_config.bgw_job(id) - ON DELETE CASCADE; - -DROP FUNCTION _timescaledb_internal.health; - --- Recreate _timescaledb_catalog.dimension table without the compress_interval_length column -- -CREATE TABLE _timescaledb_internal.dimension_tmp -AS SELECT * from _timescaledb_catalog.dimension; - -CREATE TABLE _timescaledb_internal.tmp_dimension_seq_value AS -SELECT last_value, is_called FROM _timescaledb_catalog.dimension_id_seq; - ---drop foreign keys on dimension table -ALTER TABLE _timescaledb_catalog.dimension_partition DROP CONSTRAINT -dimension_partition_dimension_id_fkey; -ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT -dimension_slice_dimension_id_fkey; - ---drop dependent views -DROP VIEW IF EXISTS timescaledb_information.chunks; -DROP VIEW IF EXISTS timescaledb_information.dimensions; - -ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.dimension; -ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.dimension_id_seq; -DROP TABLE _timescaledb_catalog.dimension; - -CREATE TABLE _timescaledb_catalog.dimension ( - id serial NOT NULL , - hypertable_id integer NOT NULL, - column_name name NOT NULL, - column_type REGTYPE NOT NULL, - aligned boolean NOT NULL, - -- closed dimensions - num_slices smallint NULL, - partitioning_func_schema name NULL, - partitioning_func name NULL, - -- open dimensions (e.g., time) - interval_length bigint NULL, - integer_now_func_schema name NULL, - integer_now_func name NULL, - -- table constraints - CONSTRAINT dimension_pkey PRIMARY KEY (id), - CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name), - CONSTRAINT dimension_check CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)), - CONSTRAINT dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)), - CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)), - CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0), - CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE -); - -INSERT INTO _timescaledb_catalog.dimension -( id, hypertable_id, column_name, column_type, - aligned, num_slices, partitioning_func_schema, - partitioning_func, interval_length, - integer_now_func_schema, integer_now_func) -SELECT id, hypertable_id, column_name, column_type, - aligned, num_slices, 
partitioning_func_schema, - partitioning_func, interval_length, - integer_now_func_schema, integer_now_func -FROM _timescaledb_internal.dimension_tmp; - -ALTER SEQUENCE _timescaledb_catalog.dimension_id_seq OWNED BY _timescaledb_catalog.dimension.id; -SELECT setval('_timescaledb_catalog.dimension_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_dimension_seq_value; - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', ''); -SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), ''); - ---add the foreign key constraints -ALTER TABLE _timescaledb_catalog.dimension_partition ADD CONSTRAINT -dimension_partition_dimension_id_fkey FOREIGN KEY (dimension_id) -REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE; -ALTER TABLE _timescaledb_catalog.dimension_slice ADD CONSTRAINT -dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id) -REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE; - ---cleanup -DROP TABLE _timescaledb_internal.dimension_tmp; -DROP TABLE _timescaledb_internal.tmp_dimension_seq_value; - -GRANT SELECT ON _timescaledb_catalog.dimension_id_seq TO PUBLIC; -GRANT SELECT ON _timescaledb_catalog.dimension TO PUBLIC; - --- end recreate _timescaledb_catalog.dimension table -- - --- changes related to alter_data_node() -DROP INDEX _timescaledb_catalog.chunk_data_node_node_name_idx; -DROP FUNCTION @extschema@.alter_data_node; - --- --- Prevent downgrading if there are hierarchical continuous aggregates --- -DO -$$ -DECLARE - caggs_hierarchical TEXT; - caggs_count INTEGER; -BEGIN - SELECT - string_agg(format('%I.%I', user_view_schema, user_view_name), ', '), - count(*) - INTO - caggs_hierarchical, - caggs_count - FROM - _timescaledb_catalog.continuous_agg - WHERE - parent_mat_hypertable_id IS NOT NULL; - - IF caggs_count > 0 THEN - RAISE EXCEPTION 'Downgrade is not possible because there are % hierarchical continuous aggregates: %', caggs_count, caggs_nested - USING HINT = 'Remove the corresponding continuous aggregates manually before downgrading'; - END IF; -END; -$$ -LANGUAGE 'plpgsql'; - --- --- Rebuild the catalog table `_timescaledb_catalog.continuous_agg` --- -DROP VIEW IF EXISTS timescaledb_information.hypertables; -DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates; -DROP PROCEDURE IF EXISTS @extschema@.cagg_migrate (REGCLASS, BOOLEAN, BOOLEAN); -DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_pre_validation (TEXT, TEXT, TEXT); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_create_plan (_timescaledb_catalog.continuous_agg, TEXT, BOOLEAN, BOOLEAN); -DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_plan_exists (INTEGER); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_plan (_timescaledb_catalog.continuous_agg); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_create_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_disable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_enable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_policies (_timescaledb_catalog.continuous_agg, 
_timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_refresh_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_data (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_override_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); -DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_drop_old_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); - -ALTER EXTENSION timescaledb - DROP TABLE _timescaledb_catalog.continuous_agg; - -ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log - DROP CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey; - -ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan - DROP CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey; - -CREATE TABLE _timescaledb_catalog._tmp_continuous_agg AS - SELECT - mat_hypertable_id, - raw_hypertable_id, - user_view_schema, - user_view_name, - partial_view_schema, - partial_view_name, - bucket_width, - direct_view_schema, - direct_view_name, - materialized_only, - finalized - FROM - _timescaledb_catalog.continuous_agg - ORDER BY - mat_hypertable_id; - -DROP TABLE _timescaledb_catalog.continuous_agg; - -CREATE TABLE _timescaledb_catalog.continuous_agg ( - mat_hypertable_id integer NOT NULL, - raw_hypertable_id integer NOT NULL, - user_view_schema name NOT NULL, - user_view_name name NOT NULL, - partial_view_schema name NOT NULL, - partial_view_name name NOT NULL, - bucket_width bigint NOT NULL, - direct_view_schema name NOT NULL, - direct_view_name name NOT NULL, - materialized_only bool NOT NULL DEFAULT FALSE, - finalized bool NOT NULL DEFAULT TRUE, - -- table constraints - CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id), - CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name), - CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name), - CONSTRAINT continuous_agg_mat_hypertable_id_fkey - FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, - CONSTRAINT continuous_agg_raw_hypertable_id_fkey - FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE -); - -INSERT INTO _timescaledb_catalog.continuous_agg -SELECT * FROM _timescaledb_catalog._tmp_continuous_agg; -DROP TABLE _timescaledb_catalog._tmp_continuous_agg; - -CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id); - -SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', ''); - -GRANT SELECT ON TABLE _timescaledb_catalog.continuous_agg TO PUBLIC; - -ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log - ADD CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey - FOREIGN KEY (materialization_id) - REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id) ON DELETE CASCADE; - -ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan - ADD CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey - FOREIGN KEY (mat_hypertable_id) - REFERENCES 
_timescaledb_catalog.continuous_agg (mat_hypertable_id); - -ANALYZE _timescaledb_catalog.continuous_agg; - --- changes related to drop_stale_chunks() -DROP FUNCTION _timescaledb_internal.drop_stale_chunks; diff --git a/test/sql/updates/post.catalog.sql b/test/sql/updates/post.catalog.sql index 4c022e6b4..d13729d45 100644 --- a/test/sql/updates/post.catalog.sql +++ b/test/sql/updates/post.catalog.sql @@ -34,7 +34,7 @@ ORDER BY schema, name, initpriv; \df+ public.*; \dy -\d+ public.* +\d public.* \dx+ timescaledb SELECT count(*) diff --git a/test/sql/updates/setup.continuous_aggs.v2.sql b/test/sql/updates/setup.continuous_aggs.v2.sql index 2688287a2..c5e9240f5 100644 --- a/test/sql/updates/setup.continuous_aggs.v2.sql +++ b/test/sql/updates/setup.continuous_aggs.v2.sql @@ -6,14 +6,22 @@ -- the right usage. Some of these are changed in the same version, but -- we keep them separate anyway so that we can do additional checking -- if necessary. + SELECT - extversion < '2.0.0' AS has_refresh_mat_view, - extversion < '2.0.0' AS has_drop_chunks_old_interface, - extversion < '2.0.0' AS has_ignore_invalidations_older_than, - extversion < '2.0.0' AS has_max_interval_per_job, - extversion >= '2.0.0' AS has_create_mat_view, - extversion >= '2.0.0' AS has_continuous_aggs_policy, - extversion >= '2.7.0' AS has_continuous_aggs_finals_form + (string_to_array(extversion,'.'))[1] AS ts_major, + (string_to_array(extversion,'.'))[2] AS ts_minor + FROM pg_extension + WHERE extname = 'timescaledb' \gset + +SELECT + :ts_major < 2 AS has_refresh_mat_view, + :ts_major < 2 AS has_drop_chunks_old_interface, + :ts_major < 2 AS has_ignore_invalidations_older_than, + :ts_major < 2 AS has_max_interval_per_job, + :ts_major >= 2 AS has_create_mat_view, + :ts_major >= 2 AS has_continuous_aggs_policy, + :ts_major = 2 AND :ts_minor >= 7 AS has_continuous_aggs_finals_form, + :ts_major = 2 AND :ts_minor IN (7,8) AS has_continuous_aggs_finalized_option FROM pg_extension WHERE extname = 'timescaledb' \gset @@ -55,10 +63,10 @@ SELECT generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timest -- we had a bug related to that and need to verify if compression can be -- enabled on such a view CREATE MATERIALIZED VIEW rename_cols - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS \else - WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS + WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS \endif \endif SELECT time_bucket('1 week', timec) AS bucket, location, round(avg(humidity)) AS humidity @@ -76,7 +84,7 @@ SELECT generate_series('2018-11-01 00:00'::timestamp, '2018-12-15 00:00'::timest WITH ( timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day') \else CREATE MATERIALIZED VIEW IF NOT EXISTS mat_before - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true) @@ -156,7 +164,7 @@ CREATE SCHEMA cagg; WITH ( timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day') \else CREATE MATERIALIZED VIEW IF NOT EXISTS cagg.realtime_mat - \if :has_continuous_aggs_finals_form + \if 
:has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=false) @@ -228,7 +236,7 @@ CALL refresh_continuous_aggregate('cagg.realtime_mat',NULL,NULL); timescaledb.max_interval_per_job = '100000 days') \else CREATE MATERIALIZED VIEW IF NOT EXISTS mat_ignoreinval - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true) @@ -269,7 +277,7 @@ SELECT generate_series('2018-12-01 00:00'::timestamp, '2018-12-20 00:00'::timest timescaledb.max_interval_per_job='100000 days' ) \else CREATE MATERIALIZED VIEW mat_inval - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) @@ -317,7 +325,7 @@ INSERT INTO int_time_test VALUES timescaledb.refresh_interval='12 hours') \else CREATE MATERIALIZED VIEW mat_inttime - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) @@ -339,7 +347,7 @@ INSERT INTO int_time_test VALUES timescaledb.refresh_interval='12 hours') \else CREATE MATERIALIZED VIEW mat_inttime2 - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) @@ -377,7 +385,7 @@ SELECT create_hypertable('conflict_test', 'time', chunk_time_interval => INTERVA timescaledb.refresh_interval='12 hours' ) \else CREATE MATERIALIZED VIEW mat_conflict - \if :has_continuous_aggs_finals_form + \if :has_continuous_aggs_finalized_option WITH (timescaledb.continuous, timescaledb.materialized_only=true, timescaledb.finalized=false) \else WITH ( timescaledb.continuous, timescaledb.materialized_only=true ) @@ -441,7 +449,7 @@ WITH ( timescaledb.refresh_lag='-30 day', timescaledb.max_interval_per_job ='1000 day', \endif -\if :has_continuous_aggs_finals_form +\if :has_continuous_aggs_finalized_option timescaledb.finalized = false, \endif timescaledb.continuous diff --git a/version.config b/version.config index 3adb83c20..905a4647f 100644 --- a/version.config +++ b/version.config @@ -1,3 +1,3 @@ -version = 2.9.0-dev +version = 2.10.0-dev update_from_version = 2.9.0 -downgrade_to_version = 2.8.1 +downgrade_to_version = 2.9.0
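
Note for reviewers (not part of the patch): below is a minimal sketch of exercising the paths this change wires up, namely the 2.8.1 to 2.9.0 update covered by update_from_version and the new 2.9.0 to 2.8.1 downgrade added in sql/updates/2.9.0--2.8.1.sql. It assumes both versions' extension files are installed on the server and that each ALTER EXTENSION is run from a fresh session started with psql -X, as recommended at the top of CHANGELOG.md.

    -- check the currently installed extension version
    SELECT extversion FROM pg_extension WHERE extname = 'timescaledb';

    -- update path exercised by the scripts/test_updates_pg*.sh scripts
    ALTER EXTENSION timescaledb UPDATE TO '2.9.0';

    -- downgrade path added by this patch; runs sql/updates/2.9.0--2.8.1.sql and
    -- raises an exception if hierarchical continuous aggregates exist
    ALTER EXTENSION timescaledb UPDATE TO '2.8.1';

The new scripts/test_updates_pg15.sh drives the same kind of update flow automatically for PostgreSQL 15; it starts from 2.9.0 only, since that is the first version built for PG15 in CI.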