timescaledb/sql/updates/2.7.0--2.6.1.sql
Sven Klemm 0a68209563 Release 2.7.0
This release adds major new features since the 2.6.1 release.
We deem it moderate priority for upgrading.

This release includes these noteworthy features:

* Optimize continuous aggregate query performance and storage
* The following query clauses and functions can now be used in a continuous
  aggregate: FILTER, DISTINCT, ORDER BY, as well as [Ordered-Set Aggregates](https://www.postgresql.org/docs/current/functions-aggregate.html#FUNCTIONS-ORDEREDSET-TABLE)
  and [Hypothetical-Set Aggregates](https://www.postgresql.org/docs/current/functions-aggregate.html#FUNCTIONS-HYPOTHETICAL-TABLE)
* Optimize now() query planning time
* Improve COPY insert performance
* Improve performance of UPDATE/DELETE on PG14 by excluding chunks

This release also includes several bug fixes.

If you are upgrading from a previous version and were using compression
with a non-default collation on a segmentby column, you should recompress
those hypertables.
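
For example, one way to recompress is to decompress and then compress each
chunk of the affected hypertable again (a sketch; `conditions` is a
placeholder hypertable name):

```sql
-- Decompress all compressed chunks, then compress them again.
SELECT decompress_chunk(c, if_compressed => true) FROM show_chunks('conditions') c;
SELECT compress_chunk(c, if_not_compressed => true) FROM show_chunks('conditions') c;
```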

**Features**
* #4045 Custom origin support in CAGGs
* #4120 Add logging for retention policy
* #4158 Allow ANALYZE command on a data node directly
* #4169 Add support for chunk exclusion on DELETE to PG14
* #4209 Add support for chunk exclusion on UPDATE to PG14
* #4269 Continuous Aggregates finalized form
* #4301 Add support for bulk inserts in COPY operator
* #4311 Support non-superuser move chunk operations
* #4330 Add GUC "bgw_launcher_poll_time"
* #4340 Enable now() usage in plan-time chunk exclusion

**Bugfixes**
* #3899 Fix segfault in Continuous Aggregates
* #4225 Fix TRUNCATE error as non-owner on hypertable
* #4236 Fix potential wrong order of results for compressed hypertable with a non-default collation
* #4249 Fix option "timescaledb.create_group_indexes"
* #4251 Fix INSERT into compressed chunks with dropped columns
* #4255 Fix option "timescaledb.create_group_indexes"
* #4259 Fix logic bug in extension update script
* #4269 Fix bad Continuous Aggregate view definition reported in #4233
* #4289 Support moving compressed chunks between data nodes
* #4300 Fix refresh window cap for cagg refresh policy
* #4315 Fix memory leak in scheduler
* #4323 Remove printouts from signal handlers
* #4342 Fix move chunk cleanup logic
* #4349 Fix crashes in functions using AlterTableInternal
* #4358 Fix crash and other issues in telemetry reporter

**Thanks**
* @abrownsword for reporting a bug in the telemetry reporter and testing the fix
* @jsoref for fixing various misspellings in code, comments and documentation
* @yalon for reporting an error with ALTER TABLE RENAME on distributed hypertables
* @zhuizhuhaomeng for reporting and fixing a memory leak in our scheduler
2022-05-23 17:58:20 +02:00

DROP VIEW _timescaledb_internal.hypertable_chunk_local_size;
DROP FUNCTION _timescaledb_internal.relation_size(relation REGCLASS);
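-- Restore the pre-2.7.0 composite index on chunk_constraint.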
DROP INDEX _timescaledb_catalog.chunk_constraint_dimension_slice_id_idx;
CREATE INDEX chunk_constraint_chunk_id_dimension_slice_id_idx ON _timescaledb_catalog.chunk_constraint (chunk_id, dimension_slice_id);
DROP FUNCTION _timescaledb_internal.freeze_chunk(chunk REGCLASS);
DROP FUNCTION _timescaledb_internal.drop_chunk(chunk REGCLASS);
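-- Abort the downgrade if any continuous aggregates already use the finalized
-- form introduced in 2.7.0, since the 2.6.x catalog cannot represent them.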
DO
$$
DECLARE
  caggs_finalized TEXT;
  caggs_count INTEGER;
BEGIN
  SELECT
    string_agg(format('%I.%I', user_view_schema, user_view_name), ', '),
    count(*)
  INTO
    caggs_finalized,
    caggs_count
  FROM
    _timescaledb_catalog.continuous_agg
  WHERE
    finalized IS TRUE;

  IF caggs_count > 0 THEN
    RAISE EXCEPTION 'Downgrade is not possible because there are % continuous aggregates using the finalized form: %', caggs_count, caggs_finalized
      USING HINT = 'Remove the corresponding continuous aggregates manually before downgrading';
  END IF;
END;
$$
LANGUAGE 'plpgsql';
--
-- Rebuild the catalog table `_timescaledb_catalog.continuous_agg`
--
-- We need to recreate the catalog table from scratch because when we drop a
-- column, Postgres marks it with `pg_attribute.attisdropped = TRUE` instead of
-- removing it from the `pg_catalog.pg_attribute` table.
--
-- If we downgrade and upgrade the extension without rebuilding the catalog
-- table, the `pg_attribute.attnum` values get out of sync and we end up with
-- issues when trying to update data in those catalog tables.
--
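-- For illustration only (not executed by this script): after a DROP COLUMN,
-- a query such as
--   SELECT attname, attnum, attisdropped
--   FROM pg_catalog.pg_attribute
--   WHERE attrelid = '_timescaledb_catalog.continuous_agg'::regclass;
-- would still list the dropped column with attisdropped = TRUE.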
DROP VIEW IF EXISTS timescaledb_information.hypertables;
DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates;
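-- A table that is a member of an extension cannot be dropped directly, so
-- detach it from the extension first.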
ALTER EXTENSION timescaledb
DROP TABLE _timescaledb_catalog.continuous_agg;
ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log
DROP CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey;
ALTER TABLE _timescaledb_catalog.continuous_agg
DROP COLUMN finalized;
CREATE TABLE _timescaledb_catalog._tmp_continuous_agg (
  LIKE _timescaledb_catalog.continuous_agg
  INCLUDING ALL
  -- indexes and constraints will be created later to keep the original names
  EXCLUDING INDEXES
  EXCLUDING CONSTRAINTS
);
INSERT INTO _timescaledb_catalog._tmp_continuous_agg
SELECT
  mat_hypertable_id,
  raw_hypertable_id,
  user_view_schema,
  user_view_name,
  partial_view_schema,
  partial_view_name,
  bucket_width,
  direct_view_schema,
  direct_view_name,
  materialized_only
FROM
  _timescaledb_catalog.continuous_agg
ORDER BY
  mat_hypertable_id;
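-- Swap in the rebuilt table: drop the original catalog table and rename the
-- copy to the original name.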
DROP TABLE _timescaledb_catalog.continuous_agg;
ALTER TABLE _timescaledb_catalog._tmp_continuous_agg
RENAME TO continuous_agg;
ALTER TABLE _timescaledb_catalog.continuous_agg
  ADD CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id),
  ADD CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name),
  ADD CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name),
  ADD CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  ADD CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE;
CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id);
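-- Re-register the rebuilt table as extension configuration so that pg_dump
-- continues to include its contents.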
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', '');
GRANT SELECT ON TABLE _timescaledb_catalog.continuous_agg TO PUBLIC;
ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log
  ADD CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey
    FOREIGN KEY (materialization_id)
    REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id) ON DELETE CASCADE;
ANALYZE _timescaledb_catalog.continuous_agg;
DROP PROCEDURE timescaledb_experimental.move_chunk(REGCLASS, NAME, NAME, NAME);
DROP PROCEDURE timescaledb_experimental.copy_chunk(REGCLASS, NAME, NAME, NAME);
DROP FUNCTION IF EXISTS timescaledb_experimental.subscription_exec(TEXT);
DROP FUNCTION _timescaledb_internal.create_compressed_chunk(REGCLASS, REGCLASS,
BIGINT, BIGINT, BIGINT, BIGINT, BIGINT, BIGINT, BIGINT, BIGINT);
--
-- Rebuild the catalog table `_timescaledb_catalog.chunk_copy_operation`
--
-- We need to recreate the catalog table from scratch because when we drop a
-- column, Postgres marks it with `pg_attribute.attisdropped = TRUE` instead of
-- removing it from the `pg_catalog.pg_attribute` table.
--
-- If we downgrade and upgrade the extension without rebuilding the catalog
-- table, the `pg_attribute.attnum` values get out of sync and we end up with
-- issues when trying to update data in those catalog tables.
ALTER TABLE _timescaledb_catalog.chunk_copy_operation
DROP COLUMN compress_chunk_name;
CREATE TABLE _timescaledb_catalog._tmp_chunk_copy_operation (
  LIKE _timescaledb_catalog.chunk_copy_operation
  INCLUDING ALL
  EXCLUDING INDEXES
  EXCLUDING CONSTRAINTS
);
INSERT INTO _timescaledb_catalog._tmp_chunk_copy_operation
SELECT
  operation_id,
  backend_pid,
  completed_stage,
  time_start,
  chunk_id,
  source_node_name,
  dest_node_name,
  delete_on_source_node
FROM
  _timescaledb_catalog.chunk_copy_operation
ORDER BY
  operation_id;
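-- As above, detach the catalog table from the extension before dropping it.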
ALTER EXTENSION timescaledb
DROP TABLE _timescaledb_catalog.chunk_copy_operation;
DROP TABLE _timescaledb_catalog.chunk_copy_operation;
-- Create a new table instead of renaming the tmp table, so the catalog table
-- keeps its original name.
CREATE TABLE _timescaledb_catalog.chunk_copy_operation (
  LIKE _timescaledb_catalog._tmp_chunk_copy_operation
  INCLUDING ALL
  EXCLUDING INDEXES
  EXCLUDING CONSTRAINTS
);
INSERT INTO _timescaledb_catalog.chunk_copy_operation
SELECT
  operation_id,
  backend_pid,
  completed_stage,
  time_start,
  chunk_id,
  source_node_name,
  dest_node_name,
  delete_on_source_node
FROM
  _timescaledb_catalog._tmp_chunk_copy_operation
ORDER BY
  operation_id;
DROP TABLE _timescaledb_catalog._tmp_chunk_copy_operation;
ALTER TABLE _timescaledb_catalog.chunk_copy_operation
  ADD CONSTRAINT chunk_copy_operation_pkey PRIMARY KEY (operation_id),
  ADD CONSTRAINT chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE;
GRANT SELECT ON TABLE _timescaledb_catalog.chunk_copy_operation TO PUBLIC;
ANALYZE _timescaledb_catalog.chunk_copy_operation;