mirror of https://github.com/timescale/timescaledb.git
synced 2025-05-17 11:03:36 +08:00
Release 2.8.1
This release is a patch release. We recommend that you upgrade at the next available opportunity.

**Bugfixes**
* #4454 Keep locks after reading job status
* #4658 Fix error when querying a compressed hypertable with compress_segmentby on an enum column
* #4671 Fix a possible error while flushing the COPY data
* #4675 Fix bad TupleTableSlot drop
* #4676 Fix a deadlock when decompressing chunks and performing SELECTs
* #4685 Fix chunk exclusion for space partitions in SELECT FOR UPDATE queries
* #4694 Change parameter names of cagg_migrate procedure
* #4698 Do not use row-by-row fetcher for parameterized plans
* #4711 Remove support for procedures as custom checks
* #4712 Fix assertion failure in constify_now
* #4713 Fix Continuous Aggregate migration policies
* #4720 Fix chunk exclusion for prepared statements and dst changes
* #4726 Fix gapfill function signature
* #4737 Fix join on time column of compressed chunk
* #4738 Fix error when waiting for remote COPY to finish
* #4739 Fix continuous aggregate migrate check constraint
* #4760 Fix segfault when INNER JOINing hypertables
* #4767 Fix permission issues on index creation for CAggs

**Thanks**
* @boxhock and @cocowalla for reporting a segfault when JOINing hypertables
* @carobme for reporting constraint error during continuous aggregate migration
* @choisnetm, @dustinsorensen, @jayadevanm and @joeyberkovitz for reporting a problem with JOINs on compressed hypertables
* @daniel-k for reporting a background worker crash
* @justinpryzby for reporting an error when compressing very wide tables
* @maxtwardowski for reporting problems with chunk exclusion and space partitions
* @yuezhihan for reporting GROUP BY error when having compress_segmentby on an enum column
This commit is contained in:
parent b259191dfe
commit 12b7b9f665
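
Among the fixes, #4694 renames the arguments of the continuous aggregate migration procedure. A minimal usage sketch against the new signature defined below in sql/updates/2.8.0--2.8.1.sql (the continuous aggregate name conditions_summary is hypothetical):

CALL cagg_migrate('conditions_summary');
-- or, replacing and then dropping the old continuous aggregate:
CALL cagg_migrate('conditions_summary', override => TRUE, drop_old => TRUE);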
CHANGELOG.md (44 lines changed)
@@ -18,28 +18,48 @@ argument or resolve the type ambiguity by casting to the intended type.
 * #4650 Show warnings when not following best practices
 
 **Bugfixes**
-* #4619 Improve handling enum columns in compressed hypertables
 * #4673 Fix now() constification for VIEWs
-* #4676 Fix a deadlock when decompressing chunks and performing SELECTs
 * #4681 Fix compression_chunk_size primary key
-* #4685 Improve chunk exclusion for space dimensions
 * #4696 Report warning when enabling compression on hypertable
-* #4720 Fix chunk exclusion for prepared statements and dst changes
-* #4737 Fix join on time column of compressed chunk
-* #4738 Fix the assorted epoll_ctl() errors that could occur with COPY into a distributed hypertable
-* #4739 Fix continuous aggregate migrate check constraint
 * #4745 Fix FK constraint violation error while insert into hypertable which references partitioned table
 * #4756 Improve compression job IO performance
+
+**Thanks**
+* @jvanns for reporting hypertable FK reference to vanilla PostgreSQL partitioned table doesn't seem to work
+
+## 2.8.1 (2022-10-06)
+
+This release is a patch release. We recommend that you upgrade at the
+next available opportunity.
+
+**Bugfixes**
+* #4454 Keep locks after reading job status
+* #4658 Fix error when querying a compressed hypertable with compress_segmentby on an enum column
+* #4671 Fix a possible error while flushing the COPY data
+* #4675 Fix bad TupleTableSlot drop
+* #4676 Fix a deadlock when decompressing chunks and performing SELECTs
+* #4685 Fix chunk exclusion for space partitions in SELECT FOR UPDATE queries
+* #4694 Change parameter names of cagg_migrate procedure
+* #4698 Do not use row-by-row fetcher for parameterized plans
+* #4711 Remove support for procedures as custom checks
+* #4712 Fix assertion failure in constify_now
+* #4713 Fix Continuous Aggregate migration policies
+* #4720 Fix chunk exclusion for prepared statements and dst changes
+* #4726 Fix gapfill function signature
+* #4737 Fix join on time column of compressed chunk
+* #4738 Fix error when waiting for remote COPY to finish
+* #4739 Fix continuous aggregate migrate check constraint
 * #4760 Fix segfault when INNER JOINing hypertables
-* #4735 Allow manual index creation for CAggs
+* #4767 Fix permission issues on index creation for CAggs
 
 **Thanks**
 * @boxhock and @cocowalla for reporting a segfault when JOINing hypertables
-* @choisnetm, @dustinsorensen, @jayadevanm and @joeyberkovitz for reporting a problem with JOINs on compressed hypertables
-* @maxtwardowski for reporting problems with chunk exclusion and space dimensions
-* @yuezhihan for reporting GROUP BY error when setting compress_segmentby with an enum column
 * @carobme for reporting constraint error during continuous aggregate migration
-* @jvanns for reporting hypertable FK reference to vanilla PostgreSQL partitioned table doesn't seem to work
+* @choisnetm, @dustinsorensen, @jayadevanm and @joeyberkovitz for reporting a problem with JOINs on compressed hypertables
+* @daniel-k for reporting a background worker crash
+* @justinpryzby for reporting an error when compressing very wide tables
+* @maxtwardowski for reporting problems with chunk exclusion and space partitions
+* @yuezhihan for reporting GROUP BY error when having compress_segmentby on an enum column
 
 ## 2.8.0 (2022-08-30)
sql/CMakeLists.txt
@@ -38,7 +38,8 @@ set(MOD_FILES
 updates/2.6.1--2.7.0.sql
 updates/2.7.0--2.7.1.sql
 updates/2.7.1--2.7.2.sql
-updates/2.7.2--2.8.0.sql)
+updates/2.7.2--2.8.0.sql
+updates/2.8.0--2.8.1.sql)
 
 # The downgrade file to generate a downgrade script for the current version, as
 # specified in version.config
sql/updates/2.8.0--2.8.1.sql (new file, 174 lines)
@@ -0,0 +1,174 @@
DROP PROCEDURE IF EXISTS @extschema@.cagg_migrate (REGCLASS, BOOLEAN, BOOLEAN);
DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_create_plan (_timescaledb_catalog.continuous_agg, TEXT, BOOLEAN, BOOLEAN);

CREATE PROCEDURE _timescaledb_internal.cagg_migrate_create_plan (
    _cagg_data _timescaledb_catalog.continuous_agg,
    _cagg_name_new TEXT,
    _override BOOLEAN DEFAULT FALSE,
    _drop_old BOOLEAN DEFAULT FALSE
)
LANGUAGE plpgsql AS
$BODY$
DECLARE
    _sql TEXT;
    _matht RECORD;
    _time_interval INTERVAL;
    _integer_interval BIGINT;
    _watermark TEXT;
    _policies JSONB;
    _bucket_column_name TEXT;
    _bucket_column_type TEXT;
    _interval_type TEXT;
    _interval_value TEXT;
BEGIN
    IF _timescaledb_internal.cagg_migrate_plan_exists(_cagg_data.mat_hypertable_id) IS TRUE THEN
        RAISE EXCEPTION 'plan already exists for materialized hypertable %', _cagg_data.mat_hypertable_id;
    END IF;

    INSERT INTO
        _timescaledb_catalog.continuous_agg_migrate_plan (mat_hypertable_id)
    VALUES
        (_cagg_data.mat_hypertable_id);

    SELECT schema_name, table_name
    INTO _matht
    FROM _timescaledb_catalog.hypertable
    WHERE id = _cagg_data.mat_hypertable_id;

    SELECT time_interval, integer_interval, column_name, column_type
    INTO _time_interval, _integer_interval, _bucket_column_name, _bucket_column_type
    FROM timescaledb_information.dimensions
    WHERE hypertable_schema = _matht.schema_name
    AND hypertable_name = _matht.table_name
    AND dimension_type = 'Time';

    IF _integer_interval IS NOT NULL THEN
        _interval_value := _integer_interval::TEXT;
        _interval_type := _bucket_column_type;
        IF _bucket_column_type = 'bigint' THEN
            _watermark := COALESCE(_timescaledb_internal.cagg_watermark(_cagg_data.mat_hypertable_id)::bigint, '-9223372036854775808'::bigint)::TEXT;
        ELSIF _bucket_column_type = 'integer' THEN
            _watermark := COALESCE(_timescaledb_internal.cagg_watermark(_cagg_data.mat_hypertable_id)::integer, '-2147483648'::integer)::TEXT;
        ELSE
            _watermark := COALESCE(_timescaledb_internal.cagg_watermark(_cagg_data.mat_hypertable_id)::smallint, '-32768'::smallint)::TEXT;
        END IF;
    ELSE
        _interval_value := _time_interval::TEXT;
        _interval_type := 'interval';
        _watermark := COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(_cagg_data.mat_hypertable_id)), '-infinity'::timestamptz)::TEXT;
    END IF;
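    -- Note: if cagg_watermark() yields NULL (nothing materialized yet), the
    -- COALESCE calls above fall back to the column type's minimum value
    -- (or -infinity for time buckets), so the COPY DATA windows created below
    -- span the entire source data.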

    -- get all scheduled policies except the refresh
    SELECT jsonb_build_object('policies', array_agg(id))
    INTO _policies
    FROM _timescaledb_config.bgw_job
    WHERE hypertable_id = _cagg_data.mat_hypertable_id
    AND proc_name IS DISTINCT FROM 'policy_refresh_continuous_aggregate'
    AND scheduled IS TRUE
    AND id >= 1000;

    INSERT INTO
        _timescaledb_catalog.continuous_agg_migrate_plan_step (mat_hypertable_id, type, config)
    VALUES
        (_cagg_data.mat_hypertable_id, 'SAVE WATERMARK', jsonb_build_object('watermark', _watermark)),
        (_cagg_data.mat_hypertable_id, 'CREATE NEW CAGG', jsonb_build_object('cagg_name_new', _cagg_name_new)),
        (_cagg_data.mat_hypertable_id, 'DISABLE POLICIES', _policies),
        (_cagg_data.mat_hypertable_id, 'REFRESH NEW CAGG', jsonb_build_object('cagg_name_new', _cagg_name_new, 'window_start', _watermark, 'window_start_type', _bucket_column_type));

    -- Finish the SAVE WATERMARK step right away because it doesn't require any extra processing
    UPDATE _timescaledb_catalog.continuous_agg_migrate_plan_step
    SET status = 'FINISHED', start_ts = now(), end_ts = clock_timestamp()
    WHERE type = 'SAVE WATERMARK';

    _sql := format (
        $$
        WITH boundaries AS (
            SELECT min(%1$I), max(%1$I), %1$L AS bucket_column_name, %2$L AS bucket_column_type, %3$L AS cagg_name_new
            FROM %4$I.%5$I
            WHERE %1$I < CAST(%6$L AS %2$s)
        )
        INSERT INTO
            _timescaledb_catalog.continuous_agg_migrate_plan_step (mat_hypertable_id, type, config)
        SELECT
            %7$L,
            'COPY DATA',
            jsonb_build_object (
                'start_ts', start::text,
                'end_ts', (start + CAST(%8$L AS %9$s))::text,
                'bucket_column_name', bucket_column_name,
                'bucket_column_type', bucket_column_type,
                'cagg_name_new', cagg_name_new
            )
        FROM boundaries,
            LATERAL generate_series(min, max, CAST(%8$L AS %9$s)) AS start;
        $$,
        _bucket_column_name, _bucket_column_type, _cagg_name_new, _cagg_data.user_view_schema,
        _cagg_data.user_view_name, _watermark, _cagg_data.mat_hypertable_id, _interval_value, _interval_type
    );

    EXECUTE _sql;
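
    -- The generated statement slices the source data between min and max into
    -- one 'COPY DATA' step per interval via generate_series. For illustration
    -- (hypothetical values): a '1 day' interval over data spanning
    -- 2022-01-01..2022-01-03 produces one step per day, each covering
    -- [start, start + 1 day).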

    -- get all scheduled policies
    SELECT jsonb_build_object('policies', array_agg(id))
    INTO _policies
    FROM _timescaledb_config.bgw_job
    WHERE hypertable_id = _cagg_data.mat_hypertable_id
    AND scheduled IS TRUE
    AND id >= 1000;

    INSERT INTO
        _timescaledb_catalog.continuous_agg_migrate_plan_step (mat_hypertable_id, type, config)
    VALUES
        (_cagg_data.mat_hypertable_id, 'OVERRIDE CAGG', jsonb_build_object('cagg_name_new', _cagg_name_new, 'override', _override, 'drop_old', _drop_old)),
        (_cagg_data.mat_hypertable_id, 'DROP OLD CAGG', jsonb_build_object('cagg_name_new', _cagg_name_new, 'override', _override, 'drop_old', _drop_old)),
        (_cagg_data.mat_hypertable_id, 'COPY POLICIES', _policies || jsonb_build_object('cagg_name_new', _cagg_name_new)),
        (_cagg_data.mat_hypertable_id, 'ENABLE POLICIES', NULL);
END;
$BODY$ SET search_path TO pg_catalog, pg_temp;

CREATE PROCEDURE @extschema@.cagg_migrate (
    cagg REGCLASS,
    override BOOLEAN DEFAULT FALSE,
    drop_old BOOLEAN DEFAULT FALSE
)
LANGUAGE plpgsql AS
$BODY$
DECLARE
    _cagg_schema TEXT;
    _cagg_name TEXT;
    _cagg_name_new TEXT;
    _cagg_data _timescaledb_catalog.continuous_agg;
BEGIN
    SELECT nspname, relname
    INTO _cagg_schema, _cagg_name
    FROM pg_catalog.pg_class
    JOIN pg_catalog.pg_namespace ON pg_namespace.oid OPERATOR(pg_catalog.=) pg_class.relnamespace
    WHERE pg_class.oid OPERATOR(pg_catalog.=) cagg::pg_catalog.oid;

    -- the maximum size of an identifier in Postgres is 63 characters, so we need to leave space for the '_new' suffix
    _cagg_name_new := pg_catalog.format('%s_new', pg_catalog.substr(_cagg_name, 1, 59));

    -- pre-validate the migration and get some variables
    _cagg_data := _timescaledb_internal.cagg_migrate_pre_validation(_cagg_schema, _cagg_name, _cagg_name_new);

    -- create new migration plan
    CALL _timescaledb_internal.cagg_migrate_create_plan(_cagg_data, _cagg_name_new, override, drop_old);
    COMMIT;
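    -- Transaction control is valid here because cagg_migrate is a procedure
    -- invoked with CALL; the COMMIT makes the freshly created plan durable
    -- before the execution phase below begins.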

    -- execute the migration plan
    CALL _timescaledb_internal.cagg_migrate_execute_plan(_cagg_data);

    -- finish the migration plan
    UPDATE _timescaledb_catalog.continuous_agg_migrate_plan
    SET end_ts = pg_catalog.clock_timestamp()
    WHERE mat_hypertable_id OPERATOR(pg_catalog.=) _cagg_data.mat_hypertable_id;
END;
$BODY$;

-- Issue #4727
ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step
    DROP CONSTRAINT IF EXISTS continuous_agg_migrate_plan_step_check2;

ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step
    ADD CONSTRAINT continuous_agg_migrate_plan_step_check2
    CHECK (type IN ('CREATE NEW CAGG', 'DISABLE POLICIES', 'COPY POLICIES', 'ENABLE POLICIES', 'SAVE WATERMARK', 'REFRESH NEW CAGG', 'COPY DATA', 'OVERRIDE CAGG', 'DROP OLD CAGG'));
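
For illustration only (not part of this file): once a migration is running, its per-step progress can be read back from the catalog table the plan writes to, e.g.

SELECT type, status, config
FROM _timescaledb_catalog.continuous_agg_migrate_plan_step
WHERE mat_hypertable_id = 42; -- 42 is a hypothetical materialized hypertable id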
sql/updates/latest-dev.sql
@@ -6,172 +6,6 @@ AS '@MODULE_PATHNAME@', 'ts_gapfill_timestamptz_timezone_bucket' LANGUAGE C VOLATILE;
 ALTER TABLE _timescaledb_catalog.compression_chunk_size DROP CONSTRAINT compression_chunk_size_pkey;
 ALTER TABLE _timescaledb_catalog.compression_chunk_size ADD CONSTRAINT compression_chunk_size_pkey PRIMARY KEY(chunk_id);
-[removed: the cagg_migrate DROP PROCEDURE and CREATE PROCEDURE block, identical to the SQL added in sql/updates/2.8.0--2.8.1.sql above]
 CREATE TABLE _timescaledb_internal.job_errors (
     job_id integer not null,
     pid integer,
@@ -215,10 +49,3 @@ ALTER TABLE _timescaledb_internal.bgw_job_stat
     ALTER COLUMN flags SET NOT NULL,
     ALTER COLUMN flags SET DEFAULT 0;
-[removed: the "-- Issue #4727" continuous_agg_migrate_plan_step_check2 constraint rebuild, identical to the block added in sql/updates/2.8.0--2.8.1.sql above]
version.config
@@ -1,3 +1,3 @@
 version = 2.9.0-dev
-update_from_version = 2.8.0
+update_from_version = 2.8.1
 downgrade_to_version = 2.8.0
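
For context (a sketch, not part of the commit): with the new 2.8.0--2.8.1.sql script registered, an installation running 2.8.0 upgrades through the standard extension update command, and downgrade_to_version keeps the generated downgrade path back to 2.8.0:

ALTER EXTENSION timescaledb UPDATE TO '2.8.1';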