
Refactor CAgg migrate regression tests

When CAgg migration was introduced in commit e34218ce, the regression
tests were split into 6 (six) different test suites.

Simplified this by grouping the tests, reducing them to just 2 (two)
different test suites. This saves resources and time because each test
suite spawns its own Postgres instance.
Fabrízio de Royes Mello 2022-12-05 14:31:30 -03:00
parent e5843dddd3
commit 4694c7d798
17 changed files with 4111 additions and 4190 deletions
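For context, each of the old test suites was just a thin driver that set a handful of psql variables and then sourced the shared include, as the deleted files below show. Consolidating the suites therefore amounts to running several of those variable/\ir combinations from fewer driver files. A minimal sketch of the driver pattern (the grouping shown is illustrative, and it assumes the shared include cleans up the objects it creates so it can be sourced more than once in the same session):

-- local hypertable, integer time dimension
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql

-- distributed hypertable, integer time dimension (needs the data-node setup shown further below)
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql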

File diff suppressed because it is too large

File diff suppressed because it is too large

@@ -1,680 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
\else
\echo 'Running local hypertable tests'
Running local hypertable tests
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
table_name
------------
conditions
(1 row)
\endif
\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
set_integer_now_func
----------------------
(1 row)
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail continuous agg does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- get and set all the cagg data
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | NOT STARTED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | NOT STARTED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | NOT STARTED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | NOT STARTED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | NOT STARTED | ENABLE POLICIES |
(18 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | FINISHED | ENABLE POLICIES |
(18 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
add_compression_policy
------------------------
1002
(1 row)
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": 100} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": 400, "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 10 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.cagg_watermark(6)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(6)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": 100} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": 400, "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(18 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_3_102_chunk
_timescaledb_internal._hyper_3_103_chunk
_timescaledb_internal._hyper_3_104_chunk
_timescaledb_internal._hyper_3_105_chunk
_timescaledb_internal._hyper_3_106_chunk
_timescaledb_internal._hyper_3_107_chunk
_timescaledb_internal._hyper_3_108_chunk
_timescaledb_internal._hyper_3_109_chunk
_timescaledb_internal._hyper_3_110_chunk
_timescaledb_internal._hyper_3_111_chunk
(10 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_6_122_chunk
_timescaledb_internal._hyper_6_123_chunk
_timescaledb_internal._hyper_6_124_chunk
_timescaledb_internal._hyper_6_125_chunk
_timescaledb_internal._hyper_6_126_chunk
_timescaledb_internal._hyper_6_127_chunk
_timescaledb_internal._hyper_6_128_chunk
_timescaledb_internal._hyper_6_129_chunk
_timescaledb_internal._hyper_6_130_chunk
_timescaledb_internal._hyper_6_131_chunk
(10 rows)
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 10 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.cagg_watermark(8)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(8)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
-- cagg with the old format because it was overriden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.cagg_watermark(3)::integer, '-2147483648'::integer)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(3)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overriden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": 400, "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 10 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 10 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.cagg_watermark(10)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(10)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overriden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+----------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": 400, "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 10 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because the lack of permissions on 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because the lack of permissions on 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because the lack of permissions on 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 15 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 18 | FINISHED | ENABLE POLICIES |
(18 rows)
RESET ROLE;
-- according to the official documentation trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 10 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1

@@ -1,720 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
------------------------------------
-- Set up a distributed environment
------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
\set DATA_NODE_3 :TEST_DBNAME _3
\ir include/remote_exec.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
CREATE SCHEMA IF NOT EXISTS test;
psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping
GRANT USAGE ON SCHEMA test TO PUBLIC;
CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text)
RETURNS VOID
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec'
LANGUAGE C;
CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text)
RETURNS TABLE("table_record" CSTRING[])
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings'
LANGUAGE C;
SELECT node_name, database, node_created, database_created, extension_created
FROM (
SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
) a;
node_name | database | node_created | database_created | extension_created
-----------------------------------+-----------------------------------+--------------+------------------+-------------------
db_cagg_migrate_integer_dist_ht_1 | db_cagg_migrate_integer_dist_ht_1 | t | t | t
db_cagg_migrate_integer_dist_ht_2 | db_cagg_migrate_integer_dist_ht_2 | t | t | t
db_cagg_migrate_integer_dist_ht_3 | db_cagg_migrate_integer_dist_ht_3 | t | t | t
(3 rows)
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
Running distributed hypertable tests
\else
\echo 'Running local hypertable tests'
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
table_name
------------
conditions
(1 row)
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
\endif
\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
set_integer_now_func
----------------------
(1 row)
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail continuous agg does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- get and set all the cagg data
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | NOT STARTED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | NOT STARTED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | NOT STARTED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | NOT STARTED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | NOT STARTED | ENABLE POLICIES |
(18 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | FINISHED | ENABLE POLICIES |
(18 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
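-- The plan rows persist in the catalog, which is why re-planning fails above.
-- A sketch of the reset used further below before re-running a migration from
-- scratch:
-- TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;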
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
add_compression_policy
------------------------
1002
(1 row)
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": 100} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": 400, "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 10 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.cagg_watermark(6)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(6)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": 100} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": 400, "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
3 | 15 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 18 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(18 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
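-- EXCEPT only reports rows present in the old cagg and missing from the new
-- one; a fully symmetric comparison would also run the reverse direction
-- (sketch, not executed by this test):
-- SELECT * FROM conditions_summary_daily_new
-- EXCEPT
-- SELECT * FROM conditions_summary_daily;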
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_3_102_chunk
_timescaledb_internal._hyper_3_103_chunk
_timescaledb_internal._hyper_3_104_chunk
_timescaledb_internal._hyper_3_105_chunk
_timescaledb_internal._hyper_3_106_chunk
_timescaledb_internal._hyper_3_107_chunk
_timescaledb_internal._hyper_3_108_chunk
_timescaledb_internal._hyper_3_109_chunk
_timescaledb_internal._hyper_3_110_chunk
_timescaledb_internal._hyper_3_111_chunk
(10 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_6_122_chunk
_timescaledb_internal._hyper_6_123_chunk
_timescaledb_internal._hyper_6_124_chunk
_timescaledb_internal._hyper_6_125_chunk
_timescaledb_internal._hyper_6_126_chunk
_timescaledb_internal._hyper_6_127_chunk
_timescaledb_internal._hyper_6_128_chunk
_timescaledb_internal._hyper_6_129_chunk
_timescaledb_internal._hyper_6_130_chunk
_timescaledb_internal._hyper_6_131_chunk
(10 rows)
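-- If the compressed chunks ever needed to be rolled back for further testing,
-- decompress_chunk() mirrors the calls above (illustrative sketch only):
-- SELECT decompress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;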
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 10 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.cagg_watermark(8)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(8)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
-- cagg with the old format because it was overridden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.cagg_watermark(3)::integer, '-2147483648'::integer)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(3)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": 400, "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 10 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": 400, "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 10 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+---------+-----------+----------+---------+---------+-------------
bucket | integer | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.cagg_watermark(10)::integer, '-2147483648'::integer)
UNION ALL
SELECT time_bucket(24, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.cagg_watermark(10)::integer, '-2147483648'::integer)
GROUP BY (time_bucket(24, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+----------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": 400, "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": 1, "start_offset": 50, "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": 100} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
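-- Recap of the three invocation forms exercised by the scenarios above
-- (already executed earlier in this file):
-- CALL cagg_migrate('conditions_summary_daily');                                     -- keep the old cagg, create the '_new' one
-- CALL cagg_migrate('conditions_summary_daily', override => TRUE);                   -- new cagg takes the name, old kept with '_old' suffix
-- CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE); -- new cagg takes the name, old cagg dropped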
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 10 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
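-- Recap of the privileges a non-superuser needs on the migration catalog
-- objects, as granted step by step above (role name is a placeholder):
-- GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO migration_user;
-- GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO migration_user;
-- GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO migration_user;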
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "1008"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "1008", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "integer"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "100", "start_ts": "0", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "200", "start_ts": "100", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "300", "start_ts": "200", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "400", "start_ts": "300", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "500", "start_ts": "400", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "600", "start_ts": "500", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 11 | FINISHED | COPY DATA | {"end_ts": "700", "start_ts": "600", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 12 | FINISHED | COPY DATA | {"end_ts": "800", "start_ts": "700", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 13 | FINISHED | COPY DATA | {"end_ts": "900", "start_ts": "800", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 14 | FINISHED | COPY DATA | {"end_ts": "1000", "start_ts": "900", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "integer"}
12 | 15 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 16 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 17 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 18 | FINISHED | ENABLE POLICIES |
(18 rows)
RESET ROLE;
-- according to the official documentation, trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 10 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
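-- Outside an explicit transaction block (plain autocommit) the procedure runs
-- its own transactions and succeeds, as in the earlier invocations, e.g.:
-- CALL cagg_migrate('conditions_summary_daily');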
-- cleanup
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;

@@ -1,652 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
\else
\echo 'Running local hypertable tests'
Running local hypertable tests
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
psql:include/cagg_migrate_common.sql:26: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices
table_name
------------
conditions
(1 row)
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
\endif
\endif
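-- The WARNING above is emitted because the time column uses TIMESTAMP (without
-- time zone); TIMESTAMPTZ is the recommended type. Illustrative sketch only,
-- with a hypothetical table name that is not part of this test:
-- CREATE TABLE conditions_tz ("time" TIMESTAMPTZ NOT NULL, temperature NUMERIC);
-- SELECT table_name FROM create_hypertable('conditions_tz', 'time');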
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail because the relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail because the continuous aggregate does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- get and set all the cagg data
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | NOT STARTED | ENABLE POLICIES |
(14 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
add_compression_policy
------------------------
1002
(1 row)
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(14 rows)
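-- The plan table above doubles as a progress log. A minimal sketch (kept as a
-- comment so it does not alter the expected output; the mat_hypertable_id value 3
-- is taken from the plan above and is otherwise an assumption) for listing the
-- steps of a migration that have not finished yet:
--   SELECT step_id, status, type
--   FROM _timescaledb_catalog.continuous_agg_migrate_plan_step
--   WHERE mat_hypertable_id = 3 AND status <> 'FINISHED'
--   ORDER BY step_id;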
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_3_54_chunk
_timescaledb_internal._hyper_3_55_chunk
_timescaledb_internal._hyper_3_56_chunk
_timescaledb_internal._hyper_3_57_chunk
_timescaledb_internal._hyper_3_58_chunk
_timescaledb_internal._hyper_3_59_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_6_66_chunk
_timescaledb_internal._hyper_6_67_chunk
_timescaledb_internal._hyper_6_68_chunk
_timescaledb_internal._hyper_6_69_chunk
_timescaledb_internal._hyper_6_70_chunk
_timescaledb_internal._hyper_6_71_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
-- cagg with the old format because it was overridden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp without time zone)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": "@ 30 days", "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 6 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": "@ 30 days", "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 6 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
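-- To summarize the grants exercised above: before cagg_migrate succeeds for a
-- non-superuser, it needs SELECT/INSERT/UPDATE on both migration-plan catalog
-- tables plus USAGE on the step_id sequence. A minimal sketch (kept as a comment;
-- the role name "migration_user" is a placeholder, not part of this test):
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO migration_user;
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO migration_user;
--   GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO migration_user;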
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
RESET ROLE;
-- according to the official documentation, trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 6 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
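-- Both failures above stem from cagg_migrate performing its own transaction
-- control, so the supported invocation is a plain top-level CALL with autocommit
-- enabled. A minimal sketch (kept as a comment), matching how the procedure is
-- invoked elsewhere in this test:
--   CALL cagg_migrate('conditions_summary_daily');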
@ -1,692 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
------------------------------------
-- Set up a distributed environment
------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
\set DATA_NODE_3 :TEST_DBNAME _3
\ir include/remote_exec.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
CREATE SCHEMA IF NOT EXISTS test;
psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping
GRANT USAGE ON SCHEMA test TO PUBLIC;
CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text)
RETURNS VOID
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec'
LANGUAGE C;
CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text)
RETURNS TABLE("table_record" CSTRING[])
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings'
LANGUAGE C;
SELECT node_name, database, node_created, database_created, extension_created
FROM (
SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
) a;
node_name | database | node_created | database_created | extension_created
-------------------------------------+-------------------------------------+--------------+------------------+-------------------
db_cagg_migrate_timestamp_dist_ht_1 | db_cagg_migrate_timestamp_dist_ht_1 | t | t | t
db_cagg_migrate_timestamp_dist_ht_2 | db_cagg_migrate_timestamp_dist_ht_2 | t | t | t
db_cagg_migrate_timestamp_dist_ht_3 | db_cagg_migrate_timestamp_dist_ht_3 | t | t | t
(3 rows)
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
Running distributed hypertable tests
\else
\echo 'Running local hypertable tests'
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
psql:include/cagg_migrate_common.sql:20: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices
table_name
------------
conditions
(1 row)
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
\endif
\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail because the relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail because the continuous aggregate does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- get and set all the cagg data
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | NOT STARTED | ENABLE POLICIES |
(14 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
add_compression_policy
------------------------
1002
(1 row)
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(14 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_3_54_chunk
_timescaledb_internal._hyper_3_55_chunk
_timescaledb_internal._hyper_3_56_chunk
_timescaledb_internal._hyper_3_57_chunk
_timescaledb_internal._hyper_3_58_chunk
_timescaledb_internal._hyper_3_59_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_6_66_chunk
_timescaledb_internal._hyper_6_67_chunk
_timescaledb_internal._hyper_6_68_chunk
_timescaledb_internal._hyper_6_69_chunk
_timescaledb_internal._hyper_6_70_chunk
_timescaledb_internal._hyper_6_71_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
-- cagg with the old format because it was overridden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp without time zone)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": "@ 30 days", "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 6 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+-----------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp without time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp without time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp_without_timezone(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp without time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": "@ 30 days", "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 6 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
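-- Aside (a sketch summarizing the grants exercised above, not part of the recorded
-- test run): a non-superuser needs at least the following privileges on the
-- migration catalog objects before calling cagg_migrate; <role> is a placeholder
-- for the target role (here :ROLE_DEFAULT_PERM_USER):
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO <role>;
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO <role>;
--   GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO <role>;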
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp without time zone"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 00:00:00 2022", "start_ts": "Fri Dec 31 00:00:00 2021", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 00:00:00 2022", "start_ts": "Fri Mar 11 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 00:00:00 2022", "start_ts": "Fri May 20 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 00:00:00 2022", "start_ts": "Fri Jul 29 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 00:00:00 2022", "start_ts": "Fri Oct 07 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 00:00:00 2023", "start_ts": "Fri Dec 16 00:00:00 2022", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp without time zone"}
12 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
RESET ROLE;
-- according to the official documentation, trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 6 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
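-- Aside (a sketch, not part of the recorded test run): both failures above occur
-- because the procedure issues transaction control statements internally, which is
-- only allowed when CALL owns its transaction. Outside an explicit BEGIN block
-- (i.e. in autocommit mode, as in the rest of this test) the same call succeeds:
--   CALL cagg_migrate('conditions_summary_daily');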
-- cleanup
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;
@ -1,651 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
\else
\echo 'Running local hypertable tests'
Running local hypertable tests
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
table_name
------------
conditions
(1 row)
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
\endif
\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail because the relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail because the continuous aggregate does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- fetch all the cagg metadata and store it in a psql variable
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | NOT STARTED | ENABLE POLICIES |
(14 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
\set ON_ERROR_STOP 0
-- should error because plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
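-- Aside (a sketch, not part of the recorded test run): as done elsewhere in this
-- test, a stale plan can be discarded by truncating the migration catalog, after
-- which cagg_migrate can be executed again:
--   TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;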
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
add_compression_policy
------------------------
1002
(1 row)
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(14 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_3_54_chunk
_timescaledb_internal._hyper_3_55_chunk
_timescaledb_internal._hyper_3_56_chunk
_timescaledb_internal._hyper_3_57_chunk
_timescaledb_internal._hyper_3_58_chunk
_timescaledb_internal._hyper_3_59_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_6_66_chunk
_timescaledb_internal._hyper_6_67_chunk
_timescaledb_internal._hyper_6_68_chunk
_timescaledb_internal._hyper_6_69_chunk
_timescaledb_internal._hyper_6_70_chunk
_timescaledb_internal._hyper_6_71_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
-- cagg with the old format because it was overridden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp with time zone)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": "@ 30 days", "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 6 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": "@ 30 days", "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 6 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
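-- Note (a consolidated recap, not part of the generated test output): the
-- grants issued step by step above appear to be the minimum a non-superuser
-- needs before calling cagg_migrate(); for the same :ROLE_DEFAULT_PERM_USER
-- role they amount to:
--
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
--   GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
--   GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;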
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
RESET ROLE;
-- according to the official documentation, trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
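-- Context (a minimal sketch, not part of the generated test output): the
-- migration procedure evidently performs transaction control of its own, so it
-- only succeeds when CALL runs in its own transaction (autocommit mode), while
-- an explicit transaction block such as the one below is expected to fail:
--
--   CALL cagg_migrate('conditions_summary_daily');   -- OK: runs in its own transaction
--   BEGIN;
--   CALL cagg_migrate('conditions_summary_daily');   -- ERROR: invalid transaction termination
--   ROLLBACK;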
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 6 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute the migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
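-- Note (an explanatory comment, not part of the generated test output): the
-- plpgsql wrapper fails for the same reason as the explicit BEGIN block above;
-- a plpgsql function always executes inside the caller's transaction and cannot
-- end it, so the procedure's internal transaction control is still rejected
-- with "invalid transaction termination".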
@ -1,691 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
------------------------------------
-- Set up a distributed environment
------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
\set DATA_NODE_3 :TEST_DBNAME _3
\ir include/remote_exec.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
CREATE SCHEMA IF NOT EXISTS test;
psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping
GRANT USAGE ON SCHEMA test TO PUBLIC;
CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text)
RETURNS VOID
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec'
LANGUAGE C;
CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text)
RETURNS TABLE("table_record" CSTRING[])
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings'
LANGUAGE C;
SELECT node_name, database, node_created, database_created, extension_created
FROM (
SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
) a;
node_name | database | node_created | database_created | extension_created
---------------------------------------+---------------------------------------+--------------+------------------+-------------------
db_cagg_migrate_timestamptz_dist_ht_1 | db_cagg_migrate_timestamptz_dist_ht_1 | t | t | t
db_cagg_migrate_timestamptz_dist_ht_2 | db_cagg_migrate_timestamptz_dist_ht_2 | t | t | t
db_cagg_migrate_timestamptz_dist_ht_3 | db_cagg_migrate_timestamptz_dist_ht_3 | t | t | t
(3 rows)
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ
\ir include/cagg_migrate_common.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\if :IS_DISTRIBUTED
\echo 'Running distributed hypertable tests'
Running distributed hypertable tests
\else
\echo 'Running local hypertable tests'
\endif
CREATE TABLE conditions (
"time" :TIME_DIMENSION_DATATYPE NOT NULL,
temperature NUMERIC
);
\if :IS_DISTRIBUTED
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2);
table_name
------------
conditions
(1 row)
\else
SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2);
\endif
\else
\if :IS_TIME_DIMENSION
SELECT table_name FROM create_hypertable('conditions', 'time');
\else
SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10);
\endif
\endif
\if :IS_TIME_DIMENSION
INSERT INTO conditions ("time", temperature)
SELECT
generate_series('2022-01-01 00:00:00-00'::timestamptz, '2022-12-31 23:59:59-00'::timestamptz, '1 hour'),
0.25;
\else
CREATE OR REPLACE FUNCTION integer_now()
RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS
$$
SELECT coalesce(max(time), 0)
FROM public.conditions
$$;
\if :IS_DISTRIBUTED
SELECT
'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM public.conditions $$;' AS "STMT"
\gset
CALL distributed_exec (:'STMT');
\endif
SELECT set_integer_now_func('conditions', 'integer_now');
INSERT INTO conditions ("time", temperature)
SELECT
generate_series(1, 1000, 1),
0.25;
\endif
\set ON_ERROR_STOP 0
-- should fail because the relation does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:63: ERROR: relation "conditions_summary_daily" does not exist at character 19
CREATE TABLE conditions_summary_daily();
-- should fail because the continuous aggregate does not exist
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:66: ERROR: continuous aggregate "public.conditions_summary_daily" does not exist
\set ON_ERROR_STOP 1
DROP TABLE conditions_summary_daily;
CREATE MATERIALIZED VIEW conditions_summary_daily_new
WITH (timescaledb.continuous) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket
WITH NO DATA;
\set ON_ERROR_STOP 0
-- should fail because we don't need to migrate finalized caggs
CALL cagg_migrate('conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:91: ERROR: continuous aggregate "public.conditions_summary_daily_new" does not require any migration
\set ON_ERROR_STOP 1
-- older continuous aggregate to be migrated
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:110: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
SELECT
ca.raw_hypertable_id AS "RAW_HYPERTABLE_ID",
h.schema_name AS "MAT_SCHEMA_NAME",
h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
partial_view_schema AS "PART_VIEW_SCHEMA",
direct_view_name AS "DIR_VIEW_NAME",
direct_view_schema AS "DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily'
\gset
\set ON_ERROR_STOP 0
-- should fail because the new cagg with suffix '_new' already exists
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:129: ERROR: continuous aggregate "public.conditions_summary_daily_new" already exists
\set ON_ERROR_STOP 1
-- remove the new cagg to execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
-- get and set all the cagg data
SELECT
_timescaledb_internal.cagg_migrate_pre_validation(
'public',
'conditions_summary_daily',
'conditions_summary_daily_new'
) AS "CAGG_DATA"
\gset
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg_migrate_plan;
mat_hypertable_id
-------------------
3
(1 row)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+-------------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | NOT STARTED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | NOT STARTED | DISABLE POLICIES | {"policies": null}
3 | 4 | NOT STARTED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | NOT STARTED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | NOT STARTED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | NOT STARTED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | NOT STARTED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | NOT STARTED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | NOT STARTED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | NOT STARTED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | NOT STARTED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | NOT STARTED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | NOT STARTED | ENABLE POLICIES |
(14 rows)
-- should resume the execution
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:149: WARNING: resuming the migration of the continuous aggregate "public.conditions_summary_daily"
psql:include/cagg_migrate_common.sql:149: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
\set ON_ERROR_STOP 0
-- should error because the plan already exists
CALL _timescaledb_internal.cagg_migrate_create_plan(:'CAGG_DATA', 'conditions_summary_daily_new');
psql:include/cagg_migrate_common.sql:154: ERROR: plan already exists for materialized hypertable 3
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for continuous aggregate public.conditions_summary_daily
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
----------------------
1000
(1 row)
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
SELECT add_compression_policy('conditions_summary_daily', '45 days'::interval);
add_compression_policy
------------------------
1002
(1 row)
\else
SELECT add_retention_policy('conditions_summary_daily', '400'::integer);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '50'::integer, '1'::integer, '1 hour'::interval);
SELECT add_compression_policy('conditions_summary_daily', '100'::integer);
\endif
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'MAT_SCHEMA_NAME'
AND hypertable_name = :'MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 3, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_compression_check
1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 3} | | | _timescaledb_internal | _materialized_hypertable_3 | _timescaledb_internal | policy_retention_check
(3 rows)
-- execute the migration
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
h.schema_name AS "NEW_MAT_SCHEMA_NAME",
h.table_name AS "NEW_MAT_TABLE_NAME",
partial_view_name AS "NEW_PART_VIEW_NAME",
partial_view_schema AS "NEW_PART_VIEW_SCHEMA",
direct_view_name AS "NEW_DIR_VIEW_NAME",
direct_view_schema AS "NEW_DIR_VIEW_SCHEMA"
FROM
_timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE
user_view_name = 'conditions_summary_daily_new'
\gset
\d+ conditions_summary_daily_new
View "public.conditions_summary_daily_new"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_6.bucket,
_materialized_hypertable_6.min,
_materialized_hypertable_6.max,
_materialized_hypertable_6.avg,
_materialized_hypertable_6.sum
FROM _timescaledb_internal._materialized_hypertable_6
WHERE _materialized_hypertable_6.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(6)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
FROM timescaledb_information.jobs
WHERE hypertable_schema = :'NEW_MAT_SCHEMA_NAME'
AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+--------------------------------------------------------------------------------+------------+---------------+-----------------------+----------------------------+-----------------------+-------------------------------------------
1005 | Compression Policy [1005] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | {"hypertable_id": 6, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_compression_check
1004 | Refresh Continuous Aggregate Policy [1004] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_refresh_continuous_aggregate_check
1003 | Retention Policy [1003] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 6} | | | _timescaledb_internal | _materialized_hypertable_6 | _timescaledb_internal | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
3 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
3 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
3 | 3 | FINISHED | DISABLE POLICIES | {"policies": [1002, 1000]}
3 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
3 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
3 | 11 | FINISHED | COPY POLICIES | {"policies": [1002, 1001, 1000], "cagg_name_new": "conditions_summary_daily_new"}
3 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
3 | 14 | FINISHED | ENABLE POLICIES | {"policies": [1003, 1004, 1005, 1002, 1001, 1000]}
(14 rows)
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
-- compress both caggs
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_3_54_chunk
_timescaledb_internal._hyper_3_55_chunk
_timescaledb_internal._hyper_3_56_chunk
_timescaledb_internal._hyper_3_57_chunk
_timescaledb_internal._hyper_3_58_chunk
_timescaledb_internal._hyper_3_59_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_6_66_chunk
_timescaledb_internal._hyper_6_67_chunk
_timescaledb_internal._hyper_6_68_chunk
_timescaledb_internal._hyper_6_69_chunk
_timescaledb_internal._hyper_6_70_chunk
_timescaledb_internal._hyper_6_71_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
CREATE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
ORDER BY bgw_job.id;
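-- Note (annotation, not part of the generated test output): cagg_jobs is a
-- convenience view for the policy checks that follow; it joins
-- _timescaledb_config.bgw_job with _timescaledb_catalog.continuous_agg so that
-- policy jobs can be looked up by the user-facing cagg schema and name, e.g.:
--
--   SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';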
-- test migration overriding the new cagg and keeping the old
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:228: NOTICE: drop cascades to 6 other objects
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:229: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_8.bucket,
_materialized_hypertable_8.min,
_materialized_hypertable_8.max,
_materialized_hypertable_8.avg,
_materialized_hypertable_8.sum
FROM _timescaledb_internal._materialized_hypertable_8
WHERE _materialized_hypertable_8.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(8)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
-- cagg with the old format because it was overridden
\d+ conditions_summary_daily_old
View "public.conditions_summary_daily_old"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_3.bucket,
_timescaledb_internal.finalize_agg('pg_catalog.min(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_2_2, NULL::numeric) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_3_3, NULL::numeric) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.avg(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_4_4, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.sum(numeric)'::text, NULL::name, NULL::name, '{{pg_catalog,numeric}}'::name[], _materialized_hypertable_3.agg_5_5, NULL::numeric) AS sum
FROM _timescaledb_internal._materialized_hypertable_3
WHERE _materialized_hypertable_3.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp with time zone)
GROUP BY _materialized_hypertable_3.bucket
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(3)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:239: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1006 | Retention Policy [1006] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 8 | {"drop_after": "@ 30 days", "hypertable_id": 8} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1007 | Refresh Continuous Aggregate Policy [1007] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 8 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 8} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1008 | Compression Policy [1008] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 8 | {"hypertable_id": 8, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily_old | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily_old | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily_old | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- test migration overriding the new cagg and removing the old
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:249: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:250: NOTICE: drop cascades to 6 other objects
ALTER MATERIALIZED VIEW conditions_summary_daily_old RENAME TO conditions_summary_daily;
-- check policies before the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+--------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1000 | Retention Policy [1000] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 3 | {"drop_after": "@ 30 days", "hypertable_id": 3} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1001 | Refresh Continuous Aggregate Policy [1001] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 3 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 3} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1002 | Compression Policy [1002] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1001 not found, skipping
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1000 not found, skipping
-- cagg with the new format because it was overridden
\d+ conditions_summary_daily
View "public.conditions_summary_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
--------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
min | numeric | | | | main |
max | numeric | | | | main |
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_10.bucket,
_materialized_hypertable_10.min,
_materialized_hypertable_10.max,
_materialized_hypertable_10.avg,
_materialized_hypertable_10.sum
FROM _timescaledb_internal._materialized_hypertable_10
WHERE _materialized_hypertable_10.bucket < COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
max(conditions.temperature) AS max,
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(10)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
-- should fail because the cagg was overridden
SELECT * FROM conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:259: ERROR: relation "conditions_summary_daily_new" does not exist at character 15
-- should fail because the old cagg was removed
SELECT * FROM conditions_summary_daily_old;
psql:include/cagg_migrate_common.sql:261: ERROR: relation "conditions_summary_daily_old" does not exist at character 15
\set ON_ERROR_STOP 1
-- check policies after the migration
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+-----------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+-----------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1009 | Retention Policy [1009] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_internal | policy_retention | cluster_super_user | t | f | | 10 | {"drop_after": "@ 30 days", "hypertable_id": 10} | _timescaledb_internal | policy_retention_check |
public | conditions_summary_daily | 1010 | Refresh Continuous Aggregate Policy [1010] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 10 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 10} | _timescaledb_internal | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1011 | Compression Policy [1011] | @ 35 days | @ 0 | -1 | @ 1 hour | _timescaledb_internal | policy_compression | cluster_super_user | t | f | | 10 | {"hypertable_id": 10, "compress_after": "@ 45 days"} | _timescaledb_internal | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_old';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- should return no rows because the cagg was overwritten
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily_new';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+------+----+------------------+-------------------+-------------+-------------+--------------+-------------+-----------+-------+-----------+----------------+---------------+---------------+--------+--------------+------------+----------
(0 rows)
-- permissions test
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:271: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:272: NOTICE: drop cascades to 6 other objects
GRANT ALL ON TABLE conditions TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous, timescaledb.finalized=false) AS
SELECT
\if :IS_TIME_DIMENSION
time_bucket(INTERVAL '1 day', "time") AS bucket,
\else
time_bucket(INTEGER '24', "time") AS bucket,
\endif
MIN(temperature),
MAX(temperature),
AVG(temperature),
SUM(temperature)
FROM
conditions
GROUP BY
bucket;
psql:include/cagg_migrate_common.sql:291: NOTICE: refreshing continuous aggregate "conditions_summary_daily"
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:295: ERROR: permission denied for table continuous_agg_migrate_plan
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step' catalog table
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:305: ERROR: permission denied for table continuous_agg_migrate_plan_step
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
-- should fail because of the lack of permissions on the 'continuous_agg_migrate_plan_step_step_id_seq' catalog sequence
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:315: ERROR: permission denied for sequence continuous_agg_migrate_plan_step_step_id_seq
\set ON_ERROR_STOP 1
RESET ROLE;
GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- all necessary permissions granted
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:324: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
-- check migrated data. should return 0 (zero) rows
SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
bucket | min | max | avg | sum
--------+-----+-----+-----+-----
(0 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
mat_hypertable_id | step_id | status | type | config
-------------------+---------+----------+------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
12 | 1 | FINISHED | SAVE WATERMARK | {"watermark": "Sun Jan 01 00:00:00 2023"}
12 | 2 | FINISHED | CREATE NEW CAGG | {"cagg_name_new": "conditions_summary_daily_new"}
12 | 3 | FINISHED | DISABLE POLICIES | {"policies": null}
12 | 4 | FINISHED | REFRESH NEW CAGG | {"window_start": "Sun Jan 01 00:00:00 2023", "cagg_name_new": "conditions_summary_daily_new", "window_start_type": "timestamp with time zone"}
12 | 5 | FINISHED | COPY DATA | {"end_ts": "Fri Mar 11 16:00:00 2022 PST", "start_ts": "Fri Dec 31 16:00:00 2021 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 6 | FINISHED | COPY DATA | {"end_ts": "Fri May 20 16:00:00 2022 PDT", "start_ts": "Fri Mar 11 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 7 | FINISHED | COPY DATA | {"end_ts": "Fri Jul 29 16:00:00 2022 PDT", "start_ts": "Fri May 20 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 8 | FINISHED | COPY DATA | {"end_ts": "Fri Oct 07 16:00:00 2022 PDT", "start_ts": "Fri Jul 29 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 9 | FINISHED | COPY DATA | {"end_ts": "Fri Dec 16 16:00:00 2022 PST", "start_ts": "Fri Oct 07 16:00:00 2022 PDT", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 10 | FINISHED | COPY DATA | {"end_ts": "Fri Feb 24 16:00:00 2023 PST", "start_ts": "Fri Dec 16 16:00:00 2022 PST", "cagg_name_new": "conditions_summary_daily_new", "bucket_column_name": "bucket", "bucket_column_type": "timestamp with time zone"}
12 | 11 | FINISHED | COPY POLICIES | {"policies": null, "cagg_name_new": "conditions_summary_daily_new"}
12 | 12 | FINISHED | OVERRIDE CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 13 | FINISHED | DROP OLD CAGG | {"drop_old": false, "override": false, "cagg_name_new": "conditions_summary_daily_new"}
12 | 14 | FINISHED | ENABLE POLICIES |
(14 rows)
RESET ROLE;
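-- A minimal recap (a sketch, not part of the test run), assuming the same
-- :ROLE_DEFAULT_PERM_USER role as above: the full set of catalog privileges a
-- non-superuser needs before a plain CALL cagg_migrate(...) succeeds.
-- GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan TO :ROLE_DEFAULT_PERM_USER;
-- GRANT SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step TO :ROLE_DEFAULT_PERM_USER;
-- GRANT USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq TO :ROLE_DEFAULT_PERM_USER;
-- CALL cagg_migrate('conditions_summary_daily');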
-- according to the official documentation, trying to execute a procedure with
-- transaction control statements inside an explicit transaction should fail:
-- https://www.postgresql.org/docs/current/sql-call.html
-- `If CALL is executed in a transaction block, then the called procedure cannot
-- execute transaction control statements. Transaction control statements are only
-- allowed if CALL is executed in its own transaction.`
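-- A minimal sketch of the supported invocation, assuming psql's default
-- autocommit mode: the procedure must be called as a top-level statement so it
-- can issue its own transaction control between migration steps.
-- CALL cagg_migrate('conditions_summary_daily');
-- The failing in-transaction variants are exercised below.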
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:341: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily_new;
psql:include/cagg_migrate_common.sql:342: NOTICE: drop cascades to 6 other objects
\set ON_ERROR_STOP 0
BEGIN;
-- should fail with `invalid transaction termination`
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:347: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
RETURN;
END;
$$
LANGUAGE plpgsql;
\set ON_ERROR_STOP 0
-- execute migration inside a plpgsql function
BEGIN;
-- should fail with `invalid transaction termination`
SELECT execute_migration();
psql:include/cagg_migrate_common.sql:364: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
-- cleanup
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;

@ -56,12 +56,8 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
cagg_drop_chunks.sql
cagg_dump.sql
cagg_errors_deprecated.sql
cagg_migrate_integer.sql
cagg_migrate_integer_dist_ht.sql
cagg_migrate_timestamp.sql
cagg_migrate_timestamp_dist_ht.sql
cagg_migrate_timestamptz.sql
cagg_migrate_timestamptz_dist_ht.sql
cagg_migrate.sql
cagg_migrate_dist_ht.sql
cagg_multi.sql
cagg_on_cagg_integer.sql
cagg_on_cagg_integer_dist_ht.sql

@ -0,0 +1,26 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION FALSE
-- ########################################################
-- ## INTEGER data type tests
-- ########################################################
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql
-- ########################################################
-- ## TIMESTAMP data type tests
-- ########################################################
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql
-- ########################################################
-- ## TIMESTAMPTZ data type tests
-- ########################################################
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ
\ir include/cagg_migrate_common.sql

@ -22,9 +22,26 @@ FROM (
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
-- ########################################################
-- ## INTEGER data type tests
-- ########################################################
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql
-- ########################################################
-- ## TIMESTAMP data type tests
-- ########################################################
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql
-- ########################################################
-- ## TIMESTAMPTZ data type tests
-- ########################################################
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ
\ir include/cagg_migrate_common.sql
-- cleanup

@ -1,9 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql

@ -1,34 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
------------------------------------
-- Set up a distributed environment
------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
\set DATA_NODE_3 :TEST_DBNAME _3
\ir include/remote_exec.sql
SELECT node_name, database, node_created, database_created, extension_created
FROM (
SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
) a;
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION FALSE
\set TIME_DIMENSION_DATATYPE INTEGER
\ir include/cagg_migrate_common.sql
-- cleanup
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;

@ -1,9 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql

@ -1,34 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
------------------------------------
-- Set up a distributed environment
------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
\set DATA_NODE_1 :TEST_DBNAME _1
\set DATA_NODE_2 :TEST_DBNAME _2
\set DATA_NODE_3 :TEST_DBNAME _3
\ir include/remote_exec.sql
SELECT node_name, database, node_created, database_created, extension_created
FROM (
SELECT (add_data_node(name, host => 'localhost', DATABASE => name)).*
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v(name)
) a;
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
\set IS_DISTRIBUTED TRUE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMP
\ir include/cagg_migrate_common.sql
-- cleanup
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;

@ -1,9 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set IS_DISTRIBUTED FALSE
\set IS_TIME_DIMENSION TRUE
\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ
\ir include/cagg_migrate_common.sql

@ -218,7 +218,7 @@ SELECT * FROM conditions_summary_daily
EXCEPT
SELECT * FROM conditions_summary_daily_new;
CREATE VIEW cagg_jobs AS
CREATE OR REPLACE VIEW cagg_jobs AS
SELECT user_view_schema AS schema, user_view_name AS name, bgw_job.*
FROM _timescaledb_config.bgw_job
JOIN _timescaledb_catalog.continuous_agg ON mat_hypertable_id = hypertable_id
@ -348,7 +348,7 @@ CALL cagg_migrate('conditions_summary_daily');
ROLLBACK;
\set ON_ERROR_STOP 1
CREATE FUNCTION execute_migration() RETURNS void AS
CREATE OR REPLACE FUNCTION execute_migration() RETURNS void AS
$$
BEGIN
CALL cagg_migrate('conditions_summary_daily');
@ -364,3 +364,10 @@ BEGIN;
SELECT execute_migration();
ROLLBACK;
\set ON_ERROR_STOP 1
-- cleanup
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;
REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER;
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
DROP MATERIALIZED VIEW conditions_summary_daily;
DROP TABLE conditions;