From 3749953e9704e45df8f621607989ada0714ce28d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Fabr=C3=ADzio=20de=20Royes=20Mello?=
Date: Wed, 5 Oct 2022 18:45:40 -0300
Subject: [PATCH] Hierarchical Continuous Aggregates

Enable users to create Hierarchical Continuous Aggregates (aka
Continuous Aggregates on top of other Continuous Aggregates).

With this PR users can create multiple levels of aggregation
granularity in Continuous Aggregates, making the refresh process even
faster.

A caveat of this feature is that at the upper levels we can end up with
the "average of averages". To get the "real average" we can instead
rely on the "stats_agg" TimescaleDB Toolkit function, which calculates
and stores partials that can later be finalized with other Toolkit
functions such as "average" and "sum".

Closes #1400
---
 sql/pre_install/tables.sql                    |   9 +-
 sql/updates/latest-dev.sql                    | 117 +++++-
 sql/updates/reverse-dev.sql                   | 138 ++++++-
 src/ts_catalog/catalog.c                      |   4 +-
 src/ts_catalog/catalog.h                      |   2 +
 src/ts_catalog/continuous_agg.c               |  14 +
 tsl/src/continuous_aggs/create.c              | 101 +++--
 tsl/test/expected/cagg_bgw.out                |   4 +-
 tsl/test/expected/cagg_bgw_dist_ht.out        |   4 +-
 tsl/test/expected/cagg_ddl.out                |  80 ++--
 tsl/test/expected/cagg_ddl_dist_ht.out        |  80 ++--
 tsl/test/expected/cagg_errors_deprecated.out  |  36 +-
 tsl/test/expected/cagg_on_cagg_integer.out    | 335 ++++++++++++++++
 .../expected/cagg_on_cagg_integer_dist_ht.out | 372 ++++++++++++++++++
 tsl/test/expected/cagg_on_cagg_timestamp.out  | 332 ++++++++++++++++
 .../cagg_on_cagg_timestamp_dist_ht.out        | 369 +++++++++++++++++
 .../expected/cagg_on_cagg_timestamptz.out     | 331 ++++++++++++++++
 .../cagg_on_cagg_timestamptz_dist_ht.out      | 368 +++++++++++++++++
 tsl/test/expected/cagg_watermark.out          |   2 +-
 tsl/test/expected/exp_cagg_monthly.out        |   4 +-
 tsl/test/sql/CMakeLists.txt                   |   6 +
 tsl/test/sql/cagg_errors_deprecated.sql       |  16 +-
 tsl/test/sql/cagg_on_cagg_integer.sql         |  17 +
 tsl/test/sql/cagg_on_cagg_integer_dist_ht.sql |  39 ++
 tsl/test/sql/cagg_on_cagg_timestamp.sql       |  19 +
 .../sql/cagg_on_cagg_timestamp_dist_ht.sql    |  41 ++
 tsl/test/sql/cagg_on_cagg_timestamptz.sql     |  19 +
 .../sql/cagg_on_cagg_timestamptz_dist_ht.sql  |  41 ++
 tsl/test/sql/cagg_watermark.sql               |   2 +-
 tsl/test/sql/include/cagg_ddl_common.sql      |  10 -
 tsl/test/sql/include/cagg_on_cagg_common.sql  | 199 ++++++++++
 31 files changed, 2940 insertions(+), 171 deletions(-)
 create mode 100644 tsl/test/expected/cagg_on_cagg_integer.out
 create mode 100644 tsl/test/expected/cagg_on_cagg_integer_dist_ht.out
 create mode 100644 tsl/test/expected/cagg_on_cagg_timestamp.out
 create mode 100644 tsl/test/expected/cagg_on_cagg_timestamp_dist_ht.out
 create mode 100644 tsl/test/expected/cagg_on_cagg_timestamptz.out
 create mode 100644 tsl/test/expected/cagg_on_cagg_timestamptz_dist_ht.out
 create mode 100644 tsl/test/sql/cagg_on_cagg_integer.sql
 create mode 100644 tsl/test/sql/cagg_on_cagg_integer_dist_ht.sql
 create mode 100644 tsl/test/sql/cagg_on_cagg_timestamp.sql
 create mode 100644 tsl/test/sql/cagg_on_cagg_timestamp_dist_ht.sql
 create mode 100644 tsl/test/sql/cagg_on_cagg_timestamptz.sql
 create mode 100644 tsl/test/sql/cagg_on_cagg_timestamptz_dist_ht.sql
 create mode 100644 tsl/test/sql/include/cagg_on_cagg_common.sql

diff --git a/sql/pre_install/tables.sql b/sql/pre_install/tables.sql
index 94b97cb5f..fbe580cdd 100644
--- a/sql/pre_install/tables.sql
+++ b/sql/pre_install/tables.sql
@@ -203,7 +203,7 @@ CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compre
 --we could use a partial index (where osm_chunk is true). 
However, the catalog code --does not work with partial/functional indexes. So we instead have a full index here. --Another option would be to use the status field to identify a OSM chunk. However bit ---operations only work on varbit datatype and not integer datatype. +--operations only work on varbit datatype and not integer datatype. CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id); SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', ''); @@ -350,6 +350,7 @@ SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$ CREATE TABLE _timescaledb_catalog.continuous_agg ( mat_hypertable_id integer NOT NULL, raw_hypertable_id integer NOT NULL, + parent_mat_hypertable_id integer, user_view_schema name NOT NULL, user_view_name name NOT NULL, partial_view_schema name NOT NULL, @@ -364,7 +365,9 @@ CREATE TABLE _timescaledb_catalog.continuous_agg ( CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name), CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name), CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, - CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE + CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + CONSTRAINT continuous_agg_parent_mat_hypertable_id_fkey FOREIGN KEY (parent_mat_hypertable_id) + REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE ); CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id); @@ -559,7 +562,7 @@ SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg_ SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.continuous_agg_migrate_plan_step', 'step_id'), ''); CREATE TABLE _timescaledb_internal.job_errors ( - job_id integer not null, + job_id integer not null, pid integer, start_time timestamptz, finish_time timestamptz, diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 25bc52a9e..27a3bf0d4 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -220,17 +220,17 @@ CREATE TABLE _timescaledb_config.bgw_job ( config jsonb , check_schema NAME, check_name NAME, - timezone TEXT + timezone TEXT ); ALTER SEQUENCE _timescaledb_config.bgw_job_id_seq OWNED BY _timescaledb_config.bgw_job.id; CREATE INDEX bgw_job_proc_hypertable_id_idx ON _timescaledb_config.bgw_job(proc_schema,proc_name,hypertable_id); INSERT INTO _timescaledb_config.bgw_job( - id, application_name, schedule_interval, - max_runtime, max_retries, retry_period, - proc_schema, proc_name, owner, scheduled, - hypertable_id, config, check_schema, check_name, + id, application_name, schedule_interval, + max_runtime, max_retries, retry_period, + proc_schema, proc_name, owner, scheduled, + hypertable_id, config, check_schema, check_name, fixed_schedule ) SELECT id, application_name, schedule_interval, max_runtime, max_retries, retry_period, @@ -247,9 +247,9 @@ GRANT SELECT ON _timescaledb_config.bgw_job_id_seq TO PUBLIC; -- do simple CREATE for the functions with modified signatures CREATE FUNCTION @extschema@.add_continuous_aggregate_policy( 
-continuous_aggregate REGCLASS, start_offset "any", -end_offset "any", schedule_interval INTERVAL, -if_not_exists BOOL = false, +continuous_aggregate REGCLASS, start_offset "any", +end_offset "any", schedule_interval INTERVAL, +if_not_exists BOOL = false, initial_start TIMESTAMPTZ = NULL, timezone TEXT = NULL) RETURNS INTEGER @@ -257,9 +257,9 @@ AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_add' LANGUAGE C VOLATILE; CREATE FUNCTION @extschema@.add_compression_policy( - hypertable REGCLASS, compress_after "any", + hypertable REGCLASS, compress_after "any", if_not_exists BOOL = false, - schedule_interval INTERVAL = NULL, + schedule_interval INTERVAL = NULL, initial_start TIMESTAMPTZ = NULL, timezone TEXT = NULL ) @@ -399,3 +399,100 @@ BEGIN RAISE EXCEPTION 'unable to modify frozen chunk %s', TG_TABLE_NAME; END; $BODY$ SET search_path TO pg_catalog, pg_temp; + +-- +-- Rebuild the catalog table `_timescaledb_catalog.continuous_agg` +-- +DROP VIEW IF EXISTS timescaledb_information.hypertables; +DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates; +DROP PROCEDURE IF EXISTS @extschema@.cagg_migrate (REGCLASS, BOOLEAN, BOOLEAN); +DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_pre_validation (TEXT, TEXT, TEXT); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_create_plan (_timescaledb_catalog.continuous_agg, TEXT, BOOLEAN, BOOLEAN); +DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_plan_exists (INTEGER); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_plan (_timescaledb_catalog.continuous_agg); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_create_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_disable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_enable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_refresh_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_data (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_override_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_drop_old_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); + +ALTER EXTENSION timescaledb + DROP TABLE _timescaledb_catalog.continuous_agg; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + DROP CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + DROP CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey; + +CREATE TABLE _timescaledb_catalog._tmp_continuous_agg AS + SELECT + mat_hypertable_id, + raw_hypertable_id, + NULL::INTEGER AS parent_mat_hypertable_id, + 
user_view_schema, + user_view_name, + partial_view_schema, + partial_view_name, + bucket_width, + direct_view_schema, + direct_view_name, + materialized_only, + finalized + FROM + _timescaledb_catalog.continuous_agg + ORDER BY + mat_hypertable_id; + +DROP TABLE _timescaledb_catalog.continuous_agg; + +CREATE TABLE _timescaledb_catalog.continuous_agg ( + mat_hypertable_id integer NOT NULL, + raw_hypertable_id integer NOT NULL, + parent_mat_hypertable_id integer, + user_view_schema name NOT NULL, + user_view_name name NOT NULL, + partial_view_schema name NOT NULL, + partial_view_name name NOT NULL, + bucket_width bigint NOT NULL, + direct_view_schema name NOT NULL, + direct_view_name name NOT NULL, + materialized_only bool NOT NULL DEFAULT FALSE, + finalized bool NOT NULL DEFAULT TRUE, + -- table constraints + CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id), + CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name), + CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name), + CONSTRAINT continuous_agg_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + CONSTRAINT continuous_agg_raw_hypertable_id_fkey + FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + CONSTRAINT continuous_agg_parent_mat_hypertable_id_fkey + FOREIGN KEY (parent_mat_hypertable_id) + REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.continuous_agg +SELECT * FROM _timescaledb_catalog._tmp_continuous_agg; +DROP TABLE _timescaledb_catalog._tmp_continuous_agg; + +CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', ''); + +GRANT SELECT ON TABLE _timescaledb_catalog.continuous_agg TO PUBLIC; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + ADD CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey + FOREIGN KEY (materialization_id) + REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id) ON DELETE CASCADE; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + ADD CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) + REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id); + +ANALYZE _timescaledb_catalog.continuous_agg; diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql index 6221b9e6e..559820ff8 100644 --- a/sql/updates/reverse-dev.sql +++ b/sql/updates/reverse-dev.sql @@ -168,7 +168,7 @@ LANGUAGE C VOLATILE STRICT; DROP VIEW IF EXISTS timescaledb_information.jobs; DROP VIEW IF EXISTS timescaledb_information.job_stats; --- now need to rebuild the table +-- now need to rebuild the table ALTER TABLE _timescaledb_internal.bgw_job_stat DROP CONSTRAINT bgw_job_stat_job_id_fkey; ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats @@ -178,8 +178,6 @@ ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats CREATE TABLE _timescaledb_config.bgw_job_tmp AS SELECT * FROM _timescaledb_config.bgw_job; ALTER EXTENSION timescaledb DROP TABLE _timescaledb_config.bgw_job; ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_config.bgw_job_id_seq; --- ALTER TABLE _timescaledb_internal.bgw_job_stat DROP CONSTRAINT IF EXISTS 
bgw_job_stat_job_id_fkey;
--- ALTER TABLE _timescaledb_internal.bgw_policy_chunk_stats DROP CONSTRAINT IF EXISTS bgw_policy_chunk_stats_job_id_fkey;
 CREATE TABLE _timescaledb_internal.tmp_bgw_job_seq_value AS SELECT last_value, is_called FROM _timescaledb_config.bgw_job_id_seq;
 DROP TABLE _timescaledb_config.bgw_job;
@@ -202,7 +200,7 @@ CREATE TABLE _timescaledb_config.bgw_job (
   scheduled bool NOT NULL DEFAULT TRUE,
   hypertable_id integer REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE,
   config jsonb,
-  check_schema NAME, 
+  check_schema NAME,
   check_name NAME
 );
 
@@ -235,9 +233,9 @@ CREATE TABLE _timescaledb_internal.tmp_dimension_seq_value AS
 SELECT last_value, is_called FROM _timescaledb_catalog.dimension_id_seq;
 
 --drop foreign keys on dimension table
-ALTER TABLE _timescaledb_catalog.dimension_partition DROP CONSTRAINT 
+ALTER TABLE _timescaledb_catalog.dimension_partition DROP CONSTRAINT
 dimension_partition_dimension_id_fkey;
-ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT 
+ALTER TABLE _timescaledb_catalog.dimension_slice DROP CONSTRAINT
 dimension_slice_dimension_id_fkey;
 
 --drop dependent views
@@ -290,11 +288,11 @@ SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', '')
 SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), '');
 
 --add the foreign key constraints
-ALTER TABLE _timescaledb_catalog.dimension_partition ADD CONSTRAINT 
-dimension_partition_dimension_id_fkey FOREIGN KEY (dimension_id) 
-REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE; 
+ALTER TABLE _timescaledb_catalog.dimension_partition ADD CONSTRAINT
+dimension_partition_dimension_id_fkey FOREIGN KEY (dimension_id)
+REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE;
 ALTER TABLE _timescaledb_catalog.dimension_slice ADD CONSTRAINT
-dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id) 
+dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id)
 REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE;
 
 --cleanup
@@ -311,3 +309,123 @@
 DROP INDEX _timescaledb_catalog.chunk_data_node_node_name_idx;
 DROP FUNCTION @extschema@.alter_data_node;
 DROP FUNCTION _timescaledb_internal.frozen_chunk_modify_blocker;
+
+--
+-- Prevent downgrading if there are hierarchical continuous aggregates
+--
+DO
+$$
+DECLARE
+  caggs_hierarchical TEXT;
+  caggs_count INTEGER;
+BEGIN
+  SELECT
+    string_agg(format('%I.%I', user_view_schema, user_view_name), ', '),
+    count(*)
+  INTO
+    caggs_hierarchical,
+    caggs_count
+  FROM
+    _timescaledb_catalog.continuous_agg
+  WHERE
+    parent_mat_hypertable_id IS NOT NULL;
+
+  IF caggs_count > 0 THEN
+    RAISE EXCEPTION 'Downgrade is not possible because there are % hierarchical continuous aggregates: %', caggs_count, caggs_hierarchical
+    USING HINT = 'Remove the corresponding continuous aggregates manually before downgrading';
+  END IF;
+END;
+$$
+LANGUAGE 'plpgsql';
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.continuous_agg`
+--
+DROP VIEW IF EXISTS timescaledb_information.hypertables;
+DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates;
+DROP PROCEDURE IF EXISTS @extschema@.cagg_migrate (REGCLASS, BOOLEAN, BOOLEAN);
+DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_pre_validation (TEXT, TEXT, TEXT);
+DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_create_plan (_timescaledb_catalog.continuous_agg, TEXT, BOOLEAN, BOOLEAN);
+DROP FUNCTION IF EXISTS _timescaledb_internal.cagg_migrate_plan_exists (INTEGER);
+DROP PROCEDURE IF EXISTS 
_timescaledb_internal.cagg_migrate_execute_plan (_timescaledb_catalog.continuous_agg); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_create_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_disable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_enable_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_policies (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_refresh_new_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_copy_data (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_override_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); +DROP PROCEDURE IF EXISTS _timescaledb_internal.cagg_migrate_execute_drop_old_cagg (_timescaledb_catalog.continuous_agg, _timescaledb_catalog.continuous_agg_migrate_plan_step); + +ALTER EXTENSION timescaledb + DROP TABLE _timescaledb_catalog.continuous_agg; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + DROP CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + DROP CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey; + +CREATE TABLE _timescaledb_catalog._tmp_continuous_agg AS + SELECT + mat_hypertable_id, + raw_hypertable_id, + user_view_schema, + user_view_name, + partial_view_schema, + partial_view_name, + bucket_width, + direct_view_schema, + direct_view_name, + materialized_only, + finalized + FROM + _timescaledb_catalog.continuous_agg + ORDER BY + mat_hypertable_id; + +DROP TABLE _timescaledb_catalog.continuous_agg; + +CREATE TABLE _timescaledb_catalog.continuous_agg ( + mat_hypertable_id integer NOT NULL, + raw_hypertable_id integer NOT NULL, + user_view_schema name NOT NULL, + user_view_name name NOT NULL, + partial_view_schema name NOT NULL, + partial_view_name name NOT NULL, + bucket_width bigint NOT NULL, + direct_view_schema name NOT NULL, + direct_view_name name NOT NULL, + materialized_only bool NOT NULL DEFAULT FALSE, + finalized bool NOT NULL DEFAULT TRUE, + -- table constraints + CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id), + CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name), + CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name), + CONSTRAINT continuous_agg_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE, + CONSTRAINT continuous_agg_raw_hypertable_id_fkey + FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE +); + +INSERT INTO _timescaledb_catalog.continuous_agg +SELECT * FROM _timescaledb_catalog._tmp_continuous_agg; +DROP TABLE _timescaledb_catalog._tmp_continuous_agg; + 
+CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id); + +SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', ''); + +GRANT SELECT ON TABLE _timescaledb_catalog.continuous_agg TO PUBLIC; + +ALTER TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log + ADD CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey + FOREIGN KEY (materialization_id) + REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id) ON DELETE CASCADE; + +ALTER TABLE _timescaledb_catalog.continuous_agg_migrate_plan + ADD CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey + FOREIGN KEY (mat_hypertable_id) + REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id); + +ANALYZE _timescaledb_catalog.continuous_agg; diff --git a/src/ts_catalog/catalog.c b/src/ts_catalog/catalog.c index 6b57c03a7..1e009012c 100644 --- a/src/ts_catalog/catalog.c +++ b/src/ts_catalog/catalog.c @@ -81,8 +81,8 @@ static const TableInfoDef catalog_table_names[_MAX_CATALOG_TABLES + 1] = { .table_name = BGW_POLICY_CHUNK_STATS_TABLE_NAME, }, [CONTINUOUS_AGG] = { - .schema_name = CATALOG_SCHEMA_NAME, - .table_name = CONTINUOUS_AGG_TABLE_NAME, + .schema_name = CATALOG_SCHEMA_NAME, + .table_name = CONTINUOUS_AGG_TABLE_NAME, }, [CONTINUOUS_AGGS_HYPERTABLE_INVALIDATION_LOG] = { .schema_name = CATALOG_SCHEMA_NAME, diff --git a/src/ts_catalog/catalog.h b/src/ts_catalog/catalog.h index ad1d3c146..2c4796fef 100644 --- a/src/ts_catalog/catalog.h +++ b/src/ts_catalog/catalog.h @@ -938,6 +938,7 @@ typedef enum Anum_continuous_agg { Anum_continuous_agg_mat_hypertable_id = 1, Anum_continuous_agg_raw_hypertable_id, + Anum_continuous_agg_parent_mat_hypertable_id, Anum_continuous_agg_user_view_schema, Anum_continuous_agg_user_view_name, Anum_continuous_agg_partial_view_schema, @@ -956,6 +957,7 @@ typedef struct FormData_continuous_agg { int32 mat_hypertable_id; int32 raw_hypertable_id; + int32 parent_mat_hypertable_id; /* Nested Continuous Aggregate */ NameData user_view_schema; NameData user_view_name; NameData partial_view_schema; diff --git a/src/ts_catalog/continuous_agg.c b/src/ts_catalog/continuous_agg.c index 34ef9f95c..28b6ab2d0 100644 --- a/src/ts_catalog/continuous_agg.c +++ b/src/ts_catalog/continuous_agg.c @@ -288,6 +288,14 @@ continuous_agg_formdata_make_tuple(const FormData_continuous_agg *fd, TupleDesc values[AttrNumberGetAttrOffset(Anum_continuous_agg_raw_hypertable_id)] = Int32GetDatum(fd->raw_hypertable_id); + if (fd->parent_mat_hypertable_id == INVALID_HYPERTABLE_ID) + nulls[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)] = true; + else + { + values[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)] = + Int32GetDatum(fd->parent_mat_hypertable_id); + } + values[AttrNumberGetAttrOffset(Anum_continuous_agg_user_view_schema)] = NameGetDatum(&fd->user_view_schema); values[AttrNumberGetAttrOffset(Anum_continuous_agg_user_view_name)] = @@ -329,6 +337,12 @@ continuous_agg_formdata_fill(FormData_continuous_agg *fd, const TupleInfo *ti) fd->raw_hypertable_id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_continuous_agg_raw_hypertable_id)]); + if (nulls[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)]) + fd->parent_mat_hypertable_id = INVALID_HYPERTABLE_ID; + else + fd->parent_mat_hypertable_id = DatumGetInt32( + values[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)]); + memcpy(&fd->user_view_schema, 
DatumGetName(values[AttrNumberGetAttrOffset(Anum_continuous_agg_user_view_schema)]), NAMEDATALEN); diff --git a/tsl/src/continuous_aggs/create.c b/tsl/src/continuous_aggs/create.c index 6c24c2115..1149a0d04 100644 --- a/tsl/src/continuous_aggs/create.c +++ b/tsl/src/continuous_aggs/create.c @@ -151,10 +151,11 @@ typedef struct FinalizeQueryInfo typedef struct CAggTimebucketInfo { - int32 htid; /* hypertable id */ - Oid htoid; /* hypertable oid */ - AttrNumber htpartcolno; /* primary partitioning column of raw hypertable */ - /* This should also be the column used by time_bucket */ + int32 htid; /* hypertable id */ + int32 parent_mat_hypertable_id; /* parent materialization hypertable id */ + Oid htoid; /* hypertable oid */ + AttrNumber htpartcolno; /* primary partitioning column of raw hypertable */ + /* This should also be the column used by time_bucket */ Oid htpartcoltype; int64 htpartcol_interval_len; /* interval length setting for primary partitioning column */ int64 bucket_width; /* bucket_width of time_bucket, stores BUCKET_WIDHT_VARIABLE for @@ -206,7 +207,8 @@ static Query *mattablecolumninfo_get_partial_select_query(MatTableColumnInfo *ma static void caggtimebucketinfo_init(CAggTimebucketInfo *src, int32 hypertable_id, Oid hypertable_oid, AttrNumber hypertable_partition_colno, Oid hypertable_partition_coltype, - int64 hypertable_partition_col_interval); + int64 hypertable_partition_col_interval, + int32 parent_mat_hypertable_id); static void caggtimebucket_validate(CAggTimebucketInfo *tbinfo, List *groupClause, List *targetList); static void finalizequery_init(FinalizeQueryInfo *inp, Query *orig_query, @@ -227,7 +229,8 @@ static void create_cagg_catalog_entry(int32 matht_id, int32 rawht_id, const char *user_schema, const char *user_view, const char *partial_schema, const char *partial_view, int64 bucket_width, bool materialized_only, - const char *direct_schema, const char *direct_view, const bool finalized) + const char *direct_schema, const char *direct_view, const bool finalized, + const int32 parent_mat_hypertable_id) { Catalog *catalog = ts_catalog_get(); Relation rel; @@ -249,6 +252,15 @@ create_cagg_catalog_entry(int32 matht_id, int32 rawht_id, const char *user_schem memset(values, 0, sizeof(values)); values[AttrNumberGetAttrOffset(Anum_continuous_agg_mat_hypertable_id)] = matht_id; values[AttrNumberGetAttrOffset(Anum_continuous_agg_raw_hypertable_id)] = rawht_id; + + if (parent_mat_hypertable_id == INVALID_HYPERTABLE_ID) + nulls[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)] = true; + else + { + values[AttrNumberGetAttrOffset(Anum_continuous_agg_parent_mat_hypertable_id)] = + parent_mat_hypertable_id; + } + values[AttrNumberGetAttrOffset(Anum_continuous_agg_user_view_schema)] = NameGetDatum(&user_schnm); values[AttrNumberGetAttrOffset(Anum_continuous_agg_user_view_name)] = @@ -678,9 +690,10 @@ create_view_for_query(Query *selquery, RangeVar *viewrel) static void caggtimebucketinfo_init(CAggTimebucketInfo *src, int32 hypertable_id, Oid hypertable_oid, AttrNumber hypertable_partition_colno, Oid hypertable_partition_coltype, - int64 hypertable_partition_col_interval) + int64 hypertable_partition_col_interval, int32 parent_mat_hypertable_id) { src->htid = hypertable_id; + src->parent_mat_hypertable_id = parent_mat_hypertable_id; src->htoid = hypertable_oid; src->htpartcolno = hypertable_partition_colno; src->htpartcoltype = hypertable_partition_coltype; @@ -957,7 +970,7 @@ cagg_agg_validate(Node *node, void *context) * added. 
*/ static bool -cagg_query_supported(Query *query, StringInfo hint, StringInfo detail, const bool finalized) +cagg_query_supported(const Query *query, StringInfo hint, StringInfo detail, const bool finalized) { /* * For now deprecate partial aggregates on release builds only. @@ -1079,7 +1092,7 @@ cagg_query_supported(Query *query, StringInfo hint, StringInfo detail, const boo } static CAggTimebucketInfo -cagg_validate_query(Query *query, bool finalized) +cagg_validate_query(const Query *query, const bool finalized) { CAggTimebucketInfo ret; Cache *hcache; @@ -1117,38 +1130,71 @@ cagg_validate_query(Query *query, bool finalized) /* check if we have a hypertable in the FROM clause */ rtref = linitial_node(RangeTblRef, query->jointree->fromlist); rte = list_nth(query->rtable, rtref->rtindex - 1); + /* FROM only sets rte->inh to false */ - if (rte->relkind != RELKIND_RELATION || rte->tablesample || rte->inh == false) + if ((rte->relkind != RELKIND_RELATION && rte->relkind != RELKIND_VIEW) || rte->tablesample || + rte->inh == false) { ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("invalid continuous aggregate view"))); } - if (rte->relkind == RELKIND_RELATION) + + if (rte->relkind == RELKIND_RELATION || rte->relkind == RELKIND_VIEW) { const Dimension *part_dimension = NULL; + int32 parent_mat_hypertable_id = INVALID_HYPERTABLE_ID; - ht = ts_hypertable_cache_get_cache_and_entry(rte->relid, CACHE_FLAG_NONE, &hcache); + if (rte->relkind == RELKIND_RELATION) + ht = ts_hypertable_cache_get_cache_and_entry(rte->relid, CACHE_FLAG_NONE, &hcache); + else + { + const ContinuousAgg *cagg; + + cagg = ts_continuous_agg_find_by_relid(rte->relid); + + if (!ContinuousAggIsFinalized(cagg)) + { + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("old format of continuous aggregate is not supported"), + errhint("Run \"CALL cagg_migrate('%s.%s');\" to migrate to the new " + "format.", + NameStr(cagg->data.user_view_schema), + NameStr(cagg->data.user_view_name)))); + } + + parent_mat_hypertable_id = cagg->data.mat_hypertable_id; + hcache = ts_hypertable_cache_pin(); + ht = ts_hypertable_cache_get_entry_by_id(hcache, cagg->data.mat_hypertable_id); + } if (TS_HYPERTABLE_IS_INTERNAL_COMPRESSION_TABLE(ht)) ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("hypertable is an internal compressed hypertable"))); - /* there can only be one continuous aggregate per table */ - switch (ts_continuous_agg_hypertable_status(ht->fd.id)) + if (rte->relkind == RELKIND_RELATION) { - case HypertableIsMaterialization: - case HypertableIsMaterializationAndRaw: + ContinuousAggHypertableStatus status = ts_continuous_agg_hypertable_status(ht->fd.id); + + /* prevent create a CAGG over an existing materialization hypertable */ + if (status == HypertableIsMaterialization || + status == HypertableIsMaterializationAndRaw) + { + const ContinuousAgg *cagg = ts_continuous_agg_find_by_mat_hypertable_id(ht->fd.id); + Assert(cagg != NULL); + ereport(ERROR, (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), - errmsg("hypertable is a continuous aggregate materialization table"))); - case HypertableIsRawTable: - break; - case HypertableIsNotContinuousAgg: - break; - default: - Assert(false); + errmsg("hypertable is a continuous aggregate materialization table"), + errdetail("Materialization hypertable \"%s.%s\".", + NameStr(ht->fd.schema_name), + NameStr(ht->fd.table_name)), + errhint("Do you want to use continuous aggregate \"%s.%s\" instead?", + NameStr(cagg->data.user_view_schema), + 
NameStr(cagg->data.user_view_name)))); + } } /* get primary partitioning column information */ @@ -1164,7 +1210,8 @@ cagg_validate_query(Query *query, bool finalized) errmsg("custom partitioning functions not supported" " with continuous aggregates"))); - if (IS_INTEGER_TYPE(ts_dimension_get_partition_type(part_dimension))) + if (IS_INTEGER_TYPE(ts_dimension_get_partition_type(part_dimension)) && + rte->relkind == RELKIND_RELATION) { const char *funcschema = NameStr(part_dimension->fd.integer_now_func_schema); const char *funcname = NameStr(part_dimension->fd.integer_now_func); @@ -1184,7 +1231,8 @@ cagg_validate_query(Query *query, bool finalized) ht->main_table_relid, part_dimension->column_attno, part_dimension->fd.column_type, - part_dimension->fd.interval_length); + part_dimension->fd.interval_length, + parent_mat_hypertable_id); ts_cache_release(hcache); } @@ -2268,7 +2316,8 @@ cagg_create(const CreateTableAsStmt *create_stmt, ViewStmt *stmt, Query *panquer materialized_only, dum_rel->schemaname, dum_rel->relname, - finalized); + finalized, + origquery_ht->parent_mat_hypertable_id); if (origquery_ht->bucket_width == BUCKET_WIDTH_VARIABLE) { diff --git a/tsl/test/expected/cagg_bgw.out b/tsl/test/expected/cagg_bgw.out index 986277d7c..0d566b827 100644 --- a/tsl/test/expected/cagg_bgw.out +++ b/tsl/test/expected/cagg_bgw.out @@ -68,8 +68,8 @@ SELECT * FROM timescaledb_information.job_stats; (0 rows) SELECT * FROM _timescaledb_catalog.continuous_agg; - mat_hypertable_id | raw_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized --------------------+-------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- (0 rows) -- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes diff --git a/tsl/test/expected/cagg_bgw_dist_ht.out b/tsl/test/expected/cagg_bgw_dist_ht.out index 986ccf403..87aff2438 100644 --- a/tsl/test/expected/cagg_bgw_dist_ht.out +++ b/tsl/test/expected/cagg_bgw_dist_ht.out @@ -105,8 +105,8 @@ SELECT * FROM timescaledb_information.job_stats; (0 rows) SELECT * FROM _timescaledb_catalog.continuous_agg; - mat_hypertable_id | raw_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized --------------------+-------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized 
+-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- (0 rows) -- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes diff --git a/tsl/test/expected/cagg_ddl.out b/tsl/test/expected/cagg_ddl.out index 10ada1a8d..7cbbaf64b 100644 --- a/tsl/test/expected/cagg_ddl.out +++ b/tsl/test/expected/cagg_ddl.out @@ -474,16 +474,6 @@ AS SELECT time_bucket('6', time_bucket), COUNT("count") FROM new_name GROUP BY 1 WITH NO DATA; psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table --- cannot create a continuous aggregate on a continuous aggregate view -CREATE MATERIALIZED VIEW drop_chunks_view_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), SUM(count) - FROM drop_chunks_view - GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:336: ERROR: invalid continuous aggregate view \set ON_ERROR_STOP 1 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); \if :IS_DISTRIBUTED @@ -525,10 +515,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5; --test materialization of invalidation before drop DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:368: NOTICE: table "drop_chunks_table" does not exist, skipping +psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:369: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:369: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_nid @@ -724,7 +714,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -psql:include/cagg_ddl_common.sql:464: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk +psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk drop_chunks ------------------------------------------ _timescaledb_internal._hyper_11_17_chunk @@ -745,7 +735,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ \set ON_ERROR_STOP 0 \set VERBOSITY default SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:476: ERROR: operation not supported on materialized hypertable +psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. HINT: Try the operation on the continuous aggregate instead. 
\set VERBOSITY terse @@ -998,9 +988,9 @@ SELECT user_view, (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:654: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:655: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1111,39 +1101,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:760: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1::smallint + 2::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:767: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int4 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:775: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1 + 2, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:782: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int8 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:790: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1 + 2, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:797: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; \set ON_ERROR_STOP 0 -- non-immutable expresions should be rejected @@ -1151,17 +1141,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH 
(timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:816: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function \set ON_ERROR_STOP 1 -- Test various ALTER MATERIALIZED VIEW statements. SET ROLE :ROLE_DEFAULT_PERM_USER; @@ -1188,7 +1178,7 @@ tablespace | -- we test that the normal checks are done when changing the owner. \set ON_ERROR_STOP 0 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:836: ERROR: must be member of role "test_role_1" +psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" \set ON_ERROR_STOP 1 -- Superuser can always change owner SET ROLE :ROLE_CLUSTER_SUPERUSER; @@ -1252,9 +1242,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1262,9 +1252,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:896: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1272,16 +1262,16 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:904: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" CREATE VIEW see_cagg as select * from conditionsnm_4; \set ON_ERROR_STOP 0 DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:908: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it \set ON_ERROR_STOP 1 -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; 
-psql:include/cagg_ddl_common.sql:912: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:912: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1324,7 +1314,7 @@ WHERE user_view_name = 'telemetry_1s'; \gset DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:951: NOTICE: drop cascades to 4 other objects +psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; count ------- @@ -1408,7 +1398,7 @@ WHERE user_view_name = 'cagg2'; \gset DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:1008: NOTICE: drop cascades to 7 other objects +psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; count ------- @@ -1578,10 +1568,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); \set ON_ERROR_STOP 0 -- unique indexes are not supported CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1094: ERROR: continuous aggregates do not support UNIQUE indexes +psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes -- concurrently index creation not supported CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1096: ERROR: hypertables do not support concurrent index creation +psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation \set ON_ERROR_STOP 1 CREATE INDEX index_avg ON conditions_daily (avg); CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); @@ -1618,14 +1608,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous) AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1118: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous) AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt + cnt2, bucket, search_query HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1126: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); --TEST test with multiple settings on continuous aggregates -- -- test for materialized_only + compress combinations (real time aggs enabled initially) @@ -1642,7 +1632,7 @@ SELECT create_hypertable('test_setting', 'time'); \endif CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; 
-psql:include/cagg_ddl_common.sql:1140: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date INSERT INTO test_setting SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); @@ -1724,10 +1714,10 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1184: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +psql:include/cagg_ddl_common.sql:1174: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1187: NOTICE: refreshing continuous aggregate "test_setting_cagg" +psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); SELECT count(*) from test_setting_cagg ORDER BY 1; count @@ -1855,7 +1845,7 @@ SELECT time_bucket ('1 day', time) AS bucket, amount + sum(fiat_value) FROM transactions GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1277: NOTICE: refreshing continuous aggregate "cashflows" +psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" SELECT h.table_name AS "MAT_TABLE_NAME", partial_view_name AS "PART_VIEW_NAME", direct_view_name AS "DIRECT_VIEW_NAME" diff --git a/tsl/test/expected/cagg_ddl_dist_ht.out b/tsl/test/expected/cagg_ddl_dist_ht.out index 6b4df4b8e..4496936ad 100644 --- a/tsl/test/expected/cagg_ddl_dist_ht.out +++ b/tsl/test/expected/cagg_ddl_dist_ht.out @@ -511,16 +511,6 @@ AS SELECT time_bucket('6', time_bucket), COUNT("count") FROM new_name GROUP BY 1 WITH NO DATA; psql:include/cagg_ddl_common.sql:326: ERROR: hypertable is a continuous aggregate materialization table --- cannot create a continuous aggregate on a continuous aggregate view -CREATE MATERIALIZED VIEW drop_chunks_view_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), SUM(count) - FROM drop_chunks_view - GROUP BY 1 WITH NO DATA; -psql:include/cagg_ddl_common.sql:336: ERROR: invalid continuous aggregate view \set ON_ERROR_STOP 1 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); \if :IS_DISTRIBUTED @@ -562,10 +552,10 @@ SELECT * FROM cagg_expr ORDER BY time LIMIT 5; --test materialization of invalidation before drop DROP TABLE IF EXISTS drop_chunks_table CASCADE; -psql:include/cagg_ddl_common.sql:368: NOTICE: table "drop_chunks_table" does not exist, skipping +psql:include/cagg_ddl_common.sql:358: NOTICE: table "drop_chunks_table" does not exist, skipping DROP TABLE IF EXISTS drop_chunks_table_u CASCADE; -psql:include/cagg_ddl_common.sql:369: NOTICE: drop cascades to 2 other objects -psql:include/cagg_ddl_common.sql:369: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk 
+psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to 2 other objects +psql:include/cagg_ddl_common.sql:359: NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER); \if :IS_DISTRIBUTED SELECT hypertable_id AS drop_chunks_table_nid @@ -761,7 +751,7 @@ SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen, SELECT drop_chunks('drop_chunks_view', newer_than => -20, verbose => true); -psql:include/cagg_ddl_common.sql:464: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk +psql:include/cagg_ddl_common.sql:454: INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk drop_chunks ------------------------------------------ _timescaledb_internal._hyper_11_17_chunk @@ -782,7 +772,7 @@ WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integ \set ON_ERROR_STOP 0 \set VERBOSITY default SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60); -psql:include/cagg_ddl_common.sql:476: ERROR: operation not supported on materialized hypertable +psql:include/cagg_ddl_common.sql:466: ERROR: operation not supported on materialized hypertable DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable. HINT: Try the operation on the continuous aggregate instead. \set VERBOSITY terse @@ -1041,9 +1031,9 @@ SELECT user_view, (2 rows) DROP MATERIALIZED VIEW whatever_view_1; -psql:include/cagg_ddl_common.sql:654: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk +psql:include/cagg_ddl_common.sql:644: NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk DROP MATERIALIZED VIEW whatever_view_2; -psql:include/cagg_ddl_common.sql:655: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk +psql:include/cagg_ddl_common.sql:645: NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk -- test bucket width expressions on integer hypertables CREATE TABLE metrics_int2 ( time int2 NOT NULL, @@ -1154,39 +1144,39 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:760: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:750: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1::smallint + 2::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:767: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:757: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int4 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:775: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:765: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1 + 2, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:782: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:772: NOTICE: continuous aggregate "width_expr" is 
already up-to-date DROP MATERIALIZED VIEW width_expr; -- width expression for int8 hypertables CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:790: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:780: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(1 + 2, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:797: NOTICE: continuous aggregate "width_expr" is already up-to-date +psql:include/cagg_ddl_common.sql:787: NOTICE: continuous aggregate "width_expr" is already up-to-date DROP MATERIALIZED VIEW width_expr; \set ON_ERROR_STOP 0 -- non-immutable expresions should be rejected @@ -1194,17 +1184,17 @@ CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::smallint, time) FROM metrics_int2 GROUP BY 1; -psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:796: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int4 GROUP BY 1; -psql:include/cagg_ddl_common.sql:811: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:801: ERROR: only immutable expressions allowed in time bucket function CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous) AS SELECT time_bucket(extract(year FROM now())::int, time) FROM metrics_int8 GROUP BY 1; -psql:include/cagg_ddl_common.sql:816: ERROR: only immutable expressions allowed in time bucket function +psql:include/cagg_ddl_common.sql:806: ERROR: only immutable expressions allowed in time bucket function \set ON_ERROR_STOP 1 -- Test various ALTER MATERIALIZED VIEW statements. SET ROLE :ROLE_DEFAULT_PERM_USER; @@ -1231,7 +1221,7 @@ tablespace | -- we test that the normal checks are done when changing the owner. 
\set ON_ERROR_STOP 0 ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1; -psql:include/cagg_ddl_common.sql:836: ERROR: must be member of role "test_role_1" +psql:include/cagg_ddl_common.sql:826: ERROR: must be member of role "test_role_1" \set ON_ERROR_STOP 1 -- Superuser can always change owner SET ROLE :ROLE_CLUSTER_SUPERUSER; @@ -1295,9 +1285,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:874: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4; -psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk +psql:include/cagg_ddl_common.sql:876: NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk -- Case 2: DROP CASCADE should have similar behaviour as DROP CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1305,9 +1295,9 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:884: NOTICE: refreshing continuous aggregate "conditionsnm_4" DROP materialized view conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:896: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk +psql:include/cagg_ddl_common.sql:886: NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk -- Case 3: require CASCADE in case of dependent object CREATE MATERIALIZED VIEW conditionsnm_4 WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE) @@ -1315,16 +1305,16 @@ AS SELECT time_bucket(7, time_int) as bucket, SUM(value), COUNT(value) FROM conditionsnm GROUP BY bucket WITH DATA; -psql:include/cagg_ddl_common.sql:904: NOTICE: refreshing continuous aggregate "conditionsnm_4" +psql:include/cagg_ddl_common.sql:894: NOTICE: refreshing continuous aggregate "conditionsnm_4" CREATE VIEW see_cagg as select * from conditionsnm_4; \set ON_ERROR_STOP 0 DROP MATERIALIZED VIEW conditionsnm_4; -psql:include/cagg_ddl_common.sql:908: ERROR: cannot drop view conditionsnm_4 because other objects depend on it +psql:include/cagg_ddl_common.sql:898: ERROR: cannot drop view conditionsnm_4 because other objects depend on it \set ON_ERROR_STOP 1 -- Case 4: DROP CASCADE with dependency DROP MATERIALIZED VIEW conditionsnm_4 CASCADE; -psql:include/cagg_ddl_common.sql:912: NOTICE: drop cascades to view see_cagg -psql:include/cagg_ddl_common.sql:912: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to view see_cagg +psql:include/cagg_ddl_common.sql:902: NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk -- Test DROP SCHEMA CASCADE with continuous aggregates -- -- Issue: #2350 @@ -1367,7 +1357,7 @@ WHERE user_view_name = 'telemetry_1s'; \gset DROP SCHEMA test_schema CASCADE; -psql:include/cagg_ddl_common.sql:951: NOTICE: drop cascades to 4 other objects +psql:include/cagg_ddl_common.sql:941: NOTICE: drop cascades to 4 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME'; count ------- @@ -1451,7 +1441,7 @@ WHERE user_view_name = 'cagg2'; \gset DROP SCHEMA test_schema CASCADE; 
-psql:include/cagg_ddl_common.sql:1008: NOTICE: drop cascades to 7 other objects +psql:include/cagg_ddl_common.sql:998: NOTICE: drop cascades to 7 other objects SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1'; count ------- @@ -1621,10 +1611,10 @@ CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL); \set ON_ERROR_STOP 0 -- unique indexes are not supported CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location); -psql:include/cagg_ddl_common.sql:1094: ERROR: continuous aggregates do not support UNIQUE indexes +psql:include/cagg_ddl_common.sql:1084: ERROR: continuous aggregates do not support UNIQUE indexes -- concurrently index creation not supported CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg); -psql:include/cagg_ddl_common.sql:1096: ERROR: hypertables do not support concurrent index creation +psql:include/cagg_ddl_common.sql:1086: ERROR: hypertables do not support concurrent index creation \set ON_ERROR_STOP 1 CREATE INDEX index_avg ON conditions_daily (avg); CREATE INDEX index_avg_only ON ONLY conditions_daily (avg); @@ -1661,14 +1651,14 @@ CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous) AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query; -psql:include/cagg_ddl_common.sql:1118: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date +psql:include/cagg_ddl_common.sql:1108: NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true'); CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous) AS SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket FROM i3696 GROUP BY cnt + cnt2, bucket, search_query HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10; -psql:include/cagg_ddl_common.sql:1126: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date +psql:include/cagg_ddl_common.sql:1116: NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true'); --TEST test with multiple settings on continuous aggregates -- -- test for materialized_only + compress combinations (real time aggs enabled initially) @@ -1685,7 +1675,7 @@ SELECT create_hypertable('test_setting', 'time'); \endif CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1140: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date +psql:include/cagg_ddl_common.sql:1130: NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date INSERT INTO test_setting SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0; CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); @@ -1767,10 +1757,10 @@ DELETE FROM test_setting WHERE val = 20; --TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially -- -- test for materialized_only + compress combinations (real time aggs enabled initially) DROP MATERIALIZED VIEW test_setting_cagg; -psql:include/cagg_ddl_common.sql:1184: NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk +psql:include/cagg_ddl_common.sql:1174: 
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true) AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1; -psql:include/cagg_ddl_common.sql:1187: NOTICE: refreshing continuous aggregate "test_setting_cagg" +psql:include/cagg_ddl_common.sql:1177: NOTICE: refreshing continuous aggregate "test_setting_cagg" CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz); SELECT count(*) from test_setting_cagg ORDER BY 1; count @@ -1898,7 +1888,7 @@ SELECT time_bucket ('1 day', time) AS bucket, amount + sum(fiat_value) FROM transactions GROUP BY bucket, amount; -psql:include/cagg_ddl_common.sql:1277: NOTICE: refreshing continuous aggregate "cashflows" +psql:include/cagg_ddl_common.sql:1267: NOTICE: refreshing continuous aggregate "cashflows" SELECT h.table_name AS "MAT_TABLE_NAME", partial_view_name AS "PART_VIEW_NAME", direct_view_name AS "DIRECT_VIEW_NAME" diff --git a/tsl/test/expected/cagg_errors_deprecated.out b/tsl/test/expected/cagg_errors_deprecated.out index d2d3fc829..e786880db 100644 --- a/tsl/test/expected/cagg_errors_deprecated.out +++ b/tsl/test/expected/cagg_errors_deprecated.out @@ -368,7 +368,25 @@ Select sum( b), min(c) from rowsec_tab group by time_bucket('1', a) WITH NO DATA; ERROR: cannot create continuous aggregate on hypertable with row security +-- cagg on cagg not allowed +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.finalized = false) +AS +SELECT time_bucket('1 day', timec) AS bucket + FROM conditions +GROUP BY time_bucket('1 day', timec); +NOTICE: continuous aggregate "mat_m1" is already up-to-date +CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous) +AS +SELECT time_bucket('1 week', bucket) AS bucket + FROM mat_m1 +GROUP BY time_bucket('1 week', bucket); +ERROR: old format of continuous aggregate is not supported +HINT: Run "CALL cagg_migrate('public.mat_m1');" to migrate to the new format. 
drop table conditions cascade; +NOTICE: drop cascades to 3 other objects +DETAIL: drop cascades to view mat_m1 +drop cascades to view _timescaledb_internal._partial_view_3 +drop cascades to view _timescaledb_internal._direct_view_3 --negative tests for WITH options CREATE TABLE conditions ( timec TIMESTAMPTZ NOT NULL, @@ -493,9 +511,9 @@ CREATE FUNCTION text_part_func(TEXT) RETURNS BIGINT CREATE TABLE text_time(time TEXT); SELECT create_hypertable('text_time', 'time', chunk_time_interval => 10, time_partitioning_func => 'text_part_func'); NOTICE: adding not-null constraint to column "time" - create_hypertable ------------------------- - (9,public,text_time,t) + create_hypertable +------------------------- + (10,public,text_time,t) (1 row) \set VERBOSITY default @@ -524,7 +542,7 @@ CREATE TABLE measurements (time TIMESTAMPTZ NOT NULL, device INT, value FLOAT); SELECT create_hypertable('measurements', 'time'); create_hypertable ---------------------------- - (10,public,measurements,t) + (11,public,measurements,t) (1 row) INSERT INTO measurements VALUES ('2019-03-04 13:30', 1, 1.3); @@ -578,8 +596,8 @@ owner | default_perm_user scheduled | t fixed_schedule | f initial_start | -hypertable_id | 11 -config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 11} +hypertable_id | 12 +config | {"end_offset": null, "start_offset": null, "mat_hypertable_id": 12} check_schema | _timescaledb_internal check_name | policy_refresh_continuous_aggregate_check timezone | @@ -607,7 +625,7 @@ create table i2980(time timestamptz not null); select create_hypertable('i2980','time'); create_hypertable --------------------- - (12,public,i2980,t) + (13,public,i2980,t) (1 row) create materialized view i2980_cagg with (timescaledb.continuous, timescaledb.finalized = false) AS SELECT time_bucket('1h',time), avg(7) FROM i2980 GROUP BY 1; @@ -638,7 +656,7 @@ call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL); SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch; compress_chunk ----------------------------------------- - _timescaledb_internal._hyper_14_3_chunk + _timescaledb_internal._hyper_15_3_chunk (1 row) ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false'); @@ -674,7 +692,7 @@ FROM timescaledb_information.continuous_aggregates WHERE view_name = 'i2980_cagg2' \gset SELECT add_compression_policy( :'MAT_TABLE_NAME', 13::integer); -ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_14" +ERROR: cannot add compression policy to materialized hypertable "_materialized_hypertable_15" --TEST compressing cagg chunks without enabling compression SELECT count(*) FROM (select decompress_chunk(ch) FROM show_chunks('i2980_cagg2') ch ) q; count diff --git a/tsl/test/expected/cagg_on_cagg_integer.out b/tsl/test/expected/cagg_on_cagg_integer.out new file mode 100644 index 000000000..044945ec6 --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_integer.out @@ -0,0 +1,335 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION FALSE +\set TIME_DIMENSION_DATATYPE INTEGER +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_1 +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_5 +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_10 +\set BUCKET_WIDTH_1ST 'INTEGER \'1\'' +\set BUCKET_WIDTH_2TH 'INTEGER \'5\'' +\set BUCKET_WIDTH_3TH 'INTEGER \'10\'' +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + table_name +------------ + conditions +(1 row) + + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + set_integer_now_func +---------------------- + +(1 row) + + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + 
+SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 + 10 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 + 10 | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET 
(timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 17 + 5 | 20 + 10 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 37 + 10 | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_1 because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_5 because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_1" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_5" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_10" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_5" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL 
ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_1" does not exist at character 15 +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/cagg_on_cagg_integer_dist_ht.out b/tsl/test/expected/cagg_on_cagg_integer_dist_ht.out new file mode 100644 index 000000000..e2fcef807 --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_integer_dist_ht.out @@ -0,0 +1,372 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + node_name | host | port | database | node_created | database_created | extension_created +-----------------------------------+-----------+-------+-----------------------------------+--------------+------------------+------------------- + db_cagg_on_cagg_integer_dist_ht_1 | localhost | 55432 | db_cagg_on_cagg_integer_dist_ht_1 | t | t | t + db_cagg_on_cagg_integer_dist_ht_2 | localhost | 55432 | db_cagg_on_cagg_integer_dist_ht_2 | t | t | t + db_cagg_on_cagg_integer_dist_ht_3 | localhost | 55432 | db_cagg_on_cagg_integer_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION FALSE +\set TIME_DIMENSION_DATATYPE INTEGER +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_1 +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_5 +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_10 +\set BUCKET_WIDTH_1ST 'INTEGER \'1\'' +\set BUCKET_WIDTH_2TH 'INTEGER \'5\'' +\set BUCKET_WIDTH_3TH 'INTEGER \'10\'' +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
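-- Illustration only (assumed psql expansion, not executed as written): with the
-- variables set above, the templated statements in cagg_on_cagg_common.sql below
-- expand to plain SQL roughly like this for the first two aggregation levels of
-- the distributed integer hypertable:
CREATE MATERIALIZED VIEW conditions_summary_1_1
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
SELECT
    time_bucket(INTEGER '1', "time") AS bucket,
    SUM(temperature) AS temperature
FROM conditions
GROUP BY 1
WITH NO DATA;

CREATE MATERIALIZED VIEW conditions_summary_2_5
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
SELECT
    time_bucket(INTEGER '5', "bucket") AS bucket,
    SUM(temperature) AS temperature
FROM conditions_summary_1_1
GROUP BY 1
WITH NO DATA;
-- The second level reads from the first-level continuous aggregate rather than
-- from the raw hypertable, which is what makes the aggregate hierarchical.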
+\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + table_name +------------ + conditions +(1 row) + + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + set_integer_now_func +---------------------- + +(1 row) + + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + 
bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 5 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 15 + 5 | 20 + 10 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 35 + 10 | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 17 + 5 | 20 + 10 | 2 +(3 rows) + +SELECT * FROM 
:CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 0 | 37 + 10 | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_1 because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_5 because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_1" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_5" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_10" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_5" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- + 1 | 10 + 2 | 7 + 5 | 20 + 10 | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_1" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/expected/cagg_on_cagg_timestamp.out b/tsl/test/expected/cagg_on_cagg_timestamp.out new file mode 100644 index 000000000..ab5611949 --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_timestamp.out @@ -0,0 +1,332 @@ +-- This file and its contents are licensed under the Timescale License. 
+-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMP +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' +SET timezone TO 'UTC'; +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); +psql:include/cagg_on_cagg_common.sql:27: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + table_name +------------ + conditions +(1 row) + + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just 
for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET 
(timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 + Mon Jan 03 00:00:00 2022 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 + Mon Jan 03 00:00:00 2022 | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 17 + Sun Jan 02 00:00:00 2022 | 20 + Mon Jan 03 00:00:00 2022 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 37 + Mon Jan 03 00:00:00 2022 | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_hourly because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_daily because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_hourly" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_daily" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_weekly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; 
+CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_daily" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_hourly" does not exist at character 15 +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/cagg_on_cagg_timestamp_dist_ht.out b/tsl/test/expected/cagg_on_cagg_timestamp_dist_ht.out new file mode 100644 index 000000000..00143f4ee --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_timestamp_dist_ht.out @@ -0,0 +1,369 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + node_name | host | port | database | node_created | database_created | extension_created +-------------------------------------+-----------+-------+-------------------------------------+--------------+------------------+------------------- + db_cagg_on_cagg_timestamp_dist_ht_1 | localhost | 55432 | db_cagg_on_cagg_timestamp_dist_ht_1 | t | t | t + db_cagg_on_cagg_timestamp_dist_ht_2 | localhost | 55432 | db_cagg_on_cagg_timestamp_dist_ht_2 | t | t | t + db_cagg_on_cagg_timestamp_dist_ht_3 | localhost | 55432 | db_cagg_on_cagg_timestamp_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMP +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' +SET timezone TO 'UTC'; +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
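-- Illustration only (assumed psql expansion, not executed as written): with the
-- time-based variables set above, the templated second- and third-level
-- aggregates from cagg_on_cagg_common.sql expand roughly to:
CREATE MATERIALIZED VIEW conditions_summary_2_daily
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
SELECT
    time_bucket(INTERVAL '1 day', "bucket") AS bucket,
    SUM(temperature) AS temperature
FROM conditions_summary_1_hourly
GROUP BY 1
WITH NO DATA;

CREATE MATERIALIZED VIEW conditions_summary_3_weekly
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
SELECT
    time_bucket(INTERVAL '1 week', "bucket") AS bucket,
    SUM(temperature) AS temperature
FROM conditions_summary_2_daily
GROUP BY 1
WITH NO DATA;
-- Each level buckets the "bucket" column of the level below it, so hourly data
-- rolls up into daily buckets and daily buckets into weekly ones.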
+\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); +psql:include/cagg_on_cagg_common.sql:21: WARNING: column type "timestamp without time zone" used for "time" does not follow best practices + table_name +------------ + conditions +(1 row) + + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature 
+--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 5 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 15 + Sun Jan 02 00:00:00 2022 | 20 + Mon Jan 03 00:00:00 2022 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 
00:00:00 2021 | 35 + Mon Jan 03 00:00:00 2022 | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 17 + Sun Jan 02 00:00:00 2022 | 20 + Mon Jan 03 00:00:00 2022 | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Mon Dec 27 00:00:00 2021 | 37 + Mon Jan 03 00:00:00 2022 | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_hourly because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_daily because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_hourly" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_daily" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_weekly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped 
+SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_daily" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------------------------+------------- + Sat Jan 01 00:00:00 2022 | 10 + Sat Jan 01 01:00:00 2022 | 7 + Sun Jan 02 01:00:00 2022 | 20 + Mon Jan 03 01:00:00 2022 | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_hourly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/expected/cagg_on_cagg_timestamptz.out b/tsl/test/expected/cagg_on_cagg_timestamptz.out new file mode 100644 index 000000000..fa3d3095b --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_timestamptz.out @@ -0,0 +1,331 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' +SET timezone TO 'UTC'; +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +Running local hypertable tests +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + table_name +------------ + conditions +(1 row) + + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * FROM 
:CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 + Mon Jan 03 00:00:00 2022 UTC | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 + Mon Jan 03 
00:00:00 2022 UTC | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 17 + Sun Jan 02 00:00:00 2022 UTC | 20 + Mon Jan 03 00:00:00 2022 UTC | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 37 + Mon Jan 03 00:00:00 2022 UTC | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_hourly because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_daily because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_hourly" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_daily" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_weekly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 0 
+-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_daily" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_hourly" does not exist at character 15 +\set ON_ERROR_STOP 1 diff --git a/tsl/test/expected/cagg_on_cagg_timestamptz_dist_ht.out b/tsl/test/expected/cagg_on_cagg_timestamptz_dist_ht.out new file mode 100644 index 000000000..92e16f0e2 --- /dev/null +++ b/tsl/test/expected/cagg_on_cagg_timestamptz_dist_ht.out @@ -0,0 +1,368 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + node_name | host | port | database | node_created | database_created | extension_created +---------------------------------------+-----------+-------+---------------------------------------+--------------+------------------+------------------- + db_cagg_on_cagg_timestamptz_dist_ht_1 | localhost | 55432 | db_cagg_on_cagg_timestamptz_dist_ht_1 | t | t | t + db_cagg_on_cagg_timestamptz_dist_ht_2 | localhost | 55432 | db_cagg_on_cagg_timestamptz_dist_ht_2 | t | t | t + db_cagg_on_cagg_timestamptz_dist_ht_3 | localhost | 55432 | db_cagg_on_cagg_timestamptz_dist_ht_3 | t | t | t +(3 rows) + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' +SET timezone TO 'UTC'; +-- Run tests +\ir include/cagg_on_cagg_common.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +Running distributed hypertable tests +\else +\echo 'Running local hypertable tests' +\endif +SET ROLE :ROLE_DEFAULT_PERM_USER; +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + table_name +------------ + conditions +(1 row) + + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + SELECT set_integer_now_func('conditions', 'integer_now'); + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * 
FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 +(3 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 +(2 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 +(1 row) + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 5 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 15 + Sun Jan 02 00:00:00 2022 UTC | 20 + Mon Jan 03 00:00:00 2022 UTC | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 35 + Mon Jan 
03 00:00:00 2022 UTC | 2 +(2 rows) + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 17 + Sun Jan 02 00:00:00 2022 UTC | 20 + Mon Jan 03 00:00:00 2022 UTC | 2 +(3 rows) + +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Mon Dec 27 00:00:00 2021 UTC | 37 + Mon Jan 03 00:00:00 2022 UTC | 2 +(2 rows) + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:164: ERROR: cannot drop view conditions_summary_1_hourly because other objects depend on it +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:165: ERROR: cannot drop view conditions_summary_2_daily because other objects depend on it +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:166: NOTICE: continuous aggregate "conditions_summary_1_hourly" is already up-to-date +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +psql:include/cagg_on_cagg_common.sql:167: NOTICE: continuous aggregate "conditions_summary_2_daily" is already up-to-date +\set ON_ERROR_STOP 1 +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:171: NOTICE: drop cascades to table _timescaledb_internal._hyper_4_4_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:174: ERROR: relation "conditions_summary_3_weekly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + bucket | temperature +--------+------------- +(0 rows) + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +psql:include/cagg_on_cagg_common.sql:185: NOTICE: drop cascades to table _timescaledb_internal._hyper_3_3_chunk +\set ON_ERROR_STOP 
0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:188: ERROR: relation "conditions_summary_2_daily" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + bucket | temperature +------------------------------+------------- + Sat Jan 01 00:00:00 2022 UTC | 10 + Sat Jan 01 01:00:00 2022 UTC | 7 + Sun Jan 02 01:00:00 2022 UTC | 20 + Mon Jan 03 01:00:00 2022 UTC | 2 +(4 rows) + +-- DROP the first CAGG should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +psql:include/cagg_on_cagg_common.sql:195: NOTICE: drop cascades to table _timescaledb_internal._hyper_2_2_chunk +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +psql:include/cagg_on_cagg_common.sql:198: ERROR: relation "conditions_summary_1_hourly" does not exist at character 15 +\set ON_ERROR_STOP 1 +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/expected/cagg_watermark.out b/tsl/test/expected/cagg_watermark.out index 2d47b1bc4..1a3e89deb 100644 --- a/tsl/test/expected/cagg_watermark.out +++ b/tsl/test/expected/cagg_watermark.out @@ -48,7 +48,7 @@ NOTICE: adding not-null constraint to column "time" (2,public,continuous_agg_test_mat,t) (1 row) -INSERT INTO _timescaledb_catalog.continuous_agg VALUES (2, 1, '','','','',0,'',''); +INSERT INTO _timescaledb_catalog.continuous_agg VALUES (2, 1, NULL, '','','','',0,'',''); \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER -- create the trigger CREATE TRIGGER continuous_agg_insert_trigger diff --git a/tsl/test/expected/exp_cagg_monthly.out b/tsl/test/expected/exp_cagg_monthly.out index d1df7fea7..d46802dc8 100644 --- a/tsl/test/expected/exp_cagg_monthly.out +++ b/tsl/test/expected/exp_cagg_monthly.out @@ -322,8 +322,8 @@ DROP MATERIALIZED VIEW conditions_summary; NOTICE: drop cascades to 3 other objects SELECT * FROM _timescaledb_catalog.continuous_agg WHERE mat_hypertable_id = :cagg_id; - mat_hypertable_id | raw_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized --------------------+-------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- + mat_hypertable_id | raw_hypertable_id | parent_mat_hypertable_id | user_view_schema | user_view_name | partial_view_schema | partial_view_name | bucket_width | direct_view_schema | direct_view_name | materialized_only | finalized +-------------------+-------------------+--------------------------+------------------+----------------+---------------------+-------------------+--------------+--------------------+------------------+-------------------+----------- (0 rows) SELECT * FROM _timescaledb_catalog.continuous_aggs_bucket_function diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index cf766ffca..bf9f35fd0 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -62,6 +62,12 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) cagg_migrate_timestamptz.sql cagg_migrate_timestamptz_dist_ht.sql cagg_multi.sql + cagg_on_cagg_integer.sql + cagg_on_cagg_integer_dist_ht.sql + 
cagg_on_cagg_timestamp.sql + cagg_on_cagg_timestamp_dist_ht.sql + cagg_on_cagg_timestamptz.sql + cagg_on_cagg_timestamptz_dist_ht.sql continuous_aggs_deprecated.sql cagg_tableam.sql cagg_usage.sql diff --git a/tsl/test/sql/cagg_errors_deprecated.sql b/tsl/test/sql/cagg_errors_deprecated.sql index da4250598..d30764dd4 100644 --- a/tsl/test/sql/cagg_errors_deprecated.sql +++ b/tsl/test/sql/cagg_errors_deprecated.sql @@ -50,14 +50,12 @@ select location, count(*) from conditions, LATERAL (Select * from mat_t1 where c = conditions.location) q group by location WITH NO DATA; - --non-hypertable CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.finalized = false) as select a, count(*) from mat_t1 group by a WITH NO DATA; - -- no group by CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.finalized = false) as @@ -335,6 +333,19 @@ Select sum( b), min(c) from rowsec_tab group by time_bucket('1', a) WITH NO DATA; +-- cagg on cagg not allowed +CREATE MATERIALIZED VIEW mat_m1 WITH (timescaledb.continuous, timescaledb.finalized = false) +AS +SELECT time_bucket('1 day', timec) AS bucket + FROM conditions +GROUP BY time_bucket('1 day', timec); + +CREATE MATERIALIZED VIEW mat_m2_on_mat_m1 WITH (timescaledb.continuous) +AS +SELECT time_bucket('1 week', bucket) AS bucket + FROM mat_m1 +GROUP BY time_bucket('1 week', bucket); + drop table conditions cascade; --negative tests for WITH options @@ -595,4 +606,3 @@ FROM AND uncompress.table_name = 'comp_ht_test') \gset CREATE MATERIALIZED VIEW cagg1 WITH(timescaledb.continuous, timescaledb.finalized = false) AS SELECT time_bucket('1h',_ts_meta_min_1) FROM :INTERNALTABLE GROUP BY 1; - diff --git a/tsl/test/sql/cagg_on_cagg_integer.sql b/tsl/test/sql/cagg_on_cagg_integer.sql new file mode 100644 index 000000000..d86806538 --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_integer.sql @@ -0,0 +1,17 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION FALSE +\set TIME_DIMENSION_DATATYPE INTEGER +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_1 +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_5 +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_10 +\set BUCKET_WIDTH_1ST 'INTEGER \'1\'' +\set BUCKET_WIDTH_2TH 'INTEGER \'5\'' +\set BUCKET_WIDTH_3TH 'INTEGER \'10\'' + +-- Run tests +\ir include/cagg_on_cagg_common.sql diff --git a/tsl/test/sql/cagg_on_cagg_integer_dist_ht.sql b/tsl/test/sql/cagg_on_cagg_integer_dist_ht.sql new file mode 100644 index 000000000..0ec53f165 --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_integer_dist_ht.sql @@ -0,0 +1,39 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER + +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 + +\ir include/remote_exec.sql + +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; + +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION FALSE +\set TIME_DIMENSION_DATATYPE INTEGER +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_1 +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_5 +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_10 +\set BUCKET_WIDTH_1ST 'INTEGER \'1\'' +\set BUCKET_WIDTH_2TH 'INTEGER \'5\'' +\set BUCKET_WIDTH_3TH 'INTEGER \'10\'' + +-- Run tests +\ir include/cagg_on_cagg_common.sql + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/sql/cagg_on_cagg_timestamp.sql b/tsl/test/sql/cagg_on_cagg_timestamp.sql new file mode 100644 index 000000000..d09876365 --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_timestamp.sql @@ -0,0 +1,19 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMP +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' + +SET timezone TO 'UTC'; + +-- Run tests +\ir include/cagg_on_cagg_common.sql diff --git a/tsl/test/sql/cagg_on_cagg_timestamp_dist_ht.sql b/tsl/test/sql/cagg_on_cagg_timestamp_dist_ht.sql new file mode 100644 index 000000000..fbb5414bb --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_timestamp_dist_ht.sql @@ -0,0 +1,41 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER + +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 + +\ir include/remote_exec.sql + +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; + +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMP +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' + +SET timezone TO 'UTC'; + +-- Run tests +\ir include/cagg_on_cagg_common.sql + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/sql/cagg_on_cagg_timestamptz.sql b/tsl/test/sql/cagg_on_cagg_timestamptz.sql new file mode 100644 index 000000000..852856f62 --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_timestamptz.sql @@ -0,0 +1,19 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. + +-- Setup test variables +\set IS_DISTRIBUTED FALSE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' + +SET timezone TO 'UTC'; + +-- Run tests +\ir include/cagg_on_cagg_common.sql diff --git a/tsl/test/sql/cagg_on_cagg_timestamptz_dist_ht.sql b/tsl/test/sql/cagg_on_cagg_timestamptz_dist_ht.sql new file mode 100644 index 000000000..3f35fc582 --- /dev/null +++ b/tsl/test/sql/cagg_on_cagg_timestamptz_dist_ht.sql @@ -0,0 +1,41 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +------------------------------------ +-- Set up a distributed environment +------------------------------------ +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER + +\set DATA_NODE_1 :TEST_DBNAME _1 +\set DATA_NODE_2 :TEST_DBNAME _2 +\set DATA_NODE_3 :TEST_DBNAME _3 + +\ir include/remote_exec.sql + +SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).* +FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name); + +GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC; + +-- Setup test variables +\set IS_DISTRIBUTED TRUE +\set IS_TIME_DIMENSION TRUE +\set TIME_DIMENSION_DATATYPE TIMESTAMPTZ +\set CAGG_NAME_1ST_LEVEL conditions_summary_1_hourly +\set CAGG_NAME_2TH_LEVEL conditions_summary_2_daily +\set CAGG_NAME_3TH_LEVEL conditions_summary_3_weekly +\set BUCKET_WIDTH_1ST 'INTERVAL \'1 hour\'' +\set BUCKET_WIDTH_2TH 'INTERVAL \'1 day\'' +\set BUCKET_WIDTH_3TH 'INTERVAL \'1 week\'' + +SET timezone TO 'UTC'; + +-- Run tests +\ir include/cagg_on_cagg_common.sql + +-- cleanup +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +DROP DATABASE :DATA_NODE_1; +DROP DATABASE :DATA_NODE_2; +DROP DATABASE :DATA_NODE_3; diff --git a/tsl/test/sql/cagg_watermark.sql b/tsl/test/sql/cagg_watermark.sql index 8dff2bc29..6c558e10e 100644 --- a/tsl/test/sql/cagg_watermark.sql +++ b/tsl/test/sql/cagg_watermark.sql @@ -20,7 +20,7 @@ SELECT * from _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log; \c :TEST_DBNAME :ROLE_SUPERUSER CREATE TABLE continuous_agg_test_mat(time int); select create_hypertable('continuous_agg_test_mat', 'time', chunk_time_interval=> 10); -INSERT INTO _timescaledb_catalog.continuous_agg VALUES (2, 1, '','','','',0,'',''); +INSERT INTO _timescaledb_catalog.continuous_agg VALUES (2, 1, NULL, '','','','',0,'',''); \c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER -- create the trigger diff --git a/tsl/test/sql/include/cagg_ddl_common.sql b/tsl/test/sql/include/cagg_ddl_common.sql index 1317d1a6b..cdf8db465 100644 --- a/tsl/test/sql/include/cagg_ddl_common.sql +++ b/tsl/test/sql/include/cagg_ddl_common.sql @@ -324,16 +324,6 @@ CREATE MATERIALIZED VIEW new_name_view AS SELECT time_bucket('6', time_bucket), COUNT("count") FROM new_name GROUP BY 1 WITH NO DATA; - --- cannot create a continuous aggregate on a continuous aggregate view -CREATE MATERIALIZED VIEW drop_chunks_view_view - WITH ( - timescaledb.continuous, - timescaledb.materialized_only=true - ) -AS SELECT time_bucket('6', time_bucket), SUM(count) - FROM drop_chunks_view - GROUP BY 1 WITH NO DATA; \set ON_ERROR_STOP 1 CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float); diff --git a/tsl/test/sql/include/cagg_on_cagg_common.sql b/tsl/test/sql/include/cagg_on_cagg_common.sql new file mode 100644 index 000000000..c565d8bc0 --- /dev/null +++ b/tsl/test/sql/include/cagg_on_cagg_common.sql @@ -0,0 +1,199 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+ +\if :IS_DISTRIBUTED +\echo 'Running distributed hypertable tests' +\else +\echo 'Running local hypertable tests' +\endif + +SET ROLE :ROLE_DEFAULT_PERM_USER; + +-- CAGGs on CAGGs tests +CREATE TABLE conditions ( + time :TIME_DIMENSION_DATATYPE NOT NULL, + temperature NUMERIC +); + +\if :IS_DISTRIBUTED + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', replication_factor => 2); + \else + SELECT table_name FROM create_distributed_hypertable('conditions', 'time', chunk_time_interval => 10, replication_factor => 2); + \endif +\else + \if :IS_TIME_DIMENSION + SELECT table_name FROM create_hypertable('conditions', 'time'); + \else + SELECT table_name FROM create_hypertable('conditions', 'time', chunk_time_interval => 10); + \endif +\endif + +\if :IS_TIME_DIMENSION + INSERT INTO conditions VALUES ('2022-01-01 00:00:00-00', 10); + INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00', 5); + INSERT INTO conditions VALUES ('2022-01-02 01:00:00-00', 20); +\else + CREATE OR REPLACE FUNCTION integer_now() + RETURNS :TIME_DIMENSION_DATATYPE LANGUAGE SQL STABLE AS + $$ + SELECT coalesce(max(time), 0) + FROM conditions + $$; + + \if :IS_DISTRIBUTED + SELECT + 'CREATE OR REPLACE FUNCTION integer_now() RETURNS '||:'TIME_DIMENSION_DATATYPE'||' LANGUAGE SQL STABLE AS $$ SELECT coalesce(max(time), 0) FROM conditions $$;' AS "STMT" + \gset + CALL distributed_exec (:'STMT'); + \endif + + SELECT set_integer_now_func('conditions', 'integer_now'); + + INSERT INTO conditions VALUES (1, 10); + INSERT INTO conditions VALUES (2, 5); + INSERT INTO conditions VALUES (5, 20); +\endif + +-- CAGG on hypertable (1st level) +CREATE MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_1ST, "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; + +-- CAGG on CAGG (2th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_2TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_1ST_LEVEL +GROUP BY 1 +WITH NO DATA; + +-- CAGG on CAGG (3th level) +CREATE MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket(:BUCKET_WIDTH_3TH, "bucket") AS bucket, + SUM(temperature) AS temperature +FROM :CAGG_NAME_2TH_LEVEL +GROUP BY 1 +WITH NO DATA; + +-- No data because the CAGGs are just for materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); + +-- Realtime data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); + +-- Refresh all data 
+CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); + +-- Materialized data +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +\if :IS_TIME_DIMENSION +-- Invalidate an old region +INSERT INTO conditions VALUES ('2022-01-01 01:00:00-00'::timestamptz, 2); +-- New region +INSERT INTO conditions VALUES ('2022-01-03 01:00:00-00'::timestamptz, 2); +\else +-- Invalidate an old region +INSERT INTO conditions VALUES (2, 2); +-- New region +INSERT INTO conditions VALUES (10, 2); +\endif + +-- No changes +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +-- Turn CAGGs into Realtime +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=false); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=false); + +-- Realtime changes, just new region +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +-- Turn CAGGs into materialized only again +ALTER MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL SET (timescaledb.materialized_only=true); +ALTER MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL SET (timescaledb.materialized_only=true); + +-- Refresh all data +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_3TH_LEVEL', NULL, NULL); + +-- All changes are materialized +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; + +-- DROP tests +\set ON_ERROR_STOP 0 +-- should error because it depends of other CAGGs +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +\set ON_ERROR_STOP 1 + +-- DROP the 3TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_3TH_LEVEL; +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_3TH_LEVEL ORDER BY bucket; +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +TRUNCATE :CAGG_NAME_2TH_LEVEL,:CAGG_NAME_1ST_LEVEL; +CALL refresh_continuous_aggregate(:'CAGG_NAME_2TH_LEVEL', NULL, NULL); +CALL refresh_continuous_aggregate(:'CAGG_NAME_1ST_LEVEL', NULL, NULL); +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; + +-- DROP the 2TH level CAGG don't affect others +DROP MATERIALIZED VIEW :CAGG_NAME_2TH_LEVEL; +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_2TH_LEVEL ORDER BY bucket; +\set ON_ERROR_STOP 1 +-- should work because dropping the top level CAGG +-- don't affect the down level CAGGs +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; + +-- DROP the first CAGG 
should work +DROP MATERIALIZED VIEW :CAGG_NAME_1ST_LEVEL; +\set ON_ERROR_STOP 0 +-- should error because it was dropped +SELECT * FROM :CAGG_NAME_1ST_LEVEL ORDER BY bucket; +\set ON_ERROR_STOP 1
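
For readers skimming the diff, a condensed sketch of the pattern these tests exercise may help: a hypertable, a continuous aggregate on it, and a second continuous aggregate defined on the first. The object names (conditions_hourly, conditions_daily) and bucket widths below are illustrative assumptions, not taken from the patch; the statements simply mirror what include/cagg_on_cagg_common.sql does through its psql variables.

-- Hypothetical, minimal sketch of a two-level hierarchy (names and widths assumed)
CREATE TABLE conditions (
    time        timestamptz NOT NULL,
    temperature numeric
);
SELECT create_hypertable('conditions', 'time');

-- 1st level: continuous aggregate on the hypertable
CREATE MATERIALIZED VIEW conditions_hourly
WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS
SELECT time_bucket(INTERVAL '1 hour', "time") AS bucket,
       SUM(temperature) AS temperature
FROM conditions
GROUP BY 1
WITH NO DATA;

-- 2nd level: continuous aggregate on the 1st-level continuous aggregate
CREATE MATERIALIZED VIEW conditions_daily
WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS
SELECT time_bucket(INTERVAL '1 day', bucket) AS bucket,
       SUM(temperature) AS temperature
FROM conditions_hourly
GROUP BY 1
WITH NO DATA;

-- Refresh bottom-up: the lower level first, then the level built on top of it
CALL refresh_continuous_aggregate('conditions_hourly', NULL, NULL);
CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);

The bottom-up refresh order is what the TRUNCATE test above demonstrates: refreshing the 2nd-level aggregate before the 1st level has been repopulated leaves it empty, which is consistent with each level reading only from the level directly below it. If the standard add_continuous_aggregate_policy() machinery applies to each level as well (an assumption these tests do not verify), scheduling the lower level at least as frequently as the one above it would follow the same bottom-up logic.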