From 45fb1fc2c81215fde662e17062b3051280b25bcf Mon Sep 17 00:00:00 2001 From: Joshua Lockerman Date: Tue, 9 Apr 2019 14:47:42 -0400 Subject: [PATCH] Handle drop_chunks on tables that have cont aggs For hypetables that have continuous aggregates, calling drop_chunks now drops all of the rows in the materialization table that were based on the dropped chunks. Since we don't know what the correct default behavior for drop_chunks is, we've added a new argument, cascade_to_materializations, which must be set to true in order to call drop_chunks on a hypertable which has a continuous aggregate. drop_chunks is blocked on the materialization tables of continuous aggregates --- sql/ddl_api.sql | 3 +- sql/updates/latest-dev.sql | 20 ++ src/chunk.c | 47 ++++- src/chunk.h | 3 +- src/continuous_agg.c | 50 +++++ src/continuous_agg.h | 15 ++ src/cross_module_fn.c | 9 + src/cross_module_fn.h | 2 + tsl/src/bgw_policy/job.c | 1 + tsl/src/continuous_aggs/CMakeLists.txt | 1 + tsl/src/continuous_aggs/cagg_create.c | 12 +- tsl/src/continuous_aggs/cagg_create.h | 3 + tsl/src/continuous_aggs/drop.c | 66 ++++++ tsl/src/continuous_aggs/drop.h | 16 ++ tsl/src/init.c | 2 + tsl/test/expected/contaggviews_ddl-10.out | 222 +++++++++++++++++++++ tsl/test/expected/contaggviews_ddl-11.out | 222 +++++++++++++++++++++ tsl/test/expected/contaggviews_ddl-9.6.out | 222 +++++++++++++++++++++ tsl/test/sql/contaggviews_ddl.sql.in | 108 ++++++++++ 19 files changed, 1009 insertions(+), 15 deletions(-) create mode 100644 tsl/src/continuous_aggs/drop.c create mode 100644 tsl/src/continuous_aggs/drop.h diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql index 3a289df2e..ef2ad9bc3 100644 --- a/sql/ddl_api.sql +++ b/sql/ddl_api.sql @@ -74,7 +74,8 @@ CREATE OR REPLACE FUNCTION drop_chunks( schema_name NAME = NULL, cascade BOOLEAN = FALSE, newer_than "any" = NULL, - verbose BOOLEAN = FALSE + verbose BOOLEAN = FALSE, + cascade_to_materializations BOOLEAN = NULL ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' LANGUAGE C STABLE PARALLEL SAFE; diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql index 38d82eee7..7b7f2428c 100644 --- a/sql/updates/latest-dev.sql +++ b/sql/updates/latest-dev.sql @@ -72,3 +72,23 @@ CREATE INDEX continuous_aggs_hypertable_invalidation_log_idx ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (hypertable_id, lowest_modified_value ASC); GRANT SELECT ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log TO PUBLIC; + +DROP FUNCTION IF EXISTS drop_chunks( + older_than "any", + table_name NAME, + schema_name NAME, + cascade BOOLEAN, + newer_than "any", + verbose BOOLEAN +); + +CREATE OR REPLACE FUNCTION drop_chunks( + older_than "any" = NULL, + table_name NAME = NULL, + schema_name NAME = NULL, + cascade BOOLEAN = FALSE, + newer_than "any" = NULL, + verbose BOOLEAN = FALSE, + cascade_to_materializations BOOLEAN = NULL +) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks' +LANGUAGE C STABLE PARALLEL SAFE; diff --git a/src/chunk.c b/src/chunk.c index 48489889b..f64a01576 100644 --- a/src/chunk.c +++ b/src/chunk.c @@ -36,6 +36,8 @@ #include "chunk.h" #include "chunk_index.h" #include "catalog.h" +#include "continuous_agg.h" +#include "cross_module_fn.h" #include "dimension.h" #include "dimension_slice.h" #include "dimension_vector.h" @@ -1895,18 +1897,40 @@ chunks_return_srf(FunctionCallInfo fcinfo) void ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum, - Oid older_than_type, Oid newer_than_type, bool 
cascade, int32 log_level) + Oid older_than_type, Oid newer_than_type, bool cascade, + bool cascades_to_materializations, int32 log_level) { int i = 0; uint64 num_chunks = 0; - Chunk **chunks = chunk_get_chunks_in_time_range(table_relid, - older_than_datum, - newer_than_datum, - older_than_type, - newer_than_type, - "drop_chunks", - CurrentMemoryContext, - &num_chunks); + Chunk **chunks; + int32 hypertable_id = ts_hypertable_relid_to_id(table_relid); + + switch (ts_continuous_agg_hypertable_status(hypertable_id)) + { + case HypertableIsMaterialization: + case HypertableIsMaterializationAndRaw: + elog(ERROR, "cannot drop_chunks on a continuous aggregate materialization table"); + return; + case HypertableIsRawTable: + if (!cascades_to_materializations) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("cannot drop_chunks on hypertable that has a continuous aggregate " + "without cascade_to_materializations set to true"))); + break; + default: + cascades_to_materializations = false; + break; + } + + chunks = chunk_get_chunks_in_time_range(table_relid, + older_than_datum, + newer_than_datum, + older_than_type, + newer_than_type, + "drop_chunks", + CurrentMemoryContext, + &num_chunks); for (; i < num_chunks; i++) { @@ -1926,6 +1950,9 @@ ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_tha /* Drop the table */ performDeletion(&objaddr, cascade, 0); } + + if (cascades_to_materializations) + ts_cm_functions->continuous_agg_drop_chunks_by_chunk_id(hypertable_id, chunks, num_chunks); } Datum @@ -1943,6 +1970,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS) Oid newer_than_type = PG_ARGISNULL(4) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 4); bool cascade = PG_GETARG_BOOL(3); bool verbose = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5); + bool cascades_to_materializations = PG_ARGISNULL(6) ? false : PG_GETARG_BOOL(6); int elevel = verbose ? 
INFO : DEBUG2; if (PG_ARGISNULL(0) && PG_ARGISNULL(4)) @@ -2016,6 +2044,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS) older_than_type, newer_than_type, cascade, + cascades_to_materializations, elevel); } diff --git a/src/chunk.h b/src/chunk.h index 935340215..807bf7347 100644 --- a/src/chunk.h +++ b/src/chunk.h @@ -93,7 +93,8 @@ extern List *ts_chunk_get_window(int32 dimension_id, int64 point, int count, Mem extern void ts_chunks_rename_schema_name(char *old_schema, char *new_schema); extern TSDLLEXPORT void ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum, Oid older_than_type, - Oid newer_than_type, bool cascade, int32 log_level); + Oid newer_than_type, bool cascade, + bool cascades_to_materializations, int32 log_level); #define chunk_get_by_name(schema_name, table_name, num_constraints, fail_if_not_found) \ ts_chunk_get_by_name_with_memory_context(schema_name, \ diff --git a/src/continuous_agg.c b/src/continuous_agg.c index 5c6caa5ad..262badda1 100644 --- a/src/continuous_agg.c +++ b/src/continuous_agg.c @@ -181,6 +181,56 @@ continuous_agg_init(ContinuousAgg *cagg, FormData_continuous_agg *fd) memcpy(&cagg->data, fd, sizeof(cagg->data)); } +ContinuousAggHypertableStatus +ts_continuous_agg_hypertable_status(int32 hypertable_id) +{ + ScanIterator iterator = + ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext); + ContinuousAggHypertableStatus status = HypertableIsNotContinuousAgg; + + ts_scanner_foreach(&iterator) + { + FormData_continuous_agg *data = + (FormData_continuous_agg *) GETSTRUCT(ts_scan_iterator_tuple(&iterator)); + + if (data->raw_hypertable_id == hypertable_id) + status |= HypertableIsRawTable; + if (data->mat_hypertable_id == hypertable_id) + status |= HypertableIsMaterialization; + + if (status == HypertableIsMaterializationAndRaw) + { + ts_scan_iterator_close(&iterator); + return status; + } + } + + return status; +} + +TSDLLEXPORT List * +ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id) +{ + List *continuous_aggs = NIL; + ScanIterator iterator = + ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext); + ts_scanner_foreach(&iterator) + { + ContinuousAgg *ca; + Form_continuous_agg data = + (Form_continuous_agg) GETSTRUCT(ts_scan_iterator_tuple(&iterator)); + + if (data->raw_hypertable_id != raw_hypertable_id) + continue; + + ca = palloc0(sizeof(*ca)); + continuous_agg_init(ca, data); + continuous_aggs = lappend(continuous_aggs, ca); + } + + return continuous_aggs; +} + ContinuousAgg * ts_continuous_agg_find_by_view_name(const char *schema, const char *name) { diff --git a/src/continuous_agg.h b/src/continuous_agg.h index 958ec5df1..e68bb3f16 100644 --- a/src/continuous_agg.h +++ b/src/continuous_agg.h @@ -9,8 +9,11 @@ #include #include +#include + #include "with_clause_parser.h" #include "compat.h" + #define CAGGINVAL_TRIGGER_NAME "ts_cagg_invalidation_trigger" typedef enum ContinuousAggViewOption @@ -27,6 +30,18 @@ typedef struct ContinuousAgg FormData_continuous_agg data; } ContinuousAgg; +typedef enum ContinuousAggHypertableStatus +{ + HypertableIsNotContinuousAgg = 0, + HypertableIsMaterialization = 1, + HypertableIsRawTable = 2, + HypertableIsMaterializationAndRaw = HypertableIsMaterialization | HypertableIsRawTable, +} ContinuousAggHypertableStatus; + +extern ContinuousAggHypertableStatus ts_continuous_agg_hypertable_status(int32 hypertable_id); +extern void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks, + Size num_chunks); 
+extern TSDLLEXPORT List *ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id); extern TSDLLEXPORT ContinuousAgg *ts_continuous_agg_find_by_view_name(const char *schema, const char *name); extern void ts_continuous_agg_drop_view_callback(ContinuousAgg *ca, const char *schema, diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c index 48ec5a29d..ed371058c 100644 --- a/src/cross_module_fn.c +++ b/src/cross_module_fn.c @@ -199,6 +199,14 @@ continuous_agg_update_options_default(ContinuousAgg *cagg, WithClauseResult *wit error_no_default_fn_community(); pg_unreachable(); } + +static void +continuous_agg_drop_chunks_by_chunk_id_default(int32 raw_hypertable_id, Chunk **chunks, + Size num_chunks) +{ + error_no_default_fn_community(); +} + /* * Define cross-module functions' default values: * If the submodule isn't activated, using one of the cm functions will throw an @@ -235,6 +243,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = { .finalize_agg_sfunc = error_no_default_fn_pg_community, .finalize_agg_ffunc = error_no_default_fn_pg_community, .process_cagg_viewstmt = process_cagg_viewstmt_default, + .continuous_agg_drop_chunks_by_chunk_id = continuous_agg_drop_chunks_by_chunk_id_default, .continuous_agg_trigfn = error_no_default_fn_pg_community, .continuous_agg_update_options = continuous_agg_update_options_default, }; diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h index c25722a16..711f9cde5 100644 --- a/src/cross_module_fn.h +++ b/src/cross_module_fn.h @@ -64,6 +64,8 @@ typedef struct CrossModuleFunctions PGFunction finalize_agg_ffunc; bool (*process_cagg_viewstmt)(ViewStmt *stmt, const char *query_string, void *pstmt, WithClauseResult *with_clause_options); + void (*continuous_agg_drop_chunks_by_chunk_id)(int32 raw_hypertable_id, Chunk **chunks, + Size num_chunks); PGFunction continuous_agg_trigfn; void (*continuous_agg_update_options)(ContinuousAgg *cagg, WithClauseResult *with_clause_options); diff --git a/tsl/src/bgw_policy/job.c b/tsl/src/bgw_policy/job.c index ac2ff71b8..cfda1186b 100644 --- a/tsl/src/bgw_policy/job.c +++ b/tsl/src/bgw_policy/job.c @@ -168,6 +168,7 @@ execute_drop_chunks_policy(int32 job_id) INTERVALOID, InvalidOid, args->fd.cascade, + false, LOG); elog(LOG, "completed dropping chunks"); diff --git a/tsl/src/continuous_aggs/CMakeLists.txt b/tsl/src/continuous_aggs/CMakeLists.txt index 1a1c6fa54..f80242c4a 100644 --- a/tsl/src/continuous_aggs/CMakeLists.txt +++ b/tsl/src/continuous_aggs/CMakeLists.txt @@ -1,5 +1,6 @@ set(SOURCES ${CMAKE_CURRENT_SOURCE_DIR}/cagg_create.c + ${CMAKE_CURRENT_SOURCE_DIR}/drop.c ${CMAKE_CURRENT_SOURCE_DIR}/insert.c ${CMAKE_CURRENT_SOURCE_DIR}/job.c ${CMAKE_CURRENT_SOURCE_DIR}/materialize.c diff --git a/tsl/src/continuous_aggs/cagg_create.c b/tsl/src/continuous_aggs/cagg_create.c index 45dc69703..1db6c240d 100644 --- a/tsl/src/continuous_aggs/cagg_create.c +++ b/tsl/src/continuous_aggs/cagg_create.c @@ -60,8 +60,6 @@ #define PARTIALFN "partialize_agg" #define TIMEBUCKETFN "time_bucket" #define CHUNKTUPFN "chunk_for_tuple" - -#define MATCHUNKCOLNM "chunk_id" #define MATPARTCOLNM "time_partition_col" #define MATPARTCOL_INTERVAL_FACTOR 10 #define HT_DEFAULT_CHUNKFN "calculate_chunk_interval" @@ -999,7 +997,10 @@ mattablecolumninfo_addinternal(MatTableColumnInfo *matcolinfo, RangeTblEntry *us /* add a chunk_id column for materialization table */ Node *vexpr = (Node *) makeVar(1, colno, INT4OID, -1, InvalidOid, 0); - col = makeColumnDef(MATCHUNKCOLNM, exprType(vexpr), exprTypmod(vexpr), exprCollation(vexpr)); 
+ col = makeColumnDef(CONTINUOUS_AGG_CHUNK_ID_COL_NAME, + exprType(vexpr), + exprTypmod(vexpr), + exprCollation(vexpr)); matcolinfo->matcollist = lappend(matcolinfo->matcollist, col); /* need to add an entry to the target list for computing chunk_id column @@ -1019,7 +1020,10 @@ mattablecolumninfo_addinternal(MatTableColumnInfo *matcolinfo, RangeTblEntry *us InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL); - chunk_te = makeTargetEntry((Expr *) chunk_fnexpr, colno, pstrdup(MATCHUNKCOLNM), false); + chunk_te = makeTargetEntry((Expr *) chunk_fnexpr, + colno, + pstrdup(CONTINUOUS_AGG_CHUNK_ID_COL_NAME), + false); matcolinfo->partial_seltlist = lappend(matcolinfo->partial_seltlist, chunk_te); /*any internal column needs to be added to the group-by clause as well */ maxRef = 0; diff --git a/tsl/src/continuous_aggs/cagg_create.h b/tsl/src/continuous_aggs/cagg_create.h index df60cd404..3df39ef3a 100644 --- a/tsl/src/continuous_aggs/cagg_create.h +++ b/tsl/src/continuous_aggs/cagg_create.h @@ -10,6 +10,9 @@ #include "with_clause_parser.h" +#define CONTINUOUS_AGG_CHUNK_ID_COL_NAME "chunk_id" + bool tsl_process_continuous_agg_viewstmt(ViewStmt *stmt, const char *query_string, void *pstmt, WithClauseResult *with_clause_options); + #endif /* TIMESCALEDB_TSL_CONTINUOUS_AGGS_CAGG_CREATE_H */ diff --git a/tsl/src/continuous_aggs/drop.c b/tsl/src/continuous_aggs/drop.c new file mode 100644 index 000000000..34df20ca5 --- /dev/null +++ b/tsl/src/continuous_aggs/drop.c @@ -0,0 +1,66 @@ +/* + * This file and its contents are licensed under the Timescale License. + * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ +#include +#include +#include +#include +#include + +#include "drop.h" + +#include + +#include "cagg_create.h" + +void +ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks, Size num_chunks) +{ + ListCell *lc; + Oid arg_type = INT4OID; + List *continuous_aggs = ts_continuous_aggs_find_by_raw_table_id(raw_hypertable_id); + StringInfo command = makeStringInfo(); + CatalogSecurityContext sec_ctx; + + ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx); + + if (SPI_connect() != SPI_OK_CONNECT) + elog(ERROR, "could not connect to SPI deleting materialization"); + + foreach (lc, continuous_aggs) + { + int32 i; + SPIPlanPtr delete_plan; + ContinuousAgg *agg = lfirst(lc); + Hypertable *mat_table = ts_hypertable_get_by_id(agg->data.mat_hypertable_id); + + resetStringInfo(command); + + appendStringInfo(command, + "DELETE FROM %s.%s AS D WHERE " + "D.%s = $1", + quote_identifier(NameStr(mat_table->fd.schema_name)), + quote_identifier(NameStr(mat_table->fd.table_name)), + quote_identifier(CONTINUOUS_AGG_CHUNK_ID_COL_NAME)); + + delete_plan = SPI_prepare(command->data, 1, &arg_type); + if (delete_plan == NULL) + elog(ERROR, "could not prepare delete materialization"); + + for (i = 0; i < num_chunks; i++) + { + Datum arg = Int32GetDatum(chunks[i]->fd.id); + int res = SPI_execute_plan(delete_plan, &arg, NULL, false, 0); + if (res < 0) + elog(ERROR, "could not delete from the materialization"); + } + + SPI_freeplan(delete_plan); + } + + SPI_finish(); + + ts_catalog_restore_user(&sec_ctx); +} diff --git a/tsl/src/continuous_aggs/drop.h b/tsl/src/continuous_aggs/drop.h new file mode 100644 index 000000000..d8ccc9b45 --- /dev/null +++ b/tsl/src/continuous_aggs/drop.h @@ -0,0 +1,16 @@ +/* + * This file and its contents are licensed under the Timescale License. 
+ * Please see the included NOTICE for copyright information and + * LICENSE-TIMESCALE for a copy of the license. + */ +#ifndef TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H +#define TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H + +#include + +#include + +extern void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks, + Size num_chunks); + +#endif /* TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H */ diff --git a/tsl/src/init.c b/tsl/src/init.c index fc79a4a13..0db5c5542 100644 --- a/tsl/src/init.c +++ b/tsl/src/init.c @@ -21,6 +21,7 @@ #include "bgw_policy/reorder_api.h" #include "bgw_policy/drop_chunks_api.h" #include "continuous_aggs/cagg_create.h" +#include "continuous_aggs/drop.h" #include "continuous_aggs/insert.h" #include "continuous_aggs/materialize.h" #include "continuous_aggs/options.h" @@ -78,6 +79,7 @@ CrossModuleFunctions tsl_cm_functions = { .finalize_agg_sfunc = tsl_finalize_agg_sfunc, .finalize_agg_ffunc = tsl_finalize_agg_ffunc, .process_cagg_viewstmt = tsl_process_continuous_agg_viewstmt, + .continuous_agg_drop_chunks_by_chunk_id = ts_continuous_agg_drop_chunks_by_chunk_id, .continuous_agg_trigfn = continuous_agg_trigfn, .continuous_agg_update_options = continuous_agg_update_options, }; diff --git a/tsl/test/expected/contaggviews_ddl-10.out b/tsl/test/expected/contaggviews_ddl-10.out index 1283eda27..010db4b11 100644 --- a/tsl/test/expected/contaggviews_ddl-10.out +++ b/tsl/test/expected/contaggviews_ddl-10.out @@ -37,6 +37,7 @@ select count(*) from mat_m1; (1 row) \set ON_ERROR_STOP 1 +-- schema tests \c :TEST_DBNAME :ROLE_SUPERUSER CREATE SCHEMA rename_schema; GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; @@ -156,3 +157,224 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name public | rename_c_aggregate | rename_schema | partial_view (2 rows) +-- drop_chunks tests +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_mat_m1view +DROP TABLE foo CASCADE; +NOTICE: drop cascades to view rename_schema.partial_view +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; 
+ time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table +\set ON_ERROR_STOP 0 +SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + newer_than => -20, + verbose => true); +ERROR: cannot drop_chunks on a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop from the raw table without specifying cascade_to_materializations +\set ON_ERROR_STOP 0 +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT drop_chunks(older_than => 200); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 10 | 5 +(1 row) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_drop_chunks_viewview +NOTICE: drop cascades to table _timescaledb_internal._hyper_6_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 
chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table_u (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +-- everything in the first chunk (values within [0, 6]) should be dropped +-- the time_bucket [6, 8] will lose it's first value, but should still have +-- the other two +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 6 | 2 + 9 | 3 + 12 | 3 +(3 rows) + diff --git a/tsl/test/expected/contaggviews_ddl-11.out b/tsl/test/expected/contaggviews_ddl-11.out index 0202435e6..f80884936 100644 --- a/tsl/test/expected/contaggviews_ddl-11.out +++ b/tsl/test/expected/contaggviews_ddl-11.out @@ -37,6 +37,7 @@ select count(*) from mat_m1; (1 row) \set ON_ERROR_STOP 1 +-- schema tests \c :TEST_DBNAME :ROLE_SUPERUSER CREATE SCHEMA rename_schema; GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; @@ -156,3 +157,224 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name public | rename_c_aggregate | rename_schema | partial_view (2 rows) +-- drop_chunks tests +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_mat_m1view +DROP TABLE foo CASCADE; +NOTICE: drop cascades to view rename_schema.partial_view +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; 
+ count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table +\set ON_ERROR_STOP 0 +SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + newer_than => -20, + verbose => true); +ERROR: cannot drop_chunks on a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop from the raw table without specifying cascade_to_materializations +\set ON_ERROR_STOP 0 +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT drop_chunks(older_than => 200); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 10 | 5 +(1 row) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_drop_chunks_viewview +NOTICE: drop cascades to table _timescaledb_internal._hyper_6_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = 
_timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table_u (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +-- everything in the first chunk (values within [0, 6]) should be dropped +-- the time_bucket [6, 8] will lose it's first value, but should still have +-- the other two +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 6 | 2 + 9 | 3 + 12 | 3 +(3 rows) + diff --git a/tsl/test/expected/contaggviews_ddl-9.6.out b/tsl/test/expected/contaggviews_ddl-9.6.out index 1283eda27..010db4b11 100644 --- a/tsl/test/expected/contaggviews_ddl-9.6.out +++ b/tsl/test/expected/contaggviews_ddl-9.6.out @@ -37,6 +37,7 @@ select count(*) from mat_m1; (1 row) \set ON_ERROR_STOP 1 +-- schema tests \c :TEST_DBNAME :ROLE_SUPERUSER CREATE SCHEMA rename_schema; GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER; @@ -156,3 +157,224 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name public | rename_c_aggregate | rename_schema | partial_view (2 rows) +-- drop_chunks tests +DROP TABLE conditions CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_mat_m1view +DROP TABLE foo CASCADE; +NOTICE: drop cascades to view rename_schema.partial_view +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 
+(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop directly from the materialization table +\set ON_ERROR_STOP 0 +SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + newer_than => -20, + verbose => true); +ERROR: cannot drop_chunks on a continuous aggregate materialization table +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +-- cannot drop from the raw table without specifying cascade_to_materializations +\set ON_ERROR_STOP 0 +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +\set ON_ERROR_STOP 0 +SELECT drop_chunks(older_than => 200); +ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true +\set ON_ERROR_STOP 1 +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 5 + 5 | 5 + 10 | 5 +(3 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 10 | 5 +(1 row) + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +NOTICE: drop cascades to view _timescaledb_internal.ts_internal_drop_chunks_viewview +NOTICE: drop cascades to table _timescaledb_internal._hyper_6_4_chunk +CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset +NOTICE: adding not-null constraint to column "time" +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1; +NOTICE: adding not-null constraint to column "time_partition_col" +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE 
_timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; +INFO: new materialization range for public.drop_chunks_table_u (time column time) (15) +INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15 +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 4 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 0 | 3 + 3 | 3 + 6 | 3 + 9 | 3 + 12 | 3 +(5 rows) + +SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true); + drop_chunks +------------- + +(1 row) + +-- everything in the first chunk (values within [0, 6]) should be dropped +-- the time_bucket [6, 8] will lose it's first value, but should still have +-- the other two +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; + count +------- + 3 +(1 row) + +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + count +------- + 1 +(1 row) + +SELECT * FROM drop_chunks_view ORDER BY 1; + time_bucket | count +-------------+------- + 6 | 2 + 9 | 3 + 12 | 3 +(3 rows) + diff --git a/tsl/test/sql/contaggviews_ddl.sql.in b/tsl/test/sql/contaggviews_ddl.sql.in index b6f4df2bb..143982f3d 100644 --- a/tsl/test/sql/contaggviews_ddl.sql.in +++ b/tsl/test/sql/contaggviews_ddl.sql.in @@ -33,6 +33,8 @@ select count(*) from mat_m1; \set ON_ERROR_STOP 1 +-- schema tests + \c :TEST_DBNAME :ROLE_SUPERUSER CREATE SCHEMA rename_schema; @@ -111,3 +113,109 @@ ALTER VIEW rename_schema.ts_internal_rename_testview RENAME TO partial_view; SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name FROM _timescaledb_catalog.continuous_agg; + +-- drop_chunks tests +DROP TABLE conditions CASCADE; +DROP TABLE foo CASCADE; + +CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_id + FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset + +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('5', time), COUNT(data) + FROM drop_chunks_table + GROUP BY 1; + +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset + +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +-- cannot drop directly from the materialization table +\set ON_ERROR_STOP 0 +SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema', + table_name => :'drop_chunks_mat_table_name', + newer_than => -20, + verbose => true); +\set 
ON_ERROR_STOP 1 + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +-- cannot drop from the raw table without specifying cascade_to_materializations + +\set ON_ERROR_STOP 0 +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10); +\set ON_ERROR_STOP 1 + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +\set ON_ERROR_STOP 0 +SELECT drop_chunks(older_than => 200); +\set ON_ERROR_STOP 1 + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true); + +SELECT count(c) FROM show_chunks('drop_chunks_table') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +-- drop chunks when the chunksize and time_bucket aren't aligned +DROP TABLE drop_chunks_table CASCADE; +CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER); +SELECT hypertable_id AS drop_chunks_table_u_id + FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset + +CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours') +AS SELECT time_bucket('3', time), COUNT(data) + FROM drop_chunks_table_u + GROUP BY 1; + +SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u, + schema_name AS drop_chunks_mat_schema, + table_name AS drop_chunks_mat_table_u_name + FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg + WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id + AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset + +-- create 3 chunks, with 3 time bucket +INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i; +REFRESH MATERIALIZED VIEW drop_chunks_view; + +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1; + +SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true); + +-- everything in the first chunk (values within [0, 6]) should be dropped +-- the time_bucket [6, 8] will lose it's first value, but should still have +-- the other two +SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c; +SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c; + +SELECT * FROM drop_chunks_view ORDER BY 1;
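
A minimal usage sketch of the behavior this patch introduces, assuming the patch is applied as-is. The hypertable name `metrics` is hypothetical; `drop_chunks` and its `cascade_to_materializations` argument are taken from the patch above, and the error behavior mirrors the expected test output.

-- On a hypertable that feeds a continuous aggregate, drop_chunks now refuses
-- to run unless cascade_to_materializations is explicitly set to true.
SELECT drop_chunks(table_name => 'metrics', older_than => 1000);
-- ERROR: cannot drop_chunks on hypertable that has a continuous aggregate
--        without cascade_to_materializations set to true

-- With the flag set, the raw chunks are dropped and the rows in the
-- materialization table that were computed from those chunks are deleted.
SELECT drop_chunks(table_name => 'metrics',
                   older_than => 1000,
                   cascade_to_materializations => true);

-- Calling drop_chunks directly on a materialization table always errors,
-- regardless of the flag.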