Mirror of https://github.com/timescale/timescaledb.git
Handle drop_chunks on tables that have cont aggs
For hypertables that have continuous aggregates, calling drop_chunks now drops all of the rows in the materialization table that were based on the dropped chunks. Since we don't know what the correct default behavior for drop_chunks is, we've added a new argument, cascade_to_materializations, which must be set to true in order to call drop_chunks on a hypertable that has a continuous aggregate. drop_chunks is blocked on the materialization tables of continuous aggregates.
This commit is contained in:
parent 18d1607909
commit 45fb1fc2c8
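
As a quick illustration of the new user-facing behavior (not part of the diff itself; the hypertable name `metrics` is hypothetical, with an integer time column as in the tests below):

-- Blocked: the hypertable feeds a continuous aggregate and the new flag is not set.
SELECT drop_chunks(table_name => 'metrics', older_than => 10);

-- Allowed: the chunks are dropped and the matching rows are deleted from the
-- aggregate's materialization table.
SELECT drop_chunks(table_name => 'metrics', older_than => 10,
                   cascade_to_materializations => true);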
@@ -74,7 +74,8 @@ CREATE OR REPLACE FUNCTION drop_chunks(
     schema_name NAME = NULL,
     cascade BOOLEAN = FALSE,
     newer_than "any" = NULL,
-    verbose BOOLEAN = FALSE
+    verbose BOOLEAN = FALSE,
+    cascade_to_materializations BOOLEAN = NULL
 ) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks'
 LANGUAGE C STABLE PARALLEL SAFE;
@@ -72,3 +72,23 @@ CREATE INDEX continuous_aggs_hypertable_invalidation_log_idx
 ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (hypertable_id, lowest_modified_value ASC);
 
 GRANT SELECT ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log TO PUBLIC;
+
+DROP FUNCTION IF EXISTS drop_chunks(
+    older_than "any",
+    table_name NAME,
+    schema_name NAME,
+    cascade BOOLEAN,
+    newer_than "any",
+    verbose BOOLEAN
+);
+
+CREATE OR REPLACE FUNCTION drop_chunks(
+    older_than "any" = NULL,
+    table_name NAME = NULL,
+    schema_name NAME = NULL,
+    cascade BOOLEAN = FALSE,
+    newer_than "any" = NULL,
+    verbose BOOLEAN = FALSE,
+    cascade_to_materializations BOOLEAN = NULL
+) RETURNS SETOF REGCLASS AS '@MODULE_PATHNAME@', 'ts_chunk_drop_chunks'
+LANGUAGE C STABLE PARALLEL SAFE;
src/chunk.c (33 changed lines)
@@ -36,6 +36,8 @@
 #include "chunk.h"
 #include "chunk_index.h"
 #include "catalog.h"
+#include "continuous_agg.h"
+#include "cross_module_fn.h"
 #include "dimension.h"
 #include "dimension_slice.h"
 #include "dimension_vector.h"
@@ -1895,11 +1897,33 @@ chunks_return_srf(FunctionCallInfo fcinfo)
 
 void
 ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum,
-                        Oid older_than_type, Oid newer_than_type, bool cascade, int32 log_level)
+                        Oid older_than_type, Oid newer_than_type, bool cascade,
+                        bool cascades_to_materializations, int32 log_level)
 {
     int i = 0;
     uint64 num_chunks = 0;
-    Chunk **chunks = chunk_get_chunks_in_time_range(table_relid,
+    Chunk **chunks;
+    int32 hypertable_id = ts_hypertable_relid_to_id(table_relid);
+
+    switch (ts_continuous_agg_hypertable_status(hypertable_id))
+    {
+        case HypertableIsMaterialization:
+        case HypertableIsMaterializationAndRaw:
+            elog(ERROR, "cannot drop_chunks on a continuous aggregate materialization table");
+            return;
+        case HypertableIsRawTable:
+            if (!cascades_to_materializations)
+                ereport(ERROR,
+                        (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
+                         errmsg("cannot drop_chunks on hypertable that has a continuous aggregate "
+                                "without cascade_to_materializations set to true")));
+            break;
+        default:
+            cascades_to_materializations = false;
+            break;
+    }
+
+    chunks = chunk_get_chunks_in_time_range(table_relid,
                                             older_than_datum,
                                             newer_than_datum,
                                             older_than_type,
@@ -1926,6 +1950,9 @@ ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_tha
         /* Drop the table */
         performDeletion(&objaddr, cascade, 0);
     }
+
+    if (cascades_to_materializations)
+        ts_cm_functions->continuous_agg_drop_chunks_by_chunk_id(hypertable_id, chunks, num_chunks);
 }
 
 Datum
@@ -1943,6 +1970,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS)
     Oid newer_than_type = PG_ARGISNULL(4) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 4);
     bool cascade = PG_GETARG_BOOL(3);
     bool verbose = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5);
+    bool cascades_to_materializations = PG_ARGISNULL(6) ? false : PG_GETARG_BOOL(6);
     int elevel = verbose ? INFO : DEBUG2;
 
     if (PG_ARGISNULL(0) && PG_ARGISNULL(4))
@@ -2016,6 +2044,7 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS)
                             older_than_type,
                             newer_than_type,
                             cascade,
+                            cascades_to_materializations,
                             elevel);
 }
 
@@ -93,7 +93,8 @@ extern List *ts_chunk_get_window(int32 dimension_id, int64 point, int count, Mem
 extern void ts_chunks_rename_schema_name(char *old_schema, char *new_schema);
 extern TSDLLEXPORT void ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum,
                                                 Datum newer_than_datum, Oid older_than_type,
-                                                Oid newer_than_type, bool cascade, int32 log_level);
+                                                Oid newer_than_type, bool cascade,
+                                                bool cascades_to_materializations, int32 log_level);
 
 #define chunk_get_by_name(schema_name, table_name, num_constraints, fail_if_not_found) \
     ts_chunk_get_by_name_with_memory_context(schema_name, \
@@ -181,6 +181,56 @@ continuous_agg_init(ContinuousAgg *cagg, FormData_continuous_agg *fd)
     memcpy(&cagg->data, fd, sizeof(cagg->data));
 }
 
+ContinuousAggHypertableStatus
+ts_continuous_agg_hypertable_status(int32 hypertable_id)
+{
+    ScanIterator iterator =
+        ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext);
+    ContinuousAggHypertableStatus status = HypertableIsNotContinuousAgg;
+
+    ts_scanner_foreach(&iterator)
+    {
+        FormData_continuous_agg *data =
+            (FormData_continuous_agg *) GETSTRUCT(ts_scan_iterator_tuple(&iterator));
+
+        if (data->raw_hypertable_id == hypertable_id)
+            status |= HypertableIsRawTable;
+        if (data->mat_hypertable_id == hypertable_id)
+            status |= HypertableIsMaterialization;
+
+        if (status == HypertableIsMaterializationAndRaw)
+        {
+            ts_scan_iterator_close(&iterator);
+            return status;
+        }
+    }
+
+    return status;
+}
+
+TSDLLEXPORT List *
+ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id)
+{
+    List *continuous_aggs = NIL;
+    ScanIterator iterator =
+        ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext);
+    ts_scanner_foreach(&iterator)
+    {
+        ContinuousAgg *ca;
+        Form_continuous_agg data =
+            (Form_continuous_agg) GETSTRUCT(ts_scan_iterator_tuple(&iterator));
+
+        if (data->raw_hypertable_id != raw_hypertable_id)
+            continue;
+
+        ca = palloc0(sizeof(*ca));
+        continuous_agg_init(ca, data);
+        continuous_aggs = lappend(continuous_aggs, ca);
+    }
+
+    return continuous_aggs;
+}
+
 ContinuousAgg *
 ts_continuous_agg_find_by_view_name(const char *schema, const char *name)
 {
@@ -9,8 +9,11 @@
 #include <catalog/pg_type.h>
 
 #include <catalog.h>
+#include <chunk.h>
+
 #include "with_clause_parser.h"
 #include "compat.h"
 
 #define CAGGINVAL_TRIGGER_NAME "ts_cagg_invalidation_trigger"
 
 typedef enum ContinuousAggViewOption
@@ -27,6 +30,18 @@ typedef struct ContinuousAgg
     FormData_continuous_agg data;
 } ContinuousAgg;
 
+typedef enum ContinuousAggHypertableStatus
+{
+    HypertableIsNotContinuousAgg = 0,
+    HypertableIsMaterialization = 1,
+    HypertableIsRawTable = 2,
+    HypertableIsMaterializationAndRaw = HypertableIsMaterialization | HypertableIsRawTable,
+} ContinuousAggHypertableStatus;
+
+extern ContinuousAggHypertableStatus ts_continuous_agg_hypertable_status(int32 hypertable_id);
+extern void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks,
+                                                      Size num_chunks);
+extern TSDLLEXPORT List *ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id);
 extern TSDLLEXPORT ContinuousAgg *ts_continuous_agg_find_by_view_name(const char *schema,
                                                                       const char *name);
 extern void ts_continuous_agg_drop_view_callback(ContinuousAgg *ca, const char *schema,
@@ -199,6 +199,14 @@ continuous_agg_update_options_default(ContinuousAgg *cagg, WithClauseResult *wit
     error_no_default_fn_community();
     pg_unreachable();
 }
 
+static void
+continuous_agg_drop_chunks_by_chunk_id_default(int32 raw_hypertable_id, Chunk **chunks,
+                                               Size num_chunks)
+{
+    error_no_default_fn_community();
+}
+
 /*
  * Define cross-module functions' default values:
  * If the submodule isn't activated, using one of the cm functions will throw an
@@ -235,6 +243,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
     .finalize_agg_sfunc = error_no_default_fn_pg_community,
     .finalize_agg_ffunc = error_no_default_fn_pg_community,
     .process_cagg_viewstmt = process_cagg_viewstmt_default,
+    .continuous_agg_drop_chunks_by_chunk_id = continuous_agg_drop_chunks_by_chunk_id_default,
     .continuous_agg_trigfn = error_no_default_fn_pg_community,
     .continuous_agg_update_options = continuous_agg_update_options_default,
 };
@@ -64,6 +64,8 @@ typedef struct CrossModuleFunctions
     PGFunction finalize_agg_ffunc;
     bool (*process_cagg_viewstmt)(ViewStmt *stmt, const char *query_string, void *pstmt,
                                   WithClauseResult *with_clause_options);
+    void (*continuous_agg_drop_chunks_by_chunk_id)(int32 raw_hypertable_id, Chunk **chunks,
+                                                   Size num_chunks);
     PGFunction continuous_agg_trigfn;
     void (*continuous_agg_update_options)(ContinuousAgg *cagg,
                                           WithClauseResult *with_clause_options);
@@ -168,6 +168,7 @@ execute_drop_chunks_policy(int32 job_id)
                             INTERVALOID,
                             InvalidOid,
                             args->fd.cascade,
+                            false,
                             LOG);
     elog(LOG, "completed dropping chunks");
 
@@ -1,5 +1,6 @@
 set(SOURCES
     ${CMAKE_CURRENT_SOURCE_DIR}/cagg_create.c
+    ${CMAKE_CURRENT_SOURCE_DIR}/drop.c
     ${CMAKE_CURRENT_SOURCE_DIR}/insert.c
     ${CMAKE_CURRENT_SOURCE_DIR}/job.c
     ${CMAKE_CURRENT_SOURCE_DIR}/materialize.c
@@ -60,8 +60,6 @@
 #define PARTIALFN "partialize_agg"
 #define TIMEBUCKETFN "time_bucket"
 #define CHUNKTUPFN "chunk_for_tuple"
-
-#define MATCHUNKCOLNM "chunk_id"
 #define MATPARTCOLNM "time_partition_col"
 #define MATPARTCOL_INTERVAL_FACTOR 10
 #define HT_DEFAULT_CHUNKFN "calculate_chunk_interval"
@@ -999,7 +997,10 @@ mattablecolumninfo_addinternal(MatTableColumnInfo *matcolinfo, RangeTblEntry *us
 
     /* add a chunk_id column for materialization table */
     Node *vexpr = (Node *) makeVar(1, colno, INT4OID, -1, InvalidOid, 0);
-    col = makeColumnDef(MATCHUNKCOLNM, exprType(vexpr), exprTypmod(vexpr), exprCollation(vexpr));
+    col = makeColumnDef(CONTINUOUS_AGG_CHUNK_ID_COL_NAME,
+                        exprType(vexpr),
+                        exprTypmod(vexpr),
+                        exprCollation(vexpr));
     matcolinfo->matcollist = lappend(matcolinfo->matcollist, col);
 
     /* need to add an entry to the target list for computing chunk_id column
@@ -1019,7 +1020,10 @@ mattablecolumninfo_addinternal(MatTableColumnInfo *matcolinfo, RangeTblEntry *us
                                  InvalidOid,
                                  InvalidOid,
                                  COERCE_EXPLICIT_CALL);
-    chunk_te = makeTargetEntry((Expr *) chunk_fnexpr, colno, pstrdup(MATCHUNKCOLNM), false);
+    chunk_te = makeTargetEntry((Expr *) chunk_fnexpr,
+                               colno,
+                               pstrdup(CONTINUOUS_AGG_CHUNK_ID_COL_NAME),
+                               false);
     matcolinfo->partial_seltlist = lappend(matcolinfo->partial_seltlist, chunk_te);
     /*any internal column needs to be added to the group-by clause as well */
     maxRef = 0;
@@ -10,6 +10,9 @@
 
 #include "with_clause_parser.h"
 
+#define CONTINUOUS_AGG_CHUNK_ID_COL_NAME "chunk_id"
+
 bool tsl_process_continuous_agg_viewstmt(ViewStmt *stmt, const char *query_string, void *pstmt,
                                          WithClauseResult *with_clause_options);
 
 #endif /* TIMESCALEDB_TSL_CONTINUOUS_AGGS_CAGG_CREATE_H */
tsl/src/continuous_aggs/drop.c (new file, 66 lines)
@@ -0,0 +1,66 @@
+/*
+ * This file and its contents are licensed under the Timescale License.
+ * Please see the included NOTICE for copyright information and
+ * LICENSE-TIMESCALE for a copy of the license.
+ */
+#include <postgres.h>
+#include <catalog/pg_type.h>
+#include <executor/spi.h>
+#include <lib/stringinfo.h>
+#include <utils/builtins.h>
+
+#include "drop.h"
+
+#include <continuous_agg.h>
+
+#include "cagg_create.h"
+
+void
+ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks, Size num_chunks)
+{
+    ListCell *lc;
+    Oid arg_type = INT4OID;
+    List *continuous_aggs = ts_continuous_aggs_find_by_raw_table_id(raw_hypertable_id);
+    StringInfo command = makeStringInfo();
+    CatalogSecurityContext sec_ctx;
+
+    ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
+
+    if (SPI_connect() != SPI_OK_CONNECT)
+        elog(ERROR, "could not connect to SPI deleting materialization");
+
+    foreach (lc, continuous_aggs)
+    {
+        int32 i;
+        SPIPlanPtr delete_plan;
+        ContinuousAgg *agg = lfirst(lc);
+        Hypertable *mat_table = ts_hypertable_get_by_id(agg->data.mat_hypertable_id);
+
+        resetStringInfo(command);
+
+        appendStringInfo(command,
+                         "DELETE FROM %s.%s AS D WHERE "
+                         "D.%s = $1",
+                         quote_identifier(NameStr(mat_table->fd.schema_name)),
+                         quote_identifier(NameStr(mat_table->fd.table_name)),
+                         quote_identifier(CONTINUOUS_AGG_CHUNK_ID_COL_NAME));
+
+        delete_plan = SPI_prepare(command->data, 1, &arg_type);
+        if (delete_plan == NULL)
+            elog(ERROR, "could not prepare delete materialization");
+
+        for (i = 0; i < num_chunks; i++)
+        {
+            Datum arg = Int32GetDatum(chunks[i]->fd.id);
+            int res = SPI_execute_plan(delete_plan, &arg, NULL, false, 0);
+            if (res < 0)
+                elog(ERROR, "could not delete from the materialization");
+        }
+
+        SPI_freeplan(delete_plan);
+    }
+
+    SPI_finish();
+
+    ts_catalog_restore_user(&sec_ctx);
+}
tsl/src/continuous_aggs/drop.h (new file, 16 lines)
@@ -0,0 +1,16 @@
+/*
+ * This file and its contents are licensed under the Timescale License.
+ * Please see the included NOTICE for copyright information and
+ * LICENSE-TIMESCALE for a copy of the license.
+ */
+#ifndef TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H
+#define TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H
+
+#include <postgres.h>
+
+#include <chunk.h>
+
+extern void ts_continuous_agg_drop_chunks_by_chunk_id(int32 raw_hypertable_id, Chunk **chunks,
+                                                      Size num_chunks);
+
+#endif /* TIMESCALEDB_TSL_CONTINUOUS_AGGS_DROP_H */
@@ -21,6 +21,7 @@
 #include "bgw_policy/reorder_api.h"
 #include "bgw_policy/drop_chunks_api.h"
 #include "continuous_aggs/cagg_create.h"
+#include "continuous_aggs/drop.h"
 #include "continuous_aggs/insert.h"
 #include "continuous_aggs/materialize.h"
 #include "continuous_aggs/options.h"
@@ -78,6 +79,7 @@ CrossModuleFunctions tsl_cm_functions = {
     .finalize_agg_sfunc = tsl_finalize_agg_sfunc,
     .finalize_agg_ffunc = tsl_finalize_agg_ffunc,
     .process_cagg_viewstmt = tsl_process_continuous_agg_viewstmt,
+    .continuous_agg_drop_chunks_by_chunk_id = ts_continuous_agg_drop_chunks_by_chunk_id,
     .continuous_agg_trigfn = continuous_agg_trigfn,
     .continuous_agg_update_options = continuous_agg_update_options,
 };
@@ -37,6 +37,7 @@ select count(*) from mat_m1;
 (1 row)
 
 \set ON_ERROR_STOP 1
+-- schema tests
 \c :TEST_DBNAME :ROLE_SUPERUSER
 CREATE SCHEMA rename_schema;
 GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER;
@@ -156,3 +157,224 @@ SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
  public | rename_c_aggregate | rename_schema | partial_view
 (2 rows)
 
+-- drop_chunks tests
+DROP TABLE conditions CASCADE;
+NOTICE: drop cascades to view _timescaledb_internal.ts_internal_mat_m1view
+DROP TABLE foo CASCADE;
+NOTICE: drop cascades to view rename_schema.partial_view
+CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER);
+SELECT hypertable_id AS drop_chunks_table_id
+    FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset
+NOTICE: adding not-null constraint to column "time"
+CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('5', time), COUNT(data)
+    FROM drop_chunks_table
+    GROUP BY 1;
+NOTICE: adding not-null constraint to column "time_partition_col"
+SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table,
+    schema_name AS drop_chunks_mat_schema,
+    table_name AS drop_chunks_mat_table_name
+    FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
+    WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id
+    AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
+-- create 3 chunks, with 3 time bucket
+INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i;
+REFRESH MATERIALIZED VIEW drop_chunks_view;
+INFO: new materialization range for public.drop_chunks_table (time column time) (15)
+INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+ count
+-------
+     3
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           0 |     5
+           5 |     5
+          10 |     5
+(3 rows)
+
+-- cannot drop directly from the materialization table
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema',
+    table_name => :'drop_chunks_mat_table_name',
+    newer_than => -20,
+    verbose => true);
+ERROR: cannot drop_chunks on a continuous aggregate materialization table
+\set ON_ERROR_STOP 1
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+ count
+-------
+     3
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           0 |     5
+           5 |     5
+          10 |     5
+(3 rows)
+
+-- cannot drop from the raw table without specifying cascade_to_materializations
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10);
+ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true
+\set ON_ERROR_STOP 1
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+ count
+-------
+     3
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           0 |     5
+           5 |     5
+          10 |     5
+(3 rows)
+
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(older_than => 200);
+ERROR: cannot drop_chunks on hypertable that has a continuous aggregate without cascade_to_materializations set to true
+\set ON_ERROR_STOP 1
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+ count
+-------
+     3
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           0 |     5
+           5 |     5
+          10 |     5
+(3 rows)
+
+SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true);
+ drop_chunks
+-------------
+ 
+(1 row)
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+ count
+-------
+     2
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+          10 |     5
+(1 row)
+
+-- drop chunks when the chunksize and time_bucket aren't aligned
+DROP TABLE drop_chunks_table CASCADE;
+NOTICE: drop cascades to view _timescaledb_internal.ts_internal_drop_chunks_viewview
+NOTICE: drop cascades to table _timescaledb_internal._hyper_6_4_chunk
+CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER);
+SELECT hypertable_id AS drop_chunks_table_u_id
+    FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset
+NOTICE: adding not-null constraint to column "time"
+CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('3', time), COUNT(data)
+    FROM drop_chunks_table_u
+    GROUP BY 1;
+NOTICE: adding not-null constraint to column "time_partition_col"
+SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u,
+    schema_name AS drop_chunks_mat_schema,
+    table_name AS drop_chunks_mat_table_u_name
+    FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
+    WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id
+    AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
+-- create 3 chunks, with 3 time bucket
+INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i;
+REFRESH MATERIALIZED VIEW drop_chunks_view;
+INFO: new materialization range for public.drop_chunks_table_u (time column time) (15)
+INFO: materializing continuous aggregate public.drop_chunks_view: new range up to 15
+SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c;
+ count
+-------
+     4
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           0 |     3
+           3 |     3
+           6 |     3
+           9 |     3
+          12 |     3
+(5 rows)
+
+SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true);
+ drop_chunks
+-------------
+ 
+(1 row)
+
+-- everything in the first chunk (values within [0, 6]) should be dropped
+-- the time_bucket [6, 8] will lose it's first value, but should still have
+-- the other two
+SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c;
+ count
+-------
+     3
+(1 row)
+
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c;
+ count
+-------
+     1
+(1 row)
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+ time_bucket | count
+-------------+-------
+           6 |     2
+           9 |     3
+          12 |     3
+(3 rows)
@@ -33,6 +33,8 @@ select count(*) from mat_m1;
 
 \set ON_ERROR_STOP 1
 
+-- schema tests
+
 \c :TEST_DBNAME :ROLE_SUPERUSER
 
 CREATE SCHEMA rename_schema;
@@ -111,3 +113,109 @@ ALTER VIEW rename_schema.ts_internal_rename_testview RENAME TO partial_view;
 
 SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
 FROM _timescaledb_catalog.continuous_agg;
+
+-- drop_chunks tests
+DROP TABLE conditions CASCADE;
+DROP TABLE foo CASCADE;
+
+CREATE TABLE drop_chunks_table(time BIGINT, data INTEGER);
+SELECT hypertable_id AS drop_chunks_table_id
+    FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset
+
+CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('5', time), COUNT(data)
+    FROM drop_chunks_table
+    GROUP BY 1;
+
+SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table,
+    schema_name AS drop_chunks_mat_schema,
+    table_name AS drop_chunks_mat_table_name
+    FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
+    WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id
+    AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
+
+-- create 3 chunks, with 3 time bucket
+INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i;
+REFRESH MATERIALIZED VIEW drop_chunks_view;
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+-- cannot drop directly from the materialization table
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(schema_name => :'drop_chunks_mat_schema',
+    table_name => :'drop_chunks_mat_table_name',
+    newer_than => -20,
+    verbose => true);
+\set ON_ERROR_STOP 1
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+-- cannot drop from the raw table without specifying cascade_to_materializations
+
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 10);
+\set ON_ERROR_STOP 1
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+\set ON_ERROR_STOP 0
+SELECT drop_chunks(older_than => 200);
+\set ON_ERROR_STOP 1
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+SELECT drop_chunks(table_name => 'drop_chunks_table', older_than => 13, cascade_to_materializations => true);
+
+SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+-- drop chunks when the chunksize and time_bucket aren't aligned
+DROP TABLE drop_chunks_table CASCADE;
+CREATE TABLE drop_chunks_table_u(time BIGINT, data INTEGER);
+SELECT hypertable_id AS drop_chunks_table_u_id
+    FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset
+
+CREATE VIEW drop_chunks_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('3', time), COUNT(data)
+    FROM drop_chunks_table_u
+    GROUP BY 1;
+
+SELECT format('%s.%s', schema_name, table_name) AS drop_chunks_mat_table_u,
+    schema_name AS drop_chunks_mat_schema,
+    table_name AS drop_chunks_mat_table_u_name
+    FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
+    WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id
+    AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
+
+-- create 3 chunks, with 3 time bucket
+INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i;
+REFRESH MATERIALIZED VIEW drop_chunks_view;
+
+SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;
+
+SELECT drop_chunks(table_name => 'drop_chunks_table_u', older_than => 13, cascade_to_materializations => true);
+
+-- everything in the first chunk (values within [0, 6]) should be dropped
+-- the time_bucket [6, 8] will lose it's first value, but should still have
+-- the other two
+SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c;
+SELECT count(c) FROM show_chunks(:'drop_chunks_mat_table_u') AS c;
+
+SELECT * FROM drop_chunks_view ORDER BY 1;