Handle TRUNCATE TABLE on chunks

Make truncating an uncompressed chunk drop the data in the case where
the data resides in a corresponding compressed chunk.

Generate invalidations for Continuous Aggregates after TRUNCATE, so
as to have consistent refresh operations on the materialization
hypertable.

Fixes #4362
This commit is contained in:
Markos Fountoulakis 2022-08-16 19:10:07 +03:00 committed by Markos Fountoulakis
parent fc865de6e0
commit 9c6433e6ed
15 changed files with 3017 additions and 39 deletions

View File

@ -15,11 +15,13 @@ accidentally triggering the load of a previous DB version.**
* #4486 Adding boolean column with default value doesn't work on compressed table
* #4555 Handle properly default privileges on Continuous Aggregates
* #4575 Fix use of `get_partition_hash` and `get_partition_for_key` inside an IMMUTABLE function
* #4416 Handle TRUNCATE TABLE on chunks
**Thanks**
@janko for reporting
@AlmiS for reporting error on `get_partition_hash` executed inside an IMMUTABLE function
@michaelkitson for reporting permission errors using default privileges on Continuous Aggregates
@jayadevanm for reporting error of TRUNCATE TABLE on compressed chunk
## 2.7.2 (2022-07-26)

View File

@ -1001,7 +1001,7 @@ process_truncate(ProcessUtilityArgs *args)
List *hypertables = NIL;
List *relations = NIL;
bool list_changed = false;
MemoryContext parsetreectx = GetMemoryChunkContext(args->parsetree);
MemoryContext oldctx, parsetreectx = GetMemoryChunkContext(args->parsetree);
/* For all hypertables, we drop the now empty chunks. We also propagate the
* TRUNCATE call to the compressed version of the hypertable, if it exists.
@ -1037,7 +1037,6 @@ process_truncate(ProcessUtilityArgs *args)
if (cagg)
{
Hypertable *mat_ht, *raw_ht;
MemoryContext oldctx;
if (!relation_should_recurse(rv))
ereport(ERROR,
@ -1071,13 +1070,14 @@ process_truncate(ProcessUtilityArgs *args)
break;
}
case RELKIND_RELATION:
/* TRUNCATE for foreign tables not implemented yet. This will raise an error. */
case RELKIND_FOREIGN_TABLE:
{
Hypertable *ht =
ts_hypertable_cache_get_entry(hcache, relid, CACHE_FLAG_MISSING_OK);
Chunk *chunk;
if (!ht)
list_append = true;
else
if (ht)
{
ContinuousAggHypertableStatus agg_status;
@ -1114,6 +1114,38 @@ process_truncate(ProcessUtilityArgs *args)
*/
list_changed = true;
}
else if ((chunk = ts_chunk_get_by_relid(relid, false)) != NULL)
{ /* this is a chunk */
ht = ts_hypertable_cache_get_entry(hcache,
chunk->hypertable_relid,
CACHE_FLAG_NONE);
Assert(ht != NULL);
/* If the hypertable has continuous aggregates, then invalidate
* the truncated region. */
if (ts_continuous_agg_hypertable_status(ht->fd.id) == HypertableIsRawTable)
ts_continuous_agg_invalidate_chunk(ht, chunk);
/* Truncate the compressed chunk too. */
if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
{
Chunk *compressed_chunk =
ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, false);
if (compressed_chunk != NULL)
{
/* Create list item into the same context of the list. */
oldctx = MemoryContextSwitchTo(parsetreectx);
rv = makeRangeVar(NameStr(compressed_chunk->fd.schema_name),
NameStr(compressed_chunk->fd.table_name),
-1);
MemoryContextSwitchTo(oldctx);
list_changed = true;
}
}
list_append = true;
}
else
list_append = true;
break;
}
}
@ -1234,14 +1266,7 @@ process_drop_chunk(ProcessUtilityArgs *args, DropStmt *stmt)
/* If the hypertable has continuous aggregates, then invalidate
* the dropped region. */
if (ts_continuous_agg_hypertable_status(ht->fd.id) == HypertableIsRawTable)
{
int64 start = ts_chunk_primary_dimension_start(chunk);
int64 end = ts_chunk_primary_dimension_end(chunk);
Assert(hyperspace_get_open_dimension(ht->space, 0)->fd.id ==
chunk->cube->slices[0]->fd.dimension_id);
ts_cm_functions->continuous_agg_invalidate_raw_ht(ht, start, end);
}
ts_continuous_agg_invalidate_chunk(ht, chunk);
}
}

View File

@ -31,6 +31,7 @@
#include "bgw/job.h"
#include "ts_catalog/continuous_agg.h"
#include "cross_module_fn.h"
#include "hypercube.h"
#include "hypertable.h"
#include "hypertable_cache.h"
#include "scan_iterator.h"
@ -1351,6 +1352,17 @@ ts_continuous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid)
return par_dim;
}
/*
 * Invalidate the region of a raw hypertable covered by the given chunk, so
 * that any continuous aggregates defined on it are refreshed consistently.
 *
 * Computes the chunk's extent [start, end) along the hypertable's primary
 * (first open) dimension and writes a matching invalidation entry via the
 * cross-module hook. The Assert verifies that the chunk's first cube slice
 * really belongs to that open dimension — assumes slices[0] is always the
 * primary time dimension for this chunk.
 *
 * ht:    hypertable the chunk belongs to (must be the raw hypertable).
 * chunk: chunk whose data region is being invalidated (e.g. after TRUNCATE
 *        or DROP of the chunk).
 */
TSDLLEXPORT void
ts_continuous_agg_invalidate_chunk(Hypertable *ht, Chunk *chunk)
{
/* Bounds of the chunk along the primary dimension. */
int64 start = ts_chunk_primary_dimension_start(chunk);
int64 end = ts_chunk_primary_dimension_end(chunk);
/* Sanity check: the chunk's first slice must be on the open (time) dimension. */
Assert(hyperspace_get_open_dimension(ht->space, 0)->fd.id ==
chunk->cube->slices[0]->fd.dimension_id);
/* Record the invalidation through the TSL cross-module function table. */
ts_cm_functions->continuous_agg_invalidate_raw_ht(ht, start, end);
}
typedef struct Watermark
{
int32 hyper_id;

View File

@ -174,6 +174,8 @@ extern TSDLLEXPORT const Dimension *
ts_continuous_agg_find_integer_now_func_by_materialization_id(int32 mat_htid);
extern ContinuousAgg *ts_continuous_agg_find_userview_name(const char *schema, const char *name);
extern TSDLLEXPORT void ts_continuous_agg_invalidate_chunk(Hypertable *ht, Chunk *chunk);
extern TSDLLEXPORT bool ts_continuous_agg_bucket_width_variable(const ContinuousAgg *agg);
extern TSDLLEXPORT int64 ts_continuous_agg_bucket_width(const ContinuousAgg *agg);

View File

@ -283,6 +283,20 @@ SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id;
24 | 3 | 6 | 7
(24 rows)
-- Test that truncating chunks works
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
count
-------
1
(1 row)
TRUNCATE TABLE _timescaledb_internal._hyper_2_7_chunk;
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
count
-------
0
(1 row)
-- Drop one chunk "manually" and verify that dimension slices and
-- constraints are cleaned up. Each chunk has two constraints and two
-- dimension slices. Both constraints should be deleted, but only one

View File

@ -117,6 +117,11 @@ FULL OUTER JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension
ORDER BY c.id;
SELECT * FROM _timescaledb_catalog.dimension_slice ORDER BY id;
-- Test that truncating chunks works
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
TRUNCATE TABLE _timescaledb_internal._hyper_2_7_chunk;
SELECT count(*) FROM _timescaledb_internal._hyper_2_7_chunk;
-- Drop one chunk "manually" and verify that dimension slices and
-- constraints are cleaned up. Each chunk has two constraints and two
-- dimension slices. Both constraints should be deleted, but only one

View File

@ -577,6 +577,47 @@ SELECT * FROM hyper_invals;
2 | 20 | 20
(1 row)
-- Pick the first chunk of conditions to TRUNCATE
SELECT show_chunks AS chunk_to_truncate
FROM show_chunks('conditions')
ORDER BY 1
LIMIT 1 \gset
-- Show the data before truncating one of the chunks
SELECT * FROM :chunk_to_truncate
ORDER BY 1;
time | device | temp
------+--------+------
1 | 4 | 23.7
1 | 0 | 16
2 | 2 | 23.5
2 | 1 | 25
3 | 2 | 23.5
3 | 0 | 20
4 | 2 | 10
5 | 2 | 26
6 | 1 | 13
7 | 3 | 35
8 | 1 | 37
9 | 3 | 7
(12 rows)
-- Truncate one chunk
\if :IS_DISTRIBUTED
-- There is no TRUNCATE implementation for FOREIGN tables yet
\set ON_ERROR_STOP 0
\endif
TRUNCATE TABLE :chunk_to_truncate;
\if :IS_DISTRIBUTED
\set ON_ERROR_STOP 1
\endif
-- Should see new invalidation entries for conditions for the non-distributed case
SELECT * FROM hyper_invals;
hyper_id | start | end
----------+-------+-----
1 | 0 | 10
2 | 20 | 20
(2 rows)
-- TRUNCATE the hypertable to invalidate all its continuous aggregates
TRUNCATE conditions;
-- Now empty
@ -590,8 +631,9 @@ SELECT * FROM hyper_invals;
hyper_id | start | end
----------+----------------------+---------------------
1 | -9223372036854775808 | 9223372036854775807
1 | 0 | 10
2 | 20 | 20
(2 rows)
(3 rows)
-- Aggregates still hold data
SELECT * FROM cond_10
@ -673,7 +715,7 @@ ORDER BY 1,2;
-- TRUNCATE ONLY
\set ON_ERROR_STOP 0
TRUNCATE ONLY cond_20;
psql:include/cagg_invalidation_common.sql:385: ERROR: cannot truncate only a continuous aggregate
psql:include/cagg_invalidation_common.sql:408: ERROR: cannot truncate only a continuous aggregate
\set ON_ERROR_STOP 1
TRUNCATE cond_20;
-- Should now be empty
@ -746,7 +788,7 @@ WHERE user_view_name = 'cond_1' \gset
\else
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.invalidation_cagg_log_add_entry(:cond_1_id, 1, 0);
psql:include/cagg_invalidation_common.sql:433: ERROR: cannot invalidate cagg, end time should be greater than start time
psql:include/cagg_invalidation_common.sql:456: ERROR: cannot invalidate cagg, end time should be greater than start time
\set ON_ERROR_STOP 1
\endif
-- Test invalidations with bucket size 1
@ -923,7 +965,7 @@ CREATE table threshold_test (time int, value int);
SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2);
\else
SELECT create_hypertable('threshold_test', 'time', chunk_time_interval => 4);
psql:include/cagg_invalidation_common.sql:544: NOTICE: adding not-null constraint to column "time"
psql:include/cagg_invalidation_common.sql:567: NOTICE: adding not-null constraint to column "time"
create_hypertable
-----------------------------
(7,public,threshold_test,t)
@ -959,14 +1001,14 @@ ORDER BY 1,2;
\else
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.invalidation_hyper_log_add_entry(:thresh_hyper_id, 1, 0);
psql:include/cagg_invalidation_common.sql:569: ERROR: cannot invalidate hypertable, end time should be greater than start time
psql:include/cagg_invalidation_common.sql:592: ERROR: cannot invalidate hypertable, end time should be greater than start time
\set ON_ERROR_STOP 1
\endif
-- Test that threshold is initilized to min value when there's no data
-- and we specify an infinite end. Note that the min value may differ
-- depending on time type.
CALL refresh_continuous_aggregate('thresh_2', 0, NULL);
psql:include/cagg_invalidation_common.sql:576: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:599: NOTICE: continuous aggregate "thresh_2" is already up-to-date
SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold
WHERE hypertable_id = :thresh_hyper_id
ORDER BY 1,2;
@ -992,13 +1034,13 @@ ORDER BY 1,2;
-- Refresh where both the start and end of the window is above the
-- max data value
CALL refresh_continuous_aggregate('thresh_2', 14, NULL);
psql:include/cagg_invalidation_common.sql:596: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:619: NOTICE: continuous aggregate "thresh_2" is already up-to-date
SELECT watermark AS thresh_hyper_id_watermark
FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold
WHERE hypertable_id = :thresh_hyper_id \gset
-- Refresh where we start from the current watermark to infinity
CALL refresh_continuous_aggregate('thresh_2', :thresh_hyper_id_watermark, NULL);
psql:include/cagg_invalidation_common.sql:603: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:626: NOTICE: continuous aggregate "thresh_2" is already up-to-date
-- Now refresh with max end of the window to test that the
-- invalidation threshold is capped at the last bucket of data
CALL refresh_continuous_aggregate('thresh_2', 0, NULL);
@ -1200,7 +1242,7 @@ INSERT INTO conditions VALUES(3, 1, 1.0);
INSERT INTO conditions VALUES(4, 1, 1.0);
INSERT INTO conditions VALUES(6, 1, 1.0);
CALL refresh_continuous_aggregate('cond_1', 10, NULL);
psql:include/cagg_invalidation_common.sql:725: NOTICE: continuous aggregate "cond_1" is already up-to-date
psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date
SELECT * FROM cagg_invals
WHERE cagg_id = :cond_1_id;
cagg_id | start | end
@ -1226,7 +1268,7 @@ INSERT INTO conditions VALUES (40, 1, 1.0);
-- Refresh to process invalidations, but outside the range of
-- invalidations we inserted so that we don't clear them.
CALL refresh_continuous_aggregate('cond_10', 50, 60);
psql:include/cagg_invalidation_common.sql:746: NOTICE: continuous aggregate "cond_10" is already up-to-date
psql:include/cagg_invalidation_common.sql:769: NOTICE: continuous aggregate "cond_10" is already up-to-date
SELECT mat_hypertable_id AS cond_10_id
FROM _timescaledb_catalog.continuous_agg
WHERE user_view_name = 'cond_10' \gset
@ -1266,16 +1308,16 @@ CALL refresh_continuous_aggregate('cond_10', 0, 200);
SET timescaledb.materializations_per_refresh_window='foo';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:785: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:808: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "foo".
SET timescaledb.materializations_per_refresh_window='2bar';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:788: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:811: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "2bar".
SET timescaledb.materializations_per_refresh_window='-';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:792: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:815: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "-".
\set VERBOSITY terse

View File

@ -635,6 +635,48 @@ SELECT * FROM hyper_invals;
2 | 30 | 80
(2 rows)
-- Pick the first chunk of conditions to TRUNCATE
SELECT show_chunks AS chunk_to_truncate
FROM show_chunks('conditions')
ORDER BY 1
LIMIT 1 \gset
-- Show the data before truncating one of the chunks
SELECT * FROM :chunk_to_truncate
ORDER BY 1;
time | device | temp
------+--------+------
1 | 4 | 23.7
1 | 0 | 16
2 | 2 | 23.5
2 | 1 | 25
3 | 2 | 23.5
3 | 0 | 20
4 | 2 | 10
5 | 2 | 26
6 | 1 | 13
7 | 3 | 35
8 | 1 | 37
9 | 3 | 7
(12 rows)
-- Truncate one chunk
\if :IS_DISTRIBUTED
-- There is no TRUNCATE implementation for FOREIGN tables yet
\set ON_ERROR_STOP 0
\endif
TRUNCATE TABLE :chunk_to_truncate;
psql:include/cagg_invalidation_common.sql:352: ERROR: "_dist_hyper_1_1_chunk" is not a table
\if :IS_DISTRIBUTED
\set ON_ERROR_STOP 1
\endif
-- Should see new invalidation entries for conditions for the non-distributed case
SELECT * FROM hyper_invals;
hyper_id | start | end
----------+-------+-----
2 | 20 | 20
2 | 30 | 80
(2 rows)
-- TRUNCATE the hypertable to invalidate all its continuous aggregates
TRUNCATE conditions;
-- Now empty
@ -732,7 +774,7 @@ ORDER BY 1,2;
-- TRUNCATE ONLY
\set ON_ERROR_STOP 0
TRUNCATE ONLY cond_20;
psql:include/cagg_invalidation_common.sql:385: ERROR: cannot truncate only a continuous aggregate
psql:include/cagg_invalidation_common.sql:408: ERROR: cannot truncate only a continuous aggregate
\set ON_ERROR_STOP 1
TRUNCATE cond_20;
-- Should now be empty
@ -980,7 +1022,7 @@ ORDER BY 1,2;
CREATE table threshold_test (time int, value int);
\if :IS_DISTRIBUTED
SELECT create_distributed_hypertable('threshold_test', 'time', chunk_time_interval => 4, replication_factor => 2);
psql:include/cagg_invalidation_common.sql:542: NOTICE: adding not-null constraint to column "time"
psql:include/cagg_invalidation_common.sql:565: NOTICE: adding not-null constraint to column "time"
create_distributed_hypertable
-------------------------------
(7,public,threshold_test,t)
@ -1024,7 +1066,7 @@ SELECT _timescaledb_internal.invalidation_hyper_log_add_entry(:thresh_hyper_id,
-- and we specify an infinite end. Note that the min value may differ
-- depending on time type.
CALL refresh_continuous_aggregate('thresh_2', 0, NULL);
psql:include/cagg_invalidation_common.sql:576: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:599: NOTICE: continuous aggregate "thresh_2" is already up-to-date
SELECT * FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold
WHERE hypertable_id = :thresh_hyper_id
ORDER BY 1,2;
@ -1050,13 +1092,13 @@ ORDER BY 1,2;
-- Refresh where both the start and end of the window is above the
-- max data value
CALL refresh_continuous_aggregate('thresh_2', 14, NULL);
psql:include/cagg_invalidation_common.sql:596: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:619: NOTICE: continuous aggregate "thresh_2" is already up-to-date
SELECT watermark AS thresh_hyper_id_watermark
FROM _timescaledb_catalog.continuous_aggs_invalidation_threshold
WHERE hypertable_id = :thresh_hyper_id \gset
-- Refresh where we start from the current watermark to infinity
CALL refresh_continuous_aggregate('thresh_2', :thresh_hyper_id_watermark, NULL);
psql:include/cagg_invalidation_common.sql:603: NOTICE: continuous aggregate "thresh_2" is already up-to-date
psql:include/cagg_invalidation_common.sql:626: NOTICE: continuous aggregate "thresh_2" is already up-to-date
-- Now refresh with max end of the window to test that the
-- invalidation threshold is capped at the last bucket of data
CALL refresh_continuous_aggregate('thresh_2', 0, NULL);
@ -1258,7 +1300,7 @@ INSERT INTO conditions VALUES(3, 1, 1.0);
INSERT INTO conditions VALUES(4, 1, 1.0);
INSERT INTO conditions VALUES(6, 1, 1.0);
CALL refresh_continuous_aggregate('cond_1', 10, NULL);
psql:include/cagg_invalidation_common.sql:725: NOTICE: continuous aggregate "cond_1" is already up-to-date
psql:include/cagg_invalidation_common.sql:748: NOTICE: continuous aggregate "cond_1" is already up-to-date
SELECT * FROM cagg_invals
WHERE cagg_id = :cond_1_id;
cagg_id | start | end
@ -1284,7 +1326,7 @@ INSERT INTO conditions VALUES (40, 1, 1.0);
-- Refresh to process invalidations, but outside the range of
-- invalidations we inserted so that we don't clear them.
CALL refresh_continuous_aggregate('cond_10', 50, 60);
psql:include/cagg_invalidation_common.sql:746: NOTICE: continuous aggregate "cond_10" is already up-to-date
psql:include/cagg_invalidation_common.sql:769: NOTICE: continuous aggregate "cond_10" is already up-to-date
SELECT mat_hypertable_id AS cond_10_id
FROM _timescaledb_catalog.continuous_agg
WHERE user_view_name = 'cond_10' \gset
@ -1327,17 +1369,17 @@ CALL refresh_continuous_aggregate('cond_10', 0, 200);
SET timescaledb.materializations_per_refresh_window='foo';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:785: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:808: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "foo".
SET timescaledb.materializations_per_refresh_window='2bar';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:788: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:811: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "2bar".
SET timescaledb.materializations_per_refresh_window='-';
INSERT INTO conditions VALUES (140, 1, 1.0);
CALL refresh_continuous_aggregate('cond_10', 0, 200);
psql:include/cagg_invalidation_common.sql:792: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
psql:include/cagg_invalidation_common.sql:815: WARNING: invalid value for session variable "timescaledb.materializations_per_refresh_window"
DETAIL: Expected an integer but current value is "-".
\set VERBOSITY terse
-- cleanup

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -106,6 +106,25 @@ SELECT show_chunks('public.table_to_compress', newer_than=>'1 day'::interval);
-------------
(0 rows)
-- truncate one compressed chunk
SELECT chunk_schema || '.' || chunk_name as "CHNAME"
FROM timescaledb_information.chunks
WHERE hypertable_name = 'table_to_compress' and hypertable_schema = 'public'
ORDER BY chunk_name LIMIT 1
\gset
SELECT count(*) FROM :CHNAME;
count
-------
1
(1 row)
TRUNCATE TABLE :CHNAME;
SELECT count(*) FROM :CHNAME;
count
-------
0
(1 row)
-- drop all hypertables' old chunks
SELECT drop_chunks(table_name::regclass, older_than=>'1 day'::interval)
FROM _timescaledb_catalog.hypertable

View File

@ -53,7 +53,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
cagg_ddl_dist_ht.sql
cagg_drop_chunks.sql
cagg_dump.sql
cagg_invalidation_dist_ht.sql
cagg_multi.sql
continuous_aggs.sql
continuous_aggs_deprecated.sql
@ -129,8 +128,14 @@ set(TEST_TEMPLATES
transparent_decompression_ordered_index.sql.in)
if(CMAKE_BUILD_TYPE MATCHES Debug)
list(APPEND TEST_TEMPLATES cagg_query.sql.in dist_hypertable.sql.in
remote_copy.sql.in dist_grant.sql.in)
list(
APPEND
TEST_TEMPLATES
cagg_query.sql.in
dist_hypertable.sql.in
remote_copy.sql.in
dist_grant.sql.in
cagg_invalidation_dist_ht.sql.in)
endif(CMAKE_BUILD_TYPE MATCHES Debug)
# Check if PostgreSQL was compiled with JIT support

View File

@ -37,6 +37,15 @@ SELECT show_chunks('public.uncompressed_table');
SELECT show_chunks('public.table_to_compress');
SELECT show_chunks('public.table_to_compress', older_than=>'1 day'::interval);
SELECT show_chunks('public.table_to_compress', newer_than=>'1 day'::interval);
-- truncate one compressed chunk
SELECT chunk_schema || '.' || chunk_name as "CHNAME"
FROM timescaledb_information.chunks
WHERE hypertable_name = 'table_to_compress' and hypertable_schema = 'public'
ORDER BY chunk_name LIMIT 1
\gset
SELECT count(*) FROM :CHNAME;
TRUNCATE TABLE :CHNAME;
SELECT count(*) FROM :CHNAME;
-- drop all hypertables' old chunks
SELECT drop_chunks(table_name::regclass, older_than=>'1 day'::interval)
FROM _timescaledb_catalog.hypertable

View File

@ -334,6 +334,29 @@ CALL refresh_continuous_aggregate('cond_10', NULL, NULL);
SELECT * FROM cagg_invals;
SELECT * FROM hyper_invals;
-- Pick the first chunk of conditions to TRUNCATE
SELECT show_chunks AS chunk_to_truncate
FROM show_chunks('conditions')
ORDER BY 1
LIMIT 1 \gset
-- Show the data before truncating one of the chunks
SELECT * FROM :chunk_to_truncate
ORDER BY 1;
-- Truncate one chunk
\if :IS_DISTRIBUTED
-- There is no TRUNCATE implementation for FOREIGN tables yet
\set ON_ERROR_STOP 0
\endif
TRUNCATE TABLE :chunk_to_truncate;
\if :IS_DISTRIBUTED
\set ON_ERROR_STOP 1
\endif
-- Should see new invalidation entries for conditions for the non-distributed case
SELECT * FROM hyper_invals;
-- TRUNCATE the hypertable to invalidate all its continuous aggregates
TRUNCATE conditions;