Remove metadata when dropping chunk

Historically we preserve chunk metadata because the old format of the
Continuous Aggregate has the `chunk_id` column in the materialization
hypertable, so in order to avoid leaving stale chunk ids behind there we
just mark chunks as dropped (instead of deleting their metadata) when dropping them.

In #4269 we introduced a new Continuous Aggregate format that doesn't
store the `chunk_id` in the materialization hypertable anymore, so it's
safe to also remove the metadata when dropping a chunk, as long as all
associated Continuous Aggregates are in the new format.

Also added a post-update SQL script to clean up unnecessary dropped-chunk
metadata in our catalog.

Closes #6570
This commit is contained in:
Fabrízio de Royes Mello 2024-02-08 15:11:17 -03:00
parent 89af50d886
commit 5a359ac660
28 changed files with 1353 additions and 545 deletions

2
.unreleased/pr_6621 Normal file
View File

@ -0,0 +1,2 @@
Fixes: #6621 Remove metadata when dropping chunks
Thanks: @ndjzurawsk For reporting error when dropping chunks

View File

@ -586,6 +586,9 @@ BEGIN
-- execute the migration plan
CALL _timescaledb_functions.cagg_migrate_execute_plan(_cagg_data);
-- Remove chunk metadata when marked as dropped
PERFORM _timescaledb_functions.remove_dropped_chunk_metadata(_cagg_data.raw_hypertable_id);
-- finish the migration plan
UPDATE _timescaledb_catalog.continuous_agg_migrate_plan
SET end_ts = pg_catalog.clock_timestamp()

View File

@ -111,3 +111,59 @@ LANGUAGE SQL AS $$
SET relacl = (SELECT acl FROM cleanacls n WHERE c.oid = n.oid)
WHERE oid IN (SELECT oid FROM badrels)
$$ SET search_path TO pg_catalog, pg_temp;
-- Remove chunk metadata when marked as dropped
CREATE OR REPLACE FUNCTION _timescaledb_functions.remove_dropped_chunk_metadata(_hypertable_id INTEGER)
RETURNS INTEGER LANGUAGE plpgsql AS $$
-- Remove leftover catalog metadata for chunks of the given hypertable that
-- are flagged as dropped and whose backing table no longer exists.
-- Hypertables that still feed an old-format (non-finalized) continuous
-- aggregate are skipped entirely: that format keeps chunk_id references in
-- the materialization hypertable, so the metadata must be preserved.
-- Returns the number of chunks whose metadata was removed.
DECLARE
_chunk_id INTEGER;
_removed INTEGER := 0;
BEGIN
FOR _chunk_id IN
SELECT id FROM _timescaledb_catalog.chunk
WHERE hypertable_id = _hypertable_id
AND dropped IS TRUE
-- only chunks whose relation is really gone; a dropped flag with a
-- still-existing table is left alone
AND NOT EXISTS (
SELECT FROM information_schema.tables
WHERE tables.table_schema = chunk.schema_name
AND tables.table_name = chunk.table_name
)
AND NOT EXISTS (
SELECT FROM _timescaledb_catalog.hypertable
JOIN _timescaledb_catalog.continuous_agg ON continuous_agg.raw_hypertable_id = hypertable.id
WHERE hypertable.id = chunk.hypertable_id
-- for the old caggs format we need to keep chunk metadata for dropped chunks
AND continuous_agg.finalized IS FALSE
)
LOOP
_removed := _removed + 1;
RAISE INFO 'Removing metadata of chunk % from hypertable %', _chunk_id, _hypertable_id;
-- Delete the dimension slices referenced by this chunk's constraints and
-- the constraints themselves in one data-modifying CTE; the constraint
-- rows must go in the same statement since they reference the slices.
-- NOTE(review): assumes slices referenced here are not shared with other
-- live chunks -- confirm, otherwise this delete could be rejected by a
-- foreign key from another chunk's constraint.
WITH _dimension_slice_remove AS (
DELETE FROM _timescaledb_catalog.dimension_slice
USING _timescaledb_catalog.chunk_constraint
WHERE dimension_slice.id = chunk_constraint.dimension_slice_id
AND chunk_constraint.chunk_id = _chunk_id
RETURNING _timescaledb_catalog.dimension_slice.id
)
DELETE FROM _timescaledb_catalog.chunk_constraint
USING _dimension_slice_remove
WHERE chunk_constraint.dimension_slice_id = _dimension_slice_remove.id;
-- per-chunk background-job statistics
DELETE FROM _timescaledb_internal.bgw_policy_chunk_stats
WHERE bgw_policy_chunk_stats.chunk_id = _chunk_id;
-- chunk index metadata
DELETE FROM _timescaledb_catalog.chunk_index
WHERE chunk_index.chunk_id = _chunk_id;
-- compression size bookkeeping; the chunk may appear as either the
-- uncompressed or the compressed side
DELETE FROM _timescaledb_catalog.compression_chunk_size
WHERE compression_chunk_size.chunk_id = _chunk_id
OR compression_chunk_size.compressed_chunk_id = _chunk_id;
-- finally the chunk row itself, plus any row pointing at it as its
-- compressed chunk; must come last so the rows above can still be found
DELETE FROM _timescaledb_catalog.chunk
WHERE chunk.id = _chunk_id
OR chunk.compressed_chunk_id = _chunk_id;
END LOOP;
RETURN _removed;
END;
$$ SET search_path TO pg_catalog, pg_temp;

View File

@ -17,3 +17,61 @@ DROP FUNCTION IF EXISTS _timescaledb_functions.invalidation_process_hypertable_l
DROP FUNCTION IF EXISTS _timescaledb_functions.invalidation_process_hypertable_log(integer,integer,regtype,integer[],bigint[],bigint[],text[]);
DROP FUNCTION IF EXISTS _timescaledb_functions.hypertable_invalidation_log_delete(integer);
-- Remove chunk metadata when marked as dropped
CREATE FUNCTION _timescaledb_functions.remove_dropped_chunk_metadata(_hypertable_id INTEGER)
RETURNS INTEGER LANGUAGE plpgsql AS $$
-- Update-script copy of the function added to latest.sql: removes leftover
-- catalog metadata for chunks of the given hypertable that are flagged as
-- dropped and whose backing table no longer exists. Hypertables that still
-- feed an old-format (non-finalized) continuous aggregate are skipped, as
-- that format keeps chunk_id references that must be preserved.
-- Returns the number of chunks whose metadata was removed.
DECLARE
_chunk_id INTEGER;
_removed INTEGER := 0;
BEGIN
FOR _chunk_id IN
SELECT id FROM _timescaledb_catalog.chunk
WHERE hypertable_id = _hypertable_id
AND dropped IS TRUE
-- only chunks whose relation is really gone; a dropped flag with a
-- still-existing table is left alone
AND NOT EXISTS (
SELECT FROM information_schema.tables
WHERE tables.table_schema = chunk.schema_name
AND tables.table_name = chunk.table_name
)
AND NOT EXISTS (
SELECT FROM _timescaledb_catalog.hypertable
JOIN _timescaledb_catalog.continuous_agg ON continuous_agg.raw_hypertable_id = hypertable.id
WHERE hypertable.id = chunk.hypertable_id
-- for the old caggs format we need to keep chunk metadata for dropped chunks
AND continuous_agg.finalized IS FALSE
)
LOOP
_removed := _removed + 1;
RAISE INFO 'Removing metadata of chunk % from hypertable %', _chunk_id, _hypertable_id;
-- Delete the dimension slices referenced by this chunk's constraints and
-- the constraints themselves in one data-modifying CTE.
WITH _dimension_slice_remove AS (
DELETE FROM _timescaledb_catalog.dimension_slice
USING _timescaledb_catalog.chunk_constraint
WHERE dimension_slice.id = chunk_constraint.dimension_slice_id
AND chunk_constraint.chunk_id = _chunk_id
RETURNING _timescaledb_catalog.dimension_slice.id
)
DELETE FROM _timescaledb_catalog.chunk_constraint
USING _dimension_slice_remove
WHERE chunk_constraint.dimension_slice_id = _dimension_slice_remove.id;
-- per-chunk background-job statistics
DELETE FROM _timescaledb_internal.bgw_policy_chunk_stats
WHERE bgw_policy_chunk_stats.chunk_id = _chunk_id;
-- chunk index metadata
DELETE FROM _timescaledb_catalog.chunk_index
WHERE chunk_index.chunk_id = _chunk_id;
-- compression size bookkeeping; the chunk may appear as either side
DELETE FROM _timescaledb_catalog.compression_chunk_size
WHERE compression_chunk_size.chunk_id = _chunk_id
OR compression_chunk_size.compressed_chunk_id = _chunk_id;
-- finally the chunk row itself, plus any row pointing at it as its
-- compressed chunk; must come last so the rows above can still be found
DELETE FROM _timescaledb_catalog.chunk
WHERE chunk.id = _chunk_id
OR chunk.compressed_chunk_id = _chunk_id;
END LOOP;
RETURN _removed;
END;
$$ SET search_path TO pg_catalog, pg_temp;
-- One-time backfill run during the extension update: clean up metadata of
-- already-dropped chunks on every existing hypertable.
SELECT _timescaledb_functions.remove_dropped_chunk_metadata(id) AS chunks_metadata_removed
FROM _timescaledb_catalog.hypertable;

View File

@ -0,0 +1 @@
DROP FUNCTION IF EXISTS _timescaledb_functions.remove_dropped_chunk_metadata(INTEGER);

View File

@ -3946,6 +3946,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
}
}
bool all_caggs_finalized = ts_continuous_agg_hypertable_all_finalized(hypertable_id);
List *dropped_chunk_names = NIL;
for (uint64 i = 0; i < num_chunks; i++)
{
@ -3968,7 +3969,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
chunk_name = psprintf("%s.%s", schema_name, table_name);
dropped_chunk_names = lappend(dropped_chunk_names, chunk_name);
if (has_continuous_aggs)
if (has_continuous_aggs && !all_caggs_finalized)
ts_chunk_drop_preserve_catalog_row(chunks + i, DROP_RESTRICT, log_level);
else
ts_chunk_drop(chunks + i, DROP_RESTRICT, log_level);

View File

@ -568,6 +568,33 @@ ts_continuous_agg_hypertable_status(int32 hypertable_id)
return status;
}
/*
 * Check whether every continuous aggregate defined on the given raw
 * hypertable uses the new (finalized) format.
 *
 * Scans the continuous_agg catalog for entries whose raw_hypertable_id
 * matches and returns false as soon as a non-finalized one is found;
 * returns true otherwise (including when no continuous aggregate exists).
 */
TSDLLEXPORT bool
ts_continuous_agg_hypertable_all_finalized(int32 raw_hypertable_id)
{
	bool result = true;
	ScanIterator it =
		ts_scan_iterator_create(CONTINUOUS_AGG, AccessShareLock, CurrentMemoryContext);

	init_scan_by_raw_hypertable_id(&it, raw_hypertable_id);
	ts_scanner_foreach(&it)
	{
		FormData_continuous_agg form;

		continuous_agg_formdata_fill(&form, ts_scan_iterator_tuple_info(&it));

		if (!form.finalized)
		{
			/* one old-format cagg is enough to decide; stop scanning */
			result = false;
			break;
		}
	}
	ts_scan_iterator_close(&it);

	return result;
}
TSDLLEXPORT List *
ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id)
{

View File

@ -165,6 +165,7 @@ extern TSDLLEXPORT void ts_materialization_invalidation_log_delete_inner(int32 m
extern TSDLLEXPORT ContinuousAggHypertableStatus
ts_continuous_agg_hypertable_status(int32 hypertable_id);
extern TSDLLEXPORT bool ts_continuous_agg_hypertable_all_finalized(int32 raw_hypertable_id);
extern TSDLLEXPORT List *ts_continuous_aggs_find_by_raw_table_id(int32 raw_hypertable_id);
extern TSDLLEXPORT ContinuousAgg *ts_continuous_agg_find_by_view_name(const char *schema,
const char *name,

View File

@ -45,12 +45,6 @@ SELECT count(*)
-- The list of tables configured to be dumped.
SELECT unnest(extconfig)::regclass::text, unnest(extcondition) FROM pg_extension WHERE extname = 'timescaledb' ORDER BY 1;
-- Show dropped chunks
SELECT id, hypertable_id, schema_name, table_name, dropped
FROM _timescaledb_catalog.chunk c
WHERE c.dropped
ORDER BY c.id, c.hypertable_id;
-- Show chunks that are not dropped and include owner in the output
SELECT c.id, c.hypertable_id, c.schema_name, c.table_name, c.dropped, cl.relowner::regrole
FROM _timescaledb_catalog.chunk c
@ -58,7 +52,11 @@ INNER JOIN pg_class cl ON (cl.oid=format('%I.%I', schema_name, table_name)::regc
WHERE NOT c.dropped
ORDER BY c.id, c.hypertable_id;
SELECT * FROM _timescaledb_catalog.chunk_constraint ORDER BY chunk_id, dimension_slice_id, constraint_name;
SELECT chunk_constraint.* FROM _timescaledb_catalog.chunk_constraint
JOIN _timescaledb_catalog.chunk ON chunk.id = chunk_constraint.chunk_id
WHERE NOT chunk.dropped
ORDER BY chunk_constraint.chunk_id, chunk_constraint.dimension_slice_id, chunk_constraint.constraint_name;
SELECT index_name FROM _timescaledb_catalog.chunk_index ORDER BY index_name;
-- Show attnum of all regclass objects belonging to our extension

View File

@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
30 | 30
(1 row)
--we see the chunks row with the dropped flags set;
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------
13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f
14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f
15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f
(3 rows)
--chunks are removed
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-------------+------------+---------------------+---------+--------+-----------
(0 rows)
--still see data in the view
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
(4 rows)
@ -681,7 +678,7 @@ FROM timescaledb_information.chunks
WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_11_20_chunk | 0 | 100
_hyper_11_23_chunk | 0 | 100
(1 row)
\set ON_ERROR_STOP 0
@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(6 rows)
-- Pick the second chunk as the one to drop
@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(5 rows)
-- Data is no longer in the table but still in the view
@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
SELECT drop_chunks('drop_chunks_table', older_than=>30);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_10_13_chunk
_timescaledb_internal._hyper_10_15_chunk
_timescaledb_internal._hyper_10_18_chunk
_timescaledb_internal._hyper_10_20_chunk
(2 rows)
-- Verify that the chunks are dropped
@ -811,8 +808,8 @@ ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(3 rows)
-- The continuous aggregate should be refreshed in the regions covered
@ -906,8 +903,8 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2;
@ -921,14 +918,14 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
DROP MATERIALIZED VIEW whatever_view_1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk
DROP MATERIALIZED VIEW whatever_view_2;
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk
-- test bucket width expressions on integer hypertables
CREATE TABLE metrics_int2 (
time int2 NOT NULL,
@ -1128,7 +1125,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4;
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk
-- Case 2: DROP CASCADE should have similar behaviour as DROP
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1138,7 +1135,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4 CASCADE;
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk
-- Case 3: require CASCADE in case of dependent object
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it
-- Case 4: DROP CASCADE with dependency
DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
NOTICE: drop cascades to view see_cagg
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk
-- Test DROP SCHEMA CASCADE with continuous aggregates
--
-- Issue: #2350
@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20;
--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
-- test for materialized_only + compress combinations (real time aggs enabled initially)
DROP MATERIALIZED VIEW test_setting_cagg;
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_setting_cagg"
@ -1770,8 +1767,8 @@ Indexes:
"_materialized_hypertable_45_bucket_idx" btree (bucket DESC)
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
Child tables: _timescaledb_internal._hyper_45_52_chunk,
_timescaledb_internal._hyper_45_53_chunk
Child tables: _timescaledb_internal._hyper_45_55_chunk,
_timescaledb_internal._hyper_45_56_chunk
\d+ 'cashflows'
View "public.cashflows"

View File

@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
30 | 30
(1 row)
--we see the chunks row with the dropped flags set;
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------
13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f
14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f
15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f
(3 rows)
--chunks are removed
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-------------+------------+---------------------+---------+--------+-----------
(0 rows)
--still see data in the view
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
(4 rows)
@ -681,7 +678,7 @@ FROM timescaledb_information.chunks
WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_11_20_chunk | 0 | 100
_hyper_11_23_chunk | 0 | 100
(1 row)
\set ON_ERROR_STOP 0
@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(6 rows)
-- Pick the second chunk as the one to drop
@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(5 rows)
-- Data is no longer in the table but still in the view
@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
SELECT drop_chunks('drop_chunks_table', older_than=>30);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_10_13_chunk
_timescaledb_internal._hyper_10_15_chunk
_timescaledb_internal._hyper_10_18_chunk
_timescaledb_internal._hyper_10_20_chunk
(2 rows)
-- Verify that the chunks are dropped
@ -811,8 +808,8 @@ ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(3 rows)
-- The continuous aggregate should be refreshed in the regions covered
@ -906,8 +903,8 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2;
@ -921,14 +918,14 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
DROP MATERIALIZED VIEW whatever_view_1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk
DROP MATERIALIZED VIEW whatever_view_2;
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk
-- test bucket width expressions on integer hypertables
CREATE TABLE metrics_int2 (
time int2 NOT NULL,
@ -1128,7 +1125,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4;
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk
-- Case 2: DROP CASCADE should have similar behaviour as DROP
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1138,7 +1135,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4 CASCADE;
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk
-- Case 3: require CASCADE in case of dependent object
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it
-- Case 4: DROP CASCADE with dependency
DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
NOTICE: drop cascades to view see_cagg
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk
-- Test DROP SCHEMA CASCADE with continuous aggregates
--
-- Issue: #2350
@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20;
--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
-- test for materialized_only + compress combinations (real time aggs enabled initially)
DROP MATERIALIZED VIEW test_setting_cagg;
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_setting_cagg"
@ -1770,8 +1767,8 @@ Indexes:
"_materialized_hypertable_45_bucket_idx" btree (bucket DESC)
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
Child tables: _timescaledb_internal._hyper_45_52_chunk,
_timescaledb_internal._hyper_45_53_chunk
Child tables: _timescaledb_internal._hyper_45_55_chunk,
_timescaledb_internal._hyper_45_56_chunk
\d+ 'cashflows'
View "public.cashflows"

View File

@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
30 | 30
(1 row)
--we see the chunks row with the dropped flags set;
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------
13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f
14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f
15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f
(3 rows)
--chunks are removed
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-------------+------------+---------------------+---------+--------+-----------
(0 rows)
--still see data in the view
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
(4 rows)
@ -681,7 +678,7 @@ FROM timescaledb_information.chunks
WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_11_20_chunk | 0 | 100
_hyper_11_23_chunk | 0 | 100
(1 row)
\set ON_ERROR_STOP 0
@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(6 rows)
-- Pick the second chunk as the one to drop
@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(5 rows)
-- Data is no longer in the table but still in the view
@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
SELECT drop_chunks('drop_chunks_table', older_than=>30);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_10_13_chunk
_timescaledb_internal._hyper_10_15_chunk
_timescaledb_internal._hyper_10_18_chunk
_timescaledb_internal._hyper_10_20_chunk
(2 rows)
-- Verify that the chunks are dropped
@ -811,8 +808,8 @@ ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(3 rows)
-- The continuous aggregate should be refreshed in the regions covered
@ -906,8 +903,8 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2;
@ -921,14 +918,14 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
DROP MATERIALIZED VIEW whatever_view_1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk
DROP MATERIALIZED VIEW whatever_view_2;
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk
-- test bucket width expressions on integer hypertables
CREATE TABLE metrics_int2 (
time int2 NOT NULL,
@ -1128,7 +1125,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4;
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk
-- Case 2: DROP CASCADE should have similar behaviour as DROP
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1138,7 +1135,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4 CASCADE;
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk
-- Case 3: require CASCADE in case of dependent object
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it
-- Case 4: DROP CASCADE with dependency
DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
NOTICE: drop cascades to view see_cagg
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk
-- Test DROP SCHEMA CASCADE with continuous aggregates
--
-- Issue: #2350
@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20;
--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
-- test for materialized_only + compress combinations (real time aggs enabled initially)
DROP MATERIALIZED VIEW test_setting_cagg;
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_setting_cagg"
@ -1770,8 +1767,8 @@ Indexes:
"_materialized_hypertable_45_bucket_idx" btree (bucket DESC)
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
Child tables: _timescaledb_internal._hyper_45_52_chunk,
_timescaledb_internal._hyper_45_53_chunk
Child tables: _timescaledb_internal._hyper_45_55_chunk,
_timescaledb_internal._hyper_45_56_chunk
\d+ 'cashflows'
View "public.cashflows"

View File

@ -573,14 +573,11 @@ SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
30 | 30
(1 row)
--we see the chunks row with the dropped flags set;
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------
13 | 10 | _timescaledb_internal | _hyper_10_13_chunk | | t | 0 | f
14 | 10 | _timescaledb_internal | _hyper_10_14_chunk | | t | 0 | f
15 | 10 | _timescaledb_internal | _hyper_10_15_chunk | | t | 0 | f
(3 rows)
--chunks are removed
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
----+---------------+-------------+------------+---------------------+---------+--------+-----------
(0 rows)
--still see data in the view
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
@ -634,9 +631,9 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
(4 rows)
@ -681,7 +678,7 @@ FROM timescaledb_information.chunks
WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_11_20_chunk | 0 | 100
_hyper_11_23_chunk | 0 | 100
(1 row)
\set ON_ERROR_STOP 0
@ -702,12 +699,12 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_14_chunk | 10 | 20
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_19_chunk | 10 | 20
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(6 rows)
-- Pick the second chunk as the one to drop
@ -766,11 +763,11 @@ WHERE hypertable_name = 'drop_chunks_table'
ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_13_chunk | 0 | 10
_hyper_10_15_chunk | 20 | 30
_hyper_10_18_chunk | 0 | 10
_hyper_10_20_chunk | 20 | 30
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(5 rows)
-- Data is no longer in the table but still in the view
@ -799,8 +796,8 @@ CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
SELECT drop_chunks('drop_chunks_table', older_than=>30);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_10_13_chunk
_timescaledb_internal._hyper_10_15_chunk
_timescaledb_internal._hyper_10_18_chunk
_timescaledb_internal._hyper_10_20_chunk
(2 rows)
-- Verify that the chunks are dropped
@ -811,8 +808,8 @@ ORDER BY 2,3;
chunk_name | range_start_integer | range_end_integer
--------------------+---------------------+-------------------
_hyper_10_16_chunk | 30 | 40
_hyper_10_18_chunk | 40 | 50
_hyper_10_19_chunk | 50 | 60
_hyper_10_21_chunk | 40 | 50
_hyper_10_22_chunk | 50 | 60
(3 rows)
-- The continuous aggregate should be refreshed in the regions covered
@ -906,8 +903,8 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_24_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk |
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2;
@ -921,14 +918,14 @@ SELECT user_view,
AND user_view::text LIKE 'whatever_view%';
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
-----------------+-----------------------------+----------------+--------------------+------------------
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_24_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_25_chunk | tablespace1
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
(2 rows)
DROP MATERIALIZED VIEW whatever_view_1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_24_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk
DROP MATERIALIZED VIEW whatever_view_2;
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_25_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk
-- test bucket width expressions on integer hypertables
CREATE TABLE metrics_int2 (
time int2 NOT NULL,
@ -1128,7 +1125,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4;
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_37_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk
-- Case 2: DROP CASCADE should have similar behaviour as DROP
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1138,7 +1135,7 @@ SUM(value), COUNT(value)
FROM conditionsnm GROUP BY bucket WITH DATA;
NOTICE: refreshing continuous aggregate "conditionsnm_4"
DROP materialized view conditionsnm_4 CASCADE;
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_38_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk
-- Case 3: require CASCADE in case of dependent object
CREATE MATERIALIZED VIEW conditionsnm_4
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
@ -1155,7 +1152,7 @@ ERROR: cannot drop view conditionsnm_4 because other objects depend on it
-- Case 4: DROP CASCADE with dependency
DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
NOTICE: drop cascades to view see_cagg
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_39_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk
-- Test DROP SCHEMA CASCADE with continuous aggregates
--
-- Issue: #2350
@ -1580,7 +1577,7 @@ DELETE FROM test_setting WHERE val = 20;
--TEST test with multiple settings on continuous aggregates with real time aggregates turned off initially --
-- test for materialized_only + compress combinations (real time aggs enabled initially)
DROP MATERIALIZED VIEW test_setting_cagg;
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_47_chunk
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_setting_cagg"
@ -1770,8 +1767,8 @@ Indexes:
"_materialized_hypertable_45_bucket_idx" btree (bucket DESC)
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
Child tables: _timescaledb_internal._hyper_45_52_chunk,
_timescaledb_internal._hyper_45_53_chunk
Child tables: _timescaledb_internal._hyper_45_55_chunk,
_timescaledb_internal._hyper_45_56_chunk
\d+ 'cashflows'
View "public.cashflows"

View File

@ -792,16 +792,69 @@ SELECT execute_migration();
psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
--
-- test dropping chunks
--
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- drop 1 chunk
\if :IS_TIME_DIMENSION
SELECT drop_chunks('conditions', older_than => CAST('2022-01-08 00:00:00-00' AS :TIME_DIMENSION_DATATYPE));
\else
SELECT drop_chunks('conditions', older_than => 10);
drop_chunks
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
(1 row)
\endif
-- now we have one chunk marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------------+---------
_hyper_1_1_chunk | t
(1 row)
-- this migration should remove the chunk metadata marked as dropped
CALL cagg_migrate('conditions_summary_weekly', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:328: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly', CAST('1008' AS integer), NULL);"
psql:include/cagg_migrate_common.sql:328: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:328: INFO: Removing metadata of chunk 1 from hypertable 1
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- cleanup
DROP FUNCTION execute_migration();
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;
REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER;
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
psql:include/cagg_migrate_common.sql:342: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 10 other objects
psql:include/cagg_migrate_common.sql:343: NOTICE: drop cascades to 10 other objects
DROP MATERIALIZED VIEW conditions_summary_weekly;
psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:344: NOTICE: drop cascades to 6 other objects
DROP TABLE conditions;
SELECT _timescaledb_functions.start_background_workers();
start_background_workers
@ -1268,23 +1321,23 @@ SELECT * FROM conditions_summary_daily_new;
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_7_237_chunk
_timescaledb_internal._hyper_7_238_chunk
_timescaledb_internal._hyper_7_239_chunk
_timescaledb_internal._hyper_7_240_chunk
_timescaledb_internal._hyper_7_241_chunk
_timescaledb_internal._hyper_7_242_chunk
_timescaledb_internal._hyper_7_243_chunk
_timescaledb_internal._hyper_7_244_chunk
_timescaledb_internal._hyper_7_245_chunk
_timescaledb_internal._hyper_7_246_chunk
_timescaledb_internal._hyper_7_247_chunk
_timescaledb_internal._hyper_7_248_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_255_chunk
_timescaledb_internal._hyper_11_256_chunk
_timescaledb_internal._hyper_11_257_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_259_chunk
_timescaledb_internal._hyper_11_260_chunk
_timescaledb_internal._hyper_11_261_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_263_chunk
_timescaledb_internal._hyper_11_264_chunk
_timescaledb_internal._hyper_11_265_chunk
_timescaledb_internal._hyper_11_266_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
@ -1576,16 +1629,69 @@ SELECT execute_migration();
psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
--
-- test dropping chunks
--
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- drop 1 chunk
\if :IS_TIME_DIMENSION
SELECT drop_chunks('conditions', older_than => CAST('2022-01-08 00:00:00-00' AS :TIME_DIMENSION_DATATYPE));
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_5_190_chunk
(1 row)
\else
SELECT drop_chunks('conditions', older_than => 10);
\endif
-- now we have one chunk marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
--------------------+---------
_hyper_5_190_chunk | t
(1 row)
-- this migration should remove the chunk metadata marked as dropped
CALL cagg_migrate('conditions_summary_weekly', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:328: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly', CAST('Mon Jan 02 00:00:00 2023' AS timestamp without time zone), NULL);"
psql:include/cagg_migrate_common.sql:328: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:328: INFO: Removing metadata of chunk 190 from hypertable 5
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- cleanup
DROP FUNCTION execute_migration();
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;
REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER;
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
psql:include/cagg_migrate_common.sql:342: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:343: NOTICE: drop cascades to 6 other objects
DROP MATERIALIZED VIEW conditions_summary_weekly;
psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:344: NOTICE: drop cascades to 6 other objects
DROP TABLE conditions;
SELECT _timescaledb_functions.start_background_workers();
start_background_workers
@ -1986,13 +2092,13 @@ WHERE
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_20.bucket,
_materialized_hypertable_20.min,
_materialized_hypertable_20.max,
_materialized_hypertable_20.avg,
_materialized_hypertable_20.sum
FROM _timescaledb_internal._materialized_hypertable_20
WHERE _materialized_hypertable_20.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp with time zone)
SELECT _materialized_hypertable_21.bucket,
_materialized_hypertable_21.min,
_materialized_hypertable_21.max,
_materialized_hypertable_21.avg,
_materialized_hypertable_21.sum
FROM _timescaledb_internal._materialized_hypertable_21
WHERE _materialized_hypertable_21.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(21)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
@ -2000,7 +2106,7 @@ UNION ALL
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(20)), '-infinity'::timestamp with time zone)
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(21)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
SELECT *
@ -2010,9 +2116,9 @@ AND hypertable_name = :'NEW_MAT_TABLE_NAME'
AND job_id >= 1000;
job_id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | config | next_start | initial_start | hypertable_schema | hypertable_name | check_schema | check_name
--------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------------------------------------------------------------------------+------------+---------------+-----------------------+-----------------------------+------------------------+-------------------------------------------
1029 | Compression Policy [1029] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 20, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_compression_check
1028 | Refresh Continuous Aggregate Policy [1028] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
1027 | Retention Policy [1027] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 20} | | | _timescaledb_internal | _materialized_hypertable_20 | _timescaledb_functions | policy_retention_check
1029 | Compression Policy [1029] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | {"hypertable_id": 21, "compress_after": "@ 45 days"} | | | _timescaledb_internal | _materialized_hypertable_21 | _timescaledb_functions | policy_compression_check
1028 | Refresh Continuous Aggregate Policy [1028] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 21} | | | _timescaledb_internal | _materialized_hypertable_21 | _timescaledb_functions | policy_refresh_continuous_aggregate_check
1027 | Retention Policy [1027] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | {"drop_after": "@ 30 days", "hypertable_id": 21} | | | _timescaledb_internal | _materialized_hypertable_21 | _timescaledb_functions | policy_retention_check
(3 rows)
SELECT mat_hypertable_id, step_id, status, type, config FROM _timescaledb_catalog.continuous_agg_migrate_plan_step ORDER BY step_id;
@ -2046,23 +2152,23 @@ SELECT * FROM conditions_summary_daily_new;
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily') c ORDER BY c::regclass::text;
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_344_chunk
_timescaledb_internal._hyper_11_345_chunk
_timescaledb_internal._hyper_11_346_chunk
_timescaledb_internal._hyper_11_347_chunk
_timescaledb_internal._hyper_11_348_chunk
_timescaledb_internal._hyper_11_349_chunk
_timescaledb_internal._hyper_11_356_chunk
_timescaledb_internal._hyper_11_357_chunk
_timescaledb_internal._hyper_11_358_chunk
_timescaledb_internal._hyper_11_359_chunk
_timescaledb_internal._hyper_11_360_chunk
_timescaledb_internal._hyper_11_361_chunk
(6 rows)
SELECT compress_chunk(c) FROM show_chunks('conditions_summary_daily_new') c ORDER BY c::regclass::text;
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_20_362_chunk
_timescaledb_internal._hyper_20_363_chunk
_timescaledb_internal._hyper_20_364_chunk
_timescaledb_internal._hyper_20_365_chunk
_timescaledb_internal._hyper_20_366_chunk
_timescaledb_internal._hyper_20_367_chunk
_timescaledb_internal._hyper_21_374_chunk
_timescaledb_internal._hyper_21_375_chunk
_timescaledb_internal._hyper_21_376_chunk
_timescaledb_internal._hyper_21_377_chunk
_timescaledb_internal._hyper_21_378_chunk
_timescaledb_internal._hyper_21_379_chunk
(6 rows)
-- check migrated data after compression. should return 0 (zero) rows
@ -2106,13 +2212,13 @@ psql:include/cagg_migrate_common.sql:181: WARNING: refresh the continuous aggre
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_22.bucket,
_materialized_hypertable_22.min,
_materialized_hypertable_22.max,
_materialized_hypertable_22.avg,
_materialized_hypertable_22.sum
FROM _timescaledb_internal._materialized_hypertable_22
WHERE _materialized_hypertable_22.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp with time zone)
SELECT _materialized_hypertable_23.bucket,
_materialized_hypertable_23.min,
_materialized_hypertable_23.max,
_materialized_hypertable_23.avg,
_materialized_hypertable_23.sum
FROM _timescaledb_internal._materialized_hypertable_23
WHERE _materialized_hypertable_23.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(23)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
@ -2120,7 +2226,7 @@ UNION ALL
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(22)), '-infinity'::timestamp with time zone)
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(23)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
-- cagg with the old format because it was overriden
@ -2161,9 +2267,9 @@ psql:include/cagg_migrate_common.sql:188: ERROR: relation "conditions_summary_d
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1030 | Retention Policy [1030] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 22 | {"drop_after": "@ 30 days", "hypertable_id": 22} | _timescaledb_functions | policy_retention_check |
public | conditions_summary_daily | 1031 | Refresh Continuous Aggregate Policy [1031] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 22 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 22} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1032 | Compression Policy [1032] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 22 | {"hypertable_id": 22, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
public | conditions_summary_daily | 1030 | Retention Policy [1030] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 23 | {"drop_after": "@ 30 days", "hypertable_id": 23} | _timescaledb_functions | policy_retention_check |
public | conditions_summary_daily | 1031 | Refresh Continuous Aggregate Policy [1031] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 23 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 23} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1032 | Compression Policy [1032] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 23 | {"hypertable_id": 23, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
(3 rows)
-- should return the old cagg jobs
@ -2214,13 +2320,13 @@ psql:include/cagg_migrate_common.sql:203: NOTICE: job 1024 not found, skipping
avg | numeric | | | | main |
sum | numeric | | | | main |
View definition:
SELECT _materialized_hypertable_24.bucket,
_materialized_hypertable_24.min,
_materialized_hypertable_24.max,
_materialized_hypertable_24.avg,
_materialized_hypertable_24.sum
FROM _timescaledb_internal._materialized_hypertable_24
WHERE _materialized_hypertable_24.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp with time zone)
SELECT _materialized_hypertable_25.bucket,
_materialized_hypertable_25.min,
_materialized_hypertable_25.max,
_materialized_hypertable_25.avg,
_materialized_hypertable_25.sum
FROM _timescaledb_internal._materialized_hypertable_25
WHERE _materialized_hypertable_25.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(25)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
min(conditions.temperature) AS min,
@ -2228,7 +2334,7 @@ UNION ALL
avg(conditions.temperature) AS avg,
sum(conditions.temperature) AS sum
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(24)), '-infinity'::timestamp with time zone)
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(25)), '-infinity'::timestamp with time zone)
GROUP BY (time_bucket('@ 1 day'::interval, conditions."time"));
\set ON_ERROR_STOP 0
@ -2243,9 +2349,9 @@ psql:include/cagg_migrate_common.sql:210: ERROR: relation "conditions_summary_d
SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_daily';
schema | name | id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
--------+--------------------------+------+--------------------------------------------+-------------------+-------------+-------------+--------------+------------------------+-------------------------------------+--------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+-------------------------------------------+----------
public | conditions_summary_daily | 1033 | Retention Policy [1033] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 24 | {"drop_after": "@ 30 days", "hypertable_id": 24} | _timescaledb_functions | policy_retention_check |
public | conditions_summary_daily | 1034 | Refresh Continuous Aggregate Policy [1034] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 24 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 24} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1035 | Compression Policy [1035] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 24 | {"hypertable_id": 24, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
public | conditions_summary_daily | 1033 | Retention Policy [1033] | @ 1 day | @ 5 mins | -1 | @ 5 mins | _timescaledb_functions | policy_retention | cluster_super_user | t | f | | 25 | {"drop_after": "@ 30 days", "hypertable_id": 25} | _timescaledb_functions | policy_retention_check |
public | conditions_summary_daily | 1034 | Refresh Continuous Aggregate Policy [1034] | @ 1 hour | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_refresh_continuous_aggregate | cluster_super_user | t | f | | 25 | {"end_offset": "@ 1 day", "start_offset": "@ 30 days", "mat_hypertable_id": 25} | _timescaledb_functions | policy_refresh_continuous_aggregate_check |
public | conditions_summary_daily | 1035 | Compression Policy [1035] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | cluster_super_user | t | f | | 25 | {"hypertable_id": 25, "compress_after": "@ 45 days"} | _timescaledb_functions | policy_compression_check |
(3 rows)
-- should return no rows because the old cagg was removed
@ -2354,16 +2460,69 @@ SELECT execute_migration();
psql:include/cagg_migrate_common.sql:296: ERROR: invalid transaction termination
ROLLBACK;
\set ON_ERROR_STOP 1
--
-- test dropping chunks
--
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- drop 1 chunk
\if :IS_TIME_DIMENSION
SELECT drop_chunks('conditions', older_than => CAST('2022-01-08 00:00:00-00' AS :TIME_DIMENSION_DATATYPE));
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_9_303_chunk
(1 row)
\else
SELECT drop_chunks('conditions', older_than => 10);
\endif
-- now we have one chunk marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
--------------------+---------
_hyper_9_303_chunk | t
(1 row)
-- this migration should remove the chunk metadata marked as dropped
CALL cagg_migrate('conditions_summary_weekly', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:328: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_weekly', CAST('Mon Jan 02 00:00:00 2023' AS timestamp with time zone), NULL);"
psql:include/cagg_migrate_common.sql:328: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:328: INFO: Removing metadata of chunk 303 from hypertable 9
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
chunk_name | dropped
------------+---------
(0 rows)
-- cleanup
DROP FUNCTION execute_migration();
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;
REVOKE USAGE ON SEQUENCE _timescaledb_catalog.continuous_agg_migrate_plan_step_step_id_seq FROM :ROLE_DEFAULT_PERM_USER;
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:304: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
psql:include/cagg_migrate_common.sql:342: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
DROP MATERIALIZED VIEW conditions_summary_daily;
psql:include/cagg_migrate_common.sql:305: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:343: NOTICE: drop cascades to 6 other objects
DROP MATERIALIZED VIEW conditions_summary_weekly;
psql:include/cagg_migrate_common.sql:306: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:344: NOTICE: drop cascades to 6 other objects
DROP TABLE conditions;
SELECT _timescaledb_functions.start_background_workers();
start_background_workers

View File

@ -482,3 +482,118 @@ SELECT * FROM cagg3;
CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1;
ERROR: continuous aggregate view must include a valid time bucket function
\set ON_ERROR_STOP 1
--
-- drop chunks tests
--
-- should return 4 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_17_chunk | 0 | f |
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(4 rows)
-- all caggs in the new format (finalized=true)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | t
cagg2 | t
cagg3 | t
(3 rows)
-- dropping chunk should also remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_17_chunk
(1 row)
-- should return 3 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- let's update the catalog to fake an old format cagg (finalized=false)
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- cagg1 now is a fake old format (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | f
cagg2 | t
cagg3 | t
(3 rows)
-- cagg1 now is in the old format (finalized=false)
-- dropping chunk should NOT remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_18_chunk
(1 row)
-- should return 3 chunks and one of them should be marked as dropped
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- remove the fake old format cagg
DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk
-- no more old format caggs (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg2 | t
cagg3 | t
(2 rows)
-- dropping chunk should remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_19_chunk
(1 row)
-- should return 2 chunks and one of them should be marked as dropped
-- because we dropped a chunk earlier while an old format cagg existed
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_20_chunk | 0 | f |
(2 rows)

View File

@ -482,3 +482,118 @@ SELECT * FROM cagg3;
CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1;
ERROR: continuous aggregate view must include a valid time bucket function
\set ON_ERROR_STOP 1
--
-- drop chunks tests
--
-- should return 4 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_17_chunk | 0 | f |
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(4 rows)
-- all caggs in the new format (finalized=true)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | t
cagg2 | t
cagg3 | t
(3 rows)
-- dropping chunk should also remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_17_chunk
(1 row)
-- should return 3 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- let's update the catalog to fake an old format cagg (finalized=false)
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- cagg1 now is a fake old format (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | f
cagg2 | t
cagg3 | t
(3 rows)
-- cagg1 now is in the old format (finalized=false)
-- dropping chunk should NOT remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_18_chunk
(1 row)
-- should return 3 chunks and one of them should be marked as dropped
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- remove the fake old format cagg
DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk
-- no more old format caggs (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg2 | t
cagg3 | t
(2 rows)
-- dropping chunk should remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_19_chunk
(1 row)
-- should return 2 chunks and one of them should be marked as dropped
-- because we dropped a chunk earlier while an old format cagg existed
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_20_chunk | 0 | f |
(2 rows)

View File

@ -482,3 +482,118 @@ SELECT * FROM cagg3;
CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1;
ERROR: continuous aggregate view must include a valid time bucket function
\set ON_ERROR_STOP 1
--
-- drop chunks tests
--
-- should return 4 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_17_chunk | 0 | f |
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(4 rows)
-- all caggs in the new format (finalized=true)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | t
cagg2 | t
cagg3 | t
(3 rows)
-- dropping chunk should also remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_17_chunk
(1 row)
-- should return 3 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- let's update the catalog to fake an old format cagg (finalized=false)
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- cagg1 now is a fake old format (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | f
cagg2 | t
cagg3 | t
(3 rows)
-- cagg1 now is in the old format (finalized=false)
-- dropping chunk should NOT remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_18_chunk
(1 row)
-- should return 3 chunks and one of them should be marked as dropped
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- remove the fake old format cagg
DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk
-- no more old format caggs (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg2 | t
cagg3 | t
(2 rows)
-- dropping chunk should remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_19_chunk
(1 row)
-- should return 2 chunks and one of them should be marked as dropped
-- because we dropped a chunk earlier while an old format cagg existed
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_20_chunk | 0 | f |
(2 rows)

View File

@ -482,3 +482,118 @@ SELECT * FROM cagg3;
CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1;
ERROR: continuous aggregate view must include a valid time bucket function
\set ON_ERROR_STOP 1
--
-- drop chunks tests
--
-- should return 4 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_17_chunk | 0 | f |
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(4 rows)
-- all caggs in the new format (finalized=true)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | t
cagg2 | t
cagg3 | t
(3 rows)
-- dropping chunk should also remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_17_chunk
(1 row)
-- should return 3 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | f |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- let's update the catalog to fake an old format cagg (finalized=false)
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- cagg1 now is a fake old format (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg1 | f
cagg2 | t
cagg3 | t
(3 rows)
-- cagg1 now is in the old format (finalized=false)
-- dropping chunk should NOT remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_18_chunk
(1 row)
-- should return 3 chunks and one of them should be marked as dropped
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_19_chunk | 0 | f |
_hyper_11_20_chunk | 0 | f |
(3 rows)
-- remove the fake old format cagg
DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_12_21_chunk
-- no more old format caggs (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
user_view_name | finalized
----------------+-----------
cagg2 | t
cagg3 | t
(2 rows)
-- dropping chunk should remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz);
drop_chunks
------------------------------------------
_timescaledb_internal._hyper_11_19_chunk
(1 row)
-- should return 2 chunks and one of them should be marked as dropped
-- because we dropped a chunk earlier while an old format cagg existed
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_11_18_chunk | 0 | t |
_hyper_11_20_chunk | 0 | f |
(2 rows)

View File

@ -1592,11 +1592,9 @@ SELECT
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_13_33_chunk | 0 | t |
_hyper_13_34_chunk | 0 | t |
(2 rows)
chunk_name | chunk_status | dropped | comp_id
------------+--------------+---------+---------
(0 rows)
SELECT "time", cnt FROM cagg_expr ORDER BY time LIMIT 5;
time | cnt
@ -1625,8 +1623,8 @@ WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
chunk_name | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
_hyper_13_33_chunk | 1 | f | 64
_hyper_13_34_chunk | 1 | f | 65
_hyper_13_64_chunk | 1 | f | 66
_hyper_13_65_chunk | 1 | f | 67
(2 rows)
SELECT count(*) FROM metrics;
@ -1653,7 +1651,7 @@ INSERT INTO local_seq SELECT '2000-01-01', generate_series(5,8);
SELECT compress_chunk(c) FROM show_chunks('local_seq') c;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_33_66_chunk
_timescaledb_internal._hyper_33_68_chunk
(1 row)
SELECT
@ -1745,7 +1743,7 @@ SELECT set_chunk_time_interval('f_sensor_data', INTERVAL '1 year');
SELECT * FROM _timescaledb_functions.create_chunk('f_sensor_data',' {"time": [181900977000000, 515024000000000]}');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created
----------+---------------+-----------------------+--------------------+---------+----------------------------------------------+---------
71 | 37 | _timescaledb_internal | _hyper_37_71_chunk | r | {"time": [181900977000000, 515024000000000]} | t
73 | 37 | _timescaledb_internal | _hyper_37_73_chunk | r | {"time": [181900977000000, 515024000000000]} | t
(1 row)
INSERT INTO f_sensor_data
@ -1763,7 +1761,7 @@ ALTER TABLE f_sensor_data SET (timescaledb.compress, timescaledb.compress_segmen
SELECT compress_chunk(i) FROM show_chunks('f_sensor_data') i;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_37_71_chunk
_timescaledb_internal._hyper_37_73_chunk
(1 row)
CALL reindex_compressed_hypertable('f_sensor_data');
@ -1805,16 +1803,16 @@ SELECT sum(cpu) FROM f_sensor_data;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize Aggregate
Output: sum(_hyper_37_71_chunk.cpu)
Output: sum(_hyper_37_73_chunk.cpu)
-> Gather
Output: (PARTIAL sum(_hyper_37_71_chunk.cpu))
Output: (PARTIAL sum(_hyper_37_73_chunk.cpu))
Workers Planned: 4
-> Partial Aggregate
Output: PARTIAL sum(_hyper_37_71_chunk.cpu)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk.cpu
-> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_72_chunk
Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1
Output: PARTIAL sum(_hyper_37_73_chunk.cpu)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk.cpu
-> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_74_chunk
Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1
(11 rows)
-- Encourage use of Index Scan
@ -1828,13 +1826,13 @@ SELECT * FROM f_sensor_data WHERE sensor_id > 100;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Gather
Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature
Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature
Workers Planned: 2
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature
-> Parallel Index Scan using compress_hyper_38_72_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_72_chunk
Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1
Index Cond: (compress_hyper_38_72_chunk.sensor_id > 100)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature
-> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_74_chunk
Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1
Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100)
(8 rows)
RESET enable_parallel_append;
@ -1855,21 +1853,21 @@ SELECT sum(cpu) FROM f_sensor_data;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize Aggregate
Output: sum(_hyper_37_71_chunk.cpu)
Output: sum(_hyper_37_73_chunk.cpu)
-> Gather
Output: (PARTIAL sum(_hyper_37_71_chunk.cpu))
Output: (PARTIAL sum(_hyper_37_73_chunk.cpu))
Workers Planned: 4
-> Parallel Append
-> Partial Aggregate
Output: PARTIAL sum(_hyper_37_71_chunk.cpu)
-> Parallel Seq Scan on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk.cpu
Output: PARTIAL sum(_hyper_37_73_chunk.cpu)
-> Parallel Seq Scan on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk.cpu
-> Partial Aggregate
Output: PARTIAL sum(_hyper_37_71_chunk.cpu)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk.cpu
-> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_72_chunk
Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1
Output: PARTIAL sum(_hyper_37_73_chunk.cpu)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk.cpu
-> Parallel Seq Scan on _timescaledb_internal.compress_hyper_38_74_chunk
Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1
(16 rows)
:explain
@ -1877,18 +1875,18 @@ SELECT * FROM f_sensor_data WHERE sensor_id > 100;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Gather
Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature
Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature
Workers Planned: 3
-> Parallel Append
-> Parallel Index Scan using _hyper_37_71_chunk_f_sensor_data_time_sensor_id_idx on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature
Index Cond: (_hyper_37_71_chunk.sensor_id > 100)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_71_chunk
Output: _hyper_37_71_chunk."time", _hyper_37_71_chunk.sensor_id, _hyper_37_71_chunk.cpu, _hyper_37_71_chunk.temperature
Filter: (_hyper_37_71_chunk.sensor_id > 100)
-> Parallel Index Scan using compress_hyper_38_72_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_72_chunk
Output: compress_hyper_38_72_chunk."time", compress_hyper_38_72_chunk.sensor_id, compress_hyper_38_72_chunk.cpu, compress_hyper_38_72_chunk.temperature, compress_hyper_38_72_chunk._ts_meta_count, compress_hyper_38_72_chunk._ts_meta_sequence_num, compress_hyper_38_72_chunk._ts_meta_min_1, compress_hyper_38_72_chunk._ts_meta_max_1
Index Cond: (compress_hyper_38_72_chunk.sensor_id > 100)
-> Parallel Index Scan using _hyper_37_73_chunk_f_sensor_data_time_sensor_id_idx on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature
Index Cond: (_hyper_37_73_chunk.sensor_id > 100)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_37_73_chunk
Output: _hyper_37_73_chunk."time", _hyper_37_73_chunk.sensor_id, _hyper_37_73_chunk.cpu, _hyper_37_73_chunk.temperature
Filter: (_hyper_37_73_chunk.sensor_id > 100)
-> Parallel Index Scan using compress_hyper_38_74_chunk_sensor_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_38_74_chunk
Output: compress_hyper_38_74_chunk."time", compress_hyper_38_74_chunk.sensor_id, compress_hyper_38_74_chunk.cpu, compress_hyper_38_74_chunk.temperature, compress_hyper_38_74_chunk._ts_meta_count, compress_hyper_38_74_chunk._ts_meta_sequence_num, compress_hyper_38_74_chunk._ts_meta_min_1, compress_hyper_38_74_chunk._ts_meta_max_1
Index Cond: (compress_hyper_38_74_chunk.sensor_id > 100)
(13 rows)
-- Test non-partial paths below append are not executed multiple times
@ -1961,8 +1959,8 @@ SELECT time, device, device * 0.1 FROM
SELECT compress_chunk(c) FROM show_chunks('ht_metrics_partially_compressed') c;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_41_75_chunk
_timescaledb_internal._hyper_41_76_chunk
_timescaledb_internal._hyper_41_77_chunk
_timescaledb_internal._hyper_41_78_chunk
(2 rows)
INSERT INTO ht_metrics_partially_compressed VALUES ('2020-01-01'::timestamptz, 1, 0.1);
@ -1978,26 +1976,26 @@ SELECT * FROM ht_metrics_partially_compressed ORDER BY time DESC, device LIMIT 1
Startup Exclusion: false
Runtime Exclusion: false
-> Sort
Output: _hyper_41_76_chunk."time", _hyper_41_76_chunk.device, _hyper_41_76_chunk.value
Sort Key: _hyper_41_76_chunk."time" DESC, _hyper_41_76_chunk.device
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_76_chunk
Output: _hyper_41_76_chunk."time", _hyper_41_76_chunk.device, _hyper_41_76_chunk.value
-> Seq Scan on _timescaledb_internal.compress_hyper_42_78_chunk
Output: compress_hyper_42_78_chunk."time", compress_hyper_42_78_chunk.device, compress_hyper_42_78_chunk.value, compress_hyper_42_78_chunk._ts_meta_count, compress_hyper_42_78_chunk._ts_meta_sequence_num, compress_hyper_42_78_chunk._ts_meta_min_1, compress_hyper_42_78_chunk._ts_meta_max_1
Output: _hyper_41_78_chunk."time", _hyper_41_78_chunk.device, _hyper_41_78_chunk.value
Sort Key: _hyper_41_78_chunk."time" DESC, _hyper_41_78_chunk.device
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_78_chunk
Output: _hyper_41_78_chunk."time", _hyper_41_78_chunk.device, _hyper_41_78_chunk.value
-> Seq Scan on _timescaledb_internal.compress_hyper_42_80_chunk
Output: compress_hyper_42_80_chunk."time", compress_hyper_42_80_chunk.device, compress_hyper_42_80_chunk.value, compress_hyper_42_80_chunk._ts_meta_count, compress_hyper_42_80_chunk._ts_meta_sequence_num, compress_hyper_42_80_chunk._ts_meta_min_1, compress_hyper_42_80_chunk._ts_meta_max_1
-> Merge Append
Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device
Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device
-> Sort
Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value
Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_75_chunk
Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value
-> Seq Scan on _timescaledb_internal.compress_hyper_42_77_chunk
Output: compress_hyper_42_77_chunk."time", compress_hyper_42_77_chunk.device, compress_hyper_42_77_chunk.value, compress_hyper_42_77_chunk._ts_meta_count, compress_hyper_42_77_chunk._ts_meta_sequence_num, compress_hyper_42_77_chunk._ts_meta_min_1, compress_hyper_42_77_chunk._ts_meta_max_1
Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value
Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_41_77_chunk
Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value
-> Seq Scan on _timescaledb_internal.compress_hyper_42_79_chunk
Output: compress_hyper_42_79_chunk."time", compress_hyper_42_79_chunk.device, compress_hyper_42_79_chunk.value, compress_hyper_42_79_chunk._ts_meta_count, compress_hyper_42_79_chunk._ts_meta_sequence_num, compress_hyper_42_79_chunk._ts_meta_min_1, compress_hyper_42_79_chunk._ts_meta_max_1
-> Sort
Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value
Sort Key: _hyper_41_75_chunk."time" DESC, _hyper_41_75_chunk.device
-> Seq Scan on _timescaledb_internal._hyper_41_75_chunk
Output: _hyper_41_75_chunk."time", _hyper_41_75_chunk.device, _hyper_41_75_chunk.value
Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value
Sort Key: _hyper_41_77_chunk."time" DESC, _hyper_41_77_chunk.device
-> Seq Scan on _timescaledb_internal._hyper_41_77_chunk
Output: _hyper_41_77_chunk."time", _hyper_41_77_chunk.device, _hyper_41_77_chunk.value
(28 rows)
-- Test parameter change on rescan
@ -2064,9 +2062,9 @@ INSERT INTO i6069 VALUES('2023-07-01', 1, 1),('2023-07-03', 2, 1),('2023-07-05',
SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('i6069') i;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_43_79_chunk
_timescaledb_internal._hyper_43_80_chunk
_timescaledb_internal._hyper_43_81_chunk
_timescaledb_internal._hyper_43_82_chunk
_timescaledb_internal._hyper_43_83_chunk
(3 rows)
SET enable_indexscan = ON;
@ -2091,6 +2089,28 @@ ORDER BY timestamp desc LIMIT 1 ) a ON true;
Order: i6069."timestamp" DESC
Startup Exclusion: false
Runtime Exclusion: true
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_83_chunk
Output: _hyper_43_83_chunk."timestamp", _hyper_43_83_chunk.attr_id, _hyper_43_83_chunk.number_val
Filter: ((_hyper_43_83_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_83_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
Batch Sorted Merge: true
-> Sort
Output: compress_hyper_44_86_chunk."timestamp", compress_hyper_44_86_chunk.attr_id, compress_hyper_44_86_chunk.number_val, compress_hyper_44_86_chunk._ts_meta_count, compress_hyper_44_86_chunk._ts_meta_sequence_num, compress_hyper_44_86_chunk._ts_meta_min_1, compress_hyper_44_86_chunk._ts_meta_max_1
Sort Key: compress_hyper_44_86_chunk._ts_meta_max_1 DESC
-> Index Scan using compress_hyper_44_86_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_86_chunk
Output: compress_hyper_44_86_chunk."timestamp", compress_hyper_44_86_chunk.attr_id, compress_hyper_44_86_chunk.number_val, compress_hyper_44_86_chunk._ts_meta_count, compress_hyper_44_86_chunk._ts_meta_sequence_num, compress_hyper_44_86_chunk._ts_meta_min_1, compress_hyper_44_86_chunk._ts_meta_max_1
Index Cond: (compress_hyper_44_86_chunk.attr_id = "*VALUES*".column1)
Filter: ((compress_hyper_44_86_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_86_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_82_chunk
Output: _hyper_43_82_chunk."timestamp", _hyper_43_82_chunk.attr_id, _hyper_43_82_chunk.number_val
Filter: ((_hyper_43_82_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_82_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
Batch Sorted Merge: true
-> Sort
Output: compress_hyper_44_85_chunk."timestamp", compress_hyper_44_85_chunk.attr_id, compress_hyper_44_85_chunk.number_val, compress_hyper_44_85_chunk._ts_meta_count, compress_hyper_44_85_chunk._ts_meta_sequence_num, compress_hyper_44_85_chunk._ts_meta_min_1, compress_hyper_44_85_chunk._ts_meta_max_1
Sort Key: compress_hyper_44_85_chunk._ts_meta_max_1 DESC
-> Index Scan using compress_hyper_44_85_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_85_chunk
Output: compress_hyper_44_85_chunk."timestamp", compress_hyper_44_85_chunk.attr_id, compress_hyper_44_85_chunk.number_val, compress_hyper_44_85_chunk._ts_meta_count, compress_hyper_44_85_chunk._ts_meta_sequence_num, compress_hyper_44_85_chunk._ts_meta_min_1, compress_hyper_44_85_chunk._ts_meta_max_1
Index Cond: (compress_hyper_44_85_chunk.attr_id = "*VALUES*".column1)
Filter: ((compress_hyper_44_85_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_85_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_81_chunk
Output: _hyper_43_81_chunk."timestamp", _hyper_43_81_chunk.attr_id, _hyper_43_81_chunk.number_val
Filter: ((_hyper_43_81_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_81_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
@ -2102,28 +2122,6 @@ ORDER BY timestamp desc LIMIT 1 ) a ON true;
Output: compress_hyper_44_84_chunk."timestamp", compress_hyper_44_84_chunk.attr_id, compress_hyper_44_84_chunk.number_val, compress_hyper_44_84_chunk._ts_meta_count, compress_hyper_44_84_chunk._ts_meta_sequence_num, compress_hyper_44_84_chunk._ts_meta_min_1, compress_hyper_44_84_chunk._ts_meta_max_1
Index Cond: (compress_hyper_44_84_chunk.attr_id = "*VALUES*".column1)
Filter: ((compress_hyper_44_84_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_84_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_80_chunk
Output: _hyper_43_80_chunk."timestamp", _hyper_43_80_chunk.attr_id, _hyper_43_80_chunk.number_val
Filter: ((_hyper_43_80_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_80_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
Batch Sorted Merge: true
-> Sort
Output: compress_hyper_44_83_chunk."timestamp", compress_hyper_44_83_chunk.attr_id, compress_hyper_44_83_chunk.number_val, compress_hyper_44_83_chunk._ts_meta_count, compress_hyper_44_83_chunk._ts_meta_sequence_num, compress_hyper_44_83_chunk._ts_meta_min_1, compress_hyper_44_83_chunk._ts_meta_max_1
Sort Key: compress_hyper_44_83_chunk._ts_meta_max_1 DESC
-> Index Scan using compress_hyper_44_83_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_83_chunk
Output: compress_hyper_44_83_chunk."timestamp", compress_hyper_44_83_chunk.attr_id, compress_hyper_44_83_chunk.number_val, compress_hyper_44_83_chunk._ts_meta_count, compress_hyper_44_83_chunk._ts_meta_sequence_num, compress_hyper_44_83_chunk._ts_meta_min_1, compress_hyper_44_83_chunk._ts_meta_max_1
Index Cond: (compress_hyper_44_83_chunk.attr_id = "*VALUES*".column1)
Filter: ((compress_hyper_44_83_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_83_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_43_79_chunk
Output: _hyper_43_79_chunk."timestamp", _hyper_43_79_chunk.attr_id, _hyper_43_79_chunk.number_val
Filter: ((_hyper_43_79_chunk."timestamp" > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (_hyper_43_79_chunk."timestamp" < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
Batch Sorted Merge: true
-> Sort
Output: compress_hyper_44_82_chunk."timestamp", compress_hyper_44_82_chunk.attr_id, compress_hyper_44_82_chunk.number_val, compress_hyper_44_82_chunk._ts_meta_count, compress_hyper_44_82_chunk._ts_meta_sequence_num, compress_hyper_44_82_chunk._ts_meta_min_1, compress_hyper_44_82_chunk._ts_meta_max_1
Sort Key: compress_hyper_44_82_chunk._ts_meta_max_1 DESC
-> Index Scan using compress_hyper_44_82_chunk_attr_id__ts_meta_sequence_num_idx on _timescaledb_internal.compress_hyper_44_82_chunk
Output: compress_hyper_44_82_chunk."timestamp", compress_hyper_44_82_chunk.attr_id, compress_hyper_44_82_chunk.number_val, compress_hyper_44_82_chunk._ts_meta_count, compress_hyper_44_82_chunk._ts_meta_sequence_num, compress_hyper_44_82_chunk._ts_meta_min_1, compress_hyper_44_82_chunk._ts_meta_max_1
Index Cond: (compress_hyper_44_82_chunk.attr_id = "*VALUES*".column1)
Filter: ((compress_hyper_44_82_chunk._ts_meta_max_1 > 'Fri Jun 30 00:00:00 2023'::timestamp without time zone) AND (compress_hyper_44_82_chunk._ts_meta_min_1 < 'Thu Jul 06 00:00:00 2023'::timestamp without time zone))
(44 rows)
SELECT * FROM ( VALUES(1),(2),(3),(4),(5),(6),(7),(8),(9),(10) ) AS attr_ids(attr_id)
@ -2180,9 +2178,9 @@ SET work_mem = '16MB';
SELECT compress_chunk(ch) FROM show_chunks('sensor_data_compressed') ch LIMIT 3;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_45_85_chunk
_timescaledb_internal._hyper_45_86_chunk
_timescaledb_internal._hyper_45_87_chunk
_timescaledb_internal._hyper_45_88_chunk
_timescaledb_internal._hyper_45_89_chunk
(3 rows)
ANALYZE sensor_data_compressed;
@ -2208,14 +2206,32 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Order: sensor_data_compressed."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1)
-> Index Scan using _hyper_45_93_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
-> Index Scan using _hyper_45_92_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
-> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
-> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
-> Index Scan using _hyper_45_89_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
-> Index Scan using _hyper_45_88_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_88_chunk (never executed)
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_95_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed)
Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Batch Sorted Merge: true
@ -2225,24 +2241,6 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
(42 rows)
-- Only the first chunks should be accessed (batch sorted merge is disabled)
@ -2258,14 +2256,30 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Order: sensor_data_compressed."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1)
-> Index Scan using _hyper_45_93_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
-> Index Scan using _hyper_45_92_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
-> Index Scan using _hyper_45_91_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
-> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Index Scan using _hyper_45_90_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
-> Index Scan using _hyper_45_89_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1)
-> Sort (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
-> Index Scan using _hyper_45_88_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_88_chunk (never executed)
Sort Key: _hyper_45_89_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
Sort Key: _hyper_45_88_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed)
Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Sort Key: _hyper_45_87_chunk."time" DESC
@ -2274,39 +2288,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Sort Key: _hyper_45_86_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Sort Key: _hyper_45_85_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
(39 rows)
RESET timescaledb.enable_decompression_sorted_merge;
-- Compress the remaining chunks
SELECT compress_chunk(ch, if_not_compressed => true) FROM show_chunks('sensor_data_compressed') ch;
NOTICE: chunk "_hyper_45_85_chunk" is already compressed
NOTICE: chunk "_hyper_45_86_chunk" is already compressed
NOTICE: chunk "_hyper_45_87_chunk" is already compressed
NOTICE: chunk "_hyper_45_88_chunk" is already compressed
NOTICE: chunk "_hyper_45_89_chunk" is already compressed
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_45_85_chunk
_timescaledb_internal._hyper_45_86_chunk
_timescaledb_internal._hyper_45_87_chunk
_timescaledb_internal._hyper_45_88_chunk
_timescaledb_internal._hyper_45_89_chunk
_timescaledb_internal._hyper_45_90_chunk
_timescaledb_internal._hyper_45_91_chunk
_timescaledb_internal._hyper_45_92_chunk
_timescaledb_internal._hyper_45_93_chunk
(7 rows)
SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
@ -2322,8 +2320,8 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
-- Only the first chunks should be accessed (batch sorted merge is enabled)
:PREFIX
SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (actual rows=5 loops=1)
Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature
-> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1)
@ -2331,7 +2329,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Order: sensor_data_compressed."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_100_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_99_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
@ -2341,25 +2359,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_97_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
@ -2379,32 +2395,14 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
(73 rows)
-- Only the first chunks should be accessed (batch sorted merge is disabled)
SET timescaledb.enable_decompression_sorted_merge = FALSE;
:PREFIX
SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (actual rows=5 loops=1)
Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature
-> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1)
@ -2413,6 +2411,24 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Startup Exclusion: false
Runtime Exclusion: false
-> Sort (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Sort Key: _hyper_45_93_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
-> Sort (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Sort Key: _hyper_45_92_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
-> Sort (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
Sort Key: _hyper_45_91_chunk."time" DESC
Sort Method: quicksort
@ -2421,23 +2437,21 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Sort Key: _hyper_45_90_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
-> Sort (actual rows=1 loops=1)
-> Sort (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Sort Key: _hyper_45_89_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
@ -2455,22 +2469,6 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Sort Key: _hyper_45_86_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Sort Key: _hyper_45_85_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
(66 rows)
RESET timescaledb.enable_decompression_sorted_merge;
@ -2480,8 +2478,8 @@ INSERT INTO sensor_data_compressed (time, sensor_id, cpu, temperature)
-- Only the first chunks should be accessed (batch sorted merge is enabled)
:PREFIX
SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (actual rows=5 loops=1)
Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature
-> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1)
@ -2489,7 +2487,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Order: sensor_data_compressed."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_100_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_99_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_91_chunk (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
@ -2499,25 +2517,23 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_97_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_96_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_88_chunk (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
@ -2528,45 +2544,27 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Sort Key: compress_hyper_46_95_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed)
Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_93_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Merge Append (never executed)
Sort Key: _hyper_45_85_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Sort Key: _hyper_45_87_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Batch Sorted Merge: true
Bulk Decompression: false
-> Sort (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_92_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
-> Index Scan using _hyper_45_85_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
Sort Key: compress_hyper_46_94_chunk._ts_meta_max_1 DESC
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Index Scan using _hyper_45_87_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
(77 rows)
-- Only the first chunks should be accessed (batch sorted merge is disabled)
SET timescaledb.enable_decompression_sorted_merge = FALSE;
:PREFIX
SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit (actual rows=5 loops=1)
Output: sensor_data_compressed."time", sensor_data_compressed.sensor_id, sensor_data_compressed.cpu, sensor_data_compressed.temperature
-> Custom Scan (ChunkAppend) on public.sensor_data_compressed (actual rows=5 loops=1)
@ -2575,6 +2573,24 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Startup Exclusion: false
Runtime Exclusion: false
-> Sort (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Sort Key: _hyper_45_93_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_93_chunk (actual rows=2 loops=1)
Output: _hyper_45_93_chunk."time", _hyper_45_93_chunk.sensor_id, _hyper_45_93_chunk.cpu, _hyper_45_93_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_100_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_100_chunk."time", compress_hyper_46_100_chunk.sensor_id, compress_hyper_46_100_chunk.cpu, compress_hyper_46_100_chunk.temperature, compress_hyper_46_100_chunk._ts_meta_count, compress_hyper_46_100_chunk._ts_meta_sequence_num, compress_hyper_46_100_chunk._ts_meta_min_1, compress_hyper_46_100_chunk._ts_meta_max_1
-> Sort (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Sort Key: _hyper_45_92_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_92_chunk (actual rows=2 loops=1)
Output: _hyper_45_92_chunk."time", _hyper_45_92_chunk.sensor_id, _hyper_45_92_chunk.cpu, _hyper_45_92_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_99_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_99_chunk."time", compress_hyper_46_99_chunk.sensor_id, compress_hyper_46_99_chunk.cpu, compress_hyper_46_99_chunk.temperature, compress_hyper_46_99_chunk._ts_meta_count, compress_hyper_46_99_chunk._ts_meta_sequence_num, compress_hyper_46_99_chunk._ts_meta_min_1, compress_hyper_46_99_chunk._ts_meta_max_1
-> Sort (actual rows=1 loops=1)
Output: _hyper_45_91_chunk."time", _hyper_45_91_chunk.sensor_id, _hyper_45_91_chunk.cpu, _hyper_45_91_chunk.temperature
Sort Key: _hyper_45_91_chunk."time" DESC
Sort Method: quicksort
@ -2583,23 +2599,21 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_98_chunk (actual rows=2 loops=1)
Output: compress_hyper_46_98_chunk."time", compress_hyper_46_98_chunk.sensor_id, compress_hyper_46_98_chunk.cpu, compress_hyper_46_98_chunk.temperature, compress_hyper_46_98_chunk._ts_meta_count, compress_hyper_46_98_chunk._ts_meta_sequence_num, compress_hyper_46_98_chunk._ts_meta_min_1, compress_hyper_46_98_chunk._ts_meta_max_1
-> Sort (actual rows=2 loops=1)
-> Sort (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Sort Key: _hyper_45_90_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_90_chunk (never executed)
Output: _hyper_45_90_chunk."time", _hyper_45_90_chunk.sensor_id, _hyper_45_90_chunk.cpu, _hyper_45_90_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_97_chunk (never executed)
Output: compress_hyper_46_97_chunk."time", compress_hyper_46_97_chunk.sensor_id, compress_hyper_46_97_chunk.cpu, compress_hyper_46_97_chunk.temperature, compress_hyper_46_97_chunk._ts_meta_count, compress_hyper_46_97_chunk._ts_meta_sequence_num, compress_hyper_46_97_chunk._ts_meta_min_1, compress_hyper_46_97_chunk._ts_meta_max_1
-> Sort (actual rows=1 loops=1)
-> Sort (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Sort Key: _hyper_45_89_chunk."time" DESC
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_89_chunk (never executed)
Output: _hyper_45_89_chunk."time", _hyper_45_89_chunk.sensor_id, _hyper_45_89_chunk.cpu, _hyper_45_89_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (actual rows=2 loops=1)
-> Seq Scan on _timescaledb_internal.compress_hyper_46_96_chunk (never executed)
Output: compress_hyper_46_96_chunk."time", compress_hyper_46_96_chunk.sensor_id, compress_hyper_46_96_chunk.cpu, compress_hyper_46_96_chunk.temperature, compress_hyper_46_96_chunk._ts_meta_count, compress_hyper_46_96_chunk._ts_meta_sequence_num, compress_hyper_46_96_chunk._ts_meta_min_1, compress_hyper_46_96_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_88_chunk."time", _hyper_45_88_chunk.sensor_id, _hyper_45_88_chunk.cpu, _hyper_45_88_chunk.temperature
@ -2609,34 +2623,18 @@ SELECT * FROM sensor_data_compressed ORDER BY time DESC LIMIT 5;
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_95_chunk (never executed)
Output: compress_hyper_46_95_chunk."time", compress_hyper_46_95_chunk.sensor_id, compress_hyper_46_95_chunk.cpu, compress_hyper_46_95_chunk.temperature, compress_hyper_46_95_chunk._ts_meta_count, compress_hyper_46_95_chunk._ts_meta_sequence_num, compress_hyper_46_95_chunk._ts_meta_min_1, compress_hyper_46_95_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Sort Key: _hyper_45_87_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Sort (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Sort Key: _hyper_45_86_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_86_chunk (never executed)
Output: _hyper_45_86_chunk."time", _hyper_45_86_chunk.sensor_id, _hyper_45_86_chunk.cpu, _hyper_45_86_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_93_chunk (never executed)
Output: compress_hyper_46_93_chunk."time", compress_hyper_46_93_chunk.sensor_id, compress_hyper_46_93_chunk.cpu, compress_hyper_46_93_chunk.temperature, compress_hyper_46_93_chunk._ts_meta_count, compress_hyper_46_93_chunk._ts_meta_sequence_num, compress_hyper_46_93_chunk._ts_meta_min_1, compress_hyper_46_93_chunk._ts_meta_max_1
-> Merge Append (never executed)
Sort Key: _hyper_45_85_chunk."time" DESC
Sort Key: _hyper_45_87_chunk."time" DESC
-> Sort (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Sort Key: _hyper_45_85_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Sort Key: _hyper_45_87_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
Bulk Decompression: true
-> Seq Scan on _timescaledb_internal.compress_hyper_46_92_chunk (never executed)
Output: compress_hyper_46_92_chunk."time", compress_hyper_46_92_chunk.sensor_id, compress_hyper_46_92_chunk.cpu, compress_hyper_46_92_chunk.temperature, compress_hyper_46_92_chunk._ts_meta_count, compress_hyper_46_92_chunk._ts_meta_sequence_num, compress_hyper_46_92_chunk._ts_meta_min_1, compress_hyper_46_92_chunk._ts_meta_max_1
-> Index Scan using _hyper_45_85_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_85_chunk (never executed)
Output: _hyper_45_85_chunk."time", _hyper_45_85_chunk.sensor_id, _hyper_45_85_chunk.cpu, _hyper_45_85_chunk.temperature
-> Seq Scan on _timescaledb_internal.compress_hyper_46_94_chunk (never executed)
Output: compress_hyper_46_94_chunk."time", compress_hyper_46_94_chunk.sensor_id, compress_hyper_46_94_chunk.cpu, compress_hyper_46_94_chunk.temperature, compress_hyper_46_94_chunk._ts_meta_count, compress_hyper_46_94_chunk._ts_meta_sequence_num, compress_hyper_46_94_chunk._ts_meta_min_1, compress_hyper_46_94_chunk._ts_meta_max_1
-> Index Scan using _hyper_45_87_chunk_sensor_data_compressed_time_idx on _timescaledb_internal._hyper_45_87_chunk (never executed)
Output: _hyper_45_87_chunk."time", _hyper_45_87_chunk.sensor_id, _hyper_45_87_chunk.cpu, _hyper_45_87_chunk.temperature
(70 rows)
RESET timescaledb.enable_decompression_sorted_merge;
@ -2682,49 +2680,49 @@ SELECT show_chunks('compress_chunk_test') AS "CHUNK" \gset
SELECT compress_chunk(:'CHUNK');
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_100_chunk
_timescaledb_internal._hyper_47_102_chunk
(1 row)
-- subsequent calls will be noop
SELECT compress_chunk(:'CHUNK');
NOTICE: chunk "_hyper_47_100_chunk" is already compressed
NOTICE: chunk "_hyper_47_102_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_100_chunk
_timescaledb_internal._hyper_47_102_chunk
(1 row)
-- unless if_not_compressed is set to false
\set ON_ERROR_STOP 0
SELECT compress_chunk(:'CHUNK', false);
ERROR: chunk "_hyper_47_100_chunk" is already compressed
ERROR: chunk "_hyper_47_102_chunk" is already compressed
\set ON_ERROR_STOP 1
ALTER TABLE compress_chunk_test SET (timescaledb.compress_segmentby='device');
SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test';
compressed_chunk_id
---------------------
101
103
(1 row)
-- changing compression settings will not recompress the chunk by default
SELECT compress_chunk(:'CHUNK');
NOTICE: chunk "_hyper_47_100_chunk" is already compressed
NOTICE: chunk "_hyper_47_102_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_100_chunk
_timescaledb_internal._hyper_47_102_chunk
(1 row)
-- unless we specify recompress := true
SELECT compress_chunk(:'CHUNK', recompress := true);
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_100_chunk
_timescaledb_internal._hyper_47_102_chunk
(1 row)
-- compressed_chunk_id should be different now
SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test';
compressed_chunk_id
---------------------
102
104
(1 row)
--test partial handling
@ -2733,14 +2731,14 @@ INSERT INTO compress_chunk_test SELECT '2020-01-01', 'c3po', 3.14;
SELECT compress_chunk(:'CHUNK');
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_100_chunk
_timescaledb_internal._hyper_47_102_chunk
(1 row)
-- compressed_chunk_id should not have changed
SELECT compressed_chunk_id from _timescaledb_catalog.chunk ch INNER JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id AND ht.table_name='compress_chunk_test';
compressed_chunk_id
---------------------
102
104
(1 row)
-- should return no rows
@ -2756,7 +2754,7 @@ SELECT show_chunks('compress_chunk_test') AS "CHUNK2" LIMIT 1 OFFSET 1 \gset
SELECT compress_chunk(:'CHUNK2');
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_103_chunk
_timescaledb_internal._hyper_47_105_chunk
(1 row)
-- make it partial and compress again
@ -2764,7 +2762,7 @@ INSERT INTO compress_chunk_test SELECT '2021-01-01', 'r2d2', 3.14;
SELECT compress_chunk(:'CHUNK2');
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_47_103_chunk
_timescaledb_internal._hyper_47_105_chunk
(1 row)
-- should return no rows

View File

@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count
14
(1 row)
-- We need to have some chunks that are marked as dropped, otherwise
-- we will not have a problem below.
SELECT COUNT(*) AS dropped_chunks_count
FROM _timescaledb_catalog.chunk
WHERE dropped = TRUE;
dropped_chunks_count
----------------------
14
(1 row)
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;
count

View File

@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count
14
(1 row)
-- We need to have some chunks that are marked as dropped, otherwise
-- we will not have a problem below.
SELECT COUNT(*) AS dropped_chunks_count
FROM _timescaledb_catalog.chunk
WHERE dropped = TRUE;
dropped_chunks_count
----------------------
14
(1 row)
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;
count

View File

@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count
14
(1 row)
-- We need to have some chunks that are marked as dropped, otherwise
-- we will not have a problem below.
SELECT COUNT(*) AS dropped_chunks_count
FROM _timescaledb_catalog.chunk
WHERE dropped = TRUE;
dropped_chunks_count
----------------------
14
(1 row)
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;
count

View File

@ -307,16 +307,6 @@ SELECT COUNT(*) AS dropped_chunks_count
14
(1 row)
-- We need to have some chunks that are marked as dropped, otherwise
-- we will not have a problem below.
SELECT COUNT(*) AS dropped_chunks_count
FROM _timescaledb_catalog.chunk
WHERE dropped = TRUE;
dropped_chunks_count
----------------------
14
(1 row)
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;
count

View File

@ -110,6 +110,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
_timescaledb_functions.recompress_chunk_segmentwise(regclass,boolean)
_timescaledb_functions.relation_approximate_size(regclass)
_timescaledb_functions.relation_size(regclass)
_timescaledb_functions.remove_dropped_chunk_metadata(integer)
_timescaledb_functions.repair_relation_acls()
_timescaledb_functions.restart_background_workers()
_timescaledb_functions.show_chunk(regclass)

View File

@ -378,8 +378,8 @@ SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9));
SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC;
--earliest datapoint now in table
SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
--we see the chunks row with the dropped flags set;
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk where dropped;
--chunks are removed
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
--still see data in the view
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
--no data but covers dropped chunks

View File

@ -310,3 +310,66 @@ SELECT * FROM cagg3;
CREATE MATERIALIZED VIEW cagg4 WITH (timescaledb.continuous,timescaledb.materialized_only=true) AS SELECT time_bucket('1 month', time, 'PST8PDT', "offset":= INTERVAL '15 day') FROM metrics GROUP BY 1;
\set ON_ERROR_STOP 1
--
-- drop chunks tests
--
-- should return 4 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
-- all caggs in the new format (finalized=true)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
-- dropping a chunk should also remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-01 00:00:00-02'::timestamptz);
-- should return 3 chunks
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'metrics'
ORDER BY 1;
-- let's update the catalog to fake an old format cagg (finalized=false)
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_catalog.continuous_agg SET finalized=FALSE WHERE user_view_name = 'cagg1';
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- cagg1 now is a fake old format (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
-- cagg1 is now in the old format (finalized=false)
-- dropping a chunk should NOT remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-13 00:00:00-02'::timestamptz);
-- should return 3 chunks and one of them should be marked as dropped
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
-- remove the fake old format cagg
DROP MATERIALIZED VIEW cagg1;
-- no more old format caggs (finalized=false)
SELECT user_view_name, finalized FROM _timescaledb_catalog.continuous_agg WHERE user_view_name in ('cagg1', 'cagg2', 'cagg3') ORDER BY 1;
-- dropping a chunk should remove the catalog data
SELECT drop_chunks('metrics', older_than => '2000-01-25 00:00:00-02'::timestamptz);
-- should return 2 chunks and one of them should be marked as dropped
-- because we dropped a chunk earlier while an old-format cagg existed
SELECT
c.table_name as chunk_name,
c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;

View File

@ -193,12 +193,6 @@ ALTER TABLE conditions SET (timescaledb.compress);
SELECT COUNT(*) AS dropped_chunks_count
FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00');
-- We need to have some chunks that are marked as dropped, otherwise
-- we will not have a problem below.
SELECT COUNT(*) AS dropped_chunks_count
FROM _timescaledb_catalog.chunk
WHERE dropped = TRUE;
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;

View File

@ -297,6 +297,44 @@ SELECT execute_migration();
ROLLBACK;
\set ON_ERROR_STOP 1
--
-- test dropping chunks
--
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
-- drop 1 chunk
\if :IS_TIME_DIMENSION
SELECT drop_chunks('conditions', older_than => CAST('2022-01-08 00:00:00-00' AS :TIME_DIMENSION_DATATYPE));
\else
SELECT drop_chunks('conditions', older_than => 10);
\endif
-- now we have one chunk marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
-- this migration should remove the chunk metadata marked as dropped
CALL cagg_migrate('conditions_summary_weekly', override => TRUE, drop_old => TRUE);
-- no chunks marked as dropped
SELECT
c.table_name as chunk_name,
c.dropped
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id AND h.table_name = 'conditions' AND c.dropped
ORDER BY 1;
-- cleanup
DROP FUNCTION execute_migration();
REVOKE SELECT, INSERT, UPDATE ON TABLE _timescaledb_catalog.continuous_agg_migrate_plan FROM :ROLE_DEFAULT_PERM_USER;