Stabilize more tests (#7066)

* cagg_watermark_concurrent_update is very dependent on the chunk
numbers, and should be run first.
* telemetry_stats should do VACUUM and REINDEX before getting the
statistics, to avoid a dependency on how the index was built.
* cagg_migrate_function is missing some ORDER BY clauses.
Alexander Kuzmenkov 2024-06-26 12:28:09 +02:00 committed by GitHub
parent fb14771308
commit 82ab09d8fb
15 changed files with 331 additions and 278 deletions

View File

@ -11,7 +11,14 @@ set -u
ISOLATIONTEST=$1
shift
# Note that removing the chunk numbers is not enough. The chunk numbers also
# influence the alignment of the EXPLAIN output, so not only do we have to
# replace them, we also have to remove the "----"s and the trailing spaces. The
# aligned output format of the isolation tester is hardcoded and cannot be
# changed. Moreover, the chunk numbers influence the names of indexes if they
# are long enough to be truncated, so the only way to get a stable EXPLAIN
# output is to run such a test in a separate database.
$ISOLATIONTEST "$@" | \
sed -e 's!_[0-9]\{1,\}_[0-9]\{1,\}_chunk!_X_X_chunk!g' | \
sed -e 's!hypertable_[0-9]\{1,\}!hypertable_X!g'
sed -e 's!_[0-9]\{1,\}_[0-9]\{1,\}_chunk!_X_X_chunk!g' \
-e 's!hypertable_[0-9]\{1,\}!hypertable_X!g'
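To illustrate the index-name truncation the comment describes, here is a small hypothetical sketch (the name is modeled on the cagg chunk indexes in the expected outputs further down): PostgreSQL limits identifiers to 63 bytes, so the same naming pattern can end in "_time_bucket_idx" or a truncated "_time_bucket_i" depending on how many digits the embedded chunk ids have.

-- Hypothetical name; casting to "name" applies the same 63-byte identifier
-- limit that DDL uses, so wider chunk ids push the "_idx" suffix off the end.
SELECT '_hyper_151_1103_chunk__materialized_hypertable_152_time_bucket_idx'::name AS truncated_name;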

View File

@ -944,7 +944,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00'), sensor, avg(v
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_timestamp;
SELECT * FROM cagg_temp_ng_1week_timestamp ORDER BY time, sensor;
time | sensor | avg
--------------------------+--------+-----
Sat Dec 26 00:00:00 2009 | 1 | 101
@ -1027,7 +1027,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 01:00:00+00'), sensor, av
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_date;
SELECT * FROM cagg_temp_ng_1week_date ORDER BY time, sensor;
time | sensor | avg
------------+--------+-----
12-26-2009 | 1 | 101

View File

@ -944,7 +944,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00'), sensor, avg(v
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_timestamp;
SELECT * FROM cagg_temp_ng_1week_timestamp ORDER BY time, sensor;
time | sensor | avg
--------------------------+--------+-----
Sat Dec 26 00:00:00 2009 | 1 | 101
@ -1027,7 +1027,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 01:00:00+00'), sensor, av
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_date;
SELECT * FROM cagg_temp_ng_1week_date ORDER BY time, sensor;
time | sensor | avg
------------+--------+-----
12-26-2009 | 1 | 101

View File

@ -944,7 +944,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00'), sensor, avg(v
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_timestamp;
SELECT * FROM cagg_temp_ng_1week_timestamp ORDER BY time, sensor;
time | sensor | avg
--------------------------+--------+-----
Sat Dec 26 00:00:00 2009 | 1 | 101
@ -1027,7 +1027,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 01:00:00+00'), sensor, av
(4 rows)
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_date;
SELECT * FROM cagg_temp_ng_1week_date ORDER BY time, sensor;
time | sensor | avg
------------+--------+-----
12-26-2009 | 1 | 101

View File

@ -161,8 +161,13 @@ SELECT * FROM normal;
INSERT INTO part
SELECT * FROM normal;
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
-- Reindex to avoid the dependency on the way the index is built (e.g. the caggs
-- might get their rows inserted in a different order during the refresh,
-- depending on the underlying aggregation plan, and the index will be built
-- differently, which can influence its size).
REINDEX DATABASE :TEST_DBNAME;
-- ANALYZE to get updated reltuples stats
ANALYZE normal, hyper, part;
VACUUM ANALYZE;
SELECT count(c) FROM show_chunks('hyper') c;
count
-------
@ -185,14 +190,14 @@ SELECT jsonb_pretty(rels) AS relations FROM relations;
"num_relations": 2 +
}, +
"tables": { +
"heap_size": 65536, +
"heap_size": 73728, +
"toast_size": 8192, +
"indexes_size": 0, +
"num_relations": 2, +
"num_reltuples": 697 +
}, +
"hypertables": { +
"heap_size": 163840, +
"heap_size": 475136, +
"toast_size": 0, +
"compression": { +
"compressed_heap_size": 0, +
@ -207,19 +212,19 @@ SELECT jsonb_pretty(rels) AS relations FROM relations;
"num_compressed_hypertables": 0, +
"compressed_row_count_frozen_immediately": 0+
}, +
"indexes_size": 286720, +
"indexes_size": 270336, +
"num_children": 11, +
"num_relations": 2, +
"num_reltuples": 697 +
"num_reltuples": 1394 +
}, +
"materialized_views": { +
"toast_size": 8192, +
"indexes_size": 0, +
"num_relations": 1, +
"num_reltuples": 0 +
"num_reltuples": 1 +
}, +
"partitioned_tables": { +
"heap_size": 98304, +
"heap_size": 180224, +
"toast_size": 0, +
"indexes_size": 0, +
"num_children": 6, +
@ -260,7 +265,7 @@ SELECT (SELECT count(*) FROM normal) num_inserted_rows,
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
-------------------+------------------+-----------------+----------------
697 | 697 | 697 | 697
697 | 697 | 1394 | 697
(1 row)
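The reltuples columns compared above are planner estimates stored in pg_class, refreshed only by VACUUM, ANALYZE, or index builds, which is why the test now runs VACUUM ANALYZE before reading them. A hedged sketch of where the numbers come from, using the table names from this test:

-- reltuples lags behind the real row count until the next VACUUM/ANALYZE, so
-- reading it without refreshing the statistics made the output unstable.
SELECT relname, reltuples
FROM pg_class
WHERE relname IN ('normal', 'hyper', 'part')
ORDER BY relname;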
-- Add compression
@ -290,7 +295,13 @@ FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
-- Turn off real-time aggregation
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
ANALYZE normal, hyper, part;
-- Reindex to avoid the dependency on the way the index is built (e.g. the caggs
-- might get their rows inserted in a different order during the refresh,
-- depending on the underlying aggregation plan, and the index will be built
-- differently, which can influence its size).
REINDEX DATABASE :TEST_DBNAME;
-- ANALYZE to get updated reltuples stats
VACUUM ANALYZE;
REFRESH MATERIALIZED VIEW telemetry_report;
SELECT jsonb_pretty(rels) AS relations FROM relations;
relations
@ -300,41 +311,41 @@ SELECT jsonb_pretty(rels) AS relations FROM relations;
"num_relations": 2 +
}, +
"tables": { +
"heap_size": 65536, +
"heap_size": 73728, +
"toast_size": 8192, +
"indexes_size": 0, +
"num_relations": 2, +
"num_reltuples": 697 +
}, +
"hypertables": { +
"heap_size": 196608, +
"heap_size": 368640, +
"toast_size": 40960, +
"compression": { +
"compressed_heap_size": 114688, +
"compressed_row_count": 14, +
"compressed_toast_size": 40960, +
"num_compressed_chunks": 5, +
"uncompressed_heap_size": 81920, +
"uncompressed_heap_size": 221184, +
"uncompressed_row_count": 736, +
"compressed_indexes_size": 16384, +
"uncompressed_toast_size": 0, +
"uncompressed_indexes_size": 147456, +
"uncompressed_indexes_size": 131072, +
"num_compressed_hypertables": 2, +
"compressed_row_count_frozen_immediately": 14+
}, +
"indexes_size": 204800, +
"num_children": 11, +
"num_relations": 2, +
"num_reltuples": 413 +
"num_reltuples": 658 +
}, +
"materialized_views": { +
"toast_size": 8192, +
"indexes_size": 0, +
"num_relations": 1, +
"num_reltuples": 0 +
"num_reltuples": 1 +
}, +
"partitioned_tables": { +
"heap_size": 98304, +
"heap_size": 180224, +
"toast_size": 0, +
"indexes_size": 0, +
"num_children": 6, +

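The heap_size and indexes_size figures in the reports above are ordinary relation-size accounting summed over the hypertables and their chunks, which is why a REINDEX makes the index sizes reproducible. A minimal sketch of how to inspect the same quantities by hand, assuming the 'hyper' hypertable from this test (an illustration, not the exact query the telemetry job runs):

-- pg_table_size() covers heap plus TOAST, pg_indexes_size() covers all indexes;
-- REINDEX rebuilds the indexes into a deterministic physical layout and
-- VACUUM ANALYZE refreshes the reltuples estimates that the report sums up.
SELECT sum(pg_table_size(c))   AS heap_and_toast_bytes,
       sum(pg_indexes_size(c)) AS index_bytes
FROM show_chunks('hyper') AS c;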
View File

@ -18,21 +18,24 @@ debug_waitpoint_enable
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(12 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(15 rows)
step s1_run_update:
CALL refresh_continuous_aggregate('cagg', '2020-01-01 00:00:00', '2021-01-01 00:00:00');
@ -40,21 +43,24 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(12 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(15 rows)
step s3_release_invalidation:
SELECT debug_waitpoint_release('cagg_watermark_update_internal_before_refresh');
@ -68,43 +74,43 @@ step s1_run_update: <... completed>
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
(14 rows)
step s1_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
(14 rows)
@ -120,21 +126,24 @@ debug_waitpoint_enable
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(12 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(15 rows)
step s1_run_update:
CALL refresh_continuous_aggregate('cagg', '2020-01-01 00:00:00', '2021-01-01 00:00:00');
@ -142,21 +151,24 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(12 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(15 rows)
step s3_release_invalidation:
SELECT debug_waitpoint_release('cagg_watermark_update_internal_before_refresh');
@ -184,21 +196,21 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
Index Cond: ("time" >= 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
(13 rows)
step s3_release_invalidation:
@ -213,20 +225,20 @@ step s1_run_update: <... completed>
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
Index Cond: ("time" >= 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
(13 rows)

View File

@ -18,22 +18,23 @@ debug_waitpoint_enable
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(13 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(14 rows)
step s1_run_update:
CALL refresh_continuous_aggregate('cagg', '2020-01-01 00:00:00', '2021-01-01 00:00:00');
@ -41,22 +42,23 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(13 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(14 rows)
step s3_release_invalidation:
SELECT debug_waitpoint_release('cagg_watermark_update_internal_before_refresh');
@ -70,43 +72,43 @@ step s1_run_update: <... completed>
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
(14 rows)
step s1_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Sort
Sort Key: (time_bucket('@ 4 hours'::interval, "time"))
-> Result
One-Time Filter: false
(14 rows)
@ -122,22 +124,23 @@ debug_waitpoint_enable
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(13 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(14 rows)
step s1_run_update:
CALL refresh_continuous_aggregate('cagg', '2020-01-01 00:00:00', '2021-01-01 00:00:00');
@ -145,22 +148,23 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Result
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(13 rows)
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_X_X_chunk.time_bucket
-> Sort
Sort Key: _hyper_X_X_chunk.time_bucket
-> Seq Scan on _hyper_X_X_chunk
Filter: (time_bucket < 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, temperature."time"))
-> Custom Scan (ChunkAppend) on temperature
Order: time_bucket('@ 4 hours'::interval, temperature."time")
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Sat Jan 01 16:00:00 2000 PST'::timestamp with time zone)
(14 rows)
step s3_release_invalidation:
SELECT debug_waitpoint_release('cagg_watermark_update_internal_before_refresh');
@ -188,21 +192,21 @@ step s1_run_update:
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
Index Cond: ("time" >= 'Wed Jan 01 16:00:00 2020 PST'::timestamp with time zone)
(13 rows)
step s3_release_invalidation:
@ -217,20 +221,20 @@ step s1_run_update: <... completed>
step s2_select:
EXPLAIN (COSTS OFF) EXECUTE pstmt;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_i on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _materialized_hypertable_X.time_bucket
-> Custom Scan (ChunkAppend) on _materialized_hypertable_X
Order: _materialized_hypertable_X.time_bucket
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_X_X_chunk__materialized_hypertable_X_time_bucket_idx on _hyper_X_X_chunk
Index Cond: (time_bucket < 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
-> GroupAggregate
Group Key: (time_bucket('@ 4 hours'::interval, _hyper_X_X_chunk."time"))
-> Result
-> Index Scan Backward using _hyper_X_X_chunk_temperature_time_idx on _hyper_X_X_chunk
Index Cond: ("time" >= 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
Index Cond: ("time" >= 'Thu Jan 02 16:00:00 2020 PST'::timestamp with time zone)
(13 rows)

View File

@ -6,6 +6,11 @@ set(TEST_TEMPLATES_MODULE_DEBUG
reorder_vs_insert.spec.in reorder_vs_select.spec.in
decompression_chunk_and_parallel_query.in)
# This one must go first because it is very dependent on chunk IDs in EXPLAIN.
if(CMAKE_BUILD_TYPE MATCHES Debug)
set(TEST_FILES cagg_watermark_concurrent_update.spec)
endif()
list(
APPEND
TEST_FILES
@ -28,7 +33,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
APPEND
TEST_FILES
cagg_concurrent_invalidation.spec
cagg_watermark_concurrent_update.spec
compression_chunk_race.spec
compression_freeze.spec
compression_merge_race.spec
@ -64,8 +68,6 @@ endforeach(TEMPLATE_FILE)
file(REMOVE ${ISOLATION_TEST_SCHEDULE})
list(SORT TEST_FILES)
foreach(TEST_FILE ${TEST_FILES})
string(REGEX REPLACE "(.+)\.spec" "\\1" TESTS_TO_RUN ${TEST_FILE})
file(APPEND ${ISOLATION_TEST_SCHEDULE} "test: ${TESTS_TO_RUN}\n")

View File

@ -46,6 +46,11 @@ setup
'2020-01-01 23:59:59+0','1m') time;
}
setup
{
VACUUM ANALYZE;
}
teardown {
DROP TABLE temperature CASCADE;
}
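The extra setup block runs VACUUM ANALYZE so that the planner has fresh statistics before the sessions prepare and EXPLAIN their statements, which pins down the plan shapes recorded in the expected outputs above. A hedged sketch of the pattern the spec exercises (the real prepared statement is not part of this excerpt, so the filter below is hypothetical):

-- Sketch only: 'cagg' and the time_bucket predicate stand in for the spec's
-- actual prepared statement; the s*_select steps print this EXPLAIN output.
VACUUM ANALYZE;
PREPARE pstmt AS
    SELECT * FROM cagg WHERE time_bucket < now();
EXPLAIN (COSTS OFF) EXECUTE pstmt;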

View File

@ -9,7 +9,7 @@ setup
SELECT create_hypertable('ts_device_table', 'time', chunk_time_interval => 10);
INSERT INTO ts_device_table SELECT generate_series(0,9,1), 1, 100, 20;
ALTER TABLE ts_device_table set(timescaledb.compress, timescaledb.compress_segmentby='location', timescaledb.compress_orderby='time');
CREATE FUNCTION lock_chunktable( name text) RETURNS void AS $$
CREATE OR REPLACE FUNCTION lock_chunktable( name text) RETURNS void AS $$
BEGIN EXECUTE format( 'lock table %s IN SHARE MODE', name);
END; $$ LANGUAGE plpgsql;
CREATE FUNCTION count_chunktable(tbl regclass) RETURNS TABLE("count(*)" int, "count(*) only" int) AS $$
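Switching to CREATE OR REPLACE presumably lets the setup tolerate a lock_chunktable definition left over from another spec run in the same database (a later file in this commit adds the missing DROP FUNCTION to a teardown). For context, a hedged usage sketch of the helper; the chunk name is hypothetical:

-- Hold a chunk in SHARE MODE from one session while another session runs the
-- operation under test; the lock is released when the transaction ends.
BEGIN;
SELECT lock_chunktable('_timescaledb_internal._hyper_1_1_chunk');
-- ... the concurrent step executes in the other session here ...
ROLLBACK;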

View File

@ -8,7 +8,7 @@ setup
SELECT create_hypertable('ts_device_table', 'time', chunk_time_interval => 10);
INSERT INTO ts_device_table SELECT generate_series(0,29,1), 1, 100, 20;
ALTER TABLE ts_device_table set(timescaledb.compress, timescaledb.compress_segmentby='location', timescaledb.compress_orderby='time');
CREATE FUNCTION lock_chunktable( name text) RETURNS void AS $$
CREATE OR REPLACE FUNCTION lock_chunktable( name text) RETURNS void AS $$
BEGIN EXECUTE format( 'lock table %s IN SHARE MODE', name);
END; $$ LANGUAGE plpgsql;
CREATE FUNCTION count_chunktable(tbl regclass) RETURNS TABLE("count(*)" int, "count(*) only" int) AS $$

View File

@ -8,7 +8,7 @@ setup
SELECT create_hypertable('ts_device_table', 'time', chunk_time_interval => 10);
INSERT INTO ts_device_table SELECT generate_series(0,29,1), 1, 100, 20;
ALTER TABLE ts_device_table set(timescaledb.compress, timescaledb.compress_segmentby='location', timescaledb.compress_orderby='time');
CREATE FUNCTION lock_chunktable( name text) RETURNS void AS $$
CREATE OR REPLACE FUNCTION lock_chunktable( name text) RETURNS void AS $$
BEGIN EXECUTE format( 'lock table %s IN SHARE MODE', name);
END; $$ LANGUAGE plpgsql;
CREATE FUNCTION count_chunktable(tbl regclass) RETURNS TABLE("count(*)" int, "count(*) only" int) AS $$

View File

@ -18,6 +18,7 @@ setup {
teardown {
DROP TABLE measurements;
DROP FUNCTION lock_chunktable;
}
# Test concurrent DML and freeze chunk. The wait point happens

View File

@ -295,7 +295,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 00:00:00'), sensor, avg(v
ORDER BY 1, 2;
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_timestamp;
SELECT * FROM cagg_temp_ng_1week_timestamp ORDER BY time, sensor;
----
-- Check bucket conversion -- date without custom origin
@ -346,7 +346,7 @@ SELECT time_bucket('1 week', time, origin=>'2000-01-01 01:00:00+00'), sensor, av
ORDER BY 1, 2;
-- Even though the CAgg now uses time_bucket, we should see buckets of the same alignment
SELECT * FROM cagg_temp_ng_1week_date;
SELECT * FROM cagg_temp_ng_1week_date ORDER BY time, sensor;
-- Ensure we error out when the CAgg does not use a deprecated function
\set ON_ERROR_STOP 0

View File

@ -77,8 +77,13 @@ SELECT * FROM normal;
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
-- Reindex to avoid the dependency on the way the index is built (e.g. the caggs
-- might get their rows inserted in a different order during the refresh,
-- depending on the underlying aggregation plan, and the index will be built
-- differently, which can influence its size).
REINDEX DATABASE :TEST_DBNAME;
-- ANALYZE to get updated reltuples stats
ANALYZE normal, hyper, part;
VACUUM ANALYZE;
SELECT count(c) FROM show_chunks('hyper') c;
SELECT count(c) FROM show_chunks('contagg') c;
@ -105,7 +110,13 @@ FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
-- Turn off real-time aggregation
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
ANALYZE normal, hyper, part;
-- Reindex to avoid the dependency on the way the index is built (e.g. the caggs
-- might get their rows inserted in a different order during the refresh,
-- depending on the underlying aggregation plan, and the index will be built
-- differently, which can influence its size).
REINDEX DATABASE :TEST_DBNAME;
-- ANALYZE to get updated reltuples stats
VACUUM ANALYZE;
REFRESH MATERIALIZED VIEW telemetry_report;
SELECT jsonb_pretty(rels) AS relations FROM relations;