From 45fac0ebe6bfe8d486b07327ea9590d4ed44d789 Mon Sep 17 00:00:00 2001
From: Sven Klemm
Date: Wed, 21 Aug 2019 14:41:22 +0200
Subject: [PATCH] Add test for compress_chunk plan invalidation

This patch adds a testcase for prepared statement plan invalidation
when a chunk gets compressed.
---
 tsl/test/expected/compression.out | 60 ++++++++++++++++++++++++++++++-
 tsl/test/sql/compression.sql      | 26 +++++++++++++-
 2 files changed, 84 insertions(+), 2 deletions(-)

diff --git a/tsl/test/expected/compression.out b/tsl/test/expected/compression.out
index bd029dcff..2fa81d1fd 100644
--- a/tsl/test/expected/compression.out
+++ b/tsl/test/expected/compression.out
@@ -289,10 +289,68 @@
 where hypertable_name::text like 'conditions';
 --make sure compressed_chunk_id is reset to NULL
 select ch1.compressed_chunk_id IS NULL
-FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
  ?column? 
 ----------
  t
  t
 (2 rows)
 
+-- test plans get invalidated when chunks get compressed
+SET timescaledb.enable_transparent_decompression TO ON;
+CREATE TABLE plan_inval(time timestamptz, device_id int);
+SELECT create_hypertable('plan_inval','time');
+NOTICE:  adding not-null constraint to column "time"
+    create_hypertable    
+-------------------------
+ (5,public,plan_inval,t)
+(1 row)
+
+ALTER TABLE plan_inval SET (timescaledb.compress,timescaledb.compress_orderby='time desc');
+-- create 2 chunks
+INSERT INTO plan_inval SELECT * FROM (VALUES ('2000-01-01'::timestamptz,1), ('2000-01-07'::timestamptz,1)) v(time,device_id);
+SET max_parallel_workers_per_gather to 0;
+PREPARE prep_plan AS SELECT count(*) FROM plan_inval;
+EXECUTE prep_plan;
+ count 
+-------
+     2
+(1 row)
+
+EXECUTE prep_plan;
+ count 
+-------
+     2
+(1 row)
+
+EXECUTE prep_plan;
+ count 
+-------
+     2
+(1 row)
+
+-- get name of first chunk
+SELECT tableoid::regclass AS "CHUNK_NAME" FROM plan_inval ORDER BY time LIMIT 1
+\gset
+SELECT compress_chunk(:'CHUNK_NAME');
+ compress_chunk 
+----------------
+ 
+(1 row)
+
+EXECUTE prep_plan;
+ count 
+-------
+     2
+(1 row)
+
+EXPLAIN (COSTS OFF) EXECUTE prep_plan;
+                           QUERY PLAN                            
+-----------------------------------------------------------------
+ Aggregate
+   ->  Append
+         ->  Custom Scan (DecompressChunk) on _hyper_5_11_chunk
+               ->  Seq Scan on compress_hyper_6_13_chunk
+         ->  Seq Scan on _hyper_5_12_chunk
+(5 rows)
+
diff --git a/tsl/test/sql/compression.sql b/tsl/test/sql/compression.sql
index eb597e779..5fc828d45 100644
--- a/tsl/test/sql/compression.sql
+++ b/tsl/test/sql/compression.sql
@@ -114,4 +114,28 @@
 where hypertable_name::text like 'conditions';
 --make sure compressed_chunk_id is reset to NULL
 select ch1.compressed_chunk_id IS NULL
-FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
+
+-- test plans get invalidated when chunks get compressed
+
+SET timescaledb.enable_transparent_decompression TO ON;
+CREATE TABLE plan_inval(time timestamptz, device_id int);
+SELECT create_hypertable('plan_inval','time');
+ALTER TABLE plan_inval SET (timescaledb.compress,timescaledb.compress_orderby='time desc');
+
+-- create 2 chunks
+INSERT INTO plan_inval SELECT * FROM (VALUES ('2000-01-01'::timestamptz,1), ('2000-01-07'::timestamptz,1)) v(time,device_id);
+SET max_parallel_workers_per_gather to 0;
+PREPARE prep_plan AS SELECT count(*) FROM plan_inval;
+EXECUTE prep_plan;
+EXECUTE prep_plan;
+EXECUTE prep_plan;
+-- get name of first chunk
+SELECT tableoid::regclass AS "CHUNK_NAME" FROM plan_inval ORDER BY time LIMIT 1
+\gset
+
+SELECT compress_chunk(:'CHUNK_NAME');
+
+EXECUTE prep_plan;
+EXPLAIN (COSTS OFF) EXECUTE prep_plan;
+