From e9e7c5f38e1880b2b8d8042f08108cfde8ca9628 Mon Sep 17 00:00:00 2001
From: Joshua Lockerman
Date: Tue, 29 Oct 2019 14:38:25 -0400
Subject: [PATCH] Add missing tests discovered by Codecov 3

Tests for continuous aggregates over compressed data, which also tests
selecting tableoids from compressed tables.
---
 tsl/test/expected/compression_ddl.out | 35 ++++++++++++++++-----------
 tsl/test/sql/compression_ddl.sql      | 21 +++++++---------
 2 files changed, 30 insertions(+), 26 deletions(-)

diff --git a/tsl/test/expected/compression_ddl.out b/tsl/test/expected/compression_ddl.out
index fcf928486..d5599abe0 100644
--- a/tsl/test/expected/compression_ddl.out
+++ b/tsl/test/expected/compression_ddl.out
@@ -441,15 +441,19 @@ ROLLBACK;
 DROP VIEW dependent_1;
 --create a cont agg view on the ht as well then the drop should nuke everything
 --TODO put back when cont aggs work
---CREATE VIEW test1_cont_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
---AS SELECT time_bucket('1 hour', "Time"), SUM(i)
--- FROM test1
--- GROUP BY 1;
---REFRESH MATERIALIZED VIEW test1_cont_view;
---SELECT count(*) FROM test1_cont_view;
---DROP TABLE :UNCOMPRESSED_HYPER_NAME CASCADE;
---verify that there are no more hypertable remaining
---SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
+CREATE VIEW test1_cont_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('1 hour', "Time"), SUM(i)
+   FROM test1
+   GROUP BY 1;
+REFRESH MATERIALIZED VIEW test1_cont_view;
+INFO: new materialization range for public.test1 (time column Time) (1522216800000000)
+INFO: materializing continuous aggregate public.test1_cont_view: new range up to 1522216800000000
+SELECT count(*) FROM test1_cont_view;
+ count 
+-------
+     6
+(1 row)
+
 \c :TEST_DBNAME :ROLE_SUPERUSER
 SELECT chunk.schema_name|| '.' || chunk.table_name as "COMPRESSED_CHUNK_NAME"
 FROM _timescaledb_catalog.chunk chunk
@@ -499,7 +503,7 @@ ALTER table test1 set (timescaledb.compress='f');
 SELECT count(*) = 1 FROM _timescaledb_catalog.hypertable hypertable;
  ?column? 
 ----------
- t
+ f
 (1 row)
 
 SELECT compressed_hypertable_id IS NULL FROM _timescaledb_catalog.hypertable hypertable WHERE hypertable.table_name like 'test1' ;
@@ -527,7 +531,7 @@ WHERE chunk.id IS NULL;
 --can turn compression back on
 ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'b', timescaledb.compress_orderby = '"Time" DESC');
-NOTICE: adding index _compressed_hypertable_3_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_3 USING BTREE(b, _ts_meta_sequence_num)
+NOTICE: adding index _compressed_hypertable_4_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_4 USING BTREE(b, _ts_meta_sequence_num)
 SELECT
 COUNT(*) AS count_compressed
 FROM
 (
@@ -542,7 +546,10 @@ AS sub;
  1
 (1 row)
 
-DROP TABLE test1;
+DROP TABLE test1 CASCADE;
+NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_4_57_chunk
+NOTICE: drop cascades to 2 other objects
+NOTICE: drop cascades to table _timescaledb_internal._hyper_3_56_chunk
 DROP TABLESPACE tablespace1;
 DROP TABLESPACE tablespace2;
 -- Triggers are NOT fired for compress/decompress
@@ -562,13 +569,13 @@ BEGIN
 RETURN OLD;
 END;
 $BODY$;
-CREATE TRIGGER test1_trigger 
+CREATE TRIGGER test1_trigger
 BEFORE INSERT OR UPDATE OR DELETE OR TRUNCATE ON test1
 FOR EACH STATEMENT EXECUTE PROCEDURE test1_print_func();
 INSERT INTO test1 SELECT generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 1:00', '1 hour') , 1 ;
 NOTICE: raise notice test1_print_trigger called
 -- add a row trigger too --
-CREATE TRIGGER test1_trigger2 
+CREATE TRIGGER test1_trigger2
 BEFORE INSERT OR UPDATE OR DELETE ON test1
 FOR EACH ROW EXECUTE PROCEDURE test1_print_func();
 INSERT INTO test1 SELECT '2018-03-02 1:05'::TIMESTAMPTZ, 2;
diff --git a/tsl/test/sql/compression_ddl.sql b/tsl/test/sql/compression_ddl.sql
index fe8b34bb9..8c52e82f4 100644
--- a/tsl/test/sql/compression_ddl.sql
+++ b/tsl/test/sql/compression_ddl.sql
@@ -304,17 +304,14 @@ DROP VIEW dependent_1;
 
 --create a cont agg view on the ht as well then the drop should nuke everything
 --TODO put back when cont aggs work
---CREATE VIEW test1_cont_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
---AS SELECT time_bucket('1 hour', "Time"), SUM(i)
--- FROM test1
--- GROUP BY 1;
+CREATE VIEW test1_cont_view WITH ( timescaledb.continuous, timescaledb.refresh_interval='72 hours')
+AS SELECT time_bucket('1 hour', "Time"), SUM(i)
+   FROM test1
+   GROUP BY 1;
 
---REFRESH MATERIALIZED VIEW test1_cont_view;
+REFRESH MATERIALIZED VIEW test1_cont_view;
 
---SELECT count(*) FROM test1_cont_view;
---DROP TABLE :UNCOMPRESSED_HYPER_NAME CASCADE;
---verify that there are no more hypertable remaining
---SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
+SELECT count(*) FROM test1_cont_view;
 
 \c :TEST_DBNAME :ROLE_SUPERUSER
 
@@ -375,7 +372,7 @@ AS sub;
 
 
 
-DROP TABLE test1;
+DROP TABLE test1 CASCADE;
 DROP TABLESPACE tablespace1;
 DROP TABLESPACE tablespace2;
 
@@ -390,13 +387,13 @@ BEGIN
 RETURN OLD;
 END;
 $BODY$;
-CREATE TRIGGER test1_trigger 
+CREATE TRIGGER test1_trigger
 BEFORE INSERT OR UPDATE OR DELETE OR TRUNCATE ON test1
 FOR EACH STATEMENT EXECUTE PROCEDURE test1_print_func();
 INSERT INTO test1 SELECT generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 1:00', '1 hour') , 1 ;
 
 -- add a row trigger too --
-CREATE TRIGGER test1_trigger2 
+CREATE TRIGGER test1_trigger2
 BEFORE INSERT OR UPDATE OR DELETE ON test1
 FOR EACH ROW EXECUTE PROCEDURE test1_print_func();
 INSERT INTO test1 SELECT '2018-03-02 1:05'::TIMESTAMPTZ, 2;