timescaledb/tsl/test/expected/compression_ddl.out
Commit 5be6a3e4e9 (gayyappan): Support column rename for hypertables with compression enabled
ALTER TABLE <hypertable> RENAME <column_name> TO <new_column_name>
is now supported for hypertables that have compression enabled.

Note: column renaming is not supported for distributed hypertables,
so it will not work on distributed hypertables that have
compression enabled.
2021-02-19 10:21:50 -05:00
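
For illustration only, a sketch of the supported operation with hypothetical
hypertable and column names (not part of the test output below):

    ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_segmentby = 'device_id');
    ALTER TABLE metrics RENAME reading_raw TO reading;

The "ALTER TABLE rename column" section of this test exercises the same path
against test1, including a rename of the segmentby column.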


-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------
create table rand_minstd_state(i bigint);
create function rand_minstd_advance(bigint) returns bigint
language sql immutable as
$$
select (16807 * $1) % 2147483647
$$;
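-- rand_minstd_advance implements the MINSTD linear congruential generator:
-- next = (16807 * prev) mod (2^31 - 1)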
create function gen_rand_minstd() returns bigint
language sql security definer as
$$
update rand_minstd_state set i = rand_minstd_advance(i) returning i
$$;
-- seed the random number generator
insert into rand_minstd_state values (321);
\c :TEST_DBNAME :ROLE_SUPERUSER
SET client_min_messages = ERROR;
DROP TABLESPACE IF EXISTS tablespace1;
DROP TABLESPACE IF EXISTS tablespace2;
SET client_min_messages = NOTICE;
--test hypertable with tablespaces
CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE test1 ("Time" timestamptz, i integer, b bigint, t text);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test1
(1 row)
INSERT INTO test1 SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-28 1:00', '1 hour') t;
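-- enable compression: within each compressed batch, rows are segmented by b and ordered by "Time" DESC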
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'b', timescaledb.compress_orderby = '"Time" DESC');
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
)
AS sub;
count_compressed
------------------
27
(1 row)
--make sure allowed DDL still works
ALTER TABLE test1 CLUSTER ON "test1_Time_idx";
ALTER TABLE test1 SET WITHOUT CLUSTER;
CREATE INDEX new_index ON test1(b);
DROP INDEX new_index;
ALTER TABLE test1 SET (fillfactor=100);
ALTER TABLE test1 RESET (fillfactor);
ALTER TABLE test1 ALTER COLUMN b SET STATISTICS 10;
-- TABLESPACES
-- For tablespaces with compressed chunks the semantics are the following
-- (each rule is exercised by the statements in this section; see the sketch below):
-- - compressed chunks get put into the same tablespace as the
--   uncompressed chunk on compression.
-- - set tablespace on the uncompressed hypertable cascades to the compressed hypertable + chunks
-- - set tablespace on all chunks is blocked (same as w/o compression)
-- - set tablespace / move_chunk directly on the internal compressed chunk errors
-- - move_chunk on the corresponding uncompressed chunk works and moves the compressed chunk as well
-- In the future we will:
-- - add a tablespace option to the compress_chunk function and policy (this will override the setting
--   of the uncompressed chunk). This will allow changing tablespaces upon compression.
-- - Note: the current plan is to never honor the setting on the compressed hypertable. In fact,
--   we will block setting the tablespace on compressed hypertables.
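-- A minimal sketch of the intended usage, with hypothetical hypertable/chunk/tablespace names
-- (the statements in this section exercise the same rules against test1 and its chunks):
--   ALTER TABLE metrics SET TABLESPACE ts_new;                    -- cascades to the compressed hypertable and all chunks
--   ALTER TABLE _timescaledb_internal.compress_hyper_N_M_chunk
--     SET TABLESPACE ts_new;                                      -- errors: not supported on compressed chunks
--   SELECT move_chunk(chunk => '_timescaledb_internal._hyper_N_M_chunk',
--                     destination_tablespace => 'ts_new');        -- moves the chunk and its compressed data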
SELECT count(*) as "COUNT_CHUNKS_UNCOMPRESSED"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' \gset
SELECT count(*) as "COUNT_CHUNKS_COMPRESSED"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1' \gset
ALTER TABLE test1 SET TABLESPACE tablespace1;
--all chunks plus both the compressed and uncompressed hypertables moved to the new tablespace
SELECT count(*) = (:COUNT_CHUNKS_UNCOMPRESSED +:COUNT_CHUNKS_COMPRESSED + 2)
FROM pg_tables WHERE tablespace = 'tablespace1';
?column?
----------
t
(1 row)
ALTER TABLE test1 SET TABLESPACE tablespace2;
SELECT count(*) = (:COUNT_CHUNKS_UNCOMPRESSED +:COUNT_CHUNKS_COMPRESSED + 2)
FROM pg_tables WHERE tablespace = 'tablespace2';
?column?
----------
t
(1 row)
SELECT
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME",
uncomp_chunk.schema_name|| '.' || uncomp_chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk comp_chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (comp_chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
INNER JOIN _timescaledb_catalog.chunk uncomp_chunk ON (uncomp_chunk.compressed_chunk_id = comp_chunk.id)
WHERE uncomp_hyper.table_name like 'test1' ORDER BY comp_chunk.id LIMIT 1\gset
-- ensure the compressed chunk cannot be moved directly
SELECT tablename
FROM pg_tables WHERE tablespace = 'tablespace1';
tablename
-----------
(0 rows)
\set ON_ERROR_STOP 0
ALTER TABLE :COMPRESSED_CHUNK_NAME SET TABLESPACE tablespace1;
ERROR: changing tablespace of compressed chunk is not supported
\set ON_ERROR_STOP 1
SELECT tablename
FROM pg_tables WHERE tablespace = 'tablespace1';
tablename
-----------
(0 rows)
-- ensure that moving the uncompressed chunk moves the compressed chunk as well
ALTER TABLE :UNCOMPRESSED_CHUNK_NAME SET TABLESPACE tablespace1;
SELECT tablename
FROM pg_tables WHERE tablespace = 'tablespace1';
tablename
---------------------------
compress_hyper_2_28_chunk
_hyper_1_1_chunk
(2 rows)
ALTER TABLE test1 SET TABLESPACE tablespace2;
SELECT tablename
FROM pg_tables WHERE tablespace = 'tablespace1';
tablename
-----------
(0 rows)
\set ON_ERROR_STOP 0
SELECT move_chunk(chunk=>:'COMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."compress_hyper_2_28_chunk__compressed_hypertable_2_b__ts_meta_s"');
ERROR: cannot directly move internal compression data
\set ON_ERROR_STOP 1
-- ensure that moving the uncompressed chunk moves the compressed chunk as well
SELECT move_chunk(chunk=>:'UNCOMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."_hyper_1_1_chunk_test1_Time_idx"');
NOTICE: ignoring index parameter
move_chunk
------------
(1 row)
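-- the reorder index was ignored (NOTICE above) because the chunk has compressed data associated with it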
SELECT tablename
FROM pg_tables WHERE tablespace = 'tablespace1';
tablename
---------------------------
_hyper_1_1_chunk
compress_hyper_2_28_chunk
(2 rows)
-- the compressed chunk is in here now
SELECT count(*)
FROM pg_tables WHERE tablespace = 'tablespace1';
count
-------
2
(1 row)
SELECT decompress_chunk(:'UNCOMPRESSED_CHUNK_NAME');
decompress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
(1 row)
--the compressed chunk was dropped by decompression
SELECT count(*)
FROM pg_tables WHERE tablespace = 'tablespace1';
count
-------
1
(1 row)
SELECT move_chunk(chunk=>:'UNCOMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."_hyper_1_1_chunk_test1_Time_idx"');
move_chunk
------------
(1 row)
--the uncompressed chunk has now been moved
SELECT count(*)
FROM pg_tables WHERE tablespace = 'tablespace1';
count
-------
1
(1 row)
SELECT compress_chunk(:'UNCOMPRESSED_CHUNK_NAME');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
(1 row)
--the compressed chunk is now in the same tablespace as the uncompressed one
SELECT count(*)
FROM pg_tables WHERE tablespace = 'tablespace1';
count
-------
2
(1 row)
--
-- DROP CHUNKS
--
SELECT count(*) as count_chunks_uncompressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1';
count_chunks_uncompressed
---------------------------
27
(1 row)
SELECT count(*) as count_chunks_compressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1';
count_chunks_compressed
-------------------------
27
(1 row)
SELECT chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
DROP TABLE :UNCOMPRESSED_CHUNK_NAME;
--should decrease #chunks, both compressed and uncompressed
SELECT count(*) as count_chunks_uncompressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1';
count_chunks_uncompressed
---------------------------
26
(1 row)
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
SELECT count(*) as orphaned_compression_chunk_size
FROM _timescaledb_catalog.compression_chunk_size size
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
WHERE chunk.id IS NULL;
orphaned_compression_chunk_size
---------------------------------
0
(1 row)
SELECT count(*) as count_chunks_compressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1';
count_chunks_compressed
-------------------------
26
(1 row)
SELECT drop_chunks('test1', older_than => '2018-03-10'::TIMESTAMPTZ);
drop_chunks
----------------------------------------
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
_timescaledb_internal._hyper_1_7_chunk
_timescaledb_internal._hyper_1_8_chunk
(7 rows)
--should decrease #chunks, both compressed and uncompressed
SELECT count(*) as count_chunks_uncompressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1';
count_chunks_uncompressed
---------------------------
19
(1 row)
SELECT count(*) as count_chunks_compressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1';
count_chunks_compressed
-------------------------
19
(1 row)
SELECT chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
SELECT chunk.schema_name|| '.' || chunk.table_name as "COMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1' ORDER BY chunk.id LIMIT 1
\gset
\set ON_ERROR_STOP 0
DROP TABLE :COMPRESSED_CHUNK_NAME;
ERROR: dropping compressed chunks not supported
\set ON_ERROR_STOP 1
SELECT
chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME",
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.chunk comp_chunk ON (chunk.compressed_chunk_id = comp_chunk.id)
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
--create a dependent object on the compressed chunk to test cascade behaviour
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_CHUNK_NAME;
\set ON_ERROR_STOP 0
--errors due to dependent objects
DROP TABLE :UNCOMPRESSED_CHUNK_NAME;
ERROR: cannot drop table _timescaledb_internal.compress_hyper_2_36_chunk because other objects depend on it
\set ON_ERROR_STOP 1
DROP TABLE :UNCOMPRESSED_CHUNK_NAME CASCADE;
NOTICE: drop cascades to view dependent_1
--should decrease #chunks, both compressed and uncompressed
SELECT count(*) as count_chunks_uncompressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1';
count_chunks_uncompressed
---------------------------
18
(1 row)
SELECT count(*) as count_chunks_compressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1';
count_chunks_compressed
-------------------------
18
(1 row)
SELECT
chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME",
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.chunk comp_chunk ON (chunk.compressed_chunk_id = comp_chunk.id)
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_CHUNK_NAME;
\set ON_ERROR_STOP 0
\set VERBOSITY default
--errors due to dependent objects
SELECT drop_chunks('test1', older_than => '2018-03-28'::TIMESTAMPTZ);
ERROR: cannot drop table _timescaledb_internal.compress_hyper_2_37_chunk because other objects depend on it
DETAIL: view dependent_1 depends on table _timescaledb_internal.compress_hyper_2_37_chunk
HINT: Use DROP ... to drop the dependent objects.
\set VERBOSITY terse
\set ON_ERROR_STOP 1
DROP VIEW dependent_1;
SELECT drop_chunks('test1', older_than => '2018-03-28'::TIMESTAMPTZ);
drop_chunks
-----------------------------------------
_timescaledb_internal._hyper_1_10_chunk
_timescaledb_internal._hyper_1_11_chunk
_timescaledb_internal._hyper_1_12_chunk
_timescaledb_internal._hyper_1_13_chunk
_timescaledb_internal._hyper_1_14_chunk
_timescaledb_internal._hyper_1_15_chunk
_timescaledb_internal._hyper_1_16_chunk
_timescaledb_internal._hyper_1_17_chunk
_timescaledb_internal._hyper_1_18_chunk
_timescaledb_internal._hyper_1_19_chunk
_timescaledb_internal._hyper_1_20_chunk
_timescaledb_internal._hyper_1_21_chunk
_timescaledb_internal._hyper_1_22_chunk
_timescaledb_internal._hyper_1_23_chunk
_timescaledb_internal._hyper_1_24_chunk
_timescaledb_internal._hyper_1_25_chunk
_timescaledb_internal._hyper_1_26_chunk
(17 rows)
--should decrease #chunks, both compressed and uncompressed
SELECT count(*) as count_chunks_uncompressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1';
count_chunks_uncompressed
---------------------------
1
(1 row)
SELECT count(*) as count_chunks_compressed
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1';
count_chunks_compressed
-------------------------
1
(1 row)
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
SELECT count(*) as orphaned_compression_chunk_size
FROM _timescaledb_catalog.compression_chunk_size size
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
WHERE chunk.id IS NULL;
orphaned_compression_chunk_size
---------------------------------
0
(1 row)
--
-- DROP HYPERTABLE
--
SELECT comp_hyper.schema_name|| '.' || comp_hyper.table_name as "COMPRESSED_HYPER_NAME"
FROM _timescaledb_catalog.hypertable comp_hyper
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1' ORDER BY comp_hyper.id LIMIT 1 \gset
\set ON_ERROR_STOP 0
DROP TABLE :COMPRESSED_HYPER_NAME;
ERROR: dropping compressed hypertables not supported
\set ON_ERROR_STOP 1
BEGIN;
SELECT hypertable.schema_name|| '.' || hypertable.table_name as "UNCOMPRESSED_HYPER_NAME"
FROM _timescaledb_catalog.hypertable hypertable
WHERE hypertable.table_name like 'test1' ORDER BY hypertable.id LIMIT 1 \gset
--before the drop there are 2 hypertables: the compressed and uncompressed ones
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
count
-------
2
(1 row)
--add policy to make sure it's dropped later
select add_compression_policy(:'UNCOMPRESSED_HYPER_NAME', interval '1 day');
add_compression_policy
------------------------
1000
(1 row)
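-- add_compression_policy returns the id of the background job implementing the policy; verify it exists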
SELECT count(*) FROM _timescaledb_config.bgw_job WHERE id >= 1000;
count
-------
1
(1 row)
DROP TABLE :UNCOMPRESSED_HYPER_NAME;
--verify that there are no hypertables remaining
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
count
-------
0
(1 row)
SELECT count(*) FROM _timescaledb_catalog.hypertable_compression;
count
-------
0
(1 row)
--verify that the policy is gone
SELECT count(*) FROM _timescaledb_config.bgw_job WHERE id >= 1000;
count
-------
0
(1 row)
ROLLBACK;
--create a dependent object on the compressed hypertable to test cascade behaviour
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_HYPER_NAME;
\set ON_ERROR_STOP 0
DROP TABLE :UNCOMPRESSED_HYPER_NAME;
ERROR: cannot drop table _timescaledb_internal._compressed_hypertable_2 because other objects depend on it
\set ON_ERROR_STOP 1
BEGIN;
DROP TABLE :UNCOMPRESSED_HYPER_NAME CASCADE;
NOTICE: drop cascades to 2 other objects
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
count
-------
0
(1 row)
ROLLBACK;
DROP VIEW dependent_1;
--also create a continuous aggregate on the hypertable; the drop should then remove everything
CREATE MATERIALIZED VIEW test1_cont_view
WITH (timescaledb.continuous,
timescaledb.materialized_only=true)
AS SELECT time_bucket('1 hour', "Time"), SUM(i)
FROM test1
GROUP BY 1 WITH NO DATA;
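-- WITH NO DATA leaves the continuous aggregate empty until it is refreshed below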
SELECT add_continuous_aggregate_policy('test1_cont_view', NULL, '1 hour'::interval, '1 day'::interval);
add_continuous_aggregate_policy
---------------------------------
1001
(1 row)
CALL refresh_continuous_aggregate('test1_cont_view', NULL, NULL);
SELECT count(*) FROM test1_cont_view;
count
-------
9
(1 row)
\c :TEST_DBNAME :ROLE_SUPERUSER
SELECT chunk.schema_name|| '.' || chunk.table_name as "COMPRESSED_CHUNK_NAME"
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
WHERE uncomp_hyper.table_name like 'test1' ORDER BY chunk.id LIMIT 1
\gset
ALTER TABLE test1 OWNER TO :ROLE_DEFAULT_PERM_USER_2;
--make sure the new owner is propagated down to the compressed hypertable and chunks
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = 'test1'::regclass;
rolname
---------------------
default_perm_user_2
(1 row)
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = :'COMPRESSED_HYPER_NAME'::regclass;
rolname
---------------------
default_perm_user_2
(1 row)
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = :'COMPRESSED_CHUNK_NAME'::regclass;
rolname
---------------------
default_perm_user_2
(1 row)
--
-- turn off compression
--
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NOT NULL ORDER BY chunk.id
)
AS sub;
count_compressed
------------------
1
(1 row)
select add_compression_policy('test1', interval '1 day');
add_compression_policy
------------------------
1002
(1 row)
\set ON_ERROR_STOP 0
ALTER table test1 set (timescaledb.compress='f');
\set ON_ERROR_STOP 1
select remove_compression_policy('test1');
remove_compression_policy
---------------------------
t
(1 row)
ALTER table test1 set (timescaledb.compress='f');
--only the uncompressed hypertable and the cagg materialization hypertable are left (so the count is not 1)
SELECT count(*) = 1 FROM _timescaledb_catalog.hypertable hypertable;
?column?
----------
f
(1 row)
SELECT compressed_hypertable_id IS NULL FROM _timescaledb_catalog.hypertable hypertable WHERE hypertable.table_name like 'test1' ;
?column?
----------
t
(1 row)
--no hypertable compression entries left
SELECT count(*) = 0 FROM _timescaledb_catalog.hypertable_compression;
?column?
----------
t
(1 row)
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
SELECT count(*) as orphaned_compression_chunk_size
FROM _timescaledb_catalog.compression_chunk_size size
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
WHERE chunk.id IS NULL;
orphaned_compression_chunk_size
---------------------------------
0
(1 row)
--can turn compression back on
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'b', timescaledb.compress_orderby = '"Time" DESC');
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
)
AS sub;
count_compressed
------------------
1
(1 row)
DROP TABLE test1 CASCADE;
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_4_57_chunk
NOTICE: drop cascades to 2 other objects
NOTICE: drop cascades to table _timescaledb_internal._hyper_3_56_chunk
DROP TABLESPACE tablespace1;
DROP TABLESPACE tablespace2;
-- Triggers are NOT fired for compress/decompress
CREATE TABLE test1 ("Time" timestamptz, i integer);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test1
(1 row)
CREATE OR REPLACE FUNCTION test1_print_func()
RETURNS TRIGGER LANGUAGE PLPGSQL AS
$BODY$
BEGIN
RAISE NOTICE ' raise notice test1_print_trigger called ';
RETURN OLD;
END;
$BODY$;
CREATE TRIGGER test1_trigger
BEFORE INSERT OR UPDATE OR DELETE OR TRUNCATE ON test1
FOR EACH STATEMENT EXECUTE FUNCTION test1_print_func();
INSERT INTO test1 SELECT generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 1:00', '1 hour') , 1 ;
NOTICE: raise notice test1_print_trigger called
-- add a row trigger too --
CREATE TRIGGER test1_trigger2
BEFORE INSERT OR UPDATE OR DELETE ON test1
FOR EACH ROW EXECUTE FUNCTION test1_print_func();
INSERT INTO test1 SELECT '2018-03-02 1:05'::TIMESTAMPTZ, 2;
NOTICE: raise notice test1_print_trigger called
NOTICE: raise notice test1_print_trigger called
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_orderby = '"Time" DESC');
SELECT COUNT(*) AS count_compressed FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id) AS subq;
count_compressed
------------------
2
(1 row)
SELECT COUNT(*) AS count_compressed FROM
(
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id ) as subq;
count_compressed
------------------
2
(1 row)
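-- note: neither compress_chunk nor decompress_chunk fired the statement- or row-level triggers (no NOTICEs above)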
DROP TABLE test1;
-- test disabling compression on hypertables with caggs and dropped chunks
-- github issue 2844
CREATE TABLE i2844 (created_at timestamptz NOT NULL,c1 float);
SELECT create_hypertable('i2844', 'created_at', chunk_time_interval => '6 hour'::interval);
create_hypertable
--------------------
(7,public,i2844,t)
(1 row)
INSERT INTO i2844 SELECT generate_series('2000-01-01'::timestamptz, '2000-01-02'::timestamptz,'1h'::interval);
CREATE MATERIALIZED VIEW test_agg WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', created_at) AS bucket, AVG(c1) AS avg_c1 FROM i2844 GROUP BY bucket;
NOTICE: refreshing continuous aggregate "test_agg"
ALTER TABLE i2844 SET (timescaledb.compress);
SELECT compress_chunk(show_chunks) AS compressed_chunk FROM show_chunks('i2844');
compressed_chunk
-----------------------------------------
_timescaledb_internal._hyper_7_62_chunk
_timescaledb_internal._hyper_7_63_chunk
_timescaledb_internal._hyper_7_64_chunk
_timescaledb_internal._hyper_7_65_chunk
_timescaledb_internal._hyper_7_66_chunk
(5 rows)
SELECT drop_chunks('i2844', older_than => '2000-01-01 18:00'::timestamptz);
drop_chunks
-----------------------------------------
_timescaledb_internal._hyper_7_62_chunk
_timescaledb_internal._hyper_7_63_chunk
_timescaledb_internal._hyper_7_64_chunk
(3 rows)
SELECT decompress_chunk(show_chunks, if_compressed => TRUE) AS decompressed_chunks FROM show_chunks('i2844');
decompressed_chunks
-----------------------------------------
_timescaledb_internal._hyper_7_65_chunk
_timescaledb_internal._hyper_7_66_chunk
(2 rows)
ALTER TABLE i2844 SET (timescaledb.compress = FALSE);
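-- disabling compression succeeds despite the cagg and the chunks dropped while compressed (issue 2844)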
-- compression alter schema tests
\ir include/compression_alter.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
CREATE TABLE test1 ("Time" timestamptz, intcol integer, bntcol bigint, txtcol text);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
psql:include/compression_alter.sql:6: NOTICE: adding not-null constraint to column "Time"
table_name
------------
test1
(1 row)
INSERT INTO test1
SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-05 1:00', '1 hour') t;
INSERT INTO test1
SELECT '2018-03-04 2:00', 100, 200, 'hello' ;
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'bntcol', timescaledb.compress_orderby = '"Time" DESC');
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
)
AS sub;
count_compressed
------------------
4
(1 row)
-- TEST: ALTER TABLE add column tests --
ALTER TABLE test1 ADD COLUMN new_coli integer;
ALTER TABLE test1 ADD COLUMN new_colv varchar(30);
SELECT * FROM _timescaledb_catalog.hypertable_compression
ORDER BY attname;
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+----------+--------------------------+------------------------+----------------------+-------------+--------------------
10 | Time | 4 | | 1 | f | t
10 | bntcol | 0 | 1 | | |
10 | intcol | 4 | | | |
10 | new_coli | 4 | | | |
10 | new_colv | 2 | | | |
10 | txtcol | 2 | | | |
(6 rows)
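-- the newly added columns automatically get hypertable_compression entries with default compression algorithms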
SELECT count(*) from test1 where new_coli is not null;
count
-------
0
(1 row)
SELECT count(*) from test1 where new_colv is null;
count
-------
74
(1 row)
--decompress 1 chunk and query again
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NOT NULL ORDER BY chunk.id
LIMIT 1
)
AS sub;
count_compressed
------------------
1
(1 row)
SELECT count(*) from test1 where new_coli is not null;
count
-------
0
(1 row)
SELECT count(*) from test1 where new_colv is null;
count
-------
74
(1 row)
--compress all chunks and query ---
--create new chunk and fill in data --
INSERT INTO test1 SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text , 100, '101t'
FROM generate_series('2018-03-08 1:00'::TIMESTAMPTZ, '2018-03-09 1:00', '1 hour') t;
SELECT count(*) from test1 where new_coli = 100;
count
-------
25
(1 row)
SELECT count(*) from test1 where new_colv = '101t';
count
-------
25
(1 row)
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
)
AS sub;
count_compressed
------------------
3
(1 row)
SELECT count(*) from test1 where new_coli = 100;
count
-------
25
(1 row)
SELECT count(*) from test1 where new_colv = '101t';
count
-------
25
(1 row)
CREATE INDEX new_index ON test1(new_colv);
-- TEST 2: ALTER TABLE rename column
SELECT * FROM _timescaledb_catalog.hypertable_compression
WHERE attname = 'new_coli' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
WHERE table_name = 'test1' );
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+----------+--------------------------+------------------------+----------------------+-------------+--------------------
10 | new_coli | 4 | | | |
(1 row)
ALTER TABLE test1 RENAME new_coli TO coli;
SELECT * FROM _timescaledb_catalog.hypertable_compression
WHERE attname = 'coli' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
WHERE table_name = 'test1' );
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+---------+--------------------------+------------------------+----------------------+-------------+--------------------
10 | coli | 4 | | | |
(1 row)
SELECT count(*) from test1 where coli = 100;
count
-------
25
(1 row)
--rename the segmentby column
ALTER TABLE test1 RENAME bntcol TO bigintcol ;
SELECT * FROM _timescaledb_catalog.hypertable_compression
WHERE attname = 'bigintcol' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
WHERE table_name = 'test1' );
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+-----------+--------------------------+------------------------+----------------------+-------------+--------------------
10 | bigintcol | 0 | 1 | | |
(1 row)
--query by the segmentby column name
SELECT * from test1 WHERE bigintcol = 100;
Time | intcol | bigintcol | txtcol | coli | new_colv
------+--------+-----------+--------+------+----------
(0 rows)
SELECT * from test1 WHERE bigintcol = 200;
Time | intcol | bigintcol | txtcol | coli | new_colv
------------------------------+--------+-----------+--------+------+----------
Sun Mar 04 02:00:00 2018 PST | 100 | 200 | hello | |
(1 row)
-- add a new chunk and compress
INSERT INTO test1 SELECT '2019-03-04 2:00', 99, 800, 'newchunk' ;
SELECT COUNT(*) AS count_compressed
FROM
(
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
FROM _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
WHERE hypertable.table_name = 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
) q;
count_compressed
------------------
1
(1 row)
--check that all chunks have the new column name
--both counts should be equal
SELECT count(*) FROM _timescaledb_catalog.chunk
WHERE hypertable_id = ( SELECT id FROM _timescaledb_catalog.hypertable
WHERE table_name = 'test1' );
count
-------
7
(1 row)
SELECT count(*)
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
where inhparent = 'test1'::regclass)
and attname = 'bigintcol' ) q;
count
-------
7
(1 row)
--check the count on the internal compression table too, i.e. all of its chunks have
--the correct column name
SELECT format('%I.%I', cht.schema_name, cht.table_name) AS "COMPRESSION_TBLNM"
FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht
WHERE ht.table_name = 'test1' and cht.id = ht.compressed_hypertable_id \gset
SELECT count(*)
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
where inhparent = :'COMPRESSION_TBLNM'::regclass )
and attname = 'bigintcol' ) q;
count
-------
7
(1 row)
-- check column name truncation with renames
-- check that the name change is reflected in the compression settings
ALTER TABLE test1 RENAME bigintcol TO
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc;
psql:include/compression_alter.sql:133: NOTICE: identifier "ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc" will be truncated to "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca"
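-- PostgreSQL truncates identifiers to 63 bytes, so the compression settings show the truncated column name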
SELECT * from timescaledb_information.compression_settings
WHERE hypertable_name = 'test1' and attname like 'ccc%';
hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
-------------------+-----------------+-----------------------------------------------------------------+------------------------+----------------------+-------------+--------------------
public | test1 | cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca | 1 | | |
(1 row)
SELECT count(*)
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
where inhparent = :'COMPRESSION_TBLNM'::regclass )
and attname like 'ccc%a' ) q;
count
-------
7
(1 row)
ALTER TABLE test1 RENAME
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc
TO bigintcol;
psql:include/compression_alter.sql:146: NOTICE: identifier "ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc" will be truncated to "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca"
SELECT * from timescaledb_information.compression_settings
WHERE hypertable_name = 'test1' and attname = 'bigintcol' ;
hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
-------------------+-----------------+-----------+------------------------+----------------------+-------------+--------------------
public | test1 | bigintcol | 1 | | |
(1 row)