mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-14 17:43:34 +08:00
This was previously disabled as no data resided on the uncompressed chunk once it was compressed, but this is not the case anymore with partially compressed chunks, so we enable indexscan for the uncompressed chunk again. Fixes #5432 Co-authored-by: Ante Kresic <ante.kresic@gmail.com>
1812 lines
70 KiB
Plaintext
1812 lines
70 KiB
Plaintext
-- This file and its contents are licensed under the Timescale License.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-TIMESCALE for a copy of the license.
|
|
\ir include/rand_generator.sql
|
|
-- This file and its contents are licensed under the Timescale License.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-TIMESCALE for a copy of the license.
|
|
--------------------------
|
|
-- cheap rand generator --
|
|
--------------------------
|
|
create table rand_minstd_state(i bigint);
|
|
-- Advance the MINSTD (Park-Miller) linear congruential generator:
-- next = (16807 * state) mod (2^31 - 1). Pure computation, hence IMMUTABLE.
create function rand_minstd_advance(bigint) returns bigint
|
|
language sql immutable as
|
|
$$
|
|
select (16807 * $1) % 2147483647
|
|
$$;
|
|
-- Produce the next pseudo-random value by advancing the single shared
-- state row in rand_minstd_state and returning the new value.
-- SECURITY DEFINER so any test role can call it regardless of table grants.
create function gen_rand_minstd() returns bigint
|
|
language sql security definer as
|
|
$$
|
|
update rand_minstd_state set i = rand_minstd_advance(i) returning i
|
|
$$;
|
|
-- seed the random num generator
|
|
insert into rand_minstd_state values (321);
|
|
\c :TEST_DBNAME :ROLE_SUPERUSER
|
|
SET client_min_messages = ERROR;
|
|
DROP TABLESPACE IF EXISTS tablespace1;
|
|
DROP TABLESPACE IF EXISTS tablespace2;
|
|
SET client_min_messages = NOTICE;
|
|
--test hypertable with tablespaces
|
|
CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
|
|
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
|
|
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
|
CREATE TABLE test1 ("Time" timestamptz, i integer, b bigint, t text);
|
|
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
|
|
NOTICE: adding not-null constraint to column "Time"
|
|
table_name
|
|
------------
|
|
test1
|
|
(1 row)
|
|
|
|
INSERT INTO test1 SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-28 1:00', '1 hour') t;
|
|
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'b', timescaledb.compress_orderby = '"Time" DESC');
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
27
|
|
(1 row)
|
|
|
|
--make sure allowed ddl still work
|
|
ALTER TABLE test1 CLUSTER ON "test1_Time_idx";
|
|
ALTER TABLE test1 SET WITHOUT CLUSTER;
|
|
CREATE INDEX new_index ON test1(b);
|
|
DROP INDEX new_index;
|
|
ALTER TABLE test1 SET (fillfactor=100);
|
|
ALTER TABLE test1 RESET (fillfactor);
|
|
ALTER TABLE test1 ALTER COLUMN b SET STATISTICS 10;
|
|
--test adding boolean columns with default and not null
|
|
CREATE TABLE records (time timestamp NOT NULL);
|
|
SELECT create_hypertable('records', 'time');
|
|
WARNING: column type "timestamp without time zone" used for "time" does not follow best practices
|
|
create_hypertable
|
|
----------------------
|
|
(3,public,records,t)
|
|
(1 row)
|
|
|
|
ALTER TABLE records SET (timescaledb.compress = true);
|
|
ALTER TABLE records ADD COLUMN col1 boolean DEFAULT false NOT NULL;
|
|
-- NULL constraints are useless and it is safe to allow adding this
|
|
-- column with NULL constraint to a compressed hypertable (Issue #5151)
|
|
ALTER TABLE records ADD COLUMN col2 BOOLEAN NULL;
|
|
DROP table records CASCADE;
|
|
-- TABLESPACES
|
|
-- For tablespaces with compressed chunks the semantics are the following:
|
|
-- - compressed chunks get put into the same tablespace as the
|
|
-- uncompressed chunk on compression.
|
|
-- - set tablespace on uncompressed hypertable cascades to compressed hypertable+chunks
|
|
-- - set tablespace on all chunks is blocked (same as w/o compression)
|
|
-- - move chunks on a uncompressed chunk errors
|
|
-- - move chunks on compressed chunk works
|
|
--In the future we will:
|
|
-- - add tablespace option to compress_chunk function and policy (this will override the setting
|
|
-- of the uncompressed chunk). This will allow changing tablespaces upon compression
|
|
-- - Note: The current plan is to never listen to the setting on compressed hypertable. In fact,
|
|
-- we will block setting tablespace on compressed hypertables
|
|
SELECT count(*) as "COUNT_CHUNKS_UNCOMPRESSED"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' \gset
|
|
SELECT count(*) as "COUNT_CHUNKS_COMPRESSED"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1' \gset
|
|
ALTER TABLE test1 SET TABLESPACE tablespace1;
|
|
--all chunks + both the compressed and uncompressed hypertable moved to new tablespace
|
|
SELECT count(*) = (:COUNT_CHUNKS_UNCOMPRESSED +:COUNT_CHUNKS_COMPRESSED + 2)
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
?column?
|
|
----------
|
|
t
|
|
(1 row)
|
|
|
|
ALTER TABLE test1 SET TABLESPACE tablespace2;
|
|
SELECT count(*) = (:COUNT_CHUNKS_UNCOMPRESSED +:COUNT_CHUNKS_COMPRESSED + 2)
|
|
FROM pg_tables WHERE tablespace = 'tablespace2';
|
|
?column?
|
|
----------
|
|
t
|
|
(1 row)
|
|
|
|
SELECT
|
|
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME",
|
|
uncomp_chunk.schema_name|| '.' || uncomp_chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk comp_chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (comp_chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
INNER JOIN _timescaledb_catalog.chunk uncomp_chunk ON (uncomp_chunk.compressed_chunk_id = comp_chunk.id)
|
|
WHERE uncomp_hyper.table_name like 'test1' ORDER BY comp_chunk.id LIMIT 1\gset
|
|
-- ensure compression chunk cannot be moved directly
|
|
SELECT tablename
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
tablename
|
|
-----------
|
|
(0 rows)
|
|
|
|
\set ON_ERROR_STOP 0
|
|
ALTER TABLE :COMPRESSED_CHUNK_NAME SET TABLESPACE tablespace1;
|
|
ERROR: changing tablespace of compressed chunk is not supported
|
|
\set ON_ERROR_STOP 1
|
|
SELECT tablename
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
tablename
|
|
-----------
|
|
(0 rows)
|
|
|
|
-- ensure that both compressed and uncompressed chunks moved
|
|
ALTER TABLE :UNCOMPRESSED_CHUNK_NAME SET TABLESPACE tablespace1;
|
|
SELECT tablename
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
tablename
|
|
---------------------------
|
|
compress_hyper_2_28_chunk
|
|
_hyper_1_1_chunk
|
|
(2 rows)
|
|
|
|
ALTER TABLE test1 SET TABLESPACE tablespace2;
|
|
SELECT tablename
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
tablename
|
|
-----------
|
|
(0 rows)
|
|
|
|
\set ON_ERROR_STOP 0
|
|
SELECT move_chunk(chunk=>:'COMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."compress_hyper_2_28_chunk__compressed_hypertable_2_b__ts_meta_s"');
|
|
ERROR: cannot directly move internal compression data
|
|
\set ON_ERROR_STOP 1
|
|
-- ensure that both compressed and uncompressed chunks moved
|
|
SELECT move_chunk(chunk=>:'UNCOMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."_hyper_1_1_chunk_test1_Time_idx"');
|
|
NOTICE: ignoring index parameter
|
|
move_chunk
|
|
------------
|
|
|
|
(1 row)
|
|
|
|
SELECT tablename
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
tablename
|
|
---------------------------
|
|
_hyper_1_1_chunk
|
|
compress_hyper_2_28_chunk
|
|
(2 rows)
|
|
|
|
-- the compressed chunk is in here now
|
|
SELECT count(*)
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
count
|
|
-------
|
|
2
|
|
(1 row)
|
|
|
|
SELECT decompress_chunk(:'UNCOMPRESSED_CHUNK_NAME');
|
|
decompress_chunk
|
|
----------------------------------------
|
|
_timescaledb_internal._hyper_1_1_chunk
|
|
(1 row)
|
|
|
|
--the compressed chunk was dropped by decompression
|
|
SELECT count(*)
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT move_chunk(chunk=>:'UNCOMPRESSED_CHUNK_NAME', destination_tablespace=>'tablespace1', index_destination_tablespace=>'tablespace1', reorder_index=>'_timescaledb_internal."_hyper_1_1_chunk_test1_Time_idx"');
|
|
move_chunk
|
|
------------
|
|
|
|
(1 row)
|
|
|
|
--the uncompressed chunk has now been moved
|
|
SELECT count(*)
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT compress_chunk(:'UNCOMPRESSED_CHUNK_NAME');
|
|
compress_chunk
|
|
----------------------------------------
|
|
_timescaledb_internal._hyper_1_1_chunk
|
|
(1 row)
|
|
|
|
--the compressed chunk is now in the same tablespace as the uncompressed one
|
|
SELECT count(*)
|
|
FROM pg_tables WHERE tablespace = 'tablespace1';
|
|
count
|
|
-------
|
|
2
|
|
(1 row)
|
|
|
|
--
|
|
-- DROP CHUNKS
|
|
--
|
|
SELECT count(*) as count_chunks_uncompressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1';
|
|
count_chunks_uncompressed
|
|
---------------------------
|
|
27
|
|
(1 row)
|
|
|
|
SELECT count(*) as count_chunks_compressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1';
|
|
count_chunks_compressed
|
|
-------------------------
|
|
27
|
|
(1 row)
|
|
|
|
SELECT chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
|
|
DROP TABLE :UNCOMPRESSED_CHUNK_NAME;
|
|
--should decrease #chunks both compressed and uncompressed
|
|
SELECT count(*) as count_chunks_uncompressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1';
|
|
count_chunks_uncompressed
|
|
---------------------------
|
|
26
|
|
(1 row)
|
|
|
|
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
|
|
SELECT count(*) as orphaned_compression_chunk_size
|
|
FROM _timescaledb_catalog.compression_chunk_size size
|
|
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
|
|
WHERE chunk.id IS NULL;
|
|
orphaned_compression_chunk_size
|
|
---------------------------------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) as count_chunks_compressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1';
|
|
count_chunks_compressed
|
|
-------------------------
|
|
26
|
|
(1 row)
|
|
|
|
SELECT drop_chunks('test1', older_than => '2018-03-10'::TIMESTAMPTZ);
|
|
drop_chunks
|
|
----------------------------------------
|
|
_timescaledb_internal._hyper_1_2_chunk
|
|
_timescaledb_internal._hyper_1_3_chunk
|
|
_timescaledb_internal._hyper_1_4_chunk
|
|
_timescaledb_internal._hyper_1_5_chunk
|
|
_timescaledb_internal._hyper_1_6_chunk
|
|
_timescaledb_internal._hyper_1_7_chunk
|
|
_timescaledb_internal._hyper_1_8_chunk
|
|
(7 rows)
|
|
|
|
--should decrease #chunks both compressed and uncompressed
|
|
SELECT count(*) as count_chunks_uncompressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1';
|
|
count_chunks_uncompressed
|
|
---------------------------
|
|
19
|
|
(1 row)
|
|
|
|
SELECT count(*) as count_chunks_compressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1';
|
|
count_chunks_compressed
|
|
-------------------------
|
|
19
|
|
(1 row)
|
|
|
|
SELECT chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
|
|
SELECT chunk.schema_name|| '.' || chunk.table_name as "COMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1' ORDER BY chunk.id LIMIT 1
|
|
\gset
|
|
\set ON_ERROR_STOP 0
|
|
DROP TABLE :COMPRESSED_CHUNK_NAME;
|
|
ERROR: dropping compressed chunks not supported
|
|
\set ON_ERROR_STOP 1
|
|
SELECT
|
|
chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME",
|
|
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.chunk comp_chunk ON (chunk.compressed_chunk_id = comp_chunk.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
|
|
--create a dependent object on the compressed chunk to test cascade behaviour
|
|
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_CHUNK_NAME;
|
|
\set ON_ERROR_STOP 0
|
|
--errors due to dependent objects
|
|
DROP TABLE :UNCOMPRESSED_CHUNK_NAME;
|
|
ERROR: cannot drop table _timescaledb_internal.compress_hyper_2_36_chunk because other objects depend on it
|
|
\set ON_ERROR_STOP 1
|
|
DROP TABLE :UNCOMPRESSED_CHUNK_NAME CASCADE;
|
|
NOTICE: drop cascades to view dependent_1
|
|
--should decrease #chunks both compressed and uncompressed
|
|
SELECT count(*) as count_chunks_uncompressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1';
|
|
count_chunks_uncompressed
|
|
---------------------------
|
|
18
|
|
(1 row)
|
|
|
|
SELECT count(*) as count_chunks_compressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1';
|
|
count_chunks_compressed
|
|
-------------------------
|
|
18
|
|
(1 row)
|
|
|
|
SELECT
|
|
chunk.schema_name|| '.' || chunk.table_name as "UNCOMPRESSED_CHUNK_NAME",
|
|
comp_chunk.schema_name|| '.' || comp_chunk.table_name as "COMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.chunk comp_chunk ON (chunk.compressed_chunk_id = comp_chunk.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id LIMIT 1 \gset
|
|
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_CHUNK_NAME;
|
|
\set ON_ERROR_STOP 0
|
|
\set VERBOSITY default
|
|
--errors due to dependent objects
|
|
SELECT drop_chunks('test1', older_than => '2018-03-28'::TIMESTAMPTZ);
|
|
ERROR: cannot drop table _timescaledb_internal.compress_hyper_2_37_chunk because other objects depend on it
|
|
DETAIL: view dependent_1 depends on table _timescaledb_internal.compress_hyper_2_37_chunk
|
|
HINT: Use DROP ... to drop the dependent objects.
|
|
\set VERBOSITY terse
|
|
\set ON_ERROR_STOP 1
|
|
DROP VIEW dependent_1;
|
|
SELECT drop_chunks('test1', older_than => '2018-03-28'::TIMESTAMPTZ);
|
|
drop_chunks
|
|
-----------------------------------------
|
|
_timescaledb_internal._hyper_1_10_chunk
|
|
_timescaledb_internal._hyper_1_11_chunk
|
|
_timescaledb_internal._hyper_1_12_chunk
|
|
_timescaledb_internal._hyper_1_13_chunk
|
|
_timescaledb_internal._hyper_1_14_chunk
|
|
_timescaledb_internal._hyper_1_15_chunk
|
|
_timescaledb_internal._hyper_1_16_chunk
|
|
_timescaledb_internal._hyper_1_17_chunk
|
|
_timescaledb_internal._hyper_1_18_chunk
|
|
_timescaledb_internal._hyper_1_19_chunk
|
|
_timescaledb_internal._hyper_1_20_chunk
|
|
_timescaledb_internal._hyper_1_21_chunk
|
|
_timescaledb_internal._hyper_1_22_chunk
|
|
_timescaledb_internal._hyper_1_23_chunk
|
|
_timescaledb_internal._hyper_1_24_chunk
|
|
_timescaledb_internal._hyper_1_25_chunk
|
|
_timescaledb_internal._hyper_1_26_chunk
|
|
(17 rows)
|
|
|
|
--should decrease #chunks both compressed and uncompressed
|
|
SELECT count(*) as count_chunks_uncompressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1';
|
|
count_chunks_uncompressed
|
|
---------------------------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT count(*) as count_chunks_compressed
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1';
|
|
count_chunks_compressed
|
|
-------------------------
|
|
1
|
|
(1 row)
|
|
|
|
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
|
|
SELECT count(*) as orphaned_compression_chunk_size
|
|
FROM _timescaledb_catalog.compression_chunk_size size
|
|
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
|
|
WHERE chunk.id IS NULL;
|
|
orphaned_compression_chunk_size
|
|
---------------------------------
|
|
0
|
|
(1 row)
|
|
|
|
--
|
|
-- DROP HYPERTABLE
|
|
--
|
|
SELECT comp_hyper.schema_name|| '.' || comp_hyper.table_name as "COMPRESSED_HYPER_NAME"
|
|
FROM _timescaledb_catalog.hypertable comp_hyper
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1' ORDER BY comp_hyper.id LIMIT 1 \gset
|
|
\set ON_ERROR_STOP 0
|
|
DROP TABLE :COMPRESSED_HYPER_NAME;
|
|
ERROR: dropping compressed hypertables not supported
|
|
\set ON_ERROR_STOP 1
|
|
BEGIN;
|
|
SELECT hypertable.schema_name|| '.' || hypertable.table_name as "UNCOMPRESSED_HYPER_NAME"
|
|
FROM _timescaledb_catalog.hypertable hypertable
|
|
WHERE hypertable.table_name like 'test1' ORDER BY hypertable.id LIMIT 1 \gset
|
|
--before the drop there are 2 hypertables: the compressed and uncompressed ones
|
|
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
|
|
count
|
|
-------
|
|
2
|
|
(1 row)
|
|
|
|
--add policy to make sure it's dropped later
|
|
select add_compression_policy(:'UNCOMPRESSED_HYPER_NAME', interval '1 day');
|
|
add_compression_policy
|
|
------------------------
|
|
1000
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM _timescaledb_config.bgw_job WHERE id >= 1000;
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
DROP TABLE :UNCOMPRESSED_HYPER_NAME;
|
|
--verify that there are no more hypertables remaining
|
|
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM _timescaledb_catalog.hypertable_compression;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
--verify that the policy is gone
|
|
SELECT count(*) FROM _timescaledb_config.bgw_job WHERE id >= 1000;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
ROLLBACK;
|
|
--create a dependent object on the compressed hypertable to test cascade behaviour
|
|
CREATE VIEW dependent_1 AS SELECT * FROM :COMPRESSED_HYPER_NAME;
|
|
\set ON_ERROR_STOP 0
|
|
DROP TABLE :UNCOMPRESSED_HYPER_NAME;
|
|
ERROR: cannot drop table _timescaledb_internal._compressed_hypertable_2 because other objects depend on it
|
|
\set ON_ERROR_STOP 1
|
|
BEGIN;
|
|
DROP TABLE :UNCOMPRESSED_HYPER_NAME CASCADE;
|
|
NOTICE: drop cascades to 2 other objects
|
|
SELECT count(*) FROM _timescaledb_catalog.hypertable hypertable;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
ROLLBACK;
|
|
DROP VIEW dependent_1;
|
|
--create a cont agg view on the ht as well then the drop should nuke everything
|
|
CREATE MATERIALIZED VIEW test1_cont_view
|
|
WITH (timescaledb.continuous,
|
|
timescaledb.materialized_only=true)
|
|
AS SELECT time_bucket('1 hour', "Time"), SUM(i)
|
|
FROM test1
|
|
GROUP BY 1 WITH NO DATA;
|
|
SELECT add_continuous_aggregate_policy('test1_cont_view', NULL, '1 hour'::interval, '1 day'::interval);
|
|
add_continuous_aggregate_policy
|
|
---------------------------------
|
|
1001
|
|
(1 row)
|
|
|
|
CALL refresh_continuous_aggregate('test1_cont_view', NULL, NULL);
|
|
SELECT count(*) FROM test1_cont_view;
|
|
count
|
|
-------
|
|
9
|
|
(1 row)
|
|
|
|
\c :TEST_DBNAME :ROLE_SUPERUSER
|
|
SELECT chunk.schema_name|| '.' || chunk.table_name as "COMPRESSED_CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hyper ON (chunk.hypertable_id = comp_hyper.id)
|
|
INNER JOIN _timescaledb_catalog.hypertable uncomp_hyper ON (comp_hyper.id = uncomp_hyper.compressed_hypertable_id)
|
|
WHERE uncomp_hyper.table_name like 'test1' ORDER BY chunk.id LIMIT 1
|
|
\gset
|
|
ALTER TABLE test1 OWNER TO :ROLE_DEFAULT_PERM_USER_2;
|
|
--make sure new owner is propagated down
|
|
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = 'test1'::regclass;
|
|
rolname
|
|
---------------------
|
|
default_perm_user_2
|
|
(1 row)
|
|
|
|
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = :'COMPRESSED_HYPER_NAME'::regclass;
|
|
rolname
|
|
---------------------
|
|
default_perm_user_2
|
|
(1 row)
|
|
|
|
SELECT a.rolname from pg_class c INNER JOIN pg_authid a ON(c.relowner = a.oid) WHERE c.oid = :'COMPRESSED_CHUNK_NAME'::regclass;
|
|
rolname
|
|
---------------------
|
|
default_perm_user_2
|
|
(1 row)
|
|
|
|
--
|
|
-- turn off compression
|
|
--
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NOT NULL ORDER BY chunk.id
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
1
|
|
(1 row)
|
|
|
|
select add_compression_policy('test1', interval '1 day');
|
|
add_compression_policy
|
|
------------------------
|
|
1002
|
|
(1 row)
|
|
|
|
\set ON_ERROR_STOP 0
|
|
ALTER table test1 set (timescaledb.compress='f');
|
|
\set ON_ERROR_STOP 1
|
|
select remove_compression_policy('test1');
|
|
remove_compression_policy
|
|
---------------------------
|
|
t
|
|
(1 row)
|
|
|
|
ALTER table test1 set (timescaledb.compress='f');
|
|
--only one hypertable left
|
|
SELECT count(*) = 1 FROM _timescaledb_catalog.hypertable hypertable;
|
|
?column?
|
|
----------
|
|
f
|
|
(1 row)
|
|
|
|
SELECT compressed_hypertable_id IS NULL FROM _timescaledb_catalog.hypertable hypertable WHERE hypertable.table_name like 'test1' ;
|
|
?column?
|
|
----------
|
|
t
|
|
(1 row)
|
|
|
|
--no hypertable compression entries left
|
|
SELECT count(*) = 0 FROM _timescaledb_catalog.hypertable_compression;
|
|
?column?
|
|
----------
|
|
t
|
|
(1 row)
|
|
|
|
--make sure there are no orphaned _timescaledb_catalog.compression_chunk_size entries (should be 0)
|
|
SELECT count(*) as orphaned_compression_chunk_size
|
|
FROM _timescaledb_catalog.compression_chunk_size size
|
|
LEFT JOIN _timescaledb_catalog.chunk chunk ON (chunk.id = size.chunk_id)
|
|
WHERE chunk.id IS NULL;
|
|
orphaned_compression_chunk_size
|
|
---------------------------------
|
|
0
|
|
(1 row)
|
|
|
|
--can turn compression back on
|
|
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'b', timescaledb.compress_orderby = '"Time" DESC');
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
1
|
|
(1 row)
|
|
|
|
DROP TABLE test1 CASCADE;
|
|
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_6_57_chunk
|
|
NOTICE: drop cascades to 2 other objects
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_5_56_chunk
|
|
DROP TABLESPACE tablespace1;
|
|
-- Triggers are NOT fired for compress/decompress
|
|
CREATE TABLE test1 ("Time" timestamptz, i integer);
|
|
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
|
|
NOTICE: adding not-null constraint to column "Time"
|
|
table_name
|
|
------------
|
|
test1
|
|
(1 row)
|
|
|
|
-- Trigger function used to observe trigger firing on test1: emits a NOTICE
-- and returns OLD (OLD is NULL for statement-level and INSERT row-level
-- invocations, which is acceptable for a BEFORE trigger used only for logging).
CREATE OR REPLACE FUNCTION test1_print_func()
|
|
RETURNS TRIGGER LANGUAGE PLPGSQL AS
|
|
$BODY$
|
|
BEGIN
|
|
RAISE NOTICE ' raise notice test1_print_trigger called ';
|
|
RETURN OLD;
|
|
END;
|
|
$BODY$;
|
|
CREATE TRIGGER test1_trigger
|
|
BEFORE INSERT OR UPDATE OR DELETE OR TRUNCATE ON test1
|
|
FOR EACH STATEMENT EXECUTE FUNCTION test1_print_func();
|
|
INSERT INTO test1 SELECT generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 1:00', '1 hour') , 1 ;
|
|
NOTICE: raise notice test1_print_trigger called
|
|
-- add a row trigger too --
|
|
CREATE TRIGGER test1_trigger2
|
|
BEFORE INSERT OR UPDATE OR DELETE ON test1
|
|
FOR EACH ROW EXECUTE FUNCTION test1_print_func();
|
|
INSERT INTO test1 SELECT '2018-03-02 1:05'::TIMESTAMPTZ, 2;
|
|
NOTICE: raise notice test1_print_trigger called
|
|
NOTICE: raise notice test1_print_trigger called
|
|
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_orderby = '"Time" DESC');
|
|
SELECT COUNT(*) AS count_compressed FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id) AS subq;
|
|
count_compressed
|
|
------------------
|
|
2
|
|
(1 row)
|
|
|
|
SELECT COUNT(*) AS count_compressed FROM
|
|
(
|
|
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' ORDER BY chunk.id ) as subq;
|
|
count_compressed
|
|
------------------
|
|
2
|
|
(1 row)
|
|
|
|
DROP TABLE test1;
|
|
-- test disabling compression on hypertables with caggs and dropped chunks
|
|
-- github issue 2844
|
|
CREATE TABLE i2844 (created_at timestamptz NOT NULL,c1 float);
|
|
SELECT create_hypertable('i2844', 'created_at', chunk_time_interval => '6 hour'::interval);
|
|
create_hypertable
|
|
--------------------
|
|
(9,public,i2844,t)
|
|
(1 row)
|
|
|
|
INSERT INTO i2844 SELECT generate_series('2000-01-01'::timestamptz, '2000-01-02'::timestamptz,'1h'::interval);
|
|
CREATE MATERIALIZED VIEW test_agg WITH (timescaledb.continuous) AS SELECT time_bucket('1 hour', created_at) AS bucket, AVG(c1) AS avg_c1 FROM i2844 GROUP BY bucket;
|
|
NOTICE: refreshing continuous aggregate "test_agg"
|
|
ALTER TABLE i2844 SET (timescaledb.compress);
|
|
SELECT compress_chunk(show_chunks) AS compressed_chunk FROM show_chunks('i2844');
|
|
compressed_chunk
|
|
-----------------------------------------
|
|
_timescaledb_internal._hyper_9_62_chunk
|
|
_timescaledb_internal._hyper_9_63_chunk
|
|
_timescaledb_internal._hyper_9_64_chunk
|
|
_timescaledb_internal._hyper_9_65_chunk
|
|
_timescaledb_internal._hyper_9_66_chunk
|
|
(5 rows)
|
|
|
|
SELECT drop_chunks('i2844', older_than => '2000-01-01 18:00'::timestamptz);
|
|
drop_chunks
|
|
-----------------------------------------
|
|
_timescaledb_internal._hyper_9_62_chunk
|
|
_timescaledb_internal._hyper_9_63_chunk
|
|
_timescaledb_internal._hyper_9_64_chunk
|
|
(3 rows)
|
|
|
|
SELECT decompress_chunk(show_chunks, if_compressed => TRUE) AS decompressed_chunks FROM show_chunks('i2844');
|
|
decompressed_chunks
|
|
-----------------------------------------
|
|
_timescaledb_internal._hyper_9_65_chunk
|
|
_timescaledb_internal._hyper_9_66_chunk
|
|
(2 rows)
|
|
|
|
ALTER TABLE i2844 SET (timescaledb.compress = FALSE);
|
|
-- TEST compression alter schema tests
|
|
\ir include/compression_alter.sql
|
|
-- This file and its contents are licensed under the Timescale License.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-TIMESCALE for a copy of the license.
|
|
\ir compression_utils.sql
|
|
-- This file and its contents are licensed under the Timescale License.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-TIMESCALE for a copy of the license.
|
|
\set ECHO errors
|
|
\ir ../../../../test/sql/include/test_utils.sql
|
|
-- This file and its contents are licensed under the Apache License 2.0.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-APACHE for a copy of the license.
|
|
-- Test helper: raise an error unless val is TRUE.
-- Uses IS NOT TRUE so that both FALSE and NULL inputs fail the assertion.
CREATE OR REPLACE FUNCTION assert_true(
|
|
val boolean
|
|
)
|
|
RETURNS VOID LANGUAGE PLPGSQL IMMUTABLE AS
|
|
$BODY$
|
|
BEGIN
|
|
IF val IS NOT TRUE THEN
|
|
RAISE 'Assert failed';
|
|
END IF;
|
|
END
|
|
$BODY$;
|
|
-- Test helper: raise an error unless val1 = val2.
-- (val1 = val2) IS NOT TRUE makes NULL comparisons fail rather than pass.
CREATE OR REPLACE FUNCTION assert_equal(
|
|
val1 anyelement,
|
|
val2 anyelement
|
|
)
|
|
RETURNS VOID LANGUAGE PLPGSQL IMMUTABLE AS
|
|
$BODY$
|
|
BEGIN
|
|
IF (val1 = val2) IS NOT TRUE THEN
|
|
RAISE 'Assert failed: % = %',val1,val2;
|
|
END IF;
|
|
END
|
|
$BODY$;
|
|
CREATE TABLE test1 ("Time" timestamptz, intcol integer, bntcol bigint, txtcol text);
|
|
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
|
|
psql:include/compression_alter.sql:8: NOTICE: adding not-null constraint to column "Time"
|
|
table_name
|
|
------------
|
|
test1
|
|
(1 row)
|
|
|
|
INSERT INTO test1
|
|
SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text
|
|
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-05 1:00', '1 hour') t;
|
|
INSERT INTO test1
|
|
SELECT '2018-03-04 2:00', 100, 200, 'hello' ;
|
|
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby = 'bntcol', timescaledb.compress_orderby = '"Time" DESC');
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
4
|
|
(1 row)
|
|
|
|
-- TEST: ALTER TABLE add column tests --
|
|
ALTER TABLE test1 ADD COLUMN new_coli integer;
|
|
ALTER TABLE test1 ADD COLUMN new_colv varchar(30);
|
|
SELECT * FROM _timescaledb_catalog.hypertable_compression
|
|
ORDER BY attname;
|
|
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
---------------+----------+--------------------------+------------------------+----------------------+-------------+--------------------
|
|
12 | Time | 4 | | 1 | f | t
|
|
12 | bntcol | 0 | 1 | | |
|
|
12 | intcol | 4 | | | |
|
|
12 | new_coli | 4 | | | |
|
|
12 | new_colv | 2 | | | |
|
|
12 | txtcol | 2 | | | |
|
|
(6 rows)
|
|
|
|
SELECT count(*) from test1 where new_coli is not null;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_colv is null;
|
|
count
|
|
-------
|
|
74
|
|
(1 row)
|
|
|
|
--decompress 1 chunk and query again
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT decompress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NOT NULL ORDER BY chunk.id
|
|
LIMIT 1
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_coli is not null;
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_colv is null;
|
|
count
|
|
-------
|
|
74
|
|
(1 row)
|
|
|
|
--compress all chunks and query ---
|
|
--create new chunk and fill in data --
|
|
INSERT INTO test1 SELECT t, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()::text , 100, '101t'
|
|
FROM generate_series('2018-03-08 1:00'::TIMESTAMPTZ, '2018-03-09 1:00', '1 hour') t;
|
|
SELECT count(*) from test1 where new_coli = 100;
|
|
count
|
|
-------
|
|
25
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_colv = '101t';
|
|
count
|
|
-------
|
|
25
|
|
(1 row)
|
|
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name like 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
|
|
)
|
|
AS sub;
|
|
count_compressed
|
|
------------------
|
|
3
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_coli = 100;
|
|
count
|
|
-------
|
|
25
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where new_colv = '101t';
|
|
count
|
|
-------
|
|
25
|
|
(1 row)
|
|
|
|
CREATE INDEX new_index ON test1(new_colv);
|
|
-- TEST 2: ALTER TABLE rename column
|
|
SELECT * FROM _timescaledb_catalog.hypertable_compression
|
|
WHERE attname = 'new_coli' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
|
|
WHERE table_name = 'test1' );
|
|
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
---------------+----------+--------------------------+------------------------+----------------------+-------------+--------------------
|
|
12 | new_coli | 4 | | | |
|
|
(1 row)
|
|
|
|
ALTER TABLE test1 RENAME new_coli TO coli;
|
|
SELECT * FROM _timescaledb_catalog.hypertable_compression
|
|
WHERE attname = 'coli' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
|
|
WHERE table_name = 'test1' );
|
|
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
---------------+---------+--------------------------+------------------------+----------------------+-------------+--------------------
|
|
12 | coli | 4 | | | |
|
|
(1 row)
|
|
|
|
SELECT count(*) from test1 where coli = 100;
|
|
count
|
|
-------
|
|
25
|
|
(1 row)
|
|
|
|
--rename segment by column name
|
|
ALTER TABLE test1 RENAME bntcol TO bigintcol ;
|
|
SELECT * FROM _timescaledb_catalog.hypertable_compression
|
|
WHERE attname = 'bigintcol' and hypertable_id = (SELECT id from _timescaledb_catalog.hypertable
|
|
WHERE table_name = 'test1' );
|
|
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
---------------+-----------+--------------------------+------------------------+----------------------+-------------+--------------------
|
|
12 | bigintcol | 0 | 1 | | |
|
|
(1 row)
|
|
|
|
--query by segment by column name
|
|
SELECT * from test1 WHERE bigintcol = 100;
|
|
Time | intcol | bigintcol | txtcol | coli | new_colv
|
|
------+--------+-----------+--------+------+----------
|
|
(0 rows)
|
|
|
|
SELECT * from test1 WHERE bigintcol = 200;
|
|
Time | intcol | bigintcol | txtcol | coli | new_colv
|
|
------------------------------+--------+-----------+--------+------+----------
|
|
Sun Mar 04 02:00:00 2018 PST | 100 | 200 | hello | |
|
|
(1 row)
|
|
|
|
-- add a new chunk and compress
|
|
INSERT INTO test1 SELECT '2019-03-04 2:00', 99, 800, 'newchunk' ;
|
|
SELECT COUNT(*) AS count_compressed
|
|
FROM
|
|
(
|
|
SELECT compress_chunk(chunk.schema_name|| '.' || chunk.table_name)
|
|
FROM _timescaledb_catalog.chunk chunk
|
|
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
|
|
WHERE hypertable.table_name = 'test1' and chunk.compressed_chunk_id IS NULL ORDER BY chunk.id
|
|
) q;
|
|
count_compressed
|
|
------------------
|
|
1
|
|
(1 row)
|
|
|
|
--check if all chunks have new column names
|
|
--both counts should be equal
|
|
SELECT count(*) FROM _timescaledb_catalog.chunk
|
|
WHERE hypertable_id = ( SELECT id FROM _timescaledb_catalog.hypertable
|
|
WHERE table_name = 'test1' );
|
|
count
|
|
-------
|
|
7
|
|
(1 row)
|
|
|
|
SELECT count(*)
|
|
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
|
|
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
|
|
where inhparent = 'test1'::regclass)
|
|
and attname = 'bigintcol' ) q;
|
|
count
|
|
-------
|
|
7
|
|
(1 row)
|
|
|
|
--check count on internal compression table too i.e. all the chunks have
|
|
--the correct column name
|
|
SELECT format('%I.%I', cht.schema_name, cht.table_name) AS "COMPRESSION_TBLNM"
|
|
FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht
|
|
WHERE ht.table_name = 'test1' and cht.id = ht.compressed_hypertable_id \gset
|
|
SELECT count(*)
|
|
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
|
|
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
|
|
where inhparent = :'COMPRESSION_TBLNM'::regclass )
|
|
and attname = 'bigintcol' ) q;
|
|
count
|
|
-------
|
|
7
|
|
(1 row)
|
|
|
|
-- check column name truncation with renames
|
|
-- check if the name change is reflected for settings
|
|
ALTER TABLE test1 RENAME bigintcol TO
|
|
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc;
|
|
psql:include/compression_alter.sql:135: NOTICE: identifier "ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc" will be truncated to "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca"
|
|
SELECT * from timescaledb_information.compression_settings
|
|
WHERE hypertable_name = 'test1' and attname like 'ccc%';
|
|
hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
-------------------+-----------------+-----------------------------------------------------------------+------------------------+----------------------+-------------+--------------------
|
|
public | test1 | cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca | 1 | | |
|
|
(1 row)
|
|
|
|
SELECT count(*)
|
|
FROM ( SELECT attrelid::regclass, attname FROM pg_attribute
|
|
WHERE attrelid in (SELECT inhrelid::regclass from pg_inherits
|
|
where inhparent = :'COMPRESSION_TBLNM'::regclass )
|
|
and attname like 'ccc%a' ) q;
|
|
count
|
|
-------
|
|
7
|
|
(1 row)
|
|
|
|
ALTER TABLE test1 RENAME
|
|
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc
|
|
TO bigintcol;
|
|
psql:include/compression_alter.sql:148: NOTICE: identifier "ccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccabdeeeeeeccccccccccccc" will be truncated to "cccccccccccccccccccccccccccccccccccccccccccccccccccccccccccccca"
|
|
SELECT * from timescaledb_information.compression_settings
|
|
WHERE hypertable_name = 'test1' and attname = 'bigintcol' ;
|
|
hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
|
|
-------------------+-----------------+-----------+------------------------+----------------------+-------------+--------------------
|
|
public | test1 | bigintcol | 1 | | |
|
|
(1 row)
|
|
|
|
-- test compression default handling
|
|
CREATE TABLE test_defaults(time timestamptz NOT NULL, device_id int);
|
|
SELECT create_hypertable('test_defaults','time');
|
|
create_hypertable
|
|
-----------------------------
|
|
(14,public,test_defaults,t)
|
|
(1 row)
|
|
|
|
ALTER TABLE test_defaults SET (timescaledb.compress,timescaledb.compress_segmentby='device_id');
|
|
-- create 2 chunks
|
|
INSERT INTO test_defaults SELECT '2000-01-01', 1;
|
|
INSERT INTO test_defaults SELECT '2001-01-01', 1;
|
|
-- compress first chunk
|
|
SELECT compress_chunk(show_chunks) AS compressed_chunk FROM show_chunks('test_defaults') ORDER BY show_chunks::text LIMIT 1;
|
|
compressed_chunk
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_14_89_chunk
|
|
(1 row)
|
|
|
|
SELECT * FROM test_defaults ORDER BY 1;
|
|
time | device_id
|
|
------------------------------+-----------
|
|
Sat Jan 01 00:00:00 2000 PST | 1
|
|
Mon Jan 01 00:00:00 2001 PST | 1
|
|
(2 rows)
|
|
|
|
ALTER TABLE test_defaults ADD COLUMN c1 int;
|
|
ALTER TABLE test_defaults ADD COLUMN c2 int NOT NULL DEFAULT 42;
|
|
SELECT * FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2
|
|
------------------------------+-----------+----+----
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42
|
|
(2 rows)
|
|
|
|
-- try insert into compressed and recompress
|
|
INSERT INTO test_defaults SELECT '2000-01-01', 2;
|
|
SELECT * FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2
|
|
------------------------------+-----------+----+----
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42
|
|
Sat Jan 01 00:00:00 2000 PST | 2 | | 42
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42
|
|
(3 rows)
|
|
|
|
CALL recompress_all_chunks('test_defaults', 1, false);
|
|
SELECT * FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2
|
|
------------------------------+-----------+----+----
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42
|
|
Sat Jan 01 00:00:00 2000 PST | 2 | | 42
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42
|
|
(3 rows)
|
|
|
|
-- timescale/timescaledb#5412
|
|
ALTER TABLE test_defaults ADD COLUMN c3 int NOT NULL DEFAULT 43;
|
|
SELECT *,assert_equal(c3,43) FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2 | c3 | assert_equal
|
|
------------------------------+-----------+----+----+----+--------------
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42 | 43 |
|
|
Sat Jan 01 00:00:00 2000 PST | 2 | | 42 | 43 |
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42 | 43 |
|
|
(3 rows)
|
|
|
|
select decompress_chunk(show_chunks('test_defaults'),true);
|
|
psql:include/compression_alter.sql:179: NOTICE: chunk "_hyper_14_90_chunk" is not compressed
|
|
decompress_chunk
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_14_89_chunk
|
|
|
|
(2 rows)
|
|
|
|
SELECT *,assert_equal(c3,43) FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2 | c3 | assert_equal
|
|
------------------------------+-----------+----+----+----+--------------
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42 | 43 |
|
|
Sat Jan 01 00:00:00 2000 PST | 2 | | 42 | 43 |
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42 | 43 |
|
|
(3 rows)
|
|
|
|
select compress_chunk(show_chunks('test_defaults'));
|
|
compress_chunk
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_14_89_chunk
|
|
_timescaledb_internal._hyper_14_90_chunk
|
|
(2 rows)
|
|
|
|
SELECT *,assert_equal(c3,43) FROM test_defaults ORDER BY 1,2;
|
|
time | device_id | c1 | c2 | c3 | assert_equal
|
|
------------------------------+-----------+----+----+----+--------------
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | | 42 | 43 |
|
|
Sat Jan 01 00:00:00 2000 PST | 2 | | 42 | 43 |
|
|
Mon Jan 01 00:00:00 2001 PST | 1 | | 42 | 43 |
|
|
(3 rows)
|
|
|
|
-- test dropping columns from compressed
|
|
CREATE TABLE test_drop(f1 text, f2 text, f3 text, time timestamptz, device int, o1 text, o2 text);
|
|
SELECT create_hypertable('test_drop','time');
|
|
psql:include/compression_alter.sql:186: NOTICE: adding not-null constraint to column "time"
|
|
create_hypertable
|
|
-------------------------
|
|
(16,public,test_drop,t)
|
|
(1 row)
|
|
|
|
ALTER TABLE test_drop SET (timescaledb.compress,timescaledb.compress_segmentby='device',timescaledb.compress_orderby='o1,o2');
|
|
-- dropping segmentby or orderby columns will fail
|
|
\set ON_ERROR_STOP 0
|
|
ALTER TABLE test_drop DROP COLUMN time;
|
|
psql:include/compression_alter.sql:191: ERROR: cannot drop column named in partition key
|
|
ALTER TABLE test_drop DROP COLUMN o1;
|
|
psql:include/compression_alter.sql:192: ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
|
|
ALTER TABLE test_drop DROP COLUMN o2;
|
|
psql:include/compression_alter.sql:193: ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
|
|
ALTER TABLE test_drop DROP COLUMN device;
|
|
psql:include/compression_alter.sql:194: ERROR: cannot drop orderby or segmentby column from a hypertable with compression enabled
|
|
\set ON_ERROR_STOP 1
|
|
-- switch to WARNING only to suppress compress_chunk NOTICEs
|
|
SET client_min_messages TO WARNING;
|
|
-- create some chunks each with different physical layout
|
|
ALTER TABLE test_drop DROP COLUMN f1;
|
|
INSERT INTO test_drop SELECT NULL,NULL,'2000-01-01',1,'o1','o2';
|
|
SELECT count(compress_chunk(chunk,true)) FROM show_chunks('test_drop') chunk;
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
ALTER TABLE test_drop DROP COLUMN f2;
|
|
-- test non-existant column
|
|
\set ON_ERROR_STOP 0
|
|
ALTER TABLE test_drop DROP COLUMN f10;
|
|
psql:include/compression_alter.sql:208: ERROR: column "f10" of relation "test_drop" does not exist
|
|
\set ON_ERROR_STOP 1
|
|
ALTER TABLE test_drop DROP COLUMN IF EXISTS f10;
|
|
INSERT INTO test_drop SELECT NULL,'2001-01-01',2,'o1','o2';
|
|
SELECT count(compress_chunk(chunk,true)) FROM show_chunks('test_drop') chunk;
|
|
count
|
|
-------
|
|
2
|
|
(1 row)
|
|
|
|
ALTER TABLE test_drop DROP COLUMN f3;
|
|
INSERT INTO test_drop SELECT '2003-01-01',3,'o1','o2';
|
|
SELECT count(compress_chunk(chunk,true)) FROM show_chunks('test_drop') chunk;
|
|
count
|
|
-------
|
|
3
|
|
(1 row)
|
|
|
|
ALTER TABLE test_drop ADD COLUMN c1 TEXT;
|
|
ALTER TABLE test_drop ADD COLUMN c2 TEXT;
|
|
INSERT INTO test_drop SELECT '2004-01-01',4,'o1','o2','c1','c2-4';
|
|
SELECT count(compress_chunk(chunk,true)) FROM show_chunks('test_drop') chunk;
|
|
count
|
|
-------
|
|
4
|
|
(1 row)
|
|
|
|
ALTER TABLE test_drop DROP COLUMN c1;
|
|
INSERT INTO test_drop SELECT '2005-01-01',5,'o1','o2','c2-5';
|
|
SELECT count(compress_chunk(chunk,true)) FROM show_chunks('test_drop') chunk;
|
|
count
|
|
-------
|
|
5
|
|
(1 row)
|
|
|
|
RESET client_min_messages;
|
|
SELECT * FROM test_drop ORDER BY 1;
|
|
time | device | o1 | o2 | c2
|
|
------------------------------+--------+----+----+------
|
|
Sat Jan 01 00:00:00 2000 PST | 1 | o1 | o2 |
|
|
Mon Jan 01 00:00:00 2001 PST | 2 | o1 | o2 |
|
|
Wed Jan 01 00:00:00 2003 PST | 3 | o1 | o2 |
|
|
Thu Jan 01 00:00:00 2004 PST | 4 | o1 | o2 | c2-4
|
|
Sat Jan 01 00:00:00 2005 PST | 5 | o1 | o2 | c2-5
|
|
(5 rows)
|
|
|
|
-- check dropped columns got removed from catalog
|
|
-- only c2 should be left in metadata
|
|
SELECT attname
|
|
FROM _timescaledb_catalog.hypertable_compression htc
|
|
INNER JOIN _timescaledb_catalog.hypertable ht
|
|
ON ht.id=htc.hypertable_id AND ht.table_name='test_drop'
|
|
WHERE attname NOT IN ('time','device','o1','o2')
|
|
ORDER BY 1;
|
|
attname
|
|
---------
|
|
c2
|
|
(1 row)
|
|
|
|
--TEST tablespaces for compressed chunks with attach_tablespace interface --
|
|
CREATE TABLE test2 (timec timestamptz, i integer, t integer);
|
|
SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '1 day');
|
|
NOTICE: adding not-null constraint to column "timec"
|
|
table_name
|
|
------------
|
|
test2
|
|
(1 row)
|
|
|
|
SELECT attach_tablespace('tablespace2', 'test2');
|
|
attach_tablespace
|
|
-------------------
|
|
|
|
(1 row)
|
|
|
|
INSERT INTO test2 SELECT t, gen_rand_minstd(), 22
|
|
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-02 13:00', '1 hour') t;
|
|
ALTER TABLE test2 set (timescaledb.compress, timescaledb.compress_segmentby = 'i', timescaledb.compress_orderby = 'timec');
|
|
SELECT relname FROM pg_class
|
|
WHERE reltablespace in
|
|
( SELECT oid from pg_tablespace WHERE spcname = 'tablespace2') ORDER BY 1;
|
|
relname
|
|
-------------------------------------
|
|
_hyper_18_104_chunk
|
|
_hyper_18_104_chunk_test2_timec_idx
|
|
test2
|
|
(3 rows)
|
|
|
|
-- test compress_chunk() with utility statement (SELECT ... INTO)
|
|
SELECT compress_chunk(ch) INTO compressed_chunks FROM show_chunks('test2') ch;
|
|
SELECT decompress_chunk(ch) INTO decompressed_chunks FROM show_chunks('test2') ch;
|
|
-- compress again
|
|
SELECT compress_chunk(ch) FROM show_chunks('test2') ch;
|
|
compress_chunk
|
|
-------------------------------------------
|
|
_timescaledb_internal._hyper_18_104_chunk
|
|
(1 row)
|
|
|
|
-- the chunk, compressed chunk + index + toast tables are in tablespace2 now .
|
|
-- toast table names differ across runs. So we use count to verify the results
|
|
-- instead of printing the table/index names
|
|
SELECT count(*) FROM (
|
|
SELECT relname FROM pg_class
|
|
WHERE reltablespace in
|
|
( SELECT oid from pg_tablespace WHERE spcname = 'tablespace2'))q;
|
|
count
|
|
-------
|
|
7
|
|
(1 row)
|
|
|
|
DROP TABLE test2 CASCADE;
|
|
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_19_106_chunk
|
|
DROP TABLESPACE tablespace2;
|
|
-- Create a table with a compressed table and then delete the
|
|
-- compressed table and see that the drop of the hypertable does not
|
|
-- generate an error. This scenario can be triggered if an extension
|
|
-- is created with compressed hypertables since the tables are dropped
|
|
-- as part of the drop of the extension.
|
|
CREATE TABLE issue4140("time" timestamptz NOT NULL, device_id int);
|
|
SELECT create_hypertable('issue4140', 'time');
|
|
create_hypertable
|
|
-------------------------
|
|
(20,public,issue4140,t)
|
|
(1 row)
|
|
|
|
ALTER TABLE issue4140 SET(timescaledb.compress);
|
|
SELECT format('%I.%I', schema_name, table_name)::regclass AS ctable
|
|
FROM _timescaledb_catalog.hypertable
|
|
WHERE id = (SELECT compressed_hypertable_id FROM _timescaledb_catalog.hypertable WHERE table_name = 'issue4140') \gset
|
|
SELECT timescaledb_pre_restore();
|
|
timescaledb_pre_restore
|
|
-------------------------
|
|
t
|
|
(1 row)
|
|
|
|
DROP TABLE :ctable;
|
|
SELECT timescaledb_post_restore();
|
|
timescaledb_post_restore
|
|
--------------------------
|
|
t
|
|
(1 row)
|
|
|
|
DROP TABLE issue4140;
|
|
-- github issue 5104
|
|
CREATE TABLE metric(
|
|
time TIMESTAMPTZ NOT NULL,
|
|
value DOUBLE PRECISION NOT NULL,
|
|
series_id BIGINT NOT NULL);
|
|
SELECT create_hypertable('metric', 'time',
|
|
chunk_time_interval => interval '1 h',
|
|
create_default_indexes => false);
|
|
create_hypertable
|
|
----------------------
|
|
(22,public,metric,t)
|
|
(1 row)
|
|
|
|
-- enable compression
|
|
ALTER TABLE metric set(timescaledb.compress,
|
|
timescaledb.compress_segmentby = 'series_id, value',
|
|
timescaledb.compress_orderby = 'time'
|
|
);
|
|
SELECT
|
|
comp_hypertable.schema_name AS "COMP_SCHEMA_NAME",
|
|
comp_hypertable.table_name AS "COMP_TABLE_NAME"
|
|
FROM _timescaledb_catalog.hypertable uc_hypertable
|
|
INNER JOIN _timescaledb_catalog.hypertable comp_hypertable ON (comp_hypertable.id = uc_hypertable.compressed_hypertable_id)
|
|
WHERE uc_hypertable.table_name like 'metric' \gset
|
|
-- get definition of compressed hypertable and notice the index
|
|
\d :COMP_SCHEMA_NAME.:COMP_TABLE_NAME
|
|
Table "_timescaledb_internal._compressed_hypertable_23"
|
|
Column | Type | Collation | Nullable | Default
|
|
-----------------------+---------------------------------------+-----------+----------+---------
|
|
time | _timescaledb_internal.compressed_data | | |
|
|
value | double precision | | |
|
|
series_id | bigint | | |
|
|
_ts_meta_count | integer | | |
|
|
_ts_meta_sequence_num | integer | | |
|
|
_ts_meta_min_1 | timestamp with time zone | | |
|
|
_ts_meta_max_1 | timestamp with time zone | | |
|
|
Indexes:
|
|
"_compressed_hypertable_23_series_id_value__ts_meta_sequence_idx" btree (series_id, value, _ts_meta_sequence_num)
|
|
Triggers:
|
|
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._compressed_hypertable_23 FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker()
|
|
|
|
-- #5290 Compression can't be enabled on caggs
|
|
CREATE TABLE "tEst2" (
|
|
"Id" uuid NOT NULL,
|
|
"Time" timestamp with time zone NOT NULL,
|
|
CONSTRAINT "test2_pkey" PRIMARY KEY ("Id", "Time")
|
|
);
|
|
SELECT create_hypertable(
|
|
'"tEst2"',
|
|
'Time',
|
|
chunk_time_interval => INTERVAL '1 day'
|
|
);
|
|
create_hypertable
|
|
---------------------
|
|
(24,public,tEst2,t)
|
|
(1 row)
|
|
|
|
alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"');
|
|
CREATE MATERIALIZED VIEW "tEst2_mv"
|
|
WITH (timescaledb.continuous) AS
|
|
SELECT "Id" as "Idd",
|
|
time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
|
|
FROM public."tEst2"
|
|
GROUP BY "Idd", "bUcket";
|
|
NOTICE: continuous aggregate "tEst2_mv" is already up-to-date
|
|
ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
|
|
NOTICE: defaulting compress_segmentby to "Idd"
|
|
NOTICE: defaulting compress_orderby to "bUcket"
|
|
-- #5161 segmentby param
|
|
CREATE MATERIALIZED VIEW test1_cont_view2
|
|
WITH (timescaledb.continuous,
|
|
timescaledb.materialized_only=true
|
|
)
|
|
AS SELECT time_bucket('1 hour', "Time") as t, SUM(intcol) as sum,txtcol as "iDeA"
|
|
FROM test1
|
|
GROUP BY 1,txtcol WITH NO DATA;
|
|
\set ON_ERROR_STOP 0
|
|
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
|
|
timescaledb.compress = true,
|
|
timescaledb.compress_segmentby = 'invalid_column'
|
|
);
|
|
NOTICE: defaulting compress_orderby to t
|
|
ERROR: column "invalid_column" does not exist
|
|
\set ON_ERROR_STOP 1
|
|
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
|
|
timescaledb.compress = true
|
|
);
|
|
NOTICE: defaulting compress_segmentby to "iDeA"
|
|
NOTICE: defaulting compress_orderby to t
|
|
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
|
|
timescaledb.compress = true,
|
|
timescaledb.compress_segmentby = '"iDeA"'
|
|
);
|
|
NOTICE: defaulting compress_orderby to t
|
|
\set ON_ERROR_STOP 0
|
|
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
|
|
timescaledb.compress = true,
|
|
timescaledb.compress_orderby = '"iDeA"'
|
|
);
|
|
NOTICE: defaulting compress_segmentby to "iDeA"
|
|
ERROR: cannot use column "iDeA" for both ordering and segmenting
|
|
\set ON_ERROR_STOP 1
|
|
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
|
|
timescaledb.compress = false
|
|
);
|
|
DROP TABLE metric CASCADE;
|
|
-- inserting into compressed chunks with different physical layouts
|
|
CREATE TABLE compression_insert(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float);
|
|
CREATE INDEX ON compression_insert(time);
|
|
CREATE INDEX ON compression_insert(device_id,time);
|
|
SELECT create_hypertable('compression_insert','time',create_default_indexes:=false);
|
|
create_hypertable
|
|
----------------------------------
|
|
(31,public,compression_insert,t)
|
|
(1 row)
|
|
|
|
ALTER TABLE compression_insert SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id');
|
|
-- test without altering physical layout
|
|
-- this is a baseline test to compare results with
|
|
-- next series of tests which should yield identical results
|
|
-- while changing the physical layouts of chunks
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-03 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT compress_chunk(c.schema_name|| '.' || c.table_name) as "CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
|
|
WHERE c.hypertable_id = ht.id and ht.table_name = 'compression_insert'
|
|
AND c.compressed_chunk_id IS NULL
|
|
ORDER BY c.table_name DESC \gset
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-04 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-01 0:00:00+0'
|
|
AND time <= '2000-01-05 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
-- force index scans to check index mapping
|
|
-- this verifies that we are actually using compressed chunk index scans
|
|
-- previously we could not use indexes on uncompressed chunks due to a bug:
|
|
-- https://github.com/timescale/timescaledb/issues/5432
|
|
--
|
|
-- this check basically makes sure that the indexes are built properly
|
|
-- and there are no issues in attribute mappings while building them
|
|
SET enable_seqscan = off;
|
|
EXPLAIN (costs off) SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: _hyper_31_107_chunk.device_id
|
|
-> Append
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_107_chunk
|
|
-> Index Scan using compress_hyper_32_108_chunk__compressed_hypertable_32_device_id on compress_hyper_32_108_chunk
|
|
-> Index Only Scan using _hyper_31_107_chunk_compression_insert_device_id_time_idx on _hyper_31_107_chunk
|
|
(6 rows)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 3596
|
|
5 | 3596
|
|
4 | 3596
|
|
2 | 3596
|
|
1 | 3596
|
|
(5 rows)
|
|
|
|
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-01 0:00:00+0'
|
|
AND time <= '2000-01-05 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
1 | 3596
|
|
2 | 3596
|
|
3 | 3596
|
|
4 | 3596
|
|
5 | 3596
|
|
(5 rows)
|
|
|
|
SET enable_seqscan = default;
|
|
-- 1. drop column after first insert into chunk, before compressing
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-07 0:00:00+0'::timestamptz,'2000-01-09 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
ALTER TABLE compression_insert DROP COLUMN filler_1;
|
|
SELECT compress_chunk(c.schema_name|| '.' || c.table_name) as "CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
|
|
WHERE c.hypertable_id = ht.id
|
|
AND ht.table_name = 'compression_insert'
|
|
AND c.compressed_chunk_id IS NULL
|
|
ORDER BY c.table_name DESC \gset
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-10 0:00:00+0'::timestamptz,'2000-01-11 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-07 0:00:00+0'
|
|
AND time <= '2000-01-11 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
-- force index scans to check index mapping
|
|
SET enable_seqscan = off;
|
|
EXPLAIN (costs off) SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: _hyper_31_107_chunk.device_id
|
|
-> Append
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_107_chunk
|
|
-> Index Scan using compress_hyper_32_108_chunk__compressed_hypertable_32_device_id on compress_hyper_32_108_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_109_chunk
|
|
-> Index Scan using compress_hyper_32_110_chunk__compressed_hypertable_32_device_id on compress_hyper_32_110_chunk
|
|
-> Index Only Scan using _hyper_31_109_chunk_compression_insert_device_id_time_idx on _hyper_31_109_chunk
|
|
(8 rows)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 7192
|
|
5 | 7192
|
|
4 | 7192
|
|
2 | 7192
|
|
1 | 7192
|
|
(5 rows)
|
|
|
|
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-07 0:00:00+0'
|
|
AND time <= '2000-01-11 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 7192
|
|
5 | 7192
|
|
4 | 7192
|
|
2 | 7192
|
|
1 | 7192
|
|
(5 rows)
|
|
|
|
SET enable_seqscan = default;
|
|
-- 2. drop column after compressing chunk
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-15 0:00:00+0'::timestamptz,'2000-01-17 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT compress_chunk(c.schema_name|| '.' || c.table_name) as "CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
|
|
WHERE c.hypertable_id = ht.id
|
|
AND ht.table_name = 'compression_insert'
|
|
AND c.compressed_chunk_id IS NULL
|
|
ORDER BY c.table_name DESC \gset
|
|
ALTER TABLE compression_insert DROP COLUMN filler_2;
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-18 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-15 0:00:00+0'
|
|
AND time <= '2000-01-19 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
-- force index scans to check index mapping
|
|
SET enable_seqscan = off;
|
|
EXPLAIN (costs off) SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: _hyper_31_107_chunk.device_id
|
|
-> Append
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_107_chunk
|
|
-> Index Scan using compress_hyper_32_108_chunk__compressed_hypertable_32_device_id on compress_hyper_32_108_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_109_chunk
|
|
-> Index Scan using compress_hyper_32_110_chunk__compressed_hypertable_32_device_id on compress_hyper_32_110_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_111_chunk
|
|
-> Index Scan using compress_hyper_32_112_chunk__compressed_hypertable_32_device_id on compress_hyper_32_112_chunk
|
|
-> Index Only Scan using _hyper_31_111_chunk_compression_insert_device_id_time_idx on _hyper_31_111_chunk
|
|
(10 rows)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 10788
|
|
5 | 10788
|
|
4 | 10788
|
|
2 | 10788
|
|
1 | 10788
|
|
(5 rows)
|
|
|
|
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-15 0:00:00+0'
|
|
AND time <= '2000-01-19 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 10788
|
|
5 | 10788
|
|
4 | 10788
|
|
2 | 10788
|
|
1 | 10788
|
|
(5 rows)
|
|
|
|
SET enable_seqscan = default;
|
|
-- 3. add new column after first insert into chunk, before compressing
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-22 0:00:00+0'::timestamptz,'2000-01-24 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
ALTER TABLE compression_insert ADD COLUMN filler_4 int;
|
|
SELECT compress_chunk(c.schema_name|| '.' || c.table_name) as "CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
|
|
WHERE c.hypertable_id = ht.id
|
|
AND ht.table_name = 'compression_insert'
|
|
AND c.compressed_chunk_id IS NULL
|
|
ORDER BY c.table_name DESC \gset
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-25 0:00:00+0'::timestamptz,'2000-01-26 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-22 0:00:00+0'
|
|
AND time <= '2000-01-26 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
-- force index scans to check index mapping
|
|
SET enable_seqscan = off;
|
|
EXPLAIN (costs off) SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: _hyper_31_107_chunk.device_id
|
|
-> Append
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_107_chunk
|
|
-> Index Scan using compress_hyper_32_108_chunk__compressed_hypertable_32_device_id on compress_hyper_32_108_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_109_chunk
|
|
-> Index Scan using compress_hyper_32_110_chunk__compressed_hypertable_32_device_id on compress_hyper_32_110_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_111_chunk
|
|
-> Index Scan using compress_hyper_32_112_chunk__compressed_hypertable_32_device_id on compress_hyper_32_112_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_113_chunk
|
|
-> Index Scan using compress_hyper_32_114_chunk__compressed_hypertable_32_device_id on compress_hyper_32_114_chunk
|
|
-> Index Only Scan using _hyper_31_113_chunk_compression_insert_device_id_time_idx on _hyper_31_113_chunk
|
|
(12 rows)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 14384
|
|
5 | 14384
|
|
4 | 14384
|
|
2 | 14384
|
|
1 | 14384
|
|
(5 rows)
|
|
|
|
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-22 0:00:00+0'
|
|
AND time <= '2000-01-26 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 14384
|
|
5 | 14384
|
|
4 | 14384
|
|
2 | 14384
|
|
1 | 14384
|
|
(5 rows)
|
|
|
|
SET enable_seqscan = default;
|
|
-- 4. add new column after compressing chunk
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-28 0:00:00+0'::timestamptz,'2000-01-30 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT compress_chunk(c.schema_name|| '.' || c.table_name) as "CHUNK_NAME"
|
|
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
|
|
WHERE c.hypertable_id = ht.id
|
|
AND ht.table_name = 'compression_insert'
|
|
AND c.compressed_chunk_id IS NULL
|
|
ORDER BY c.table_name DESC \gset
|
|
ALTER TABLE compression_insert ADD COLUMN filler_5 int;
|
|
INSERT INTO compression_insert(time,device_id,v0,v1,v2,v3)
|
|
SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL
|
|
FROM generate_series('2000-01-31 0:00:00+0'::timestamptz,'2000-02-01 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-28 0:00:00+0'
|
|
AND time <= '2000-02-01 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
-- force index scans to check index mapping
|
|
SET enable_seqscan = off;
|
|
EXPLAIN (costs off) SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
QUERY PLAN
|
|
-----------------------------------------------------------------------------------------------------------------------------------
|
|
HashAggregate
|
|
Group Key: _hyper_31_107_chunk.device_id
|
|
-> Append
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_107_chunk
|
|
-> Index Scan using compress_hyper_32_108_chunk__compressed_hypertable_32_device_id on compress_hyper_32_108_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_109_chunk
|
|
-> Index Scan using compress_hyper_32_110_chunk__compressed_hypertable_32_device_id on compress_hyper_32_110_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_111_chunk
|
|
-> Index Scan using compress_hyper_32_112_chunk__compressed_hypertable_32_device_id on compress_hyper_32_112_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_113_chunk
|
|
-> Index Scan using compress_hyper_32_114_chunk__compressed_hypertable_32_device_id on compress_hyper_32_114_chunk
|
|
-> Custom Scan (DecompressChunk) on _hyper_31_115_chunk
|
|
-> Index Scan using compress_hyper_32_116_chunk__compressed_hypertable_32_device_id on compress_hyper_32_116_chunk
|
|
-> Index Only Scan using _hyper_31_115_chunk_compression_insert_device_id_time_idx on _hyper_31_115_chunk
|
|
(14 rows)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 17980
|
|
5 | 17980
|
|
4 | 17980
|
|
2 | 17980
|
|
1 | 17980
|
|
(5 rows)
|
|
|
|
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
|
SELECT count(*), sum(v0), sum(v1), sum(v2), sum(v3)
|
|
FROM compression_insert
|
|
WHERE time >= '2000-01-28 0:00:00+0'
|
|
AND time <= '2000-02-01 23:55:00+0';
|
|
count | sum | sum | sum | sum
|
|
-------+-------+-------+-------+-----
|
|
17980 | 71920 | 89900 | 62930 |
|
|
(1 row)
|
|
|
|
SELECT device_id, count(*)
|
|
FROM compression_insert
|
|
GROUP BY device_id;
|
|
device_id | count
|
|
-----------+-------
|
|
3 | 17980
|
|
5 | 17980
|
|
4 | 17980
|
|
2 | 17980
|
|
1 | 17980
|
|
(5 rows)
|
|
|
|
SET enable_seqscan = default;
|
|
DROP TABLE compression_insert;
|