-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------
create table rand_minstd_state(i bigint);
create function rand_minstd_advance(bigint) returns bigint
language sql immutable as
$$
select (16807 * $1) % 2147483647
$$;
create function gen_rand_minstd() returns bigint
language sql security definer as
$$
update rand_minstd_state set i = rand_minstd_advance(i) returning i
$$;
-- seed the random num generator
insert into rand_minstd_state values (321);
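-- Note: the generator above is the classic Lehmer/Park-Miller "MINSTD"
-- linear congruential generator, state(n+1) = (16807 * state(n)) mod 2147483647,
-- which has full period modulo the Mersenne prime 2^31 - 1, so the test data is
-- deterministic across runs. As an illustrative check (not executed as part of
-- this test), the first value drawn from seed 321 would be:
--   SELECT gen_rand_minstd();  -- (16807 * 321) % 2147483647 = 5395047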
\c :TEST_DBNAME :ROLE_SUPERUSER
\ir include/compression_utils.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE test1 ("Time" timestamptz, i integer, value integer);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test1
(1 row)

-- This will generate 24 chunks
INSERT INTO test1
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Compression is set to merge those 24 chunks into 12 2 hour chunks
ALTER TABLE test1 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='2 hours');
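-- Note: chunks are rolled up greedily along the time dimension, so with a
-- 1 hour chunk_time_interval and a 2 hour compress_chunk_time_interval each
-- compressed chunk absorbs one neighbor, i.e. 24 / 2 = 12 chunks. Illustrative
-- check (not executed as part of this test):
--   SELECT count(*) FROM show_chunks('test1');  -- expect 12 once compressed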
SELECT
$$
SELECT * FROM test1 ORDER BY i, "Time"
$$ AS "QUERY" \gset
SELECT 'test1' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_merge.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
psql:include/compression_test_merge.sql:7: NOTICE: table "merge_result" does not exist, skipping
count_compressed
------------------
24
(1 row)

?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)

count
-------
12
(1 row)

?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)

\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count
-------
12
(1 row)

min_correct | max_correct
-------------+-------------
t | t
(1 row)

DROP TABLE test1;
CREATE TABLE test2 ("Time" timestamptz, i integer, loc integer, value integer);
SELECT table_name from create_hypertable('test2', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test2
(1 row)

-- This will generate 24 1 hour chunks.
INSERT INTO test2
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Compression is set to merge those 24 chunks into 3 chunks, two 10 hour chunks and a single 4 hour chunk.
ALTER TABLE test2 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='loc,"Time"', timescaledb.compress_chunk_time_interval='10 hours');
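-- Note: each merged chunk is filled with whole 1 hour chunks until the 10 hour
-- target is reached, so 24 hours of data rolls up as 10 + 10 + 4.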
-- Verify we are fully recompressing unordered chunks
BEGIN;
SELECT count(compress_chunk(chunk, true)) FROM show_chunks('test2') chunk;
count
-------
24
(1 row)

SELECT format('%I.%I',ch.schema_name,ch.table_name) AS "CHUNK"
FROM _timescaledb_catalog.chunk ch
JOIN _timescaledb_catalog.hypertable ht ON ht.id=ch.hypertable_id
JOIN _timescaledb_catalog.hypertable ht2 ON ht.id=ht2.compressed_hypertable_id AND ht2.table_name='test2' LIMIT 1 \gset
-- Not using time as first column in orderby makes the merged chunks unordered
-- We want to make sure we are fully recompressing them, which will leave only
-- a single batch per segment group
SELECT count(*)
FROM :CHUNK
WHERE i = 1;
count
-------
1
(1 row)

ROLLBACK;
SELECT
$$
SELECT * FROM test2 ORDER BY i, "Time"
$$ AS "QUERY" \gset
SELECT 'test2' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_merge.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
24
(1 row)

?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)

count
-------
3
(1 row)

?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)

\set TYPE integer
\set ORDER_BY_COL_NAME loc
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count
-------
3
(1 row)

min_correct | max_correct
-------------+-------------
t | t
(1 row)

DROP TABLE test2;
CREATE TABLE test3 ("Time" timestamptz, i integer, loc integer, value integer);
SELECT table_name from create_hypertable('test3', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test3
(1 row)

SELECT table_name from add_dimension('test3', 'i', number_partitions=> 3);
table_name
------------
test3
(1 row)

-- This will generate 25 1 hour chunks with a closed space dimension.
INSERT INTO test3 SELECT t, 1, gen_rand_minstd(), gen_rand_minstd() FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-02 12:59', '1 minute') t;
INSERT INTO test3 SELECT t, 2, gen_rand_minstd(), gen_rand_minstd() FROM generate_series('2018-03-02 13:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t;
INSERT INTO test3 SELECT t, 3, gen_rand_minstd(), gen_rand_minstd() FROM generate_series('2018-03-02 2:00'::TIMESTAMPTZ, '2018-03-02 2:01', '1 minute') t;
-- Compression is set to merge those 25 chunks into 12 2 hour chunks and a single 1 hour chunk in a different space partition.
ALTER TABLE test3 set (timescaledb.compress, timescaledb.compress_orderby='loc,"Time"', timescaledb.compress_chunk_time_interval='2 hours');
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "test3" is set to ""
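-- Note: merging only happens along the time dimension; chunks belonging to
-- different partitions of the space dimension "i" are never rolled up together,
-- which is why the lone i=3 chunk stays a separate 1 hour chunk.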
SELECT
$$
SELECT * FROM test3 WHERE i = 1 ORDER BY "Time"
$$ AS "QUERY" \gset
SELECT 'test3' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_merge.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
25
(1 row)

?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)

count
-------
13
(1 row)

?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)

\set TYPE integer
\set ORDER_BY_COL_NAME loc
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count
-------
13
(1 row)

min_correct | max_correct
-------------+-------------
t | t
(1 row)

DROP TABLE test3;
CREATE TABLE test4 ("Time" timestamptz, i integer, loc integer, value integer);
SELECT table_name from create_hypertable('test4', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test4
(1 row)

-- Setting compress_chunk_time_interval to non-multiple of chunk_time_interval should emit a warning.
ALTER TABLE test4 set (timescaledb.compress, timescaledb.compress_orderby='loc,"Time"', timescaledb.compress_chunk_time_interval='90 minutes');
WARNING: compress chunk interval is not a multiple of chunk interval, you should use a factor of chunk interval to merge as much as possible
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "test4" is set to ""
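-- Note: only whole chunks are merged, so a 90 minute target over 1 hour chunks
-- can never be filled exactly. Using an even multiple avoids the warning, e.g.
-- (illustrative, not executed as part of this test):
--   ALTER TABLE test4 SET (timescaledb.compress_chunk_time_interval = '3 hours');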
DROP TABLE test4;
CREATE TABLE test5 ("Time" timestamptz, i integer, value integer);
SELECT table_name from create_hypertable('test5', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test5
(1 row)

-- This will generate 24 chunks. Note that we are forcing everything into a single segment so a table scan is used when merging chunks.
INSERT INTO test5 SELECT t, 1, gen_rand_minstd() FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t;
-- Compression is set to merge those 24 chunks into 1 24 hour chunk
ALTER TABLE test5 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='24 hours');
SELECT
$$
SELECT * FROM test5 ORDER BY i, "Time"
$$ AS "QUERY" \gset
SELECT compress_chunk(i) FROM show_chunks('test5') i LIMIT 4;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
(4 rows)

SELECT format('%I.%I',ch.schema_name,ch.table_name) AS "CHUNK"
FROM _timescaledb_catalog.chunk ch
JOIN _timescaledb_catalog.hypertable ht ON ht.id=ch.hypertable_id
JOIN _timescaledb_catalog.hypertable ht2 ON ht.id=ht2.compressed_hypertable_id AND ht2.table_name='test5' \gset
-- Make sure sequence numbers are correctly fetched from index.
SELECT _ts_meta_sequence_num FROM :CHUNK where i = 1;
_ts_meta_sequence_num
-----------------------
10
20
30
40
(4 rows)

SELECT schemaname || '.' || indexname AS "INDEXNAME"
FROM pg_indexes i
INNER JOIN _timescaledb_catalog.chunk cc ON i.schemaname = cc.schema_name and i.tablename = cc.table_name
INNER JOIN _timescaledb_catalog.chunk c ON (cc.id = c.compressed_chunk_id)
LIMIT 1 \gset
DROP INDEX :INDEXNAME;
-- We dropped the index on the compressed chunk that's needed to determine sequence numbers
-- during merge; merging will fall back to doing heap scans and work just fine.
SELECT compress_chunk(i, true) FROM show_chunks('test5') i LIMIT 5;
NOTICE: chunk "_hyper_9_187_chunk" is already compressed
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
_timescaledb_internal._hyper_9_187_chunk
(5 rows)

-- Make sure sequence numbers are correctly fetched from heap.
SELECT _ts_meta_sequence_num FROM :CHUNK where i = 1;
_ts_meta_sequence_num
-----------------------
10
20
30
40
50
60
70
80
(8 rows)
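-- Note: batch sequence numbers are assigned in steps of 10 (10, 20, 30, ...),
-- leaving gaps so new batches can be slotted in between without renumbering;
-- each merged chunk appends its batches after the highest existing sequence
-- number, hence 8 batches once 8 chunks have been merged.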
SELECT 'test5' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_merge.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
psql:include/compression_test_merge.sql:12: NOTICE: chunk "_hyper_9_187_chunk" is already compressed
count_compressed
------------------
17
(1 row)

?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)

count
-------
1
(1 row)

?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)

DROP TABLE test5;
CREATE TABLE test6 ("Time" timestamptz, i integer, value integer);
SELECT table_name from create_hypertable('test6', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test6
(1 row)

-- This will generate 24 chunks
INSERT INTO test6
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Compression is set to merge those 24 chunks into 12 2 hour chunks.
ALTER TABLE test6 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='"Time"', timescaledb.compress_chunk_time_interval='2 hours');
SELECT compress_chunk(i) FROM show_chunks('test6') i;
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_212_chunk
_timescaledb_internal._hyper_11_212_chunk
_timescaledb_internal._hyper_11_214_chunk
_timescaledb_internal._hyper_11_214_chunk
_timescaledb_internal._hyper_11_216_chunk
_timescaledb_internal._hyper_11_216_chunk
_timescaledb_internal._hyper_11_218_chunk
_timescaledb_internal._hyper_11_218_chunk
_timescaledb_internal._hyper_11_220_chunk
_timescaledb_internal._hyper_11_220_chunk
_timescaledb_internal._hyper_11_222_chunk
_timescaledb_internal._hyper_11_222_chunk
_timescaledb_internal._hyper_11_224_chunk
_timescaledb_internal._hyper_11_224_chunk
_timescaledb_internal._hyper_11_226_chunk
_timescaledb_internal._hyper_11_226_chunk
_timescaledb_internal._hyper_11_228_chunk
_timescaledb_internal._hyper_11_228_chunk
_timescaledb_internal._hyper_11_230_chunk
_timescaledb_internal._hyper_11_230_chunk
_timescaledb_internal._hyper_11_232_chunk
_timescaledb_internal._hyper_11_232_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_234_chunk
(24 rows)

SELECT count(*) as number_of_chunks FROM show_chunks('test6');
number_of_chunks
------------------
12
(1 row)

-- This will generate another 24 chunks
INSERT INTO test6
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-03 1:00'::TIMESTAMPTZ, '2018-03-04 0:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Altering compress chunk time interval will cause us to create 6 chunks from the additional 24 chunks.
ALTER TABLE test6 set (timescaledb.compress_chunk_time_interval='4 hours');
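-- Note: changing compress_chunk_time_interval only affects merges going
-- forward; the existing 12 2 hour chunks are left alone, while the 24 new
-- 1 hour chunks roll up 4 at a time into 24 / 4 = 6 chunks, for 18 in total.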
SELECT compress_chunk(i, true) FROM show_chunks('test6') i;
NOTICE: chunk "_hyper_11_212_chunk" is already compressed
NOTICE: chunk "_hyper_11_214_chunk" is already compressed
NOTICE: chunk "_hyper_11_216_chunk" is already compressed
NOTICE: chunk "_hyper_11_218_chunk" is already compressed
NOTICE: chunk "_hyper_11_220_chunk" is already compressed
NOTICE: chunk "_hyper_11_222_chunk" is already compressed
NOTICE: chunk "_hyper_11_224_chunk" is already compressed
NOTICE: chunk "_hyper_11_226_chunk" is already compressed
NOTICE: chunk "_hyper_11_228_chunk" is already compressed
NOTICE: chunk "_hyper_11_230_chunk" is already compressed
NOTICE: chunk "_hyper_11_232_chunk" is already compressed
NOTICE: chunk "_hyper_11_234_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_212_chunk
_timescaledb_internal._hyper_11_214_chunk
_timescaledb_internal._hyper_11_216_chunk
_timescaledb_internal._hyper_11_218_chunk
_timescaledb_internal._hyper_11_220_chunk
_timescaledb_internal._hyper_11_222_chunk
_timescaledb_internal._hyper_11_224_chunk
_timescaledb_internal._hyper_11_226_chunk
_timescaledb_internal._hyper_11_228_chunk
_timescaledb_internal._hyper_11_230_chunk
_timescaledb_internal._hyper_11_232_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_270_chunk
_timescaledb_internal._hyper_11_270_chunk
(36 rows)

SELECT count(*) as number_of_chunks FROM show_chunks('test6');
number_of_chunks
------------------
18
(1 row)

-- This will generate another 3 chunks
INSERT INTO test6
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-04 1:00'::TIMESTAMPTZ, '2018-03-04 3:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Altering compress chunk time interval will cause us to create 3 chunks from the additional 3 chunks.
-- Setting compress chunk time interval to anything less than the chunk interval should disable merging chunks.
ALTER TABLE test6 set (timescaledb.compress_chunk_time_interval='30 minutes');
WARNING: compress chunk interval is not a multiple of chunk interval, you should use a factor of chunk interval to merge as much as possible
SELECT compress_chunk(i, true) FROM show_chunks('test6') i;
NOTICE: chunk "_hyper_11_212_chunk" is already compressed
NOTICE: chunk "_hyper_11_214_chunk" is already compressed
NOTICE: chunk "_hyper_11_216_chunk" is already compressed
NOTICE: chunk "_hyper_11_218_chunk" is already compressed
NOTICE: chunk "_hyper_11_220_chunk" is already compressed
NOTICE: chunk "_hyper_11_222_chunk" is already compressed
NOTICE: chunk "_hyper_11_224_chunk" is already compressed
NOTICE: chunk "_hyper_11_226_chunk" is already compressed
NOTICE: chunk "_hyper_11_228_chunk" is already compressed
NOTICE: chunk "_hyper_11_230_chunk" is already compressed
NOTICE: chunk "_hyper_11_232_chunk" is already compressed
NOTICE: chunk "_hyper_11_234_chunk" is already compressed
NOTICE: chunk "_hyper_11_250_chunk" is already compressed
NOTICE: chunk "_hyper_11_254_chunk" is already compressed
NOTICE: chunk "_hyper_11_258_chunk" is already compressed
NOTICE: chunk "_hyper_11_262_chunk" is already compressed
NOTICE: chunk "_hyper_11_266_chunk" is already compressed
NOTICE: chunk "_hyper_11_270_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_212_chunk
_timescaledb_internal._hyper_11_214_chunk
_timescaledb_internal._hyper_11_216_chunk
_timescaledb_internal._hyper_11_218_chunk
_timescaledb_internal._hyper_11_220_chunk
_timescaledb_internal._hyper_11_222_chunk
_timescaledb_internal._hyper_11_224_chunk
_timescaledb_internal._hyper_11_226_chunk
_timescaledb_internal._hyper_11_228_chunk
_timescaledb_internal._hyper_11_230_chunk
_timescaledb_internal._hyper_11_232_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_270_chunk
_timescaledb_internal._hyper_11_278_chunk
_timescaledb_internal._hyper_11_279_chunk
_timescaledb_internal._hyper_11_280_chunk
(21 rows)

SELECT count(*) as number_of_chunks FROM show_chunks('test6');
number_of_chunks
------------------
21
(1 row)

-- This will generate another 3 chunks
INSERT INTO test6
SELECT t, i, gen_rand_minstd()
FROM generate_series('2018-03-04 4:00'::TIMESTAMPTZ, '2018-03-04 6:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Altering compress chunk time interval will cause us to create 3 chunks from the additional 3 chunks.
-- Setting compress chunk time interval to anything less than the chunk interval should disable merging chunks.
ALTER TABLE test6 set (timescaledb.compress_chunk_time_interval=0);
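-- Note: an interval of 0 turns merging off entirely, so each of the 3 new
-- chunks is compressed individually and the chunk count grows from 21 to 24.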
SELECT compress_chunk(i, true) FROM show_chunks('test6') i;
NOTICE: chunk "_hyper_11_212_chunk" is already compressed
NOTICE: chunk "_hyper_11_214_chunk" is already compressed
NOTICE: chunk "_hyper_11_216_chunk" is already compressed
NOTICE: chunk "_hyper_11_218_chunk" is already compressed
NOTICE: chunk "_hyper_11_220_chunk" is already compressed
NOTICE: chunk "_hyper_11_222_chunk" is already compressed
NOTICE: chunk "_hyper_11_224_chunk" is already compressed
NOTICE: chunk "_hyper_11_226_chunk" is already compressed
NOTICE: chunk "_hyper_11_228_chunk" is already compressed
NOTICE: chunk "_hyper_11_230_chunk" is already compressed
NOTICE: chunk "_hyper_11_232_chunk" is already compressed
NOTICE: chunk "_hyper_11_234_chunk" is already compressed
NOTICE: chunk "_hyper_11_250_chunk" is already compressed
NOTICE: chunk "_hyper_11_254_chunk" is already compressed
NOTICE: chunk "_hyper_11_258_chunk" is already compressed
NOTICE: chunk "_hyper_11_262_chunk" is already compressed
NOTICE: chunk "_hyper_11_266_chunk" is already compressed
NOTICE: chunk "_hyper_11_270_chunk" is already compressed
NOTICE: chunk "_hyper_11_278_chunk" is already compressed
NOTICE: chunk "_hyper_11_279_chunk" is already compressed
NOTICE: chunk "_hyper_11_280_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_11_212_chunk
_timescaledb_internal._hyper_11_214_chunk
_timescaledb_internal._hyper_11_216_chunk
_timescaledb_internal._hyper_11_218_chunk
_timescaledb_internal._hyper_11_220_chunk
_timescaledb_internal._hyper_11_222_chunk
_timescaledb_internal._hyper_11_224_chunk
_timescaledb_internal._hyper_11_226_chunk
_timescaledb_internal._hyper_11_228_chunk
_timescaledb_internal._hyper_11_230_chunk
_timescaledb_internal._hyper_11_232_chunk
_timescaledb_internal._hyper_11_234_chunk
_timescaledb_internal._hyper_11_250_chunk
_timescaledb_internal._hyper_11_254_chunk
_timescaledb_internal._hyper_11_258_chunk
_timescaledb_internal._hyper_11_262_chunk
_timescaledb_internal._hyper_11_266_chunk
_timescaledb_internal._hyper_11_270_chunk
_timescaledb_internal._hyper_11_278_chunk
_timescaledb_internal._hyper_11_279_chunk
_timescaledb_internal._hyper_11_280_chunk
_timescaledb_internal._hyper_11_284_chunk
_timescaledb_internal._hyper_11_285_chunk
_timescaledb_internal._hyper_11_286_chunk
(24 rows)

SELECT count(*) as number_of_chunks FROM show_chunks('test6');
number_of_chunks
------------------
24
(1 row)

-- Setting compress chunk time interval to NULL will error out.
\set ON_ERROR_STOP 0
ALTER TABLE test6 set (timescaledb.compress_chunk_time_interval=NULL);
ERROR: invalid value for timescaledb.compress_chunk_time_interval 'null'
\set ON_ERROR_STOP 1
DROP TABLE test6;
CREATE TABLE test7 ("Time" timestamptz, i integer, j integer, k integer, value integer);
SELECT table_name from create_hypertable('test7', 'Time', chunk_time_interval=> INTERVAL '1 hour');
NOTICE: adding not-null constraint to column "Time"
table_name
------------
test7
(1 row)

-- This will generate 24 chunks
INSERT INTO test7
SELECT t, i, gen_rand_minstd(), gen_rand_minstd(), gen_rand_minstd()
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-03 0:59', '1 minute') t
CROSS JOIN generate_series(1, 5, 1) i;
-- Compression is set to merge those 24 chunks into 12 2 hour chunks, ordering by the j column before the time column, which causes recompression to occur during merge.
ALTER TABLE test7 set (timescaledb.compress, timescaledb.compress_segmentby='i', timescaledb.compress_orderby='j, "Time" desc', timescaledb.compress_chunk_time_interval='2 hours');
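-- Note: because "Time" is not the leading orderby column, rows from two
-- adjacent source chunks interleave in the compressed sort order, so the merge
-- cannot simply append batches and must decompress, re-sort, and recompress.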
SELECT
$$
SELECT * FROM test7 ORDER BY i, "Time"
$$ AS "QUERY" \gset
SELECT 'test7' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_merge.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
24
(1 row)

?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)

count
-------
12
(1 row)

?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)

DROP TABLE test7;
--#5090
CREATE TABLE test8(time TIMESTAMPTZ NOT NULL, value DOUBLE PRECISION NOT NULL, series_id BIGINT NOT NULL);
SELECT create_hypertable('test8', 'time', chunk_time_interval => INTERVAL '1 h');
create_hypertable
---------------------
(15,public,test8,t)
(1 row)

ALTER TABLE test8 set (timescaledb.compress,
timescaledb.compress_segmentby = 'series_id',
timescaledb.compress_orderby = 'time',
timescaledb.compress_chunk_time_interval = '1 day');
INSERT INTO test8 (time, series_id, value) SELECT t, s, 1 FROM generate_series(NOW(), NOW()+INTERVAL'4h', INTERVAL '30s') t CROSS JOIN generate_series(0, 100, 1) s;
SELECT compress_chunk(c, true) FROM show_chunks('test8') c LIMIT 4;
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_15_338_chunk
_timescaledb_internal._hyper_15_338_chunk
_timescaledb_internal._hyper_15_338_chunk
_timescaledb_internal._hyper_15_338_chunk
(4 rows)

SET enable_indexscan TO OFF;
SET enable_seqscan TO OFF;
SET enable_bitmapscan TO ON;
SELECT count(*) FROM test8 WHERE series_id = 1;
count
-------
481
(1 row)

-- Verify rollup is prevented when compression settings differ
CREATE TABLE test9(time TIMESTAMPTZ NOT NULL, value DOUBLE PRECISION NOT NULL, series_id BIGINT NOT NULL);
SELECT create_hypertable('test9', 'time', chunk_time_interval => INTERVAL '1 h');
create_hypertable
---------------------
(17,public,test9,t)
(1 row)

ALTER TABLE test9 set (timescaledb.compress,
timescaledb.compress_segmentby = 'series_id',
timescaledb.compress_orderby = 'time',
timescaledb.compress_chunk_time_interval = '1 day');
-- create chunk and compress
INSERT INTO test9 (time, series_id, value) SELECT '2020-01-01 00:00:00'::TIMESTAMPTZ, 1, 1;
SELECT compress_chunk(show_chunks('test9'), true);
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
(1 row)

INSERT INTO test9 (time, series_id, value) SELECT '2020-01-01 01:00:00'::TIMESTAMPTZ, 1, 1;
-- should be 2 chunks before rollup
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 01:00:00 2020 PST
test9 | Wed Jan 01 01:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
(2 rows)

SELECT compress_chunk(show_chunks('test9'), true);
NOTICE: chunk "_hyper_17_344_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
_timescaledb_internal._hyper_17_344_chunk
(2 rows)

-- should be 1 chunk because of rollup
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
(1 row)

INSERT INTO test9 (time, series_id, value) SELECT '2020-01-01 02:00:00'::TIMESTAMPTZ, 1, 1;
-- should be 2 chunks again
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
test9 | Wed Jan 01 02:00:00 2020 PST | Wed Jan 01 03:00:00 2020 PST
(2 rows)

ALTER TABLE test9 SET (timescaledb.compress_segmentby = '');
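-- Note: a chunk is only rolled up into a compressed chunk that was created
-- with matching compression settings; after changing segmentby (or orderby,
-- below), the new chunk is compressed separately instead of being merged.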
BEGIN;
SELECT compress_chunk(show_chunks('test9'), true);
NOTICE: chunk "_hyper_17_344_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
_timescaledb_internal._hyper_17_347_chunk
(2 rows)

-- should not be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
test9 | Wed Jan 01 02:00:00 2020 PST | Wed Jan 01 03:00:00 2020 PST
(2 rows)

ROLLBACK;
ALTER TABLE test9 SET (timescaledb.compress_segmentby = 'series_id', timescaledb.compress_orderby = 'time DESC');
BEGIN;
SELECT compress_chunk(show_chunks('test9'), true);
NOTICE: chunk "_hyper_17_344_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
_timescaledb_internal._hyper_17_347_chunk
(2 rows)

-- should not be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
test9 | Wed Jan 01 02:00:00 2020 PST | Wed Jan 01 03:00:00 2020 PST
(2 rows)

ROLLBACK;
ALTER TABLE test9 SET (timescaledb.compress_segmentby = 'series_id', timescaledb.compress_orderby = 'time NULLS FIRST');
BEGIN;
SELECT compress_chunk(show_chunks('test9'), true);
NOTICE: chunk "_hyper_17_344_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
_timescaledb_internal._hyper_17_347_chunk
(2 rows)

-- should not be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 02:00:00 2020 PST
test9 | Wed Jan 01 02:00:00 2020 PST | Wed Jan 01 03:00:00 2020 PST
(2 rows)

ROLLBACK;
-- reset back to original settings
ALTER TABLE test9 SET (timescaledb.compress_segmentby = 'series_id', timescaledb.compress_orderby = 'time');
BEGIN;
SELECT compress_chunk(show_chunks('test9'), true);
NOTICE: chunk "_hyper_17_344_chunk" is already compressed
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_17_344_chunk
_timescaledb_internal._hyper_17_344_chunk
(2 rows)

-- should be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
hypertable_name | range_start | range_end
-----------------+------------------------------+------------------------------
test9 | Wed Jan 01 00:00:00 2020 PST | Wed Jan 01 03:00:00 2020 PST
(1 row)

ROLLBACK;