During compression, autovacuum used to be disabled for the uncompressed chunk and re-enabled only after decompression, which caused PostgreSQL maintenance issues. Don't disable autovacuum for uncompressed chunks anymore; let PostgreSQL take care of the stats in its natural way. Fixes #309
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
SET timescaledb.enable_transparent_decompression to OFF;
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------
create table rand_minstd_state(i bigint);
create function rand_minstd_advance(bigint) returns bigint
language sql immutable as
$$
select (16807 * $1) % 2147483647
$$;
create function gen_rand_minstd() returns bigint
language sql security definer as
$$
update rand_minstd_state set i = rand_minstd_advance(i) returning i
$$;
-- seed the random num generator
insert into rand_minstd_state values (321);
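-- Illustrative sketch (commented out; not part of the captured run): once
-- seeded, gen_rand_minstd() yields a reproducible MINSTD sequence, so a
-- hypothetical table could be filled with deterministic pseudo-random values:
--   create table rand_minstd_demo(v bigint);
--   insert into rand_minstd_demo select gen_rand_minstd() from generate_series(1, 10);
--   drop table rand_minstd_demo;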
--test_collation ---
--basic test with count
create table foo (a integer, b integer, c integer, d integer);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
NOTICE:  adding not-null constraint to column "a"
 table_name
------------
 foo
(1 row)

create unique index foo_uniq ON foo (a, b);
--note that the "d" order by column is all NULL
insert into foo values( 3 , 16 , 20, NULL);
insert into foo values( 10 , 10 , 20, NULL);
insert into foo values( 20 , 11 , 20, NULL);
insert into foo values( 30 , 12 , 20, NULL);
alter table foo set (timescaledb.compress, timescaledb.compress_segmentby = 'a,b', timescaledb.compress_orderby = 'c desc, d asc nulls last');
--test self-referencing updates
SET timescaledb.enable_transparent_decompression to ON;
update foo set c = 40
where a = (SELECT max(a) FROM foo);
SET timescaledb.enable_transparent_decompression to OFF;
select id, schema_name, table_name, compression_state as compressed, compressed_hypertable_id from
_timescaledb_catalog.hypertable order by id;
 id | schema_name | table_name | compressed | compressed_hypertable_id
----+-----------------------+--------------------------+------------+--------------------------
 1 | public | foo | 1 | 2
 2 | _timescaledb_internal | _compressed_hypertable_2 | 2 |
(2 rows)

select * from _timescaledb_catalog.hypertable_compression order by hypertable_id, attname;
 hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+---------+--------------------------+------------------------+----------------------+-------------+--------------------
 1 | a | 0 | 1 | | |
 1 | b | 0 | 2 | | |
 1 | c | 4 | | 1 | f | t
 1 | d | 4 | | 2 | t | f
(4 rows)

select * from timescaledb_information.compression_settings ORDER BY hypertable_name;
 hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
-------------------+-----------------+---------+------------------------+----------------------+-------------+--------------------
 public | foo | a | 1 | | |
 public | foo | b | 2 | | |
 public | foo | c | | 1 | f | t
 public | foo | d | | 2 | t | f
(4 rows)

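-- Illustrative sketch (commented out; not part of the captured run): the same
-- view can be filtered to inspect the settings of a single hypertable:
--   SELECT attname, segmentby_column_index, orderby_column_index
--   FROM timescaledb_information.compression_settings
--   WHERE hypertable_name = 'foo';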
-- TEST2 compress-chunk for the chunks created earlier --
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
             compress_chunk
----------------------------------------
 _timescaledb_internal._hyper_1_2_chunk
(1 row)

select tgname , tgtype, tgenabled , relname
from pg_trigger t, pg_class rel
where t.tgrelid = rel.oid and rel.relname like '_hyper_1_2_chunk' order by tgname;
 tgname | tgtype | tgenabled | relname
--------+--------+-----------+---------
(0 rows)

\x
select * from chunk_compression_stats('foo')
order by chunk_name limit 2;
-[ RECORD 1 ]------------------+----------------------
chunk_schema | _timescaledb_internal
chunk_name | _hyper_1_1_chunk
compression_status | Uncompressed
before_compression_table_bytes |
before_compression_index_bytes |
before_compression_toast_bytes |
before_compression_total_bytes |
after_compression_table_bytes |
after_compression_index_bytes |
after_compression_toast_bytes |
after_compression_total_bytes |
node_name |
-[ RECORD 2 ]------------------+----------------------
chunk_schema | _timescaledb_internal
chunk_name | _hyper_1_2_chunk
compression_status | Compressed
before_compression_table_bytes | 8192
before_compression_index_bytes | 32768
before_compression_toast_bytes | 0
before_compression_total_bytes | 40960
after_compression_table_bytes | 8192
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
after_compression_total_bytes | 32768
node_name |

\x
select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
             compress_chunk
----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
(1 row)

\x
select * from _timescaledb_catalog.compression_chunk_size
order by chunk_id;
-[ RECORD 1 ]------------+------
chunk_id | 1
compressed_chunk_id | 6
uncompressed_heap_size | 8192
uncompressed_toast_size | 0
uncompressed_index_size | 32768
compressed_heap_size | 8192
compressed_toast_size | 8192
compressed_index_size | 16384
numrows_pre_compression | 1
numrows_post_compression | 1
-[ RECORD 2 ]------------+------
chunk_id | 2
compressed_chunk_id | 5
uncompressed_heap_size | 8192
uncompressed_toast_size | 0
uncompressed_index_size | 32768
compressed_heap_size | 8192
compressed_toast_size | 8192
compressed_index_size | 16384
numrows_pre_compression | 1
numrows_post_compression | 1

\x
select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table
from
_timescaledb_catalog.chunk ch1, _timescaledb_catalog.chunk ch2
where ch1.compressed_chunk_id = ch2.id;
 id | schema_name | table_name | compress_table
----+-----------------------+------------------+--------------------------
 2 | _timescaledb_internal | _hyper_1_2_chunk | compress_hyper_2_5_chunk
 1 | _timescaledb_internal | _hyper_1_1_chunk | compress_hyper_2_6_chunk
(2 rows)

\set ON_ERROR_STOP 0
--cannot recompress the chunk the second time around
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
ERROR:  chunk "_hyper_1_2_chunk" is already compressed
--TEST2a try DML on a compressed chunk
BEGIN;
insert into foo values( 11 , 10 , 20, 120);
ROLLBACK;
update foo set b =20 where a = 10;
ERROR:  cannot update/delete rows from chunk "_hyper_1_2_chunk" as it is compressed
delete from foo where a = 10;
ERROR:  cannot update/delete rows from chunk "_hyper_1_2_chunk" as it is compressed
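-- Illustrative sketch (commented out; not part of the captured run): on this
-- version, updating rows in a compressed chunk requires decompressing first,
-- running the DML, and recompressing, e.g.:
--   SELECT decompress_chunk('_timescaledb_internal._hyper_1_2_chunk');
--   UPDATE foo SET b = 20 WHERE a = 10;
--   SELECT compress_chunk('_timescaledb_internal._hyper_1_2_chunk');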
--TEST2b try complex DML on compressed chunk
create table foo_join ( a integer, newval integer);
select table_name from create_hypertable('foo_join', 'a', chunk_time_interval=> 10);
NOTICE:  adding not-null constraint to column "a"
 table_name
------------
 foo_join
(1 row)

insert into foo_join select generate_series(0,40, 10), 111;
create table foo_join2 ( a integer, newval integer);
select table_name from create_hypertable('foo_join2', 'a', chunk_time_interval=> 10);
NOTICE:  adding not-null constraint to column "a"
 table_name
------------
 foo_join2
(1 row)

insert into foo_join select generate_series(0,40, 10), 222;
update foo
set b = newval
from foo_join where foo.a = foo_join.a;
ERROR:  cannot update/delete rows from chunk "_hyper_1_1_chunk" as it is compressed
update foo
set b = newval
from foo_join where foo.a = foo_join.a and foo_join.a > 10;
ERROR:  cannot update/delete rows from chunk "_hyper_1_1_chunk" as it is compressed
--here the compressed chunk gets excluded, so the update succeeds --
update foo
set b = newval
from foo_join where foo.a = foo_join.a and foo.a > 20;
update foo
set b = (select f1.newval from foo_join f1 left join lateral (select newval as newval2 from foo_join2 f2 where f1.a= f2.a ) subq on true limit 1);
ERROR:  cannot update/delete rows from chunk "_hyper_1_1_chunk" as it is compressed
--upsert test --
insert into foo values(10, 12, 12, 12)
on conflict( a, b)
do update set b = excluded.b;
SELECT * from foo ORDER BY a,b;
 a | b | c | d
----+-----+----+----
 10 | 12 | 12 | 12
 20 | 11 | 20 |
 30 | 222 | 40 |
(3 rows)
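
-- Illustrative sketch (commented out; not part of the captured run): the
-- DO NOTHING form of the same upsert simply skips conflicting rows instead
-- of updating them:
--   insert into foo values(10, 12, 12, 12)
--   on conflict (a, b)
--   do nothing;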

--TEST2c Do DML directly on the chunk.
insert into _timescaledb_internal._hyper_1_2_chunk values(10, 12, 12, 12)
on conflict( a, b)
do update set b = excluded.b + 12;
SELECT * from foo ORDER BY a,b;
 a | b | c | d
----+-----+----+----
 10 | 24 | 12 | 12
 20 | 11 | 20 |
 30 | 222 | 40 |
(3 rows)

update _timescaledb_internal._hyper_1_2_chunk
set b = 12;
ERROR:  cannot update/delete rows from chunk "_hyper_1_2_chunk" as it is compressed
delete from _timescaledb_internal._hyper_1_2_chunk;
ERROR:  cannot update/delete rows from chunk "_hyper_1_2_chunk" as it is compressed
--TEST2d decompress the chunk and try DML
select decompress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
            decompress_chunk
----------------------------------------
 _timescaledb_internal._hyper_1_2_chunk
(1 row)

insert into foo values( 11 , 10 , 20, 120);
update foo set b =20 where a = 10;
ERROR:  duplicate key value violates unique constraint "_hyper_1_2_chunk_foo_uniq"
select * from _timescaledb_internal._hyper_1_2_chunk order by a,b;
 a | b | c | d
----+----+----+-----
 10 | 10 | 20 |
 10 | 24 | 12 | 12
 11 | 10 | 20 | 120
(3 rows)

delete from foo where a = 10;
select * from _timescaledb_internal._hyper_1_2_chunk order by a,b;
 a | b | c | d
----+----+----+-----
 11 | 10 | 20 | 120
(1 row)

-- TEST3 check that compression data reported by the views is accurate
CREATE TABLE conditions (
      time        TIMESTAMPTZ       NOT NULL,
      location    TEXT              NOT NULL,
      location2   char(10)          NOT NULL,
      temperature DOUBLE PRECISION  NULL,
      humidity    DOUBLE PRECISION  NULL
    );
select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval);
    create_hypertable
-------------------------
 (5,public,conditions,t)
(1 row)

alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'NYC', 'klick', 55, 75;
select hypertable_id, attname, compression_algorithm_id , al.name
from _timescaledb_catalog.hypertable_compression hc,
_timescaledb_catalog.hypertable ht,
_timescaledb_catalog.compression_algorithm al
where ht.id = hc.hypertable_id and ht.table_name like 'conditions' and al.id = hc.compression_algorithm_id
ORDER BY hypertable_id, attname;
 hypertable_id | attname | compression_algorithm_id | name
---------------+-------------+--------------------------+----------------------------------
 5 | humidity | 3 | COMPRESSION_ALGORITHM_GORILLA
 5 | location | 0 | COMPRESSION_ALGORITHM_NONE
 5 | location2 | 2 | COMPRESSION_ALGORITHM_DICTIONARY
 5 | temperature | 3 | COMPRESSION_ALGORITHM_GORILLA
 5 | time | 4 | COMPRESSION_ALGORITHM_DELTADELTA
(5 rows)

select attname, attstorage, typname from pg_attribute at, pg_class cl , pg_type ty
where cl.oid = at.attrelid and at.attnum > 0
and cl.relname = '_compressed_hypertable_4'
and atttypid = ty.oid
order by at.attnum;
 attname | attstorage | typname
---------+------------+---------
(0 rows)

SELECT ch1.schema_name|| '.' || ch1.table_name as "CHUNK_NAME", ch1.id "CHUNK_ID"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
ORDER BY ch1.id
LIMIT 1 \gset
SELECT count(*) from :CHUNK_NAME;
 count
-------
 42
(1 row)

SELECT count(*) as "ORIGINAL_CHUNK_COUNT" from :CHUNK_NAME \gset
select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
 tableoid | count
-----------------------------------------+-------
 _timescaledb_internal._hyper_5_12_chunk | 42
 _timescaledb_internal._hyper_5_13_chunk | 20
(2 rows)

select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions' ORDER BY ch1.id limit 1;
             compress_chunk
-----------------------------------------
 _timescaledb_internal._hyper_5_12_chunk
(1 row)

--test that only one chunk was affected
--note tables with 0 rows will not show up here.
select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
 tableoid | count
-----------------------------------------+-------
 _timescaledb_internal._hyper_5_13_chunk | 20
(1 row)

select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions' and ch1.compressed_chunk_id IS NULL;
             compress_chunk
-----------------------------------------
 _timescaledb_internal._hyper_5_13_chunk
(1 row)

select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
 tableoid | count
----------+-------
(0 rows)

select compressed.schema_name|| '.' || compressed.table_name as "COMPRESSED_CHUNK_NAME"
from _timescaledb_catalog.chunk uncompressed, _timescaledb_catalog.chunk compressed
where uncompressed.compressed_chunk_id = compressed.id AND uncompressed.id = :'CHUNK_ID' \gset
SELECT count(*) from :CHUNK_NAME;
 count
-------
 0
(1 row)

SELECT count(*) from :COMPRESSED_CHUNK_NAME;
 count
-------
 2
(1 row)

SELECT sum(_ts_meta_count) from :COMPRESSED_CHUNK_NAME;
 sum
-----
 42
(1 row)

SELECT location, _ts_meta_sequence_num from :COMPRESSED_CHUNK_NAME ORDER BY 1,2;
 location | _ts_meta_sequence_num
----------+-----------------------
 NYC | 10
 POR | 10
(2 rows)
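
-- Illustrative sketch (commented out; not part of the captured run): the same
-- catalog can be used to derive per-chunk batching and size ratios:
--   SELECT chunk_id,
--          numrows_pre_compression::numeric / numrows_post_compression AS rows_per_batch,
--          uncompressed_heap_size::numeric / compressed_heap_size AS heap_ratio
--   FROM _timescaledb_catalog.compression_chunk_size
--   ORDER BY chunk_id;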

\x
SELECT chunk_id, numrows_pre_compression, numrows_post_compression
FROM _timescaledb_catalog.chunk srcch,
_timescaledb_catalog.compression_chunk_size map,
_timescaledb_catalog.hypertable srcht
WHERE map.chunk_id = srcch.id and srcht.id = srcch.hypertable_id
and srcht.table_name like 'conditions'
order by chunk_id;
-[ RECORD 1 ]------------+---
chunk_id | 12
numrows_pre_compression | 42
numrows_post_compression | 2
-[ RECORD 2 ]------------+---
chunk_id | 13
numrows_pre_compression | 20
numrows_post_compression | 2

select * from chunk_compression_stats('conditions')
order by chunk_name;
-[ RECORD 1 ]------------------+----------------------
chunk_schema | _timescaledb_internal
chunk_name | _hyper_5_12_chunk
compression_status | Compressed
before_compression_table_bytes | 8192
before_compression_index_bytes | 16384
before_compression_toast_bytes | 8192
before_compression_total_bytes | 32768
after_compression_table_bytes | 8192
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
after_compression_total_bytes | 32768
node_name |
-[ RECORD 2 ]------------------+----------------------
chunk_schema | _timescaledb_internal
chunk_name | _hyper_5_13_chunk
compression_status | Compressed
before_compression_table_bytes | 8192
before_compression_index_bytes | 16384
before_compression_toast_bytes | 8192
before_compression_total_bytes | 32768
after_compression_table_bytes | 8192
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
after_compression_total_bytes | 32768
node_name |

select * from hypertable_compression_stats('foo');
-[ RECORD 1 ]------------------+------
total_chunks | 4
number_compressed_chunks | 1
before_compression_table_bytes | 8192
before_compression_index_bytes | 32768
before_compression_toast_bytes | 0
before_compression_total_bytes | 40960
after_compression_table_bytes | 8192
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
after_compression_total_bytes | 32768
node_name |

select * from hypertable_compression_stats('conditions');
-[ RECORD 1 ]------------------+------
total_chunks | 2
number_compressed_chunks | 2
before_compression_table_bytes | 16384
before_compression_index_bytes | 32768
before_compression_toast_bytes | 16384
before_compression_total_bytes | 65536
after_compression_table_bytes | 16384
after_compression_index_bytes | 32768
after_compression_toast_bytes | 16384
after_compression_total_bytes | 65536
node_name |

vacuum full foo;
vacuum full conditions;
-- After vacuum, table_bytes is 0, but any associated index/toast storage is not
-- completely reclaimed and stays at 8K (one page). So a chunk that has been
-- compressed still incurs an overhead of n * 8KB (one page per index and toast table) on the original uncompressed chunk.
select pg_size_pretty(table_bytes), pg_size_pretty(index_bytes),
pg_size_pretty(toast_bytes), pg_size_pretty(total_bytes)
from hypertable_detailed_size('foo');
-[ RECORD 1 ]--+-----------
pg_size_pretty | 32 kB
pg_size_pretty | 144 kB
pg_size_pretty | 8192 bytes
pg_size_pretty | 184 kB

select pg_size_pretty(table_bytes), pg_size_pretty(index_bytes),
pg_size_pretty(toast_bytes), pg_size_pretty(total_bytes)
from hypertable_detailed_size('conditions');
-[ RECORD 1 ]--+-------
pg_size_pretty | 16 kB
pg_size_pretty | 56 kB
pg_size_pretty | 40 kB
pg_size_pretty | 112 kB

select * from timescaledb_information.hypertables
where hypertable_name like 'foo' or hypertable_name like 'conditions'
order by hypertable_name;
-[ RECORD 1 ]-------+------------------
hypertable_schema | public
hypertable_name | conditions
owner | default_perm_user
num_dimensions | 1
num_chunks | 2
compression_enabled | t
is_distributed | f
replication_factor |
data_nodes |
tablespaces |
-[ RECORD 2 ]-------+------------------
hypertable_schema | public
hypertable_name | foo
owner | default_perm_user
num_dimensions | 1
num_chunks | 4
compression_enabled | t
is_distributed | f
replication_factor |
data_nodes |
tablespaces |

\x
SELECT decompress_chunk(ch1.schema_name|| '.' || ch1.table_name) AS chunk
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id and ht.table_name LIKE 'conditions'
ORDER BY chunk;
                  chunk
-----------------------------------------
 _timescaledb_internal._hyper_5_12_chunk
 _timescaledb_internal._hyper_5_13_chunk
(2 rows)
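
-- Illustrative sketch (commented out; not part of the captured run): when only
-- some chunks are compressed, the catalog's compressed_chunk_id can guard the
-- call so that decompress_chunk is not attempted on uncompressed chunks:
--   SELECT decompress_chunk(ch.schema_name || '.' || ch.table_name)
--   FROM _timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
--   WHERE ch.hypertable_id = ht.id AND ht.table_name = 'conditions'
--     AND ch.compressed_chunk_id IS NOT NULL;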

SELECT count(*), count(*) = :'ORIGINAL_CHUNK_COUNT' from :CHUNK_NAME;
 count | ?column?
-------+----------
 42 | t
(1 row)

--check that the compressed chunk is dropped
\set ON_ERROR_STOP 0
SELECT count(*) from :COMPRESSED_CHUNK_NAME;
ERROR:  relation "_timescaledb_internal.compress_hyper_6_14_chunk" does not exist at character 22
\set ON_ERROR_STOP 1
--size information is gone too
select count(*)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht,
_timescaledb_catalog.compression_chunk_size map
where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
and map.chunk_id = ch1.id;
 count
-------
 0
(1 row)

--make sure compressed_chunk_id is reset to NULL
select ch1.compressed_chunk_id IS NULL
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
 ?column?
----------
 t
 t
(2 rows)

-- test plans get invalidated when chunks get compressed
SET timescaledb.enable_transparent_decompression TO ON;
CREATE TABLE plan_inval(time timestamptz, device_id int);
SELECT create_hypertable('plan_inval','time');
NOTICE:  adding not-null constraint to column "time"
    create_hypertable
-------------------------
 (7,public,plan_inval,t)
(1 row)

ALTER TABLE plan_inval SET (timescaledb.compress,timescaledb.compress_orderby='time desc');
-- create 2 chunks
INSERT INTO plan_inval SELECT * FROM (VALUES ('2000-01-01'::timestamptz,1), ('2000-01-07'::timestamptz,1)) v(time,device_id);
SET max_parallel_workers_per_gather to 0;
PREPARE prep_plan AS SELECT count(*) FROM plan_inval;
EXECUTE prep_plan;
 count
-------
 2
(1 row)

EXECUTE prep_plan;
 count
-------
 2
(1 row)

EXECUTE prep_plan;
 count
-------
 2
(1 row)

-- get name of first chunk
SELECT tableoid::regclass AS "CHUNK_NAME" FROM plan_inval ORDER BY time LIMIT 1
\gset
SELECT compress_chunk(:'CHUNK_NAME');
             compress_chunk
-----------------------------------------
 _timescaledb_internal._hyper_7_16_chunk
(1 row)

EXECUTE prep_plan;
 count
-------
 2
(1 row)

EXPLAIN (COSTS OFF) EXECUTE prep_plan;
                           QUERY PLAN
----------------------------------------------------------------
 Aggregate
   ->  Append
         ->  Custom Scan (DecompressChunk) on _hyper_7_16_chunk
               ->  Seq Scan on compress_hyper_8_18_chunk
         ->  Seq Scan on _hyper_7_17_chunk
(5 rows)
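
-- Illustrative sketch (commented out; not part of the captured run): the same
-- replanning check works for any prepared statement on a hypertable; after
-- compressing a chunk, EXPLAIN EXECUTE should show the DecompressChunk node:
--   PREPARE p AS SELECT count(*) FROM plan_inval WHERE device_id = 1;
--   EXPLAIN (COSTS OFF) EXECUTE p;
--   DEALLOCATE p;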

CREATE TABLE test_collation (
      time      TIMESTAMPTZ       NOT NULL,
      device_id TEXT COLLATE "C"     NULL,
      device_id_2 TEXT COLLATE "POSIX" NULL,
      val_1 TEXT COLLATE "C" NULL,
      val_2 TEXT COLLATE "POSIX" NULL
    );
--use a 1 day chunk interval so the data spans multiple chunks
select create_hypertable( 'test_collation', 'time', chunk_time_interval=> '1 day'::interval);
      create_hypertable
-----------------------------
 (9,public,test_collation,t)
(1 row)

\set ON_ERROR_STOP 0
--forbid setting collation in compression ORDER BY clause. (parse error is fine)
alter table test_collation set (timescaledb.compress, timescaledb.compress_segmentby='device_id, device_id_2', timescaledb.compress_orderby = 'val_1 COLLATE "POSIX", val2, time');
ERROR:  unable to parse ordering option "val_1 COLLATE "POSIX", val2, time"
\set ON_ERROR_STOP 1
alter table test_collation set (timescaledb.compress, timescaledb.compress_segmentby='device_id, device_id_2', timescaledb.compress_orderby = 'val_1, val_2, time');
insert into test_collation
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_1', 'device_3', gen_rand_minstd(), gen_rand_minstd();
insert into test_collation
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_2', 'device_4', gen_rand_minstd(), gen_rand_minstd();
insert into test_collation
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), NULL, 'device_5', gen_rand_minstd(), gen_rand_minstd();
--compress 2 chunks
SELECT compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id
and ht.table_name like 'test_collation' ORDER BY ch1.id LIMIT 2;
             compress_chunk
-----------------------------------------
 _timescaledb_internal._hyper_9_19_chunk
 _timescaledb_internal._hyper_9_20_chunk
(2 rows)

--segmentby filters are pushed down correctly
EXPLAIN (costs off) SELECT * FROM test_collation WHERE device_id < 'a';
                         QUERY PLAN
----------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (device_id < 'a'::text)
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (device_id < 'a'::text)
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (device_id < 'a'::text)
(23 rows)

EXPLAIN (costs off) SELECT * FROM test_collation WHERE device_id < 'a' COLLATE "POSIX";
                           QUERY PLAN
---------------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (device_id < 'a'::text COLLATE "POSIX")
(23 rows)

\set ON_ERROR_STOP 0
EXPLAIN (costs off) SELECT * FROM test_collation WHERE device_id COLLATE "POSIX" < device_id_2 COLLATE "C";
ERROR:  collation mismatch between explicit collations "POSIX" and "C" at character 96
SELECT device_id < device_id_2 FROM test_collation;
ERROR:  could not determine which collation to use for string comparison
\set ON_ERROR_STOP 1
--segment metadata pushdown on orderby columns
--should work
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a';
                         QUERY PLAN
----------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_1 < 'a'::text)
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (_ts_meta_min_1 < 'a'::text)
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_1 < 'a'::text)
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (_ts_meta_min_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_1 < 'a'::text)
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_1 < 'a'::text)
(25 rows)

EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a';
                         QUERY PLAN
----------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_2 < 'a'::text)
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (_ts_meta_min_2 < 'a'::text)
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_2 < 'a'::text)
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (_ts_meta_min_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_2 < 'a'::text)
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_2 < 'a'::text)
(25 rows)

EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a' COLLATE "C";
                            QUERY PLAN
----------------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (_ts_meta_min_1 < 'a'::text COLLATE "C")
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (_ts_meta_min_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_1 < 'a'::text COLLATE "C")
(25 rows)

EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a' COLLATE "POSIX";
                              QUERY PLAN
--------------------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
         ->  Seq Scan on compress_hyper_10_29_chunk
               Filter: (_ts_meta_min_2 < 'a'::text COLLATE "POSIX")
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
         ->  Seq Scan on compress_hyper_10_30_chunk
               Filter: (_ts_meta_min_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_2 < 'a'::text COLLATE "POSIX")
(25 rows)

--cannot push down when the operator's collation does not match the column's collation,
--since the min/max metadata was computed with a different collation than the operator needs
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a' COLLATE "POSIX";
                         QUERY PLAN
----------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
         ->  Seq Scan on compress_hyper_10_29_chunk
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
         ->  Seq Scan on compress_hyper_10_30_chunk
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_1 < 'a'::text COLLATE "POSIX")
(23 rows)

EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a' COLLATE "C";
                         QUERY PLAN
----------------------------------------------------------
 Append
   ->  Custom Scan (DecompressChunk) on _hyper_9_19_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
         ->  Seq Scan on compress_hyper_10_29_chunk
   ->  Custom Scan (DecompressChunk) on _hyper_9_20_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
         ->  Seq Scan on compress_hyper_10_30_chunk
   ->  Seq Scan on _hyper_9_21_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_22_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_23_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_24_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_25_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_26_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_27_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
   ->  Seq Scan on _hyper_9_28_chunk
         Filter: (val_2 < 'a'::text COLLATE "C")
(23 rows)
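
-- Illustrative sketch (commented out; not part of the captured run): to keep
-- the metadata filter on the compressed chunk scan, the query's collation must
-- match the column's declared collation (here "C" for val_1):
--   EXPLAIN (costs off)
--   SELECT * FROM test_collation WHERE val_1 < 'a' COLLATE "C";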

--test datatypes
CREATE TABLE datatype_test(
  time timestamptz NOT NULL,
  int2_column int2,
  int4_column int4,
  int8_column int8,
  float4_column float4,
  float8_column float8,
  date_column date,
  timestamp_column timestamp,
  timestamptz_column timestamptz,
  interval_column interval,
  numeric_column numeric,
  decimal_column decimal,
  text_column text,
  char_column char
);
SELECT create_hypertable('datatype_test','time');
WARNING:  column type "timestamp without time zone" used for "timestamp_column" does not follow best practices
      create_hypertable
-----------------------------
 (11,public,datatype_test,t)
(1 row)

ALTER TABLE datatype_test SET (timescaledb.compress);
INSERT INTO datatype_test VALUES ('2000-01-01',2,4,8,4.0,8.0,'2000-01-01','2001-01-01 12:00','2001-01-01 6:00','1 week', 3.41, 4.2, 'text', 'x');
SELECT compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id
and ht.table_name like 'datatype_test' ORDER BY ch1.id;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_11_31_chunk
(1 row)

SELECT
  attname, alg.name
FROM _timescaledb_catalog.hypertable ht
  INNER JOIN _timescaledb_catalog.hypertable_compression htc ON ht.id=htc.hypertable_id
  INNER JOIN _timescaledb_catalog.compression_algorithm alg ON alg.id=htc.compression_algorithm_id
WHERE ht.table_name='datatype_test'
ORDER BY attname;
 attname | name
--------------------+----------------------------------
 char_column | COMPRESSION_ALGORITHM_DICTIONARY
 date_column | COMPRESSION_ALGORITHM_DELTADELTA
 decimal_column | COMPRESSION_ALGORITHM_ARRAY
 float4_column | COMPRESSION_ALGORITHM_GORILLA
 float8_column | COMPRESSION_ALGORITHM_GORILLA
 int2_column | COMPRESSION_ALGORITHM_DELTADELTA
 int4_column | COMPRESSION_ALGORITHM_DELTADELTA
 int8_column | COMPRESSION_ALGORITHM_DELTADELTA
 interval_column | COMPRESSION_ALGORITHM_DICTIONARY
 numeric_column | COMPRESSION_ALGORITHM_ARRAY
 text_column | COMPRESSION_ALGORITHM_DICTIONARY
 time | COMPRESSION_ALGORITHM_DELTADELTA
 timestamp_column | COMPRESSION_ALGORITHM_DELTADELTA
 timestamptz_column | COMPRESSION_ALGORITHM_DELTADELTA
(14 rows)

--TEST try to compress a hypertable that has a continuous aggregate
CREATE TABLE metrics(time timestamptz, device_id int, v1 float, v2 float);
SELECT create_hypertable('metrics','time');
NOTICE:  adding not-null constraint to column "time"
   create_hypertable
-----------------------
 (13,public,metrics,t)
(1 row)

INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75;
-- check expressions in view definition
CREATE MATERIALIZED VIEW cagg_expr WITH (timescaledb.continuous)
AS
SELECT
  time_bucket('1d', time) AS time,
  'Const'::text AS Const,
  4.3::numeric AS "numeric",
  first(metrics,time),
  CASE WHEN true THEN 'foo' ELSE 'bar' END,
  COALESCE(NULL,'coalesce'),
  avg(v1) + avg(v2) AS avg1,
  avg(v1+v2) AS avg2,
  count(*) AS cnt
FROM metrics
GROUP BY 1 WITH NO DATA;
CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL);
SELECT * FROM cagg_expr ORDER BY time LIMIT 5;
 time | const | numeric | first | case | coalesce | avg1 | avg2 | cnt
------------------------------+-------+---------+----------------------------------------------+------+----------+------+------+------
 Fri Dec 31 16:00:00 1999 PST | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 | 960
 Sat Jan 01 16:00:00 2000 PST | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 | 1440
 Sun Jan 02 16:00:00 2000 PST | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 | 1440
 Mon Jan 03 16:00:00 2000 PST | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 | 1440
 Tue Jan 04 16:00:00 2000 PST | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 PST",1,0.25,0.75) | foo | coalesce | 1 | 1 | 1440
(5 rows)

ALTER TABLE metrics set(timescaledb.compress);
-- test rescan in compress chunk dml blocker
CREATE TABLE rescan_test(id integer NOT NULL, t timestamptz NOT NULL, val double precision, PRIMARY KEY(id, t));
SELECT create_hypertable('rescan_test', 't', chunk_time_interval => interval '1 day');
     create_hypertable
---------------------------
 (16,public,rescan_test,t)
(1 row)

-- compression
ALTER TABLE rescan_test SET (timescaledb.compress, timescaledb.compress_segmentby = 'id');
-- INSERT dummy data
INSERT INTO rescan_test SELECT 1, time, random() FROM generate_series('2000-01-01'::timestamptz, '2000-01-05'::timestamptz, '1h'::interval) g(time);
SELECT count(*) FROM rescan_test;
 count
-------
 97
(1 row)

-- compress first chunk
SELECT compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id
and ht.table_name like 'rescan_test' ORDER BY ch1.id LIMIT 1;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_16_36_chunk
(1 row)

-- count should be equal to count before compression
SELECT count(*) FROM rescan_test;
 count
-------
 97
(1 row)

-- single row update is fine
UPDATE rescan_test SET val = val + 1 WHERE rescan_test.id = 1 AND rescan_test.t = '2000-01-03 00:00:00+00';
-- multi row update via WHERE is fine
UPDATE rescan_test SET val = val + 1 WHERE rescan_test.id = 1 AND rescan_test.t > '2000-01-03 00:00:00+00';
-- single row update with FROM is allowed if no compressed chunks are hit
UPDATE rescan_test SET val = tmp.val
FROM (SELECT x.id, x.t, x.val FROM unnest(array[(1, '2000-01-03 00:00:00+00', 2.045)]::rescan_test[]) AS x) AS tmp
WHERE rescan_test.id = tmp.id AND rescan_test.t = tmp.t AND rescan_test.t >= '2000-01-03';
-- single row update with FROM is blocked
\set ON_ERROR_STOP 0
UPDATE rescan_test SET val = tmp.val
FROM (SELECT x.id, x.t, x.val FROM unnest(array[(1, '2000-01-03 00:00:00+00', 2.045)]::rescan_test[]) AS x) AS tmp
WHERE rescan_test.id = tmp.id AND rescan_test.t = tmp.t;
ERROR:  cannot update/delete rows from chunk "_hyper_16_36_chunk" as it is compressed
-- bulk row update with FROM is blocked
UPDATE rescan_test SET val = tmp.val
FROM (SELECT x.id, x.t, x.val FROM unnest(array[(1, '2000-01-03 00:00:00+00', 2.045), (1, '2000-01-03 01:00:00+00', 8.045)]::rescan_test[]) AS x) AS tmp
WHERE rescan_test.id = tmp.id AND rescan_test.t = tmp.t;
ERROR:  cannot update/delete rows from chunk "_hyper_16_36_chunk" as it is compressed
\set ON_ERROR_STOP 1
-- Test FK constraint drop and recreate during compression and decompression on a chunk
CREATE TABLE meta (device_id INT PRIMARY KEY);
CREATE TABLE hyper(
  time INT NOT NULL,
  device_id INT REFERENCES meta(device_id) ON DELETE CASCADE ON UPDATE CASCADE,
  val INT);
SELECT * FROM create_hypertable('hyper', 'time', chunk_time_interval => 10);
 hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
 18 | public | hyper | t
(1 row)

ALTER TABLE hyper SET (
  timescaledb.compress,
  timescaledb.compress_orderby = 'time',
  timescaledb.compress_segmentby = 'device_id');
INSERT INTO meta VALUES (1), (2), (3), (4), (5);
INSERT INTO hyper VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 5, 2);
SELECT ch1.table_name AS "CHUNK_NAME", ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_FULL_NAME"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id AND ht.table_name LIKE 'hyper'
ORDER BY ch1.id LIMIT 1 \gset
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
 constraint_schema | constraint_name | table_schema | table_name | constraint_type
-----------------------+---------------------------+-----------------------+--------------------+-----------------
 _timescaledb_internal | 42_6_hyper_device_id_fkey | _timescaledb_internal | _hyper_18_42_chunk | FOREIGN KEY
(1 row)

SELECT compress_chunk(:'CHUNK_FULL_NAME');
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_18_42_chunk
(1 row)

SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
 constraint_schema | constraint_name | table_schema | table_name | constraint_type
-------------------+-----------------+--------------+------------+-----------------
(0 rows)

-- Deleting data directly from a compressed chunk fails
\set ON_ERROR_STOP 0
DELETE FROM hyper WHERE device_id = 3;
ERROR:  cannot update/delete rows from chunk "_hyper_18_42_chunk" as it is compressed
\set ON_ERROR_STOP 0
-- Deleting from the FK-referenced table cascades into the compressed chunk
SELECT * FROM hyper ORDER BY time, device_id;
 time | device_id | val
------+-----------+-----
 1 | 1 | 1
 2 | 2 | 1
 3 | 3 | 1
 10 | 3 | 2
 11 | 4 | 2
 11 | 5 | 2
(6 rows)

DELETE FROM meta WHERE device_id = 3;
SELECT * FROM hyper ORDER BY time, device_id;
 time | device_id | val
------+-----------+-----
 1 | 1 | 1
 2 | 2 | 1
 11 | 4 | 2
 11 | 5 | 2
(4 rows)

SELECT decompress_chunk(:'CHUNK_FULL_NAME');
             decompress_chunk
------------------------------------------
 _timescaledb_internal._hyper_18_42_chunk
(1 row)

SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
 constraint_schema | constraint_name | table_schema | table_name | constraint_type
-----------------------+---------------------------+-----------------------+--------------------+-----------------
 _timescaledb_internal | 42_9_hyper_device_id_fkey | _timescaledb_internal | _hyper_18_42_chunk | FOREIGN KEY
(1 row)
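
-- Illustrative sketch (commented out; not part of the captured run): the same
-- check can be done against pg_constraint directly on the chunk relation:
--   SELECT conname, contype
--   FROM pg_constraint
--   WHERE conrelid = '_timescaledb_internal._hyper_18_42_chunk'::regclass
--     AND contype = 'f';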

-- create hypertable with 2 chunks
CREATE TABLE ht5(time TIMESTAMPTZ NOT NULL);
SELECT create_hypertable('ht5','time');
 create_hypertable
-------------------
 (20,public,ht5,t)
(1 row)

INSERT INTO ht5 SELECT '2000-01-01'::TIMESTAMPTZ;
INSERT INTO ht5 SELECT '2001-01-01'::TIMESTAMPTZ;
-- compressed chunk stats should not show dropped chunks
ALTER TABLE ht5 SET (timescaledb.compress);
SELECT compress_chunk(i) FROM show_chunks('ht5') i;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_20_45_chunk
 _timescaledb_internal._hyper_20_46_chunk
(2 rows)

SELECT drop_chunks('ht5', newer_than => '2000-01-01'::TIMESTAMPTZ);
               drop_chunks
------------------------------------------
 _timescaledb_internal._hyper_20_46_chunk
(1 row)

select chunk_name from chunk_compression_stats('ht5')
order by chunk_name;
     chunk_name
--------------------
 _hyper_20_45_chunk
(1 row)
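
-- Illustrative sketch (commented out; not part of the captured run): the
-- hypertable-level stats function should likewise count only surviving chunks:
--   SELECT total_chunks, number_compressed_chunks
--   FROM hypertable_compression_stats('ht5');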

-- Test enabling compression for a table with compound foreign key
-- (Issue https://github.com/timescale/timescaledb/issues/2000)
CREATE TABLE table2(col1 INT, col2 int, primary key (col1,col2));
CREATE TABLE table1(col1 INT NOT NULL, col2 INT);
ALTER TABLE table1 ADD CONSTRAINT fk_table1 FOREIGN KEY (col1,col2) REFERENCES table2(col1,col2);
SELECT create_hypertable('table1','col1', chunk_time_interval => 10);
  create_hypertable
----------------------
 (22,public,table1,t)
(1 row)

-- Trying to list an incomplete set of fields of the compound key (should fail with a nice message)
ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1');
ERROR:  column "col2" must be used for segmenting
-- Listing all fields of the compound key should succeed:
ALTER TABLE table1 SET (timescaledb.compress, timescaledb.compress_segmentby = 'col1,col2');
SELECT * FROM timescaledb_information.compression_settings ORDER BY hypertable_name;
 hypertable_schema | hypertable_name | attname | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
-------------------+-----------------+-------------+------------------------+----------------------+-------------+--------------------
 public | conditions | location | 1 | | |
 public | conditions | time | | 1 | t | f
 public | datatype_test | time | | 1 | f | t
 public | foo | a | 1 | | |
 public | foo | b | 2 | | |
 public | foo | c | | 1 | f | t
 public | foo | d | | 2 | t | f
 public | ht5 | time | | 1 | f | t
 public | hyper | device_id | 1 | | |
 public | hyper | time | | 1 | t | f
 public | metrics | time | | 1 | f | t
 public | plan_inval | time | | 1 | f | t
 public | rescan_test | id | 1 | | |
 public | rescan_test | t | | 1 | f | t
 public | table1 | col1 | 1 | | |
 public | table1 | col2 | 2 | | |
 public | test_collation | device_id | 1 | | |
 public | test_collation | device_id_2 | 2 | | |
 public | test_collation | val_1 | | 1 | t | f
 public | test_collation | val_2 | | 2 | t | f
 public | test_collation | time | | 3 | t | f
(21 rows)

-- test delete/update on non-compressed tables involving hypertables with compression
CREATE TABLE uncompressed_ht (
  time timestamptz NOT NULL,
  value double precision,
  series_id integer
);
SELECT table_name FROM create_hypertable ('uncompressed_ht', 'time');
   table_name
-----------------
 uncompressed_ht
(1 row)

INSERT INTO uncompressed_ht
VALUES ('2020-04-20 01:01', 100, 1), ('2020-05-20 01:01', 100, 1), ('2020-04-20 01:01', 200, 2);
CREATE TABLE compressed_ht (
  time timestamptz NOT NULL,
  value double precision,
  series_id integer
);
SELECT table_name FROM create_hypertable ('compressed_ht', 'time');
  table_name
---------------
 compressed_ht
(1 row)

ALTER TABLE compressed_ht SET (timescaledb.compress);
INSERT INTO compressed_ht
VALUES ('2020-04-20 01:01', 100, 1), ('2020-05-20 01:01', 100, 1);
SELECT compress_chunk (ch1.schema_name || '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1,
  _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id
  AND ht.table_name LIKE 'compressed_ht'
ORDER BY ch1.id;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_25_51_chunk
 _timescaledb_internal._hyper_25_52_chunk
(2 rows)

BEGIN;
WITH compressed AS (
  SELECT series_id
  FROM compressed_ht
  WHERE time >= '2020-04-17 17:14:24.161989+00'
)
DELETE FROM uncompressed_ht
WHERE series_id IN (SELECT series_id FROM compressed);
ROLLBACK;
\set ON_ERROR_STOP 0
-- test delete inside CTE is blocked
WITH compressed AS (
  DELETE FROM compressed_ht RETURNING series_id
)
SELECT * FROM uncompressed_ht
WHERE series_id IN (SELECT series_id FROM compressed);
ERROR:  cannot update/delete rows from chunk "_hyper_25_51_chunk" as it is compressed
-- test update inside CTE is blocked
WITH compressed AS (
  UPDATE compressed_ht SET value = 0.2 RETURNING *
)
SELECT * FROM uncompressed_ht
WHERE series_id IN (SELECT series_id FROM compressed);
ERROR:  cannot update/delete rows from chunk "_hyper_25_51_chunk" as it is compressed
\set ON_ERROR_STOP 1
DROP TABLE compressed_ht;
DROP TABLE uncompressed_ht;
-- Test that pg_stats and pg_class stats for uncompressed chunks are frozen at compression time
-- Note that approximate_row_count pulls from pg_class
CREATE TABLE stattest(time TIMESTAMPTZ NOT NULL, c1 int);
SELECT create_hypertable('stattest', 'time');
   create_hypertable
------------------------
 (27,public,stattest,t)
(1 row)

INSERT INTO stattest SELECT '2020/02/20 01:00'::TIMESTAMPTZ + ('1 hour'::interval * v), 250 * v FROM generate_series(0,25) v;
SELECT table_name INTO TEMPORARY temptable FROM _timescaledb_catalog.chunk WHERE hypertable_id = (SELECT id FROM _timescaledb_catalog.hypertable WHERE table_name = 'stattest');
\set statchunk '(select table_name from temptable)'
SELECT * FROM pg_stats WHERE tablename = :statchunk;
 schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation | most_common_elems | most_common_elem_freqs | elem_count_histogram
------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+------------------+-------------+-------------------+------------------------+----------------------
(0 rows)

ALTER TABLE stattest SET (timescaledb.compress);
SELECT approximate_row_count('stattest');
 approximate_row_count
-----------------------
 0
(1 row)

SELECT compress_chunk(c) FROM show_chunks('stattest') c;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_27_55_chunk
(1 row)

SELECT approximate_row_count('stattest');
 approximate_row_count
-----------------------
 0
(1 row)

-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE was run
SELECT relpages, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END as reltuples FROM pg_class WHERE relname = :statchunk;
 relpages | reltuples
----------+-----------
 0 | 0
(1 row)

SELECT histogram_bounds FROM pg_stats WHERE tablename = :statchunk AND attname = 'c1';
                                                        histogram_bounds
-------------------------------------------------------------------------------------------------------------------------------
 {0,250,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000,4250,4500,4750,5000,5250,5500,5750,6000,6250}
(1 row)

SELECT compch.table_name as "STAT_COMP_CHUNK_NAME"
FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch
, _timescaledb_catalog.chunk compch
WHERE ht.table_name = 'stattest' AND ch.hypertable_id = ht.id
AND compch.id = ch.compressed_chunk_id AND ch.compressed_chunk_id > 0 \gset
-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE was run
SELECT relpages, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples FROM pg_class WHERE relname = :'STAT_COMP_CHUNK_NAME';
 relpages | reltuples
----------+-----------
 0 | 0
(1 row)

-- Now verify stats are not changed when we analyze the hypertable
ANALYZE stattest;
SELECT histogram_bounds FROM pg_stats WHERE tablename = :statchunk AND attname = 'c1';
                                                        histogram_bounds
-------------------------------------------------------------------------------------------------------------------------------
 {0,250,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000,4250,4500,4750,5000,5250,5500,5750,6000,6250}
(1 row)

-- Unfortunately, the stats on the hypertable won't find any rows to sample from the chunk
SELECT histogram_bounds FROM pg_stats WHERE tablename = 'stattest' AND attname = 'c1';
 histogram_bounds
------------------
(0 rows)

-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE was run
SELECT relpages, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END as reltuples FROM pg_class WHERE relname = :statchunk;
 relpages | reltuples
----------+-----------
 0 | 0
(1 row)

-- verify that the corresponding compressed chunk's stats are updated as well.
-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE was run
SELECT relpages, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END as reltuples FROM pg_class WHERE relname = :'STAT_COMP_CHUNK_NAME';
 relpages | reltuples
----------+-----------
 0 | 0
(1 row)

|
|
-- Verify that even a global analyze doesn't affect the chunk stats, changing message scope here
|
|
-- to hide WARNINGs for skipped tables
|
|
SET client_min_messages TO ERROR;
|
|
ANALYZE;
|
|
SET client_min_messages TO NOTICE;
|
|
SELECT histogram_bounds FROM pg_stats WHERE tablename = :statchunk AND attname = 'c1';
|
|
histogram_bounds
|
|
-------------------------------------------------------------------------------------------------------------------------------
|
|
{0,250,500,750,1000,1250,1500,1750,2000,2250,2500,2750,3000,3250,3500,3750,4000,4250,4500,4750,5000,5250,5500,5750,6000,6250}
|
|
(1 row)
|
|
|
|
SELECT relpages, reltuples FROM pg_class WHERE relname = :statchunk;
|
|
relpages | reltuples
|
|
----------+-----------
|
|
0 | 0
|
|
(1 row)
|
|
|
|
-- Verify that decompressing the chunk restores autoanalyze to the hypertable's setting
SELECT reloptions FROM pg_class WHERE relname = :statchunk;
 reloptions
------------
 
(1 row)

SELECT decompress_chunk(c) FROM show_chunks('stattest') c;
             decompress_chunk
------------------------------------------
 _timescaledb_internal._hyper_27_55_chunk
(1 row)

SELECT reloptions FROM pg_class WHERE relname = :statchunk;
 reloptions
------------
 
(1 row)

SELECT compress_chunk(c) FROM show_chunks('stattest') c;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_27_55_chunk
(1 row)

SELECT reloptions FROM pg_class WHERE relname = :statchunk;
 reloptions
------------
 
(1 row)

ALTER TABLE stattest SET (autovacuum_enabled = false);
SELECT decompress_chunk(c) FROM show_chunks('stattest') c;
             decompress_chunk
------------------------------------------
 _timescaledb_internal._hyper_27_55_chunk
(1 row)

SELECT reloptions FROM pg_class WHERE relname = :statchunk;
         reloptions
----------------------------
 {autovacuum_enabled=false}
(1 row)

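-- With the autovacuum change, the decompressed chunk now simply inherits the
-- hypertable's explicit autovacuum_enabled = false setting instead of having
-- autovacuum force-disabled by TimescaleDB itself. To undo the setting one
-- would reset the storage parameter (sketch, not part of the original test):
-- ALTER TABLE stattest RESET (autovacuum_enabled);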
DROP TABLE stattest;
--- Test that analyze on compression internal table updates stats on original chunks
CREATE TABLE stattest2(time TIMESTAMPTZ NOT NULL, c1 int, c2 int);
SELECT create_hypertable('stattest2', 'time', chunk_time_interval=>'1 day'::interval);
    create_hypertable
-------------------------
 (29,public,stattest2,t)
(1 row)

ALTER TABLE stattest2 SET (timescaledb.compress, timescaledb.compress_segmentby='c1');
INSERT INTO stattest2 SELECT '2020/06/20 01:00'::TIMESTAMPTZ ,1 , generate_series(1, 200, 1);
INSERT INTO stattest2 SELECT '2020/07/20 01:00'::TIMESTAMPTZ ,1 , generate_series(1, 200, 1);
SELECT compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id and ht.table_name like 'stattest2'
ORDER BY ch1.id limit 1;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_29_58_chunk
(1 row)

-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE has been run
SELECT relname, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = 'stattest2' AND ch.hypertable_id = ht.id )
order by relname;
      relname       | reltuples | relpages | relallvisible
--------------------+-----------+----------+---------------
 _hyper_29_58_chunk |         0 |        0 |             0
 _hyper_29_59_chunk |         0 |        0 |             0
(2 rows)

\c :TEST_DBNAME :ROLE_SUPERUSER
--overwrite pg_class stats for the compressed chunk.
UPDATE pg_class
SET reltuples = 0, relpages = 0
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch,
_timescaledb_catalog.hypertable ht
WHERE ht.table_name = 'stattest2' AND ch.hypertable_id = ht.id
AND ch.compressed_chunk_id > 0 );
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
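-- Direct UPDATEs of system catalogs such as pg_class require superuser
-- privileges, which is why the test reconnects as :ROLE_SUPERUSER above and
-- switches back to :ROLE_DEFAULT_PERM_USER afterwards.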
-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE has been run
SELECT relname, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = 'stattest2' AND ch.hypertable_id = ht.id )
order by relname;
      relname       | reltuples | relpages | relallvisible
--------------------+-----------+----------+---------------
 _hyper_29_58_chunk |         0 |        0 |             0
 _hyper_29_59_chunk |         0 |        0 |             0
(2 rows)

SELECT '_timescaledb_internal.' || compht.table_name as "STAT_COMP_TABLE",
       compht.table_name as "STAT_COMP_TABLE_NAME"
FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable compht
WHERE ht.table_name = 'stattest2' AND ht.compressed_hypertable_id = compht.id \gset
--analyze the compressed table; this will update stats for the raw table.
ANALYZE :STAT_COMP_TABLE;
-- reltuples is initially -1 on PG14 before VACUUM/ANALYZE has been run
SELECT relname, CASE WHEN reltuples > 0 THEN reltuples ELSE 0 END AS reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = 'stattest2' AND ch.hypertable_id = ht.id )
ORDER BY relname;
      relname       | reltuples | relpages | relallvisible
--------------------+-----------+----------+---------------
 _hyper_29_58_chunk |         0 |        0 |             0
 _hyper_29_59_chunk |         0 |        0 |             0
(2 rows)

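-- psql's \gset stores each column of the preceding query's single result row
-- into a variable named after the column, so ANALYZE :STAT_COMP_TABLE above
-- and :'STAT_COMP_TABLE_NAME' below both refer to the internal compressed
-- hypertable (the :'...' form adds quoting for use as a literal).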
SELECT relname, reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = :'STAT_COMP_TABLE_NAME' AND ch.hypertable_id = ht.id )
ORDER BY relname;
          relname           | reltuples | relpages | relallvisible
----------------------------+-----------+----------+---------------
 compress_hyper_30_60_chunk |         1 |        1 |             0
(1 row)

--analyze on stattest2 should not overwrite the compressed chunk's stats
ANALYZE stattest2;
SELECT relname, reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = 'stattest2' AND ch.hypertable_id = ht.id )
ORDER BY relname;
      relname       | reltuples | relpages | relallvisible
--------------------+-----------+----------+---------------
 _hyper_29_58_chunk |         0 |        0 |             0
 _hyper_29_59_chunk |       200 |        2 |             0
(2 rows)

SELECT relname, reltuples, relpages, relallvisible FROM pg_class
WHERE relname in ( SELECT ch.table_name FROM
_timescaledb_catalog.chunk ch, _timescaledb_catalog.hypertable ht
WHERE ht.table_name = :'STAT_COMP_TABLE_NAME' AND ch.hypertable_id = ht.id )
ORDER BY relname;
          relname           | reltuples | relpages | relallvisible
----------------------------+-----------+----------+---------------
 compress_hyper_30_60_chunk |         1 |        1 |             0
(1 row)

-- analyze on compressed hypertable should restore stats
-- Test approximate_row_count() with compressed hypertable
--
CREATE TABLE approx_count(time timestamptz not null, device int, temp float);
SELECT create_hypertable('approx_count', 'time');
     create_hypertable
----------------------------
 (31,public,approx_count,t)
(1 row)

INSERT INTO approx_count SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, random()*80
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-04 1:00', '1 hour') t;
SELECT count(*) FROM approx_count;
 count
-------
    49
(1 row)

ALTER TABLE approx_count SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
SELECT approximate_row_count('approx_count');
 approximate_row_count
-----------------------
                     0
(1 row)

ANALYZE approx_count;
SELECT approximate_row_count('approx_count');
 approximate_row_count
-----------------------
                    49
(1 row)

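-- approximate_row_count() is driven by catalog statistics, so it reports 0
-- until ANALYZE has populated them; afterwards it matches the exact count
-- here because the table is tiny. Sketch of a side-by-side check (not part
-- of the original test):
-- SELECT approximate_row_count('approx_count') AS approx,
--        (SELECT count(*) FROM approx_count) AS exact;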
DROP TABLE approx_count;
--TEST drop_chunks from a compressed hypertable (that has caggs defined).
-- chunk metadata is still retained. verify correct status for chunk
SELECT count(*)
FROM (SELECT compress_chunk(ch) FROM show_chunks('metrics') ch ) q;
 count
-------
     2
(1 row)

SELECT drop_chunks('metrics', older_than=>'1 day'::interval);
               drop_chunks
------------------------------------------
 _timescaledb_internal._hyper_13_33_chunk
 _timescaledb_internal._hyper_13_34_chunk
(2 rows)

SELECT
  c.table_name as chunk_name,
  c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
     chunk_name     | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
 _hyper_13_33_chunk |            0 | t       |
 _hyper_13_34_chunk |            0 | t       |
(2 rows)

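-- Because a continuous aggregate is defined on 'metrics', drop_chunks keeps
-- the chunk catalog rows and only marks them dropped = t (status 0, no
-- compressed_chunk_id), as the comment above notes. A quick way to list such
-- chunks (sketch, not part of the original test):
-- SELECT table_name FROM _timescaledb_catalog.chunk WHERE dropped;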
SELECT "time", cnt FROM cagg_expr ORDER BY time LIMIT 5;
|
|
time | cnt
|
|
------------------------------+------
|
|
Fri Dec 31 16:00:00 1999 PST | 960
|
|
Sat Jan 01 16:00:00 2000 PST | 1440
|
|
Sun Jan 02 16:00:00 2000 PST | 1440
|
|
Mon Jan 03 16:00:00 2000 PST | 1440
|
|
Tue Jan 04 16:00:00 2000 PST | 1440
|
|
(5 rows)
|
|
|
|
--now reload data into the dropped chunks region, then compress
-- then verify chunk status/dropped column
INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75;
SELECT count(*)
FROM (SELECT compress_chunk(ch) FROM show_chunks('metrics') ch) q;
 count
-------
     2
(1 row)

SELECT
  c.table_name as chunk_name,
  c.status as chunk_status, c.dropped, c.compressed_chunk_id as comp_id
FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.chunk c
WHERE h.id = c.hypertable_id and h.table_name = 'metrics'
ORDER BY 1;
     chunk_name     | chunk_status | dropped | comp_id
--------------------+--------------+---------+---------
 _hyper_13_33_chunk |            1 | f       |      64
 _hyper_13_34_chunk |            1 | f       |      65
(2 rows)

SELECT count(*) FROM metrics;
 count
-------
 12961
(1 row)

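-- 12961 rows = one row per minute from 2000-01-01 to 2000-01-10 inclusive
-- (9 days * 1440 minutes + 1 endpoint), confirming the dropped region was
-- repopulated and recompressed.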
-- test sequence number is local to segment by
CREATE TABLE local_seq(time timestamptz, device int);
SELECT table_name FROM create_hypertable('local_seq','time');
NOTICE: adding not-null constraint to column "time"
 table_name
------------
 local_seq
(1 row)

ALTER TABLE local_seq SET(timescaledb.compress,timescaledb.compress_segmentby='device');
INSERT INTO local_seq SELECT '2000-01-01',1 FROM generate_series(1,3000);
INSERT INTO local_seq SELECT '2000-01-01',2 FROM generate_series(1,3500);
INSERT INTO local_seq SELECT '2000-01-01',3 FROM generate_series(1,3000);
INSERT INTO local_seq SELECT '2000-01-01',4 FROM generate_series(1,3000);
INSERT INTO local_seq SELECT '2000-01-01', generate_series(5,8);
SELECT compress_chunk(c) FROM show_chunks('local_seq') c;
              compress_chunk
------------------------------------------
 _timescaledb_internal._hyper_33_66_chunk
(1 row)

SELECT
  format('%s.%s',chunk.schema_name,chunk.table_name) AS "COMP_CHUNK"
FROM _timescaledb_catalog.hypertable ht
INNER JOIN _timescaledb_catalog.hypertable ht_comp ON ht_comp.id = ht.compressed_hypertable_id
INNER JOIN _timescaledb_catalog.chunk ON chunk.hypertable_id = ht_comp.id
WHERE ht.table_name = 'local_seq' \gset
SELECT device, _ts_meta_sequence_num, _ts_meta_count FROM :COMP_CHUNK ORDER BY 1,2;
 device | _ts_meta_sequence_num | _ts_meta_count
--------+-----------------------+----------------
      1 |                    10 |           1000
      1 |                    20 |           1000
      1 |                    30 |           1000
      2 |                    10 |           1000
      2 |                    20 |           1000
      2 |                    30 |           1000
      2 |                    40 |            500
      3 |                    10 |           1000
      3 |                    20 |           1000
      3 |                    30 |           1000
      4 |                    10 |           1000
      4 |                    20 |           1000
      4 |                    30 |           1000
      5 |                    10 |              1
      6 |                    10 |              1
      7 |                    10 |              1
      8 |                    10 |              1
(17 rows)

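-- The output shows the sequence numbering scheme: each compressed batch holds
-- at most 1000 rows, _ts_meta_sequence_num starts at 10 and advances in steps
-- of 10, and the numbering restarts for every segmentby value ("local to
-- segment by"). Device 2's 3500 rows therefore become three full batches of
-- 1000 plus a final batch of 500 at sequence number 40.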
-- github issue 4872
-- If subplan of ConstraintAwareAppend is TidRangeScan, then SELECT on
-- hypertable fails with error "invalid child of chunk append: Node (26)"
CREATE TABLE tidrangescan_test(time timestamptz, device_id int, v1 float, v2 float);
SELECT create_hypertable('tidrangescan_test','time');
NOTICE: adding not-null constraint to column "time"
        create_hypertable
---------------------------------
 (35,public,tidrangescan_test,t)
(1 row)

INSERT INTO tidrangescan_test SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75;
CREATE MATERIALIZED VIEW tidrangescan_expr WITH (timescaledb.continuous)
AS
SELECT
  time_bucket('1d', time) AS time,
  'Const'::text AS Const,
  4.3::numeric AS "numeric",
  first(tidrangescan_test,time),
  CASE WHEN true THEN 'foo' ELSE 'bar' END,
  COALESCE(NULL,'coalesce'),
  avg(v1) + avg(v2) AS avg1,
  avg(v1+v2) AS avg2,
  count(*) AS cnt
FROM tidrangescan_test
WHERE ctid < '(1,1)'::tid GROUP BY 1 WITH NO DATA;
CALL refresh_continuous_aggregate('tidrangescan_expr', NULL, NULL);
SET timescaledb.enable_chunk_append to off;
SET enable_indexscan to off;
SELECT time, const, numeric,first, avg1, avg2 FROM tidrangescan_expr ORDER BY time LIMIT 5;
             time             | const | numeric |                    first                     | avg1 | avg2
------------------------------+-------+---------+----------------------------------------------+------+------
 Fri Dec 31 16:00:00 1999 PST | Const |     4.3 | ("Sat Jan 01 00:00:00 2000 PST",1,0.25,0.75) |    1 |    1
 Wed Jan 05 16:00:00 2000 PST | Const |     4.3 | ("Wed Jan 05 16:00:00 2000 PST",1,0.25,0.75) |    1 |    1
(2 rows)

RESET timescaledb.enable_chunk_append;
RESET enable_indexscan;
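-- With chunk append and index scans disabled, the ctid < '(1,1)' predicate
-- makes a TidRangeScan the likely subplan under ConstraintAwareAppend, which
-- is the plan shape that triggered issue 4872; the SELECT above completing
-- without the "invalid child of chunk append" error is the regression check.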