Mirror of https://github.com/timescale/timescaledb.git, synced 2025-05-17 11:03:36 +08:00
Reduce runtime of tests based on setup_hyperstore
This commit reduces the number of tuples inserted into the hyperstore table to cut test runtime, and also fixes `hyperstore_scans`. For `hyperstore_scans` the number of locations has to be reduced as well, since we want to trigger dictionary compression and make sure that it works for that case too.
This commit is contained in:
parent 46bf07d727
commit 8be54d759d
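The gist of the recurring setup hunk below: the inserts move from 10-second to 5-minute spacing. June 2022 spans 2,592,000 seconds, so each generate_series call now produces 2,592,000/300 + 1 = 8,641 rows instead of 2,592,000/10 + 1 = 259,201, a 30x reduction per call. A sketch assembled from the hunks below (not copied verbatim from any single file):

    insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
    select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
    -- was '10s': 5-minute spacing cuts the inserted rows by a factor of 30
    from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;

For hyperstore_scans, the readings setup also drops from 10 to 3 distinct locations, so the text-typed location column has few enough distinct values that dictionary compression kicks in and gets exercised:

    insert into readings (time, location, device, temp, humidity)
    -- was ceil(random()*10): 3 distinct locations trigger dictionary compression
    select t, ceil(random()*3), ceil(random()*30), random()*40, random()*100
    from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;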
@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',

@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',

@@ -87,7 +87,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',
@@ -305,28 +305,32 @@ select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
 from :chunk2 order by location_id, created_at desc limit 2;
 is_compressed_tid | created_at | location_id | temp
 -------------------+------------------------------+-------------+------------------
-t | Wed Jun 08 16:57:50 2022 PDT | 1 | 4.61673551524566
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
+t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
+t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
 (2 rows)
 
+-- find a compressed tuple in a deterministic manner and get location and timestamp
+select created_at, location_id
+from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
+order by created_at, location_id limit 1 \gset
 -- first update moves the value from the compressed rel to the non-compressed (seen via ctid)
-update :hypertable set temp=1.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
+update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';
 select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
 from :chunk2 order by location_id, created_at desc limit 2;
 is_compressed_tid | created_at | location_id | temp
 -------------------+------------------------------+-------------+------------------
-f | Wed Jun 08 16:57:50 2022 PDT | 1 | 1
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
+t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
+t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
 (2 rows)
 
 -- second update should be a hot update (tuple in same block after update, as shown by ctid)
-update :hypertable set temp=2.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
+update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';
 select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
 from :chunk2 order by location_id, created_at desc limit 2;
 is_compressed_tid | created_at | location_id | temp
 -------------------+------------------------------+-------------+------------------
-f | Wed Jun 08 16:57:50 2022 PDT | 1 | 2
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
+t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
+t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
 (2 rows)
 
 -- make sure query uses a segmentby index and returns the correct data for the update value
@@ -343,7 +347,7 @@ $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 created_at | location_id | temp
 ------------------------------+-------------+------
-Wed Jun 08 16:57:50 2022 PDT | 1 | 2
+Wed Jun 01 17:00:00 2022 PDT | 1 | 2
 (1 row)
 
 select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
@@ -397,57 +401,65 @@ select explain_analyze_anonymize(format($$
 select device_id, avg(temp) from %s where device_id between 10 and 20
 group by device_id
 $$, :'hypertable'));
-explain_analyze_anonymize
---------------------------------------------------------------------------------------------------------------------------------
-Finalize GroupAggregate (actual rows=N loops=N)
--> Sort (actual rows=N loops=N)
-Sort Key: _hyper_I_N_chunk.device_id
-Sort Method: quicksort
--> Append (actual rows=N loops=N)
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
+explain_analyze_anonymize
+--------------------------------------------------------------------------------------------------------------------------
+Finalize HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Append (actual rows=N loops=N)
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Vectorized Filter: ((device_id >= 10) AND (device_id <= 20))
+Rows Removed by Filter: 133
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
+Index Cond: ((device_id >= 10) AND (device_id <= 20))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
+Index Cond: ((device_id >= 10) AND (device_id <= 20))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
+Index Cond: ((device_id >= 10) AND (device_id <= 20))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
+Index Cond: ((device_id >= 10) AND (device_id <= 20))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Vectorized Filter: ((device_id >= 10) AND (device_id <= 20))
+Rows Removed by Filter: 234
 Arrays read from cache: N
-Arrays decompressed: 1000
-(25 rows)
+Arrays decompressed: 578
+(31 rows)
 
 select explain_analyze_anonymize(format($$
 select device_id, avg(temp) from %s where device_id between 10 and 20
 group by device_id
 $$, :'chunk1'));
-explain_analyze_anonymize
---------------------------------------------------------------------------------------------------------------
-GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((device_id >= 10) AND (device_id <= 20))
+explain_analyze_anonymize
+------------------------------------------------------------------------------
+HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Vectorized Filter: ((device_id >= 10) AND (device_id <= 20))
+Rows Removed by Filter: 133
 Arrays read from cache: N
-Arrays decompressed: 201
-(5 rows)
+Arrays decompressed: 83
+(7 rows)
 
 -- Test index scan on segmentby column
 select explain_analyze_anonymize(format($$
 select created_at, location_id, temp from %s where location_id between 5 and 10
 $$, :'hypertable'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------
+explain_analyze_anonymize
+----------------------------------------------------------------------------------------------------------------
 Append (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
@@ -456,19 +468,19 @@ $$, :'hypertable'));
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
-Arrays decompressed: 600
+Arrays decompressed: 360
 (15 rows)
 
 select explain_analyze_anonymize(format($$
 select created_at, location_id, temp from %s where location_id between 5 and 10
 $$, :'chunk1'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------
-Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+explain_analyze_anonymize
+------------------------------------------------------------------------
+Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
 Arrays decompressed: 60
 (4 rows)
@@ -490,8 +502,8 @@ $$, :'hypertable'));
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
 Arrays decompressed: 0
 (15 rows)
@@ -499,10 +511,10 @@ $$, :'hypertable'));
 select explain_analyze_anonymize(format($$
 select * from %s where location_id between 5 and 10
 $$, :'chunk1'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------
-Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+explain_analyze_anonymize
+------------------------------------------------------------------------
+Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
 Arrays decompressed: 0
 (4 rows)
@@ -510,6 +522,7 @@ $$, :'chunk1'));
 --
 -- Test index only scan
 --
+vacuum analyze :hypertable;
 create table saved_hypertable as select * from :hypertable;
 -- This will not use index-only scan because it is using a segment-by
 -- column, but we check that it works as expected though.
@@ -519,11 +532,11 @@ create table saved_hypertable as select * from :hypertable;
 select explain_analyze_anonymize(format($$
 select location_id from %s where location_id between 5 and 10
 $$, :'hypertable'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------
+explain_analyze_anonymize
+----------------------------------------------------------------------------------------------------------------
 Append (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
@@ -532,8 +545,8 @@ $$, :'hypertable'));
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
 Arrays decompressed: 0
 (15 rows)
@@ -544,12 +557,10 @@ select heapam.count as heapam, hyperstore.count as hyperstore
 (select count(location_id) from :hypertable where location_id between 5 and 10) hyperstore;
 heapam | hyperstore
 --------+------------
-155776 | 155776
+5126 | 5126
 (1 row)
 
 drop table saved_hypertable;
-\echo == This should use index-only scan ==
-== This should use index-only scan ==
 select explain_analyze_anonymize(format($$
 select device_id from %s where device_id between 5 and 10
 $$, :'hypertable'));
@@ -581,10 +592,10 @@ $$, :'hypertable'));
 select explain_analyze_anonymize(format($$
 select location_id from %s where location_id between 5 and 10
 $$, :'chunk1'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------
-Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+explain_analyze_anonymize
+------------------------------------------------------------------------
+Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
 Arrays decompressed: 0
 (4 rows)
@@ -601,39 +612,50 @@ $$, :'chunk1'));
 Arrays decompressed: 0
 (5 rows)
 
--- Test index only scan with covering indexes
+-- Test index only scan with covering indexes.
+--
+-- Analyze will run the queries so we are satisfied with this right
+-- now and do not run the queries separately since they can generate
+-- different results depending on table contents.
 select explain_analyze_anonymize(format($$
 select location_id, avg(humidity) from %s where location_id between 5 and 10
 group by location_id order by location_id
 $$, :'hypertable'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------------------------
-Finalize GroupAggregate (actual rows=N loops=N)
--> Sort (actual rows=N loops=N)
-Sort Key: _hyper_I_N_chunk.location_id
-Sort Method: quicksort
+explain_analyze_anonymize
+----------------------------------------------------------------------------------------------------------------------------------
+Sort (actual rows=N loops=N)
+Sort Key: _hyper_I_N_chunk.location_id
+Sort Method: quicksort
+-> Finalize HashAggregate (actual rows=N loops=N)
+Batches: 1
 -> Append (actual rows=N loops=N)
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Partial GroupAggregate (actual rows=N loops=N)
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Partial GroupAggregate (actual rows=N loops=N)
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Partial GroupAggregate (actual rows=N loops=N)
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Partial GroupAggregate (actual rows=N loops=N)
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: ((location_id >= 5) AND (location_id <= 10))
--> Partial GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+-> Partial HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
-Arrays decompressed: 300
-(25 rows)
+Arrays decompressed: 180
+(32 rows)
 
 select explain_analyze_anonymize(format($$
 select device_id, avg(humidity) from %s where device_id between 5 and 10
@@ -678,14 +700,18 @@ select explain_analyze_anonymize(format($$
 select location_id, avg(humidity) from %s where location_id between 5 and 10
 group by location_id order by location_id
 $$, :'chunk1'));
-explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------
-GroupAggregate (actual rows=N loops=N)
--> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-Index Cond: ((location_id >= 5) AND (location_id <= 10))
+explain_analyze_anonymize
+------------------------------------------------------------------------------------
+Sort (actual rows=N loops=N)
+Sort Key: location_id
+Sort Method: quicksort
+-> HashAggregate (actual rows=N loops=N)
+Batches: 1
+-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
+Scankey: ((location_id >= 5) AND (location_id <= 10))
 Arrays read from cache: N
-Arrays decompressed: 150
-(5 rows)
+Arrays decompressed: 30
+(9 rows)
 
 select explain_analyze_anonymize(format($$
 select device_id, avg(humidity) from %s where device_id between 5 and 10
@@ -701,54 +727,6 @@ $$, :'chunk1'));
 Arrays decompressed: 0
 (6 rows)
 
-select location_id, round(avg(humidity)) from :hypertable where location_id between 5 and 10
-group by location_id order by location_id;
-location_id | round
--------------+-------
-5 | 50
-6 | 50
-7 | 50
-8 | 50
-9 | 50
-10 | 50
-(6 rows)
-
-select location_id, round(avg(humidity)) from :chunk1 where location_id between 5 and 10
-group by location_id order by location_id;
-location_id | round
--------------+-------
-5 | 50
-6 | 51
-7 | 51
-8 | 51
-9 | 51
-10 | 49
-(6 rows)
-
-select device_id, round(avg(humidity)) from :hypertable where device_id between 5 and 10
-group by device_id order by device_id;
-device_id | round
------------+-------
-5 | 50
-6 | 50
-7 | 50
-8 | 50
-9 | 50
-10 | 50
-(6 rows)
-
-select device_id, round(avg(humidity)) from :chunk1 where device_id between 5 and 10
-group by device_id order by device_id;
-device_id | round
------------+-------
-5 | 52
-6 | 49
-7 | 53
-8 | 45
-9 | 49
-10 | 54
-(6 rows)
-
 -------------------------------------
 -- Test UNIQUE and Partial indexes --
 -------------------------------------

@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',
@@ -107,6 +107,9 @@ set max_parallel_workers_per_gather to 0;
 -- Redefine the indexes to use hash indexes
 drop index hypertable_location_id_idx;
 drop index hypertable_device_id_idx;
+-- Discourage sequence scan when there are alternatives to avoid flaky
+-- tests.
+set enable_seqscan to false;
 create index hypertable_location_id_idx on :hypertable using hash (location_id);
 create index hypertable_device_id_idx on :hypertable using hash (device_id);
 create view chunk_indexes as
@@ -169,45 +172,34 @@ alter table :chunk2 set access method hyperstore;
 --
 -- test that indexes work after updates
 --
-select _timescaledb_debug.is_compressed_tid(ctid),
-created_at,
-location_id,
-temp
-from :chunk2
-order by location_id, created_at desc limit 2;
-is_compressed_tid | created_at | location_id | temp
--------------------+------------------------------+-------------+------------------
-t | Wed Jun 08 16:57:50 2022 PDT | 1 | 4.61673551524566
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
-(2 rows)
+-- find a compressed tuple in a deterministic manner and get location and timestamp
+select created_at, location_id
+from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
+order by created_at, location_id limit 1 \gset
+select _timescaledb_debug.is_compressed_tid(ctid), *
+from :chunk2 where location_id = :location_id and created_at = :'created_at';
+is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-------------------+-----------+------------------------------+-------------+----------+-----------+------------------+----------
+t | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 39.9895349415735 | 49.7787
+(1 row)
 
 -- first update moves the value from the compressed rel to the non-compressed (seen via ctid)
-update :hypertable set temp=1.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
-select _timescaledb_debug.is_compressed_tid(ctid),
-created_at,
-location_id,
-temp
-from :chunk2
-order by location_id, created_at desc limit 2;
-is_compressed_tid | created_at | location_id | temp
--------------------+------------------------------+-------------+------------------
-f | Wed Jun 08 16:57:50 2022 PDT | 1 | 1
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
-(2 rows)
+update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';
+select _timescaledb_debug.is_compressed_tid(ctid), *
+from :chunk2 where location_id = :location_id and created_at = :'created_at';
+is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-------------------+-----------+------------------------------+-------------+----------+-----------+------+----------
+f | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 1 | 49.7787
+(1 row)
 
 -- second update should be a hot update (tuple in same block after update, as shown by ctid)
-update :hypertable set temp=2.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
-select _timescaledb_debug.is_compressed_tid(ctid),
-created_at,
-location_id,
-temp
-from :chunk2
-order by location_id, created_at desc limit 2;
-is_compressed_tid | created_at | location_id | temp
--------------------+------------------------------+-------------+------------------
-f | Wed Jun 08 16:57:50 2022 PDT | 1 | 2
-t | Wed Jun 08 16:56:40 2022 PDT | 1 | 38.0183806703047
-(2 rows)
+update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';
+select _timescaledb_debug.is_compressed_tid(ctid), *
+from :chunk2 where location_id = :location_id and created_at = :'created_at';
+is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-------------------+-----------+------------------------------+-------------+----------+-----------+------+----------
+f | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 2 | 49.7787
+(1 row)
 
 -- make sure query uses a segmentby index and returns the correct data for the update value
 select explain_anonymize(format($$
@@ -223,7 +215,7 @@ $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 created_at | location_id | temp
 ------------------------------+-------------+------
-Wed Jun 08 16:57:50 2022 PDT | 1 | 2
+Wed Jun 01 17:00:00 2022 PDT | 1 | 2
 (1 row)
 
 select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
@@ -266,7 +258,7 @@ $$, :'hypertable'));
 -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: (device_id = 10)
 Arrays read from cache: N
-Arrays decompressed: 998
+Arrays decompressed: 320
 (22 rows)
 
 select explain_analyze_anonymize(format($$
@@ -279,7 +271,7 @@ $$, :'chunk1'));
 -> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: (device_id = 10)
 Arrays read from cache: N
-Arrays decompressed: 197
+Arrays decompressed: 17
 (5 rows)
 
 -- Test index scan on segmentby column
@@ -302,7 +294,7 @@ $$, :'hypertable'));
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: (location_id = 5)
 Arrays read from cache: N
-Arrays decompressed: 100
+Arrays decompressed: 60
 (15 rows)
 
 select explain_analyze_anonymize(format($$
@@ -336,7 +328,7 @@ $$, :'hypertable'));
 -> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
 Index Cond: (location_id = 5)
 Arrays read from cache: N
-Arrays decompressed: 100
+Arrays decompressed: 60
 (15 rows)
 
 select explain_analyze_anonymize(format($$

@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',
@@ -205,8 +205,8 @@ select location_id, count(*) into curr from :hypertable GROUP BY location_id;
 select * from :hypertable where created_at between '2022-06-01 00:00:01' and '2022-06-01 00:00:09';
 metric_id | created_at | location_id | owner_id | device_id | temp | humidity
 -----------+------------------------------+-------------+----------+-----------+------+----------
-259204 | Wed Jun 01 00:00:02 2022 PDT | 1 | | 1 | 1 | 1
-259205 | Wed Jun 01 00:00:03 2022 PDT | 1 | | 1 | 1 | 1
+8644 | Wed Jun 01 00:00:02 2022 PDT | 1 | | 1 | 1 | 1
+8645 | Wed Jun 01 00:00:03 2022 PDT | 1 | | 1 | 1 | 1
 (2 rows)
 
 select * from orig join curr using (location_id) where orig.count != curr.count;
@@ -238,14 +238,37 @@ drop table orig, curr;
 -- We have tested this above, but since different code paths are used
 -- for DO UPDATE, DO NOTHING, and plain inserts, we test this as well
 -- to be safe.
--- Insert of a value that exists in the compressed part.
+-- find a compressed tuple in a deterministic manner and get the
+-- timestamp. Make sure to find one in chunk1 since we will use that
+-- later.
+select created_at
+from :chunk1 where _timescaledb_debug.is_compressed_tid(ctid)
+order by created_at limit 1 \gset
+select * from :hypertable where created_at = :'created_at';
+metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-----------+------------------------------+-------------+----------+-----------+------------------+----------
+1 | Wed Jun 01 00:00:00 2022 PDT | 4 | 2 | 23 | 16.4320374922463 | 55.2454
+(1 row)
+
+-- Insert of a value that exists in the compressed part should work
+-- when done through the hypertable.
 insert into :hypertable(created_at, location_id, device_id, temp, humidity)
-values ('2022-06-01 00:00:00', 11, 1, 1.0, 1.0)
+values (:'created_at', 11, 1, 1.0, 1.0)
 on conflict (created_at) do update set location_id = 12;
--- TODO(timescale/timescaledb-private#1087): Inserts directly into chunks do not work.
+select * from :hypertable where created_at = :'created_at';
+metric_id | created_at | location_id | owner_id | device_id | temp | humidity
+-----------+------------------------------+-------------+----------+-----------+------------------+----------
+1 | Wed Jun 01 00:00:00 2022 PDT | 12 | 2 | 23 | 16.4320374922463 | 55.2454
+(1 row)
+
+-- TODO(timescale/timescaledb-private#1087): Inserts directly into a
+-- compressed tuple in a chunk do not work.
+select created_at
+from :chunk1 where _timescaledb_debug.is_compressed_tid(ctid)
+order by created_at limit 1 \gset
 \set ON_ERROR_STOP 0
 insert into :chunk1(created_at, location_id, device_id, temp, humidity)
-values ('2022-06-01 00:00:10', 13, 1, 1.0, 1.0)
+values (:'created_at', 13, 1, 1.0, 1.0)
 on conflict (created_at) do update set location_id = 14;
 ERROR: cannot update compressed tuple
 \set ON_ERROR_STOP 1
@@ -269,8 +292,8 @@ order by location_id;
 metric_id | created_at | location_id | owner_id | device_id | temp | humidity
 -----------+------------------------------+-------------+----------+-----------+------------------+----------
 1 | Wed Jun 01 00:00:00 2022 PDT | 12 | 2 | 23 | 16.4320374922463 | 55.2454
-259204 | Wed Jun 01 00:00:02 2022 PDT | 20 | | 1 | 1 | 1
-259205 | Wed Jun 01 00:00:03 2022 PDT | 22 | | 1 | 1 | 1
+8644 | Wed Jun 01 00:00:02 2022 PDT | 20 | | 1 | 1 | 1
+8645 | Wed Jun 01 00:00:03 2022 PDT | 22 | | 1 | 1 | 1
 (3 rows)
 
 drop table :hypertable;

@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',

@@ -84,7 +84,7 @@ select setseed(1);
 -- seconds. Any other timestamps are inserted as part of the test.
 insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
 select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table :hypertable set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'created_at',
@@ -101,6 +101,9 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
 where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
 order by chunk2 asc
 limit 1 offset 1 \gset
+-- Set parallel cost to zero to force parallel plans and avoid flaky test.
+set parallel_tuple_cost to 0;
+set parallel_setup_cost to 0;
 -- We need to drop the index to trigger parallel plans. Otherwise they
 -- will use the index.
 drop index hypertable_device_id_idx;
@@ -139,7 +142,7 @@ $$, :'hypertable'));
 select device_id, count(*) from :hypertable where device_id=1 group by device_id;
 device_id | count
 -----------+-------
-1 | 8625
+1 | 312
 (1 row)
 
 -- Save counts collected over entire hypertable
@@ -193,7 +196,7 @@ $$, :'hypertable'));
 select device_id, count(*) from :hypertable where device_id=1 group by device_id;
 device_id | count
 -----------+-------
-1 | 8625
+1 | 312
 (1 row)
 
 -- Enable parallel on SeqScan and check for same result
@@ -230,7 +233,7 @@ $$, :'hypertable'));
 select device_id, count(*) from :hypertable where device_id=1 group by device_id;
 device_id | count
 -----------+-------
-1 | 8625
+1 | 312
 (1 row)
 
 -- Enable ColumnarScan and check for same result
@@ -267,7 +270,7 @@ $$, :'hypertable'));
 select device_id, count(*) from :hypertable where device_id=1 group by device_id;
 device_id | count
 -----------+-------
-1 | 8625
+1 | 312
 (1 row)
 
 -- Parallel plan with hyperstore on single chunk
@@ -303,7 +306,7 @@ $$, :'hypertable'));
 select device_id, count(*) from :chunk1 where device_id=1 group by device_id;
 device_id | count
 -----------+-------
-1 | 226
+1 | 3
 (1 row)
 
 -- Compare hyperstore per-location counts with original counts without

@@ -24,8 +24,8 @@ select setseed(1);
 (1 row)
 
 insert into readings (time, location, device, temp, humidity)
-select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
-from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5s') t;
+select t, ceil(random()*3), ceil(random()*30), random()*40, random()*100
+from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 alter table readings set (
 timescaledb.compress,
 timescaledb.compress_orderby = 'time',
@@ -51,7 +51,7 @@ select * from :chunk where ctid = :'ctid';
 QUERY PLAN
 ------------------------------------------------------
 Tid Scan on _hyper_1_1_chunk (actual rows=1 loops=1)
-TID Cond: (ctid = '(2147483649,11)'::tid)
+TID Cond: (ctid = '(2147483650,1)'::tid)
 Arrays read from cache: 0
 Arrays decompressed: 0
 (4 rows)
@@ -59,7 +59,7 @@ select * from :chunk where ctid = :'ctid';
 select * from :chunk where ctid = :'ctid';
 time | location | device | temp | humidity
 ------------------------------+----------+--------+------------------+------------------
-Wed Jun 01 00:19:20 2022 PDT | 8 | 1 | 18.1995372460622 | 31.5529442138474
+Wed Jun 01 02:50:00 2022 PDT | 2 | 2 | 3.41795491467339 | 79.4169433908854
 (1 row)
 
 -- Insert a new row, which will then be non-compressed, and fetch it.
@@ -89,35 +89,35 @@ ERROR: unrecognized EXPLAIN option "decopress_cache_stats" at character 55
 \set ON_ERROR_STOP 1
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select time, temp + humidity from readings where device between 5 and 10 and humidity > 5;
-QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
-Result (actual rows=97923 loops=1)
--> Append (actual rows=97923 loops=1)
--> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2340 loops=1)
+QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------
+Result (actual rows=1624 loops=1)
+-> Append (actual rows=1624 loops=1)
+-> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=34 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 119
--> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=22692 loops=1)
+Rows Removed by Filter: 1
+-> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=404 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1192
--> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=22872 loops=1)
+Rows Removed by Filter: 17
+-> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=380 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1258
--> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=22902 loops=1)
+Rows Removed by Filter: 23
+-> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=359 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1237
--> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=22930 loops=1)
+Rows Removed by Filter: 18
+-> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=379 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1273
--> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=4187 loops=1)
+Rows Removed by Filter: 16
+-> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=68 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 223
-Arrays read from cache: 7359
+Rows Removed by Filter: 6
+Arrays read from cache: 87
 Arrays decompressed: 18
 (28 rows)
 
@@ -127,35 +127,35 @@ select time, temp + humidity from readings where device between 5 and 10 and hum
 -- the middle should not include decompress stats:
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select time, temp + humidity from readings where device between 5 and 10 and humidity > 5;
-QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
-Result (actual rows=97923 loops=1)
--> Append (actual rows=97923 loops=1)
--> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2340 loops=1)
+QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------
+Result (actual rows=1624 loops=1)
+-> Append (actual rows=1624 loops=1)
+-> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=34 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 119
--> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=22692 loops=1)
+Rows Removed by Filter: 1
+-> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=404 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1192
--> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=22872 loops=1)
+Rows Removed by Filter: 17
+-> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=380 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1258
--> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=22902 loops=1)
+Rows Removed by Filter: 23
+-> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=359 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1237
--> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=22930 loops=1)
+Rows Removed by Filter: 18
+-> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=379 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 1273
--> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=4187 loops=1)
+Rows Removed by Filter: 16
+-> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=68 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Filter: (humidity > '5'::double precision)
-Rows Removed by Filter: 223
-Arrays read from cache: 7359
+Rows Removed by Filter: 6
+Arrays read from cache: 87
 Arrays decompressed: 18
 (28 rows)
 
@@ -164,9 +164,9 @@ select time, temp + humidity from readings where device between 5 and 10 and hum
 -- include decompress stats:
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select * from :chunk where device between 5 and 10;
-QUERY PLAN
-------------------------------------------------------------------------------------------------------
-Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2459 loops=1)
+QUERY PLAN
+----------------------------------------------------------------------------------------------------
+Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Arrays read from cache: 0
 Arrays decompressed: 0
@@ -174,9 +174,9 @@ select * from :chunk where device between 5 and 10;
 
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select * from :chunk where device between 5 and 10;
-QUERY PLAN
-------------------------------------------------------------------------------------------------------
-Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2459 loops=1)
+QUERY PLAN
+----------------------------------------------------------------------------------------------------
+Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 Arrays read from cache: 0
 Arrays decompressed: 0
@@ -186,30 +186,30 @@ select * from :chunk where device between 5 and 10;
 set max_parallel_workers_per_gather to 0;
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select device, humidity from readings where device between 5 and 10;
-QUERY PLAN
--------------------------------------------------------------------------------------------------------------
-Append (actual rows=103225 loops=1)
--> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2459 loops=1)
+QUERY PLAN
+-----------------------------------------------------------------------------------------------------------
+Append (actual rows=1705 loops=1)
+-> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
--> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=23884 loops=1)
+-> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=421 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
--> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=24130 loops=1)
+-> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=403 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
--> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=24139 loops=1)
+-> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=377 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
--> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=24203 loops=1)
+-> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=395 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
--> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=4410 loops=1)
+-> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=74 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
-Arrays read from cache: 2453
+Arrays read from cache: 29
 Arrays decompressed: 6
 (15 rows)
 
 explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
 select device, avg(humidity) from readings where device between 5 and 10
 group by device;
-QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
+QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------
 Finalize GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_1_chunk.device
 -> Sort (actual rows=36 loops=1)
@@ -218,29 +218,29 @@ group by device;
 -> Append (actual rows=36 loops=1)
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_1_chunk.device
--> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=2459 loops=1)
+-> Index Scan using _hyper_1_1_chunk_readings_device_idx on _hyper_1_1_chunk (actual rows=35 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_2_chunk.device
--> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=23884 loops=1)
+-> Index Scan using _hyper_1_2_chunk_readings_device_idx on _hyper_1_2_chunk (actual rows=421 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_3_chunk.device
--> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=24130 loops=1)
+-> Index Scan using _hyper_1_3_chunk_readings_device_idx on _hyper_1_3_chunk (actual rows=403 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_4_chunk.device
--> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=24139 loops=1)
+-> Index Scan using _hyper_1_4_chunk_readings_device_idx on _hyper_1_4_chunk (actual rows=377 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_5_chunk.device
--> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=24203 loops=1)
+-> Index Scan using _hyper_1_5_chunk_readings_device_idx on _hyper_1_5_chunk (actual rows=395 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
 -> Partial GroupAggregate (actual rows=6 loops=1)
 Group Key: _hyper_1_6_chunk.device
--> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=4410 loops=1)
+-> Index Scan using _hyper_1_6_chunk_readings_device_idx on _hyper_1_6_chunk (actual rows=74 loops=1)
 Index Cond: ((device >= 5) AND (device <= 10))
-Arrays read from cache: 2453
+Arrays read from cache: 29
 Arrays decompressed: 6
 (32 rows)
 
@ -249,21 +249,21 @@ group by device;
explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
insert into readings (time, location, device, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5s') t
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t
on conflict (location, device, time) do nothing;
QUERY PLAN
-----------------------------------------------------------------------------------------
QUERY PLAN
---------------------------------------------------------------------------------------
Custom Scan (HypertableModify) (actual rows=0 loops=1)
-> Insert on readings (actual rows=0 loops=1)
Conflict Resolution: NOTHING
Conflict Arbiter Indexes: readings_device_location_time_key
Tuples Inserted: 516712
Conflicting Tuples: 1689
-> Custom Scan (ChunkDispatch) (actual rows=518401 loops=1)
-> Subquery Scan on "*SELECT*" (actual rows=518401 loops=1)
-> Function Scan on generate_series t (actual rows=518401 loops=1)
Tuples Inserted: 8608
Conflicting Tuples: 33
-> Custom Scan (ChunkDispatch) (actual rows=8641 loops=1)
-> Subquery Scan on "*SELECT*" (actual rows=8641 loops=1)
-> Function Scan on generate_series t (actual rows=8641 loops=1)
Arrays read from cache: 0
Arrays decompressed: 88
Arrays decompressed: 4
(11 rows)

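A quick sanity check of the new series length (illustrative; runs in any PostgreSQL session and is not part of the test): one month of five-minute steps with both endpoints included is 30 × 24 × 12 + 1 = 8641 rows, matching the 8608 inserted plus 33 conflicting tuples in the plan above.

-- not part of the test: verify the generator row count
select count(*) from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
-- returns 8641
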
-- This should show values for all columns
@ -300,11 +300,11 @@ select time, temp + humidity from readings where device between 5 and 10 and hum
select time, temp + humidity from readings where device between 5 and 10 and humidity > 5 limit 5;
time | ?column?
------------------------------+------------------
Wed Jun 01 00:09:30 2022 PDT | 58.2121830134964
Wed Jun 01 00:11:30 2022 PDT | 78.7699285749949
Wed Jun 01 00:13:50 2022 PDT | 90.6603130792196
Wed Jun 01 00:15:25 2022 PDT | 11.6666413710752
Wed Jun 01 00:16:05 2022 PDT | 98.9968121849908
Wed Jun 01 04:30:00 2022 PDT | 100.201246910669
Wed Jun 01 07:30:00 2022 PDT | 92.3407555735537
Wed Jun 01 11:00:00 2022 PDT | 82.8938507105022
Wed Jun 01 11:35:00 2022 PDT | 70.1222724677943
Wed Jun 01 14:10:00 2022 PDT | 15.8070593822794
(5 rows)

-- Get the compressed chunk
@ -326,16 +326,16 @@ explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select time, location, temp from :chunk
where location = 1::text
order by time desc;
QUERY PLAN
---------------------------------------------------------------------------------
Sort (actual rows=2417 loops=1)
QUERY PLAN
-------------------------------------------------------------------------------
Sort (actual rows=88 loops=1)
Sort Key: "time" DESC
Sort Method: quicksort
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=2417 loops=1)
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=88 loops=1)
Vectorized Filter: (location = '1'::text)
Rows Removed by Filter: 22020
Arrays read from cache: 3573
Arrays decompressed: 90
Rows Removed by Filter: 319
Arrays read from cache: 168
Arrays decompressed: 84
(8 rows)

-- Save the data for comparison with seqscan
@ -349,14 +349,14 @@ explain (analyze, costs off, timing off, summary off)
select time, location, temp from :chunk
where location = 1::text
order by time desc;
QUERY PLAN
---------------------------------------------------------------
Sort (actual rows=2417 loops=1)
QUERY PLAN
-------------------------------------------------------------
Sort (actual rows=88 loops=1)
Sort Key: "time" DESC
Sort Method: quicksort
-> Seq Scan on _hyper_1_1_chunk (actual rows=2417 loops=1)
-> Seq Scan on _hyper_1_1_chunk (actual rows=88 loops=1)
Filter: (location = '1'::text)
Rows Removed by Filter: 22020
Rows Removed by Filter: 319
(6 rows)

-- If output is the same, this query should return nothing
@ -389,10 +389,10 @@ select * from chunk_saved;
set timescaledb.enable_columnarscan=true;
explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select count(*) from :chunk where device = 1;
QUERY PLAN
--------------------------------------------------------------------------------
QUERY PLAN
-------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=1)
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=827 loops=1)
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=17 loops=1)
Scankey: (device = 1)
Arrays read from cache: 0
Arrays decompressed: 0
@ -400,9 +400,9 @@ select count(*) from :chunk where device = 1;

explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select device from :chunk where device = 1;
QUERY PLAN
--------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=827 loops=1)
QUERY PLAN
-------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=17 loops=1)
Scankey: (device = 1)
Arrays read from cache: 0
Arrays decompressed: 0
@ -411,12 +411,12 @@ select device from :chunk where device = 1;
-- Using a non-segmentby column will decompress that column
explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select count(*) from :chunk where location = 1::text;
QUERY PLAN
---------------------------------------------------------------------------------
QUERY PLAN
-------------------------------------------------------------------------------
Aggregate (actual rows=1 loops=1)
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=2418 loops=1)
-> Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=89 loops=1)
Vectorized Filter: (location = '1'::text)
Rows Removed by Filter: 22021
Rows Removed by Filter: 320
Arrays read from cache: 0
Arrays decompressed: 30
(6 rows)
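
What the cache statistics show: device is a segmentby column, so its values are stored uncompressed alongside each batch and the predicate is applied as a Scankey with zero arrays decompressed, while location is a compressed column, so the same shape of predicate becomes a Vectorized Filter and the location arrays must be decompressed first (hence Arrays decompressed: 30).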
@ -427,36 +427,36 @@ select count(*) from :chunk where location = 1::text;
set timescaledb.enable_columnarscan=false;
explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select count(*) from :chunk where device = 1;
QUERY PLAN
--------------------------------------------------------------
QUERY PLAN
-------------------------------------------------------------
Aggregate (actual rows=1 loops=1)
-> Seq Scan on _hyper_1_1_chunk (actual rows=827 loops=1)
-> Seq Scan on _hyper_1_1_chunk (actual rows=17 loops=1)
Filter: (device = 1)
Rows Removed by Filter: 23612
Arrays read from cache: 24422
Rows Removed by Filter: 392
Arrays read from cache: 350
Arrays decompressed: 62
(6 rows)

explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select device from :chunk where device = 1;
QUERY PLAN
--------------------------------------------------------
Seq Scan on _hyper_1_1_chunk (actual rows=827 loops=1)
QUERY PLAN
-------------------------------------------------------
Seq Scan on _hyper_1_1_chunk (actual rows=17 loops=1)
Filter: (device = 1)
Rows Removed by Filter: 23612
Rows Removed by Filter: 392
Arrays read from cache: 0
Arrays decompressed: 0
(5 rows)

explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
select count(*) from :chunk where location = 1::text;
QUERY PLAN
---------------------------------------------------------------
QUERY PLAN
-------------------------------------------------------------
Aggregate (actual rows=1 loops=1)
-> Seq Scan on _hyper_1_1_chunk (actual rows=2418 loops=1)
-> Seq Scan on _hyper_1_1_chunk (actual rows=89 loops=1)
Filter: (location = '1'::text)
Rows Removed by Filter: 22021
Arrays read from cache: 24422
Rows Removed by Filter: 320
Arrays read from cache: 350
Arrays decompressed: 62
(6 rows)

@ -84,7 +84,7 @@ select setseed(1);
-- seconds. Any other timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
@ -153,20 +153,20 @@ select * from attrstats_compare;
-- normal table and in the right ballpark
explain (analyze, costs off, timing off, summary off)
select * from :chunk1 where location_id = 1;
QUERY PLAN
--------------------------------------------------------
Seq Scan on _hyper_1_1_chunk (actual rows=605 loops=1)
QUERY PLAN
-------------------------------------------------------
Seq Scan on _hyper_1_1_chunk (actual rows=19 loops=1)
Filter: (location_id = 1)
Rows Removed by Filter: 5515
Rows Removed by Filter: 185
(3 rows)

explain (analyze, costs off, timing off, summary off)
select * from normaltable where location_id = 1;
QUERY PLAN
---------------------------------------------------
Seq Scan on normaltable (actual rows=605 loops=1)
QUERY PLAN
--------------------------------------------------
Seq Scan on normaltable (actual rows=19 loops=1)
Filter: (location_id = 1)
Rows Removed by Filter: 5515
Rows Removed by Filter: 185
(3 rows)

-- Changing to hyperstore will update relstats since it processes all
@ -179,8 +179,8 @@ create index normaltable_location_id_idx on normaltable (location_id);
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 6120
normaltable | 6120
_timescaledb_internal._hyper_1_1_chunk | 204
normaltable | 204
(2 rows)

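Side note (illustrative, not part of the test): reltuples is ordinary pg_class data, so the expected values above can be inspected directly.

-- not part of the test: show the raw planner statistics
select relname, reltuples from pg_class
where relname in ('_hyper_1_1_chunk', 'normaltable');
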
select * from attrstats_compare;
@ -195,24 +195,24 @@ drop index normaltable_location_id_idx;
select count(*) from :chunk1 where location_id = 1;
count
-------
605
19
(1 row)

explain (analyze, costs off, timing off, summary off)
select * from :chunk1 where location_id = 1;
QUERY PLAN
--------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=605 loops=1)
QUERY PLAN
-------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=19 loops=1)
Scankey: (location_id = 1)
(2 rows)

explain (analyze, costs off, timing off, summary off)
select * from normaltable where location_id = 1;
QUERY PLAN
---------------------------------------------------
Seq Scan on normaltable (actual rows=605 loops=1)
QUERY PLAN
--------------------------------------------------
Seq Scan on normaltable (actual rows=19 loops=1)
Filter: (location_id = 1)
Rows Removed by Filter: 5515
Rows Removed by Filter: 185
(3 rows)

-- ANALYZE directly on chunk
@ -222,8 +222,8 @@ analyze normaltable;
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 6120
normaltable | 6120
_timescaledb_internal._hyper_1_1_chunk | 204
normaltable | 204
(2 rows)

select * from attrstats_same;
@ -235,19 +235,19 @@ select * from attrstats_same;
-- Check that the estimated rows is now correct based on stats (reltuples)
explain (analyze, costs off, timing off, summary off)
select * from :chunk1 where location_id = 1;
QUERY PLAN
--------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=605 loops=1)
QUERY PLAN
-------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_1_1_chunk (actual rows=19 loops=1)
Scankey: (location_id = 1)
(2 rows)

explain (analyze, costs off, timing off, summary off)
select * from normaltable where location_id = 1;
QUERY PLAN
---------------------------------------------------
Seq Scan on normaltable (actual rows=605 loops=1)
QUERY PLAN
--------------------------------------------------
Seq Scan on normaltable (actual rows=19 loops=1)
Filter: (location_id = 1)
Rows Removed by Filter: 5515
Rows Removed by Filter: 185
(3 rows)

delete from :chunk1 where location_id=1;
@ -255,8 +255,8 @@ delete from normaltable where location_id=1;
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 6120
normaltable | 6120
_timescaledb_internal._hyper_1_1_chunk | 204
normaltable | 204
(2 rows)

select * from attrstats_same;
@ -275,8 +275,8 @@ create index normaltable_location_id_idx on normaltable (location_id);
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 5515
normaltable | 5515
_timescaledb_internal._hyper_1_1_chunk | 185
normaltable | 185
(2 rows)

vacuum :chunk1;
@ -284,8 +284,8 @@ vacuum normaltable;
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 5515
normaltable | 5515
_timescaledb_internal._hyper_1_1_chunk | 185
normaltable | 185
(2 rows)

select * from attrstats_same;
@ -299,8 +299,8 @@ vacuum analyze normaltable;
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_1_chunk | 5515
normaltable | 5515
_timescaledb_internal._hyper_1_1_chunk | 185
normaltable | 185
(2 rows)

select * from attrstats_same;
@ -316,7 +316,7 @@ update :hypertable set device_id = 2 where device_id = 1;
select * from relstats where relid = :'chunk2'::regclass;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_2_chunk | 60480
_timescaledb_internal._hyper_1_2_chunk | 2016
(1 row)

select * from attrstats where relid = :'chunk2'::regclass;
@ -327,14 +327,14 @@ select * from attrstats where relid = :'chunk2'::regclass;
select count(*) from :chunk2;
count
-------
60480
2016
(1 row)

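The new count is easy to verify by hand: chunk 2 covers a full week of five-minute samples, and 7 × 24 × 12 = 2016, just as 7 × 24 × 360 = 60480 matched the old ten-second spacing.
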
analyze :hypertable;
select * from relstats where relid = :'chunk2'::regclass;
relid | reltuples
----------------------------------------+-----------
_timescaledb_internal._hyper_1_2_chunk | 60480
_timescaledb_internal._hyper_1_2_chunk | 2016
(1 row)

-- Just show that there are attrstats via a count to avoid flaky output

@ -85,7 +85,7 @@ select setseed(1);
-- seconds. Any other timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
@ -146,7 +146,7 @@ order by relname;
select * from :hypertable order by created_at offset 577 limit 1;
-[ RECORD 1 ]-----------------------------
metric_id | 578
created_at | Wed Jun 01 01:36:10 2022 PDT
created_at | Fri Jun 03 00:05:00 2022 PDT
location_id | 5
owner_id | 4
device_id | 19
@ -163,17 +163,17 @@ update :hypertable set temp = 100.0 where created_at = :'created_at';
---------------------------------------------------------------------------------------------------------
Custom Scan (HypertableModify)
-> Update on readings
Update on _hyper_1_1_chunk readings_1
Update on _hyper_1_2_chunk readings_1
-> Result
-> Index Scan using "1_1_readings_created_at_key" on _hyper_1_1_chunk readings_1
Index Cond: (created_at = 'Wed Jun 01 01:36:10 2022 PDT'::timestamp with time zone)
-> Index Scan using "2_2_readings_created_at_key" on _hyper_1_2_chunk readings_1
Index Cond: (created_at = 'Fri Jun 03 00:05:00 2022 PDT'::timestamp with time zone)
(6 rows)

\x on
select * from :hypertable where created_at = :'created_at';
-[ RECORD 1 ]-----------------------------
metric_id | 578
created_at | Wed Jun 01 01:36:10 2022 PDT
created_at | Fri Jun 03 00:05:00 2022 PDT
location_id | 5
owner_id | 4
device_id | 19
@ -184,7 +184,7 @@ update :hypertable set temp = 100.0 where created_at = :'created_at';
select * from :hypertable where created_at = :'created_at';
-[ RECORD 1 ]-----------------------------
metric_id | 578
created_at | Wed Jun 01 01:36:10 2022 PDT
created_at | Fri Jun 03 00:05:00 2022 PDT
location_id | 5
owner_id | 4
device_id | 19
@ -233,7 +233,7 @@ update :hypertable set humidity = 110.0 where location_id = :location_id;
select count(*) from :hypertable where humidity = 110.0;
count
-------
25888
832
(1 row)

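The moved timestamp checks out: at five-minute spacing the row at offset 577 lies 577 × 5 = 2885 minutes past 2022-06-01 00:00, which is Fri Jun 03 00:05, and that is also why the update plan above now targets _hyper_1_2_chunk instead of _hyper_1_1_chunk.
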
-- Make sure there is a mix of compressed and non-compressed rows for
@ -253,17 +253,17 @@ order by metric_id for update;
select * from to_update order by metric_id;
is_compressed_tid | metric_id | created_at
-------------------+-----------+------------------------------
f | 6330 | Wed Jun 01 17:34:50 2022 PDT
t | 6331 | Wed Jun 01 17:35:00 2022 PDT
t | 6332 | Wed Jun 01 17:35:10 2022 PDT
f | 6333 | Wed Jun 01 17:35:20 2022 PDT
t | 6334 | Wed Jun 01 17:35:30 2022 PDT
t | 6335 | Wed Jun 01 17:35:40 2022 PDT
t | 6336 | Wed Jun 01 17:35:50 2022 PDT
t | 6337 | Wed Jun 01 17:36:00 2022 PDT
t | 6338 | Wed Jun 01 17:36:10 2022 PDT
t | 6339 | Wed Jun 01 17:36:20 2022 PDT
t | 6340 | Wed Jun 01 17:36:30 2022 PDT
f | 6330 | Wed Jun 22 23:25:00 2022 PDT
t | 6331 | Wed Jun 22 23:30:00 2022 PDT
t | 6332 | Wed Jun 22 23:35:00 2022 PDT
f | 6333 | Wed Jun 22 23:40:00 2022 PDT
t | 6334 | Wed Jun 22 23:45:00 2022 PDT
t | 6335 | Wed Jun 22 23:50:00 2022 PDT
t | 6336 | Wed Jun 22 23:55:00 2022 PDT
t | 6337 | Thu Jun 23 00:00:00 2022 PDT
t | 6338 | Thu Jun 23 00:05:00 2022 PDT
t | 6339 | Thu Jun 23 00:10:00 2022 PDT
t | 6340 | Thu Jun 23 00:15:00 2022 PDT
(11 rows)

update :hypertable set humidity = 200.0, temp = 500.0
@ -271,17 +271,17 @@ where (created_at, metric_id) in (select created_at, metric_id from to_update);
select * from :hypertable where humidity = 200.0 order by metric_id;
metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-----------+------------------------------+-------------+----------+-----------+------+----------
6330 | Wed Jun 01 17:34:50 2022 PDT | 5 | 4 | 6 | 500 | 200
6331 | Wed Jun 01 17:35:00 2022 PDT | 9 | 3 | 12 | 500 | 200
6332 | Wed Jun 01 17:35:10 2022 PDT | 1 | 1 | 15 | 500 | 200
6333 | Wed Jun 01 17:35:20 2022 PDT | 5 | 1 | 24 | 500 | 200
6334 | Wed Jun 01 17:35:30 2022 PDT | 7 | 5 | 25 | 500 | 200
6335 | Wed Jun 01 17:35:40 2022 PDT | 4 | 4 | 10 | 500 | 200
6336 | Wed Jun 01 17:35:50 2022 PDT | 6 | 5 | 23 | 500 | 200
6337 | Wed Jun 01 17:36:00 2022 PDT | 2 | 3 | 1 | 500 | 200
6338 | Wed Jun 01 17:36:10 2022 PDT | 7 | 3 | 27 | 500 | 200
6339 | Wed Jun 01 17:36:20 2022 PDT | 1 | 1 | 24 | 500 | 200
6340 | Wed Jun 01 17:36:30 2022 PDT | 10 | 3 | 21 | 500 | 200
6330 | Wed Jun 22 23:25:00 2022 PDT | 5 | 4 | 6 | 500 | 200
6331 | Wed Jun 22 23:30:00 2022 PDT | 9 | 3 | 12 | 500 | 200
6332 | Wed Jun 22 23:35:00 2022 PDT | 1 | 1 | 15 | 500 | 200
6333 | Wed Jun 22 23:40:00 2022 PDT | 5 | 1 | 24 | 500 | 200
6334 | Wed Jun 22 23:45:00 2022 PDT | 7 | 5 | 25 | 500 | 200
6335 | Wed Jun 22 23:50:00 2022 PDT | 4 | 4 | 10 | 500 | 200
6336 | Wed Jun 22 23:55:00 2022 PDT | 6 | 5 | 23 | 500 | 200
6337 | Thu Jun 23 00:00:00 2022 PDT | 2 | 3 | 1 | 500 | 200
6338 | Thu Jun 23 00:05:00 2022 PDT | 7 | 3 | 27 | 500 | 200
6339 | Thu Jun 23 00:10:00 2022 PDT | 1 | 1 | 24 | 500 | 200
6340 | Thu Jun 23 00:15:00 2022 PDT | 10 | 3 | 21 | 500 | 200
(11 rows)

commit;
@ -295,7 +295,7 @@ from :hypertable order by created_at offset 898 limit 1;
-[ RECORD 1 ]-----+-----------------------------
is_compressed_tid | f
metric_id | 899
created_at | Wed Jun 01 02:29:40 2022 PDT
created_at | Sat Jun 04 02:50:00 2022 PDT
location_id | 4
owner_id | 1
device_id | 5
@ -331,28 +331,31 @@ where created_at = :'created_at' and humidity = 200.0;
select ch as chunk
from show_chunks(:'hypertable') ch limit 1 \gset
vacuum full :chunk;
-- Pick a tuple in the compressed chunk and get the values from that
-- tuple for the cursor.
select metric_id from :chunk offset 5 limit 1 \gset
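
A note on the \gset pattern used throughout these tests (standard psql, shown here for reference): \gset runs the buffered query and stores each column of its single-row result in a psql variable named after the column; :var and the quoted form :'var' then interpolate the value into later statements.

-- not part of the test: minimal \gset demonstration
select 42 as answer, now() as ts \gset
\echo :answer
select :'ts'::timestamptz;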
\x on
select _timescaledb_debug.is_compressed_tid(ctid), *
from :hypertable order by created_at offset 898 limit 1;
from :hypertable where metric_id = :metric_id;
-[ RECORD 1 ]-----+-----------------------------
is_compressed_tid | t
metric_id | 899
created_at | Wed Jun 01 02:29:40 2022 PDT
location_id | 4
owner_id | 1
device_id | 5
temp | 32.7528003907232
humidity | 200
metric_id | 50
created_at | Wed Jun 01 04:05:00 2022 PDT
location_id | 1
owner_id | 2
device_id | 18
temp | 6.16907446378801
humidity | 33.7603

select created_at, location_id, owner_id, device_id, humidity
from :hypertable order by created_at offset 898 limit 1 \gset
from :hypertable where metric_id = :metric_id \gset
\x off
begin;
declare curs1 cursor for select humidity from :hypertable where created_at = :'created_at' for update;
fetch forward 1 from curs1;
humidity
----------
200
33.7603
(1 row)

update :hypertable set humidity = 400.0 where current of curs1;
@ -367,19 +370,19 @@ select humidity from :hypertable where created_at = :'created_at' and humidity =

\x on
select _timescaledb_debug.is_compressed_tid(ctid), *
from :hypertable order by created_at offset 898 limit 1;
from :hypertable where metric_id = :metric_id;
-[ RECORD 1 ]-----+-----------------------------
is_compressed_tid | f
metric_id | 899
created_at | Wed Jun 01 02:29:40 2022 PDT
location_id | 4
owner_id | 1
device_id | 5
temp | 32.7528003907232
humidity | 200
metric_id | 50
created_at | Wed Jun 01 04:05:00 2022 PDT
location_id | 1
owner_id | 2
device_id | 18
temp | 6.16907446378801
humidity | 33.7603

select created_at, location_id, owner_id, device_id, humidity
from :hypertable order by created_at offset 898 limit 1 \gset
from :hypertable where metric_id = :metric_id \gset
\x off
-- Test doing the update directly on the chunk. The data should now be
-- decompressed again due to DML decompression in the previous query.
@ -388,7 +391,7 @@ declare curs1 cursor for select humidity from :chunk where created_at = :'create
fetch forward 1 from curs1;
humidity
----------
200
33.7603
(1 row)

update :chunk set humidity = 400.0 where current of curs1;

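The cursor dance above pins one specific row: declaring the cursor with for update locks each row as it is fetched, and update ... where current of curs1 modifies exactly the row the cursor is positioned on; the surrounding is_compressed_tid probes then show the tuple leaving the compressed relation after the first update.
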
@ -84,7 +84,7 @@ select setseed(1);
-- seconds. Any other timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
@ -129,7 +129,7 @@ select location_id, count(*) into orig from :hypertable GROUP BY location_id;
select count(*) from :cchunk1;
count
-------
50
48
(1 row)

-- update one location_id to decompress some data
@ -145,7 +145,7 @@ ERROR: cannot cluster a hyperstore table
select count(*) from :cchunk1;
count
-------
45
43
(1 row)

-- run vacuum full to recompress
@ -154,7 +154,7 @@ vacuum full :hypertable;
select count(*) from :cchunk1;
count
-------
50
48
(1 row)

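In both the old and the new expected output the update decompresses exactly five batches of the compressed chunk (50 down to 45 before this commit, 48 down to 43 after), and vacuum full rewrites the chunk so everything is compressed again, restoring the original batch count.
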
-- also try vacuum full on chunk level
@ -162,14 +162,14 @@ update :hypertable set temp=1.0 where location_id=1;
select count(*) from :cchunk1;
count
-------
45
43
(1 row)

vacuum full :hypertable;
select count(*) from :cchunk1;
count
-------
50
48
(1 row)

-- check that table data (or at least counts) is still the same

@ -109,13 +109,18 @@ drop table orig, owner_orig, owner_comp;
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;

-- find a compressed tuple in a deterministic manner and get location and timestamp
select created_at, location_id
from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at, location_id limit 1 \gset

-- first update moves the value from the compressed rel to the non-compressed (seen via ctid)
update :hypertable set temp=1.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;

-- second update should be a hot update (tuple in same block after update, as shown by ctid)
update :hypertable set temp=2.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;

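Background on the HOT expectation (generic PostgreSQL, illustrative only; the table t below is hypothetical): a heap-only-tuple update can place the new row version on the same page as the old one when no indexed column changes, so the block number in the ctid stays the same.

-- not part of the test: same-block check on a throwaway table
create table t (id int primary key, val float);
insert into t values (1, 1.0);
select ctid from t where id = 1;      -- e.g. (0,1)
update t set val = 2.0 where id = 1;  -- val is not indexed, so HOT is possible
select ctid from t where id = 1;      -- same block, new offset: e.g. (0,2)
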
@ -171,6 +176,8 @@ $$, :'chunk1'));
-- Test index only scan
--

vacuum analyze :hypertable;

create table saved_hypertable as select * from :hypertable;

-- This will not use index-only scan because it is using a segment-by
@ -189,7 +196,6 @@ select heapam.count as heapam, hyperstore.count as hyperstore

drop table saved_hypertable;

\echo == This should use index-only scan ==
select explain_analyze_anonymize(format($$
select device_id from %s where device_id between 5 and 10
$$, :'hypertable'));
@ -201,7 +207,11 @@ select explain_analyze_anonymize(format($$
select device_id from %s where device_id between 5 and 10
$$, :'chunk1'));

-- Test index only scan with covering indexes
-- Test index only scan with covering indexes.
--
-- Analyze will run the queries so we are satisfied with this right
-- now and do not run the queries separately since they can generate
-- different results depending on table contents.
select explain_analyze_anonymize(format($$
select location_id, avg(humidity) from %s where location_id between 5 and 10
group by location_id order by location_id
@ -222,18 +232,6 @@ select explain_analyze_anonymize(format($$
group by device_id order by device_id
$$, :'chunk1'));

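Why the vacuum analyze added above matters (generic PostgreSQL, illustrative only; some_table and its index are hypothetical): an index-only scan can skip heap fetches only for pages that the visibility map marks all-visible, and vacuum is what sets those bits.

-- not part of the test: typical recipe for effective index-only scans
create index on some_table (device_id);
vacuum analyze some_table;  -- sets visibility-map bits and refreshes stats
explain (analyze) select device_id from some_table where device_id between 5 and 10;
-- look for "Index Only Scan" with "Heap Fetches: 0"
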
select location_id, round(avg(humidity)) from :hypertable where location_id between 5 and 10
group by location_id order by location_id;

select location_id, round(avg(humidity)) from :chunk1 where location_id between 5 and 10
group by location_id order by location_id;

select device_id, round(avg(humidity)) from :hypertable where device_id between 5 and 10
group by device_id order by device_id;

select device_id, round(avg(humidity)) from :chunk1 where device_id between 5 and 10
group by device_id order by device_id;

-------------------------------------
-- Test UNIQUE and Partial indexes --
-------------------------------------

@ -12,6 +12,10 @@ set max_parallel_workers_per_gather to 0;
drop index hypertable_location_id_idx;
drop index hypertable_device_id_idx;

-- Discourage sequence scan when there are alternatives to avoid flaky
-- tests.
set enable_seqscan to false;

create index hypertable_location_id_idx on :hypertable using hash (location_id);
create index hypertable_device_id_idx on :hypertable using hash (device_id);

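A note on the rebuilt indexes (generic PostgreSQL, illustrative only; t and x are hypothetical): hash indexes support equality lookups only, which is all these tests issue, and with enable_seqscan off the planner has little choice but to use them.

-- not part of the test: what a hash index can and cannot serve
create index on t using hash (x);
select * from t where x = 1;   -- can use the hash index
select * from t where x > 1;   -- cannot; hash indexes only handle '='
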
@ -36,30 +40,26 @@ alter table :chunk2 set access method hyperstore;
--
-- test that indexes work after updates
--
select _timescaledb_debug.is_compressed_tid(ctid),
created_at,
location_id,
temp
from :chunk2
order by location_id, created_at desc limit 2;

-- find a compressed tuple in a deterministic manner and get location and timestamp
select created_at, location_id
from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at, location_id limit 1 \gset

select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';

-- first update moves the value from the compressed rel to the non-compressed (seen via ctid)
update :hypertable set temp=1.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
select _timescaledb_debug.is_compressed_tid(ctid),
created_at,
location_id,
temp
from :chunk2
order by location_id, created_at desc limit 2;
update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';

select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';

-- second update should be a hot update (tuple in same block after update, as shown by ctid)
update :hypertable set temp=2.0 where location_id=1 and created_at='Wed Jun 08 16:57:50 2022 PDT';
select _timescaledb_debug.is_compressed_tid(ctid),
created_at,
location_id,
temp
from :chunk2
order by location_id, created_at desc limit 2;
update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';

select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';

-- make sure query uses a segmentby index and returns the correct data for the update value
select explain_anonymize(format($$

@ -120,15 +120,33 @@ drop table orig, curr;
-- for DO UPDATE, DO NOTHING, and plain inserts, we test this as well
-- to be safe.

-- Insert of a value that exists in the compressed part.
-- find a compressed tuple in a deterministic manner and get the
-- timestamp. Make sure to find one in chunk1 since we will use that
-- later.
select created_at
from :chunk1 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at limit 1 \gset

select * from :hypertable where created_at = :'created_at';

-- Insert of a value that exists in the compressed part should work
-- when done through the hypertable.
insert into :hypertable(created_at, location_id, device_id, temp, humidity)
values ('2022-06-01 00:00:00', 11, 1, 1.0, 1.0)
values (:'created_at', 11, 1, 1.0, 1.0)
on conflict (created_at) do update set location_id = 12;

-- TODO(timescale/timescaledb-private#1087): Inserts directly into chunks do not work.
select * from :hypertable where created_at = :'created_at';

-- TODO(timescale/timescaledb-private#1087): Inserts directly into a
-- compressed tuple in a chunk do not work.

select created_at
from :chunk1 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at limit 1 \gset

\set ON_ERROR_STOP 0
insert into :chunk1(created_at, location_id, device_id, temp, humidity)
values ('2022-06-01 00:00:10', 13, 1, 1.0, 1.0)
values (:'created_at', 13, 1, 1.0, 1.0)
on conflict (created_at) do update set location_id = 14;
\set ON_ERROR_STOP 1

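For context on the upsert path being tested (generic PostgreSQL, illustrative only; the metrics table is hypothetical): on conflict needs a unique constraint or index as its arbiter, and when the conflicting row sits in the compressed part the hypertable route turns the insert into an update, while the same statement aimed directly at the chunk is still expected to fail per the TODO above.

-- not part of the test: the plain upsert shape these cases exercise
insert into metrics (created_at, temp) values (:'created_at', 1.0)
on conflict (created_at) do update set temp = excluded.temp;
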
@ -4,6 +4,10 @@

\ir include/setup_hyperstore.sql

-- Set parallel cost to zero to force parallel plans and avoid flaky test.
set parallel_tuple_cost to 0;
set parallel_setup_cost to 0;

-- We need to drop the index to trigger parallel plans. Otherwise they
-- will use the index.
drop index hypertable_device_id_idx;
@ -11,6 +15,7 @@ drop index hypertable_device_id_idx;
-- Show parallel plan and count on uncompressed (non-hyperstore)
-- hypertable
set max_parallel_workers_per_gather=2;

select explain_anonymize(format($$
select device_id, count(*) from %s where device_id=1 group by device_id
$$, :'hypertable'));

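Why zeroing the costs matters (generic PostgreSQL, illustrative only): with only around 8600 rows the default parallel_setup_cost of 1000 and parallel_tuple_cost of 0.1 make a Gather look more expensive than a serial scan, so the planner would silently fall back to a serial plan and the expected output would flake.

-- not part of the test: the standard recipe for forcing a parallel plan
set parallel_setup_cost = 0;
set parallel_tuple_cost = 0;
set max_parallel_workers_per_gather = 2;
explain (costs off) select device_id, count(*) from readings group by device_id;
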
@ -18,8 +18,8 @@ select create_hypertable('readings', 'time');
select setseed(1);

insert into readings (time, location, device, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5s') t;
select t, ceil(random()*3), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;

alter table readings set (
timescaledb.compress,
@ -96,7 +96,7 @@ group by device;
explain (analyze, costs off, timing off, summary off, decompress_cache_stats)
insert into readings (time, location, device, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5s') t
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t
on conflict (location, device, time) do nothing;

-- This should show values for all columns

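Lowering the location generator from ceil(random()*10) to ceil(random()*3) keeps the column at three distinct text values, so even with far fewer rows each compressed batch still repeats them heavily, which is what lets dictionary encoding kick in for the column this test wants to exercise.
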
@ -100,11 +100,15 @@ select ch as chunk
from show_chunks(:'hypertable') ch limit 1 \gset
vacuum full :chunk;

-- Pick a tuple in the compressed chunk and get the values from that
-- tuple for the cursor.
select metric_id from :chunk offset 5 limit 1 \gset

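Note that the offset 5 probe without an order by is presumably safe here only because vacuum full has just rewritten the chunk, so every tuple in it is compressed and any position lands on a compressed tuple; the captured metric_id then replaces the old positional offset 898 lookups below.
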
\x on
select _timescaledb_debug.is_compressed_tid(ctid), *
from :hypertable order by created_at offset 898 limit 1;
from :hypertable where metric_id = :metric_id;
select created_at, location_id, owner_id, device_id, humidity
from :hypertable order by created_at offset 898 limit 1 \gset
from :hypertable where metric_id = :metric_id \gset
\x off

begin;
@ -119,9 +123,9 @@ select humidity from :hypertable where created_at = :'created_at' and humidity =

\x on
select _timescaledb_debug.is_compressed_tid(ctid), *
from :hypertable order by created_at offset 898 limit 1;
from :hypertable where metric_id = :metric_id;
select created_at, location_id, owner_id, device_id, humidity
from :hypertable order by created_at offset 898 limit 1 \gset
from :hypertable where metric_id = :metric_id \gset
\x off

-- Test doing the update directly on the chunk. The data should now be

@ -30,7 +30,7 @@ select setseed(1);
-- seconds. Any other timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '10s') t;
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;

alter table :hypertable set (
timescaledb.compress,