-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Function to run EXPLAIN ANALYZE and do replacements on the emitted
-- plan. This is intended to be used when the structure of the plan is
-- important, but not the specific chunks scanned nor the number of
-- heap fetches, rows, loops, etc.
create function explain_analyze_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (analyze, costs off, summary off, timing off, decompress_cache_stats) %s', $1)
loop
if trim(both from ln) like 'Group Key:%' then
continue;
end if;
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
create function explain_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (costs off, summary off, timing off) %s', $1)
loop
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
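-- As an illustration (not part of the original test), the helpers can
-- be invoked on any query string, e.g.:
--
--   select explain_analyze_anonymize('select count(*) from readings');
--
-- which prints the plan with chunk names, row counts, and cache
-- statistics masked.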
create table :hypertable(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, -- segmentby attribute with index
owner_id bigint, -- segmentby attribute without index
device_id bigint, -- non-segmentby attribute
temp float8,
humidity float4
);
create index hypertable_location_id_idx on :hypertable (location_id);
create index hypertable_device_id_idx on :hypertable (device_id);
select create_hypertable(:'hypertable', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)
-- Disable incremental sort to make tests stable
set enable_incremental_sort = false;
select setseed(1);
setseed
---------

(1 row)
-- Insert rows into the table.
--
-- The original rows are given timestamps every 5 minutes. Any other
-- timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);
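-- With these settings, compression groups rows into segments by
-- (location_id, owner_id) and orders the rows within each compressed
-- batch by created_at.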
-- Get some test chunks as global variables (first and second chunk here)
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk1
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk1 asc
limit 1 \gset
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk2 asc
limit 1 offset 1 \gset
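-- The \gset commands above store the query results in the psql
-- variables :chunk1 and :chunk2, so these chunks can be referenced by
-- name below.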
-- Set the number of parallel workers to zero to disable parallel
-- plans, since these differ between PostgreSQL versions.
set max_parallel_workers_per_gather to 0;
-- Redefine the indexes to use hash indexes
drop index hypertable_location_id_idx;
drop index hypertable_device_id_idx;
-- Discourage sequential scans when there are alternatives, to avoid
-- flaky tests.
set enable_seqscan to false;
create index hypertable_location_id_idx on :hypertable using hash (location_id);
create index hypertable_device_id_idx on :hypertable using hash (device_id);
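-- Note that hash indexes support only equality comparisons, so the
-- queries below use equality or IN predicates on the indexed columns.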
create view chunk_indexes as
select ch::regclass as chunk, indexrelid::regclass as index, attname
from pg_attribute att inner join pg_index ind
on (att.attrelid=ind.indrelid and att.attnum=ind.indkey[0])
inner join show_chunks(:'hypertable') ch on (ch = att.attrelid)
order by chunk, index;
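-- For example (illustrative, not part of the original test), the view
-- can list the per-chunk indexes on a given column:
--
--   select chunk, index from chunk_indexes where attname = 'location_id';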
-- Save some reference data from an index (only) scan
select explain_anonymize(format($$
select location_id, count(*) into orig from %s
where location_id in (3,4,5) group by location_id
$$, :'hypertable'));
explain_anonymize
------------------------------------------------------------------------------------------
Finalize HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Append
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial HashAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Bitmap Heap Scan on _hyper_I_N_chunk
Recheck Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Bitmap Index Scan on _hyper_I_N_chunk_hypertable_location_id_idx
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
(39 rows)
select location_id, count(*) into orig from :hypertable
where location_id in (3,4,5) group by location_id;
alter table :chunk2 set access method hyperstore;
--
-- Test that indexes work after updates
--
-- Find a compressed tuple in a deterministic manner and get its location and timestamp
select created_at, location_id
from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at, location_id limit 1 \gset
select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';
is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-------------------+-----------+------------------------------+-------------+----------+-----------+------------------+----------
t | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 39.9895349415735 | 49.7787
(1 row)
-- The first update moves the tuple from the compressed relation to the
-- non-compressed one (seen via its ctid)
update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';
select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';
is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-------------------+-----------+------------------------------+-------------+----------+-----------+------+----------
f | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 1 | 49.7787
(1 row)
-- The second update should be a HOT update (the tuple stays in the same
-- block after the update, as shown by its ctid)
update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';
select _timescaledb_debug.is_compressed_tid(ctid), *
from :chunk2 where location_id = :location_id and created_at = :'created_at';
is_compressed_tid | metric_id | created_at | location_id | owner_id | device_id | temp | humidity
-------------------+-----------+------------------------------+-------------+----------+-----------+------+----------
f | 205 | Wed Jun 01 17:00:00 2022 PDT | 1 | 5 | 18 | 2 | 49.7787
(1 row)
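-- As a sketch (not part of the original test), the block number can be
-- extracted from the ctid to verify that the tuple stayed in the same
-- block across the update:
--
--   select (ctid::text::point)[0]::int as block
--   from :chunk2 where location_id = :location_id and created_at = :'created_at';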
-- Make sure the query uses a segmentby index and returns the correct
-- data for the updated value
select explain_anonymize(format($$
select created_at, location_id, temp from %s where location_id=1 and temp=2.0
$$, :'chunk2'));
explain_anonymize
----------------------------------------------------------------------------------
Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = 1)
Filter: (temp = '2'::double precision)
(3 rows)
select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
created_at | location_id | temp
------------------------------+-------------+------
Wed Jun 01 17:00:00 2022 PDT | 1 | 2
(1 row)
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)
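-- All six chunks now use the hyperstore access method, so the index
-- scans below read compressed data and report array decompressions in
-- the EXPLAIN output.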
vacuum analyze :hypertable;
-- Test index scan on non-segmentby column
select explain_analyze_anonymize(format($$
select device_id, avg(temp) from %s where device_id = 10
group by device_id
$$, :'hypertable'));
explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate (actual rows=N loops=N)
-> Append (actual rows=N loops=N)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 320
(24 rows)
select explain_analyze_anonymize(format($$
select device_id, avg(temp) from %s where device_id = 10
group by device_id
$$, :'chunk1'));
explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (device_id = 10)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 17
(7 rows)
-- Test index scan on segmentby column
select explain_analyze_anonymize(format($$
select created_at, location_id, temp from %s where location_id = 5
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 60
(17 rows)
select explain_analyze_anonymize(format($$
select created_at, location_id, temp from %s where location_id = 5
$$, :'chunk1'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------
Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 10
(6 rows)
-- These should generate decompressions as above, but for all columns.
select explain_analyze_anonymize(format($$
select * from %s where location_id = 5
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 60
(17 rows)
select explain_analyze_anonymize(format($$
select * from %s where location_id = 5
$$, :'chunk1'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------
Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: (location_id = 5)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 10
(6 rows)
drop table :hypertable cascade;
NOTICE: drop cascades to view chunk_indexes