-- tsl/test/expected/hypercore_index_btree.out
--
-- Commit e5e94960d0 (Mats Kindahl, 2024-11-10): Change parameter name to
-- enable Hypercore TAM. Switched from the `compress_using` parameter, which
-- took a table access method name, to the boolean parameter
-- `hypercore_use_access_method`, so that no name has to be provided when
-- using the table access method for compression.

-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
create extension pageinspect;
set role :ROLE_DEFAULT_PERM_USER;
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Function to run EXPLAIN ANALYZE and do replacements on the emitted
-- plan. This is intended to be used when the structure of the plan is
-- important, but not the specific chunks scanned nor the number of
-- heap fetches, rows, loops, etc.
create function explain_analyze_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (analyze, costs off, summary off, timing off, decompress_cache_stats) %s', $1)
loop
if trim(both from ln) like 'Group Key:%' then
continue;
end if;
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
create function explain_anonymize(text) returns setof text
language plpgsql as
$$
declare
ln text;
begin
for ln in
execute format('explain (costs off, summary off, timing off) %s', $1)
loop
ln := regexp_replace(ln, 'Array Cache Hits: \d+', 'Array Cache Hits: N');
ln := regexp_replace(ln, 'Array Cache Misses: \d+', 'Array Cache Misses: N');
ln := regexp_replace(ln, 'Array Cache Evictions: \d+', 'Array Cache Evictions: N');
ln := regexp_replace(ln, 'Heap Fetches: \d+', 'Heap Fetches: N');
ln := regexp_replace(ln, 'Workers Launched: \d+', 'Workers Launched: N');
ln := regexp_replace(ln, 'actual rows=\d+ loops=\d+', 'actual rows=N loops=N');
ln := regexp_replace(ln, '_hyper_\d+_\d+_chunk', '_hyper_I_N_chunk', 1, 0);
return next ln;
end loop;
end;
$$;
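-- A minimal usage sketch of the helpers (illustrative only, not part
-- of the test run): they take the query text as a string and return
-- the plan line by line with chunk names and counters anonymized,
-- e.g.
--
--   select explain_anonymize('select count(*) from readings');
--
-- emits the plan with chunk names rewritten to _hyper_I_N_chunk.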
create table :hypertable(
metric_id serial,
created_at timestamptz not null unique,
location_id smallint, --segmentby attribute with index
owner_id bigint, --segmentby attribute without index
device_id bigint, --non-segmentby attribute
temp float8,
humidity float4
);
create index hypertable_location_id_idx on :hypertable (location_id);
create index hypertable_device_id_idx on :hypertable (device_id);
select create_hypertable(:'hypertable', by_range('created_at'));
create_hypertable
-------------------
(1,t)
(1 row)
-- Disable incremental sort to make tests stable
set enable_incremental_sort = false;
select setseed(1);
setseed
---------
(1 row)
-- Insert rows into the table.
--
-- The original rows have timestamps every five minutes. Any other
-- timestamps are inserted as part of the test.
insert into :hypertable (created_at, location_id, device_id, owner_id, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), ceil(random() * 5), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table :hypertable set (
timescaledb.compress,
timescaledb.compress_orderby = 'created_at',
timescaledb.compress_segmentby = 'location_id, owner_id'
);
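-- The resulting settings can be inspected (illustrative; the exact
-- view name depends on the TimescaleDB version) with something like:
--
--   select * from timescaledb_information.compression_settings
--   where hypertable_name = 'readings';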
-- Get some test chunks as global variables (first and second chunk here)
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk1
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk1 asc
limit 1 \gset
select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk2 asc
limit 1 offset 1 \gset
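-- Note that \gset stores the columns of a single-row result in psql
-- variables, so the two chunks can be referenced below as :'chunk1'
-- and :'chunk2'. For example (illustrative):
--
--   select :'chunk1' as first_chunk, :'chunk2' as second_chunk;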
-- Avoid parallel (index) scans to make test stable
set max_parallel_workers_per_gather to 0;
set enable_hashagg to off;
-- Drop the device_id index and redefine it later with extra columns.
drop index hypertable_device_id_idx;
create view chunk_indexes as
select ch::regclass::text as chunk, indexrelid::regclass::text as index, attname
from pg_attribute att inner join pg_index ind
on (att.attrelid=ind.indrelid and att.attnum=ind.indkey[0])
inner join show_chunks(:'hypertable') ch on (ch = att.attrelid)
order by chunk, index;
-- To get stable plans
set max_parallel_workers_per_gather to 0;
-- save some reference data from an index (only) scan
select explain_anonymize(format($$
select location_id, count(*) into orig from %s
where location_id in (3,4,5) group by location_id
$$, :'hypertable'));
explain_anonymize
---------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Sort
Sort Key: _hyper_I_N_chunk.location_id
-> Append
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
(29 rows)
select location_id, count(*) into orig from :hypertable
where location_id in (3,4,5) group by location_id;
-- Create other segmentby indexes to test different combinations. Also
-- redefine the device_id index to include one value field in the
-- index and check that index-only scans also work for included
-- attributes.
create index hypertable_location_id_include_humidity_idx on :hypertable (location_id) include (humidity);
create index hypertable_device_id_idx on :hypertable (device_id) include (humidity);
create index hypertable_owner_idx on :hypertable (owner_id);
create index hypertable_location_id_owner_id_idx on :hypertable (location_id, owner_id);
-- Save index sizes before switching to hypercore so that we can
-- compare sizes after. Don't show the actual sizes because they vary
-- slightly on different platforms.
create table index_sizes_before as
select index, pg_relation_size(index::regclass)
from chunk_indexes
where chunk::regclass = :'chunk2'::regclass
and (attname='location_id' or attname='device_id' or attname='owner_id');
-- Drop some segmentby indexes and recreate them after converting to
-- hypercore. This is to test having some created before conversion
-- and some after.
drop index hypertable_owner_idx;
drop index hypertable_location_id_owner_id_idx;
alter table :chunk2 set access method hypercore;
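-- Converting to the hypercore table access method can also be done
-- with compress_chunk(), as is done further down in this test:
--
--   select compress_chunk(:'chunk2', hypercore_use_access_method => true);
--
-- Here we use ALTER TABLE ... SET ACCESS METHOD directly instead.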
-- count without indexes
select owner_id, count(*) into owner_orig from :hypertable
where owner_id in (3,4,5) group by owner_id;
-- create indexes on all segmentby columns
create index hypertable_owner_idx on :hypertable (owner_id);
create index hypertable_location_id_owner_id_idx on :hypertable (location_id, owner_id);
-- Result should be the same with indexes
select explain_anonymize(format($$
select owner_id, count(*) into owner_comp from %s
where owner_id in (3,4,5) group by owner_id
$$, :'hypertable'));
explain_anonymize
---------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Sort
Sort Key: _hyper_I_N_chunk.owner_id
-> Append
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.owner_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_owner_idx on _hyper_I_N_chunk
Index Cond: (owner_id = ANY ('{3,4,5}'::bigint[]))
(29 rows)
select owner_id, count(*) into owner_comp from :hypertable
where owner_id in (3,4,5) group by owner_id;
select * from owner_orig join owner_comp using (owner_id) where owner_orig.count != owner_comp.count;
owner_id | count | count
----------+-------+-------
(0 rows)
-- the indexes on segmentby columns should be smaller on hypercore,
-- except for the covering index on location_id (because it also
-- includes the non-segmentby column humidity). The device_id index
-- should also remain the same size since it is not on a segmentby
-- column.
select
a.index,
pg_relation_size(a.index) = b.pg_relation_size as is_same_size,
pg_relation_size(a.index) < b.pg_relation_size as is_smaller
from chunk_indexes a
join index_sizes_before b on (a.index = b.index)
where chunk::regclass=:'chunk2'::regclass
and (attname='location_id' or attname='device_id' or attname='owner_id');
index | is_same_size | is_smaller
------------------------------------------------------------------------------------+--------------+------------
_timescaledb_internal._hyper_1_2_chunk_hypertable_device_id_idx | t | f
_timescaledb_internal._hyper_1_2_chunk_hypertable_location_id_idx | f | t
_timescaledb_internal._hyper_1_2_chunk_hypertable_location_id_include_humidity_idx | t | f
_timescaledb_internal._hyper_1_2_chunk_hypertable_location_id_owner_id_idx | f | t
_timescaledb_internal._hyper_1_2_chunk_hypertable_owner_idx | f | t
(5 rows)
-- the query should not use index-only scan on the hypercore chunk
-- (number 2) because it is not supported on segmentby indexes
--
-- first, drop one of the indexes on location_id to make the choice of
-- index predictable
drop index hypertable_location_id_include_humidity_idx;
select explain_anonymize(format($$
select location_id, count(*) into comp from %s
where location_id in (3,4,5) group by location_id
$$, :'hypertable'));
explain_anonymize
------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Sort
Sort Key: _hyper_I_N_chunk.location_id
-> Append
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
-> Partial GroupAggregate
Group Key: _hyper_I_N_chunk.location_id
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = ANY ('{3,4,5}'::integer[]))
(29 rows)
-- result should be the same
select location_id, count(*) into comp from :hypertable where location_id in (3,4,5) group by location_id;
select * from orig join comp using (location_id) where orig.count != comp.count;
location_id | count | count
-------------+-------+-------
(0 rows)
drop table orig, owner_orig, owner_comp;
--
-- test that indexes work after updates
--
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;
is_compressed_tid | created_at | location_id | temp
-------------------+------------------------------+-------------+------------------
t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
(2 rows)
-- find a compressed tuple in a deterministic manner and get location and timestamp
select created_at, location_id
from :chunk2 where _timescaledb_debug.is_compressed_tid(ctid)
order by created_at, location_id limit 1 \gset
-- first update moves the value from the compressed rel to the non-compressed (seen via ctid)
update :hypertable set temp=1.0 where location_id=:location_id and created_at=:'created_at';
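-- The move can be checked directly (illustrative): after the update,
-- is_compressed_tid() should return false for the updated row,
--
--   select _timescaledb_debug.is_compressed_tid(ctid)
--   from :chunk2
--   where created_at = :'created_at' and location_id = :location_id;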
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;
is_compressed_tid | created_at | location_id | temp
-------------------+------------------------------+-------------+------------------
t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
(2 rows)
-- second update should be a HOT update (tuple in the same block after update, as shown by its ctid)
update :hypertable set temp=2.0 where location_id=:location_id and created_at=:'created_at';
select _timescaledb_debug.is_compressed_tid(ctid), created_at, location_id, temp
from :chunk2 order by location_id, created_at desc limit 2;
is_compressed_tid | created_at | location_id | temp
-------------------+------------------------------+-------------+------------------
t | Wed Jun 08 16:40:00 2022 PDT | 1 | 11.3788992881785
t | Wed Jun 08 15:50:00 2022 PDT | 1 | 18.3279156589956
(2 rows)
-- make sure the query uses a segmentby index and returns the correct data for the updated value
select explain_anonymize(format($$
select created_at, location_id, temp from %s where location_id=1 and temp=2.0
$$, :'chunk2'));
explain_anonymize
-------------------------------------------------------------------------------------------
Index Scan using _hyper_I_N_chunk_hypertable_location_id_owner_id_idx on _hyper_I_N_chunk
Index Cond: (location_id = 1)
Filter: (temp = '2'::double precision)
(3 rows)
select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
created_at | location_id | temp
------------------------------+-------------+------
Wed Jun 01 17:00:00 2022 PDT | 1 | 2
(1 row)
select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
_timescaledb_internal._hyper_1_2_chunk
_timescaledb_internal._hyper_1_3_chunk
_timescaledb_internal._hyper_1_4_chunk
_timescaledb_internal._hyper_1_5_chunk
_timescaledb_internal._hyper_1_6_chunk
(6 rows)
vacuum analyze :hypertable;
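-- To verify that the chunks now use the hypercore table access
-- method, the same check as at the end of this test can be used
-- (illustrative):
--
--   select c.relname, a.amname from pg_class c
--   join pg_am a on (c.relam = a.oid)
--   join show_chunks(:'hypertable') ch on (ch = c.oid);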
-- Test sequence scan
set enable_indexscan to off;
select explain_analyze_anonymize(format('select * from %s where owner_id = 3', :'hypertable'));
explain_analyze_anonymize
------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(17 rows)
-- TODO(timescale/timescaledb-private#1117): the Decompress Count here
-- is not correct, but the result is correct.
select explain_analyze_anonymize(format('select * from %s where owner_id = 3', :'chunk1'));
explain_analyze_anonymize
------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: (owner_id = 3)
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(6 rows)
reset enable_indexscan;
-- Test index scan on non-segmentby column
select explain_analyze_anonymize(format($$
select device_id, avg(temp) from %s where device_id between 10 and 20
group by device_id
$$, :'hypertable'));
explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.device_id
Sort Method: quicksort
-> Append (actual rows=N loops=N)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.device_id
Sort Method: quicksort
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Vectorized Filter: ((device_id >= 10) AND (device_id <= 20))
Rows Removed by Filter: 133
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 10) AND (device_id <= 20))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 10) AND (device_id <= 20))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 10) AND (device_id <= 20))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 10) AND (device_id <= 20))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.device_id
Sort Method: quicksort
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Vectorized Filter: ((device_id >= 10) AND (device_id <= 20))
Rows Removed by Filter: 234
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 578
(35 rows)
select explain_analyze_anonymize(format($$
select device_id, avg(temp) from %s where device_id between 10 and 20
group by device_id
$$, :'chunk1'));
explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 10) AND (device_id <= 20))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 149
(7 rows)
-- Test index scan on segmentby column
select explain_analyze_anonymize(format($$
select created_at, location_id, temp from %s where location_id between 5 and 10
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 360
(17 rows)
select explain_analyze_anonymize(format($$
select created_at, location_id, temp from %s where location_id between 5 and 10
$$, :'chunk1'));
explain_analyze_anonymize
------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 60
(6 rows)
-- These should generate decompressions as above, but for all columns.
select explain_analyze_anonymize(format($$
select * from %s where location_id between 5 and 10
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(17 rows)
select explain_analyze_anonymize(format($$
select * from %s where location_id between 5 and 10
$$, :'chunk1'));
explain_analyze_anonymize
------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(6 rows)
--
-- Test index only scan
--
vacuum analyze :hypertable;
create table saved_hypertable as select * from :hypertable;
-- This will not use index-only scan because it is using a segment-by
-- column, but we check that it works as expected.
--
-- Note that the number of arrays decompressed should be zero, since
-- reading a segmentby column does not require any decompression.
select explain_analyze_anonymize(format($$
select location_id from %s where location_id between 5 and 10
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(17 rows)
-- We just compare the counts here, not the full content.
select heapam.count as heapam, hypercore.count as hypercore
from (select count(location_id) from saved_hypertable where location_id between 5 and 10) heapam,
     (select count(location_id) from :hypertable where location_id between 5 and 10) hypercore;
heapam | hypercore
--------+-----------
5126 | 5126
(1 row)
drop table saved_hypertable;
select explain_analyze_anonymize(format($$
select device_id from %s where device_id between 5 and 10
$$, :'hypertable'));
explain_analyze_anonymize
-------------------------------------------------------------------------------------------------------------------
Append (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(23 rows)
select explain_analyze_anonymize(format($$
select location_id from %s where location_id between 5 and 10
$$, :'chunk1'));
explain_analyze_anonymize
------------------------------------------------------------------------
Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(6 rows)
select explain_analyze_anonymize(format($$
select device_id from %s where device_id between 5 and 10
$$, :'chunk1'));
explain_analyze_anonymize
-------------------------------------------------------------------------------------------------------------
Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(7 rows)
-- Test index only scan with covering indexes.
--
-- The explain analyze will run the queries, so we are satisfied with
-- that for now and do not run the queries separately, since they can
-- generate different results depending on table contents.
-- Add back covering index on location_id
create index hypertable_location_id_include_humidity_idx on :hypertable (location_id) include (humidity);
select explain_analyze_anonymize(format($$
select location_id, avg(humidity) from %s where location_id between 5 and 10
group by location_id order by location_id
$$, :'hypertable'));
explain_analyze_anonymize
----------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.location_id
Sort Method: quicksort
-> Append (actual rows=N loops=N)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.location_id
Sort Method: quicksort
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_location_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
-> Partial GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.location_id
Sort Method: quicksort
-> Custom Scan (ColumnarScan) on _hyper_I_N_chunk (actual rows=N loops=N)
Scankey: ((location_id >= 5) AND (location_id <= 10))
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 180
(33 rows)
select explain_analyze_anonymize(format($$
select device_id, avg(humidity) from %s where device_id between 5 and 10
group by device_id order by device_id
$$, :'hypertable'));
explain_analyze_anonymize
-------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate (actual rows=N loops=N)
-> Sort (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.device_id
Sort Method: quicksort
-> Append (actual rows=N loops=N)
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
-> Partial GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(33 rows)
select explain_analyze_anonymize(format($$
select location_id, avg(humidity) from %s where location_id between 5 and 10
group by location_id order by location_id
$$, :'chunk1'));
explain_analyze_anonymize
--------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_location_id_include_humidity_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((location_id >= 5) AND (location_id <= 10))
Heap Fetches: N
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(8 rows)
select explain_analyze_anonymize(format($$
select device_id, avg(humidity) from %s where device_id between 5 and 10
group by device_id order by device_id
$$, :'chunk1'));
explain_analyze_anonymize
-------------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=N loops=N)
-> Index Only Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Index Cond: ((device_id >= 5) AND (device_id <= 10))
Heap Fetches: N
Array Cache Hits: N
Array Cache Misses: N
Array Cache Evictions: N
Array Decompressions: 0
(8 rows)
-------------------------------------
-- Test UNIQUE and Partial indexes --
-------------------------------------
\set VERBOSITY default
---
-- Test that building a UNIQUE index won't work on a hypercore table
-- that contains non-unique values.
---
create table non_unique_metrics (time timestamptz, temp float, device int);
select create_hypertable('non_unique_metrics', 'time', create_default_indexes => false);
NOTICE: adding not-null constraint to column "time"
DETAIL: Dimensions cannot have NULL values.
create_hypertable
---------------------------------
(3,public,non_unique_metrics,t)
(1 row)
insert into non_unique_metrics values ('2024-01-01', 1.0, 1), ('2024-01-01', 2.0, 1), ('2024-01-02', 3.0, 2);
select ch as non_unique_chunk from show_chunks('non_unique_metrics') ch limit 1 \gset
alter table non_unique_metrics set (timescaledb.compress_segmentby = 'device', timescaledb.compress_orderby = 'time');
alter table :non_unique_chunk set access method hypercore;
\set ON_ERROR_STOP 0
---
-- UNIQUE index creation on compressed hypercore should fail due to
-- non-unique values
---
create unique index on non_unique_metrics (time);
ERROR: could not create unique index "_hyper_3_13_chunk_non_unique_metrics_time_idx"
DETAIL: Key ("time")=(Mon Jan 01 00:00:00 2024 PST) is duplicated.
-- TODO(@mats): Expression indexes not yet implemented
CREATE INDEX p1_expr_index ON :hypertable (('device_' || device_id::text));
ERROR: expression indexes not supported
\set ON_ERROR_STOP 1
--------------------------
-- Test partial indexes --
--------------------------
-- Create partial predicate index
create index time_idx on non_unique_metrics (time) where (time < '2024-01-02'::timestamptz);
-- Find the index on the chunk and save as a variable
select indexrelid::regclass as chunk_time_idx
from pg_index i inner join pg_class c on (i.indexrelid=c.oid)
where indrelid = :'non_unique_chunk'::regclass
and relname like '%time%' \gset
reset role; -- need superuser for pageinspect
-- The index should contain 1 key (the two device 1 rows share a timestamp) with 2 TIDs
select _timescaledb_debug.is_compressed_tid(ctid), dead, htid, tids from bt_page_items(:'chunk_time_idx', 1);
is_compressed_tid | dead | htid | tids
-------------------+------+----------------+-------------------------------------
f | f | (2147483649,1) | {"(2147483649,1)","(2147483649,2)"}
(1 row)
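-- Note (an assumption based on the values above, not verified here):
-- heap block numbers at or above 2^31 (2147483649 = 2^31 + 1) appear
-- to encode pointers into the compressed relation, which is what
-- _timescaledb_debug.is_compressed_tid() reports on; both TIDs in the
-- posting list point into the same compressed block.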
-- Turn off columnarscan and seqscan to ensure index scan is picked
set timescaledb.enable_columnarscan=false;
set enable_seqscan=false;
-- Check that a query can use the index
explain (costs off)
select * from non_unique_metrics where time <= '2024-01-01'::timestamptz;
QUERY PLAN
------------------------------------------------------------------------------------
Index Scan using _hyper_3_13_chunk_time_idx on _hyper_3_13_chunk
Index Cond: ("time" <= 'Mon Jan 01 00:00:00 2024 PST'::timestamp with time zone)
(2 rows)
select * from non_unique_metrics where time <= '2024-01-01'::timestamptz;
time | temp | device
------------------------------+------+--------
Mon Jan 01 00:00:00 2024 PST | 1 | 1
Mon Jan 01 00:00:00 2024 PST | 2 | 1
(2 rows)
-- Test a partial index with a predicate on a non-index column
drop index time_idx;
create index time_idx on non_unique_metrics (time) where (device < 2);
select indexrelid::regclass as chunk_time_idx
from pg_index i inner join pg_class c on (i.indexrelid=c.oid)
where indrelid = :'non_unique_chunk'::regclass
and relname like '%time%' \gset
-- Index should have two tids, since one row is excluded
select _timescaledb_debug.is_compressed_tid(ctid), dead, htid, tids from bt_page_items(:'chunk_time_idx', 1);
is_compressed_tid | dead | htid | tids
-------------------+------+----------------+-------------------------------------
f | f | (2147483649,1) | {"(2147483649,1)","(2147483649,2)"}
(1 row)
-- Check that the index works. Expect two rows to be returned.
explain (costs off)
select * from non_unique_metrics where time < '2024-01-02'::timestamptz and device < 2;
QUERY PLAN
-----------------------------------------------------------------------------------
Index Scan using _hyper_3_13_chunk_time_idx on _hyper_3_13_chunk
Index Cond: ("time" < 'Tue Jan 02 00:00:00 2024 PST'::timestamp with time zone)
(2 rows)
select * from non_unique_metrics where time < '2024-01-02'::timestamptz and device < 2;
time | temp | device
------------------------------+------+--------
Mon Jan 01 00:00:00 2024 PST | 1 | 1
Mon Jan 01 00:00:00 2024 PST | 2 | 1
(2 rows)
drop index time_idx;
create index time_idx on non_unique_metrics (time) where (device < 2) and temp < 2.0;
select indexrelid::regclass as chunk_time_idx
from pg_index i inner join pg_class c on (i.indexrelid=c.oid)
where indrelid = :'non_unique_chunk'::regclass
and relname like '%time%' \gset
-- Index should have a single tid, since only one row matches the predicate
select _timescaledb_debug.is_compressed_tid(ctid), dead, htid, tids from bt_page_items(:'chunk_time_idx', 1);
is_compressed_tid | dead | htid | tids
-------------------+------+----------------+------
t | f | (2147483649,1) |
(1 row)
-- Check that the index works as expected. Only one row should match.
explain (costs off)
select * from non_unique_metrics where time < '2024-01-02'::timestamptz and device < 2 and temp = 1.0;
QUERY PLAN
-----------------------------------------------------------------------------------
Index Scan using _hyper_3_13_chunk_time_idx on _hyper_3_13_chunk
Index Cond: ("time" < 'Tue Jan 02 00:00:00 2024 PST'::timestamp with time zone)
Filter: (temp = '1'::double precision)
(3 rows)
select * from non_unique_metrics where time < '2024-01-02'::timestamptz and device < 2 and temp = 1.0;
time | temp | device
------------------------------+------+--------
Mon Jan 01 00:00:00 2024 PST | 1 | 1
(1 row)
-- Make time column data unique to test UNIQUE constraint/index creation
delete from non_unique_metrics where temp = 1.0;
alter table non_unique_metrics add constraint u1 unique(time);
\set ON_ERROR_STOP 0
insert into non_unique_metrics values ('2024-01-01', 1.0, 1);
ERROR: duplicate key value violates unique constraint "13_7_u1"
DETAIL: Key ("time")=(Mon Jan 01 00:00:00 2024 PST) already exists.
\set ON_ERROR_STOP 1
-- Should also be able to create via "create index"
create unique index ui1 on non_unique_metrics (time);
drop table :hypertable cascade;
NOTICE: drop cascades to view chunk_indexes
--------------------------------------------------
-- Test that an index build handles null values --
--------------------------------------------------
create table nullvalues (eventid int, time timestamptz not null, device int, location int, value float, only_nulls text);
select create_hypertable('nullvalues', 'time', create_default_indexes => false);
create_hypertable
-------------------------
(5,public,nullvalues,t)
(1 row)
insert into nullvalues values
(1, '2024-01-01 00:01', 1, null, null, null),
(2, '2024-01-02 00:01', 1, 1, 3.0, null),
(3, '2024-01-03 00:01', 2, 2, null, null),
(4, '2024-01-04 00:01', 2, 3, 4.0, null);
alter table nullvalues set (timescaledb.compress, timescaledb.compress_orderby='time', timescaledb.compress_segmentby='device');
select format('%I.%I', chunk_schema, chunk_name)::regclass as nulls_chunk
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = 'nullvalues'::regclass
order by nulls_chunk asc
limit 1 offset 1 \gset
create index nullvalues_location_idx on nullvalues (location);
create index nullvalues_device_location_idx on nullvalues (device, location);
create index nullvalues_value_idx on nullvalues (value);
create index nullvalues_only_nulls_idx on nullvalues (only_nulls);
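-- PostgreSQL btree indexes store NULL values, so both "is null" and
-- "is not null" conditions can be answered with index scans, as the
-- plans below show. For example (illustrative):
--
--   explain (costs off) select * from nullvalues where value is null;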
explain (costs off) select * from nullvalues where location is not null;
QUERY PLAN
----------------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_device_location_idx on _hyper_5_15_chunk
Index Cond: (location IS NOT NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_device_location_idx on _hyper_5_16_chunk
Index Cond: (location IS NOT NULL)
(5 rows)
create table location_not_null as
select * from nullvalues where location is not null;
select * from location_not_null;
eventid | time | device | location | value | only_nulls
---------+------------------------------+--------+----------+-------+------------
2 | Tue Jan 02 00:01:00 2024 PST | 1 | 1 | 3 |
3 | Wed Jan 03 00:01:00 2024 PST | 2 | 2 | |
4 | Thu Jan 04 00:01:00 2024 PST | 2 | 3 | 4 |
(3 rows)
explain (costs off) select * from nullvalues where location is null;
QUERY PLAN
----------------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_location_idx on _hyper_5_15_chunk
Index Cond: (location IS NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_device_location_idx on _hyper_5_16_chunk
Index Cond: (location IS NULL)
(5 rows)
create table location_null as
select * from nullvalues where location is null;
select * from location_null;
eventid | time | device | location | value | only_nulls
---------+------------------------------+--------+----------+-------+------------
1 | Mon Jan 01 00:01:00 2024 PST | 1 | | |
(1 row)
explain (costs off) select * from nullvalues where only_nulls is null;
QUERY PLAN
-----------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_only_nulls_idx on _hyper_5_15_chunk
Index Cond: (only_nulls IS NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_only_nulls_idx on _hyper_5_16_chunk
Index Cond: (only_nulls IS NULL)
(5 rows)
create table only_nulls_null as
select * from nullvalues where only_nulls is null;
select * from only_nulls_null;
eventid | time | device | location | value | only_nulls
---------+------------------------------+--------+----------+-------+------------
1 | Mon Jan 01 00:01:00 2024 PST | 1 | | |
2 | Tue Jan 02 00:01:00 2024 PST | 1 | 1 | 3 |
3 | Wed Jan 03 00:01:00 2024 PST | 2 | 2 | |
4 | Thu Jan 04 00:01:00 2024 PST | 2 | 3 | 4 |
(4 rows)
-- Convert all chunks to hypercore and run the same queries
select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('nullvalues') ch;
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_5_15_chunk
_timescaledb_internal._hyper_5_16_chunk
(2 rows)
select c.relname, a.amname FROM pg_class c
join pg_am a on (c.relam = a.oid)
join show_chunks('nullvalues') ch on (ch = c.oid);
relname | amname
-------------------+-----------
_hyper_5_15_chunk | hypercore
_hyper_5_16_chunk | hypercore
(2 rows)
-- The explains should show index scans and there should be no rows
-- returned if the result is the same as before, when the chunks were
-- not hypercores.
explain (costs off) select * from nullvalues where location is not null;
QUERY PLAN
----------------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_device_location_idx on _hyper_5_15_chunk
Index Cond: (location IS NOT NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_device_location_idx on _hyper_5_16_chunk
Index Cond: (location IS NOT NULL)
(5 rows)
select * from nullvalues where location is not null
except
select * from location_not_null;
eventid | time | device | location | value | only_nulls
---------+------+--------+----------+-------+------------
(0 rows)
explain (costs off) select * from nullvalues where location is null;
QUERY PLAN
----------------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_location_idx on _hyper_5_15_chunk
Index Cond: (location IS NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_device_location_idx on _hyper_5_16_chunk
Index Cond: (location IS NULL)
(5 rows)
select * from nullvalues where location is null
except
select * from location_null;
eventid | time | device | location | value | only_nulls
---------+------+--------+----------+-------+------------
(0 rows)
explain (costs off) select * from nullvalues where only_nulls is null;
QUERY PLAN
-----------------------------------------------------------------------------------------
Append
-> Index Scan using _hyper_5_15_chunk_nullvalues_only_nulls_idx on _hyper_5_15_chunk
Index Cond: (only_nulls IS NULL)
-> Index Scan using _hyper_5_16_chunk_nullvalues_only_nulls_idx on _hyper_5_16_chunk
Index Cond: (only_nulls IS NULL)
(5 rows)
select * from nullvalues where only_nulls is null
except
select * from only_nulls_null;
eventid | time | device | location | value | only_nulls
---------+------+--------+----------+-------+------------
(0 rows)