Create indexes on segmentby columns

This commit creates indexes on all segmentby columns of the compressed
hypertable: for each segmentby column, a btree index on (column,
_ts_meta_sequence_num) is built, so queries on compressed chunks can locate
and order rows by segment.
Joshua Lockerman 2019-09-25 14:16:28 -04:00 committed by Matvey Arye
parent 46b4a9d3c2
commit 64f56d5088
12 changed files with 252 additions and 13 deletions
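
For illustration, with segmentby columns a and b (as in the first test output
below), the net effect of this change is equivalent to the following DDL
against the internal compressed table. This is only a sketch: the indexes are
created internally, and their generated names are reported via NOTICE.

-- Equivalent DDL sketch for
--   ALTER TABLE foo SET (timescaledb.compress, timescaledb.compress_segmentby = 'a,b', ...);
-- one btree index per segmentby column, paired with the sequence-number
-- metadata column of the compressed table.
CREATE INDEX ON _timescaledb_internal._compressed_hypertable_2 USING BTREE (a, _ts_meta_sequence_num);
CREATE INDEX ON _timescaledb_internal._compressed_hypertable_2 USING BTREE (b, _ts_meta_sequence_num);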

View File

@ -9,7 +9,9 @@
#include <access/tupdesc.h>
#include <access/xact.h>
#include <catalog/pg_type.h>
#include <catalog/index.h>
#include <catalog/toasting.h>
#include <commands/defrem.h>
#include <commands/tablecmds.h>
#include <commands/tablespace.h>
#include <miscadmin.h>
@ -17,6 +19,7 @@
#include <storage/lmgr.h>
#include <utils/builtins.h>
#include <utils/rel.h>
#include <utils/syscache.h>
#include "catalog.h"
#include "create.h"
@ -337,6 +340,60 @@ compresscolinfo_add_catalog_entries(CompressColInfo *compress_cols, int32 htid)
    heap_close(rel, NoLock); /*lock will be released at end of transaction only*/
}

static void
create_compressed_table_indexes(Oid compresstable_relid, CompressColInfo *compress_cols)
{
    Cache *hcache = ts_hypertable_cache_pin();
    Hypertable *ht = ts_hypertable_cache_get_entry(hcache, compresstable_relid);
    IndexStmt stmt = {
        .type = T_IndexStmt,
        .accessMethod = DEFAULT_INDEX_TYPE,
        .idxname = NULL,
        .relation = makeRangeVar(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name), 0),
        .tableSpace = get_tablespace_name(get_rel_tablespace(ht->main_table_relid)),
    };
    IndexElem sequence_num_elem = {
        .type = T_IndexElem,
        .name = COMPRESSION_COLUMN_METADATA_SEQUENCE_NUM_NAME,
    };
    int i;
    for (i = 0; i < compress_cols->numcols; i++)
    {
        NameData index_name;
        ObjectAddress index_addr;
        HeapTuple index_tuple;
        FormData_hypertable_compression *col = &compress_cols->col_meta[i];
        IndexElem segment_elem = { .type = T_IndexElem, .name = NameStr(col->attname) };

        if (col->segmentby_column_index <= 0)
            continue;

        stmt.indexParams = list_make2(&segment_elem, &sequence_num_elem);
        index_addr = DefineIndexCompat(ht->main_table_relid,
                                       &stmt,
                                       InvalidOid,
                                       false, /* is alter table */
                                       false, /* check rights */
                                       false, /* skip_build */
                                       false); /* quiet */
        index_tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(index_addr.objectId));

        if (!HeapTupleIsValid(index_tuple))
            elog(ERROR, "cache lookup failed for index relid %d", index_addr.objectId);
        index_name = ((Form_pg_class) GETSTRUCT(index_tuple))->relname;
        elog(NOTICE,
             "adding index %s ON %s.%s USING BTREE(%s, %s)",
             NameStr(index_name),
             NameStr(ht->fd.schema_name),
             NameStr(ht->fd.table_name),
             NameStr(col->attname),
             COMPRESSION_COLUMN_METADATA_SEQUENCE_NUM_NAME);
        ReleaseSysCache(index_tuple);
    }
    ts_cache_release(hcache);
}

static int32
create_compression_table(Oid owner, CompressColInfo *compress_cols)
{
@ -379,6 +436,7 @@ create_compression_table(Oid owner, CompressColInfo *compress_cols)
    ts_catalog_restore_user(&sec_ctx);
    modify_compressed_toast_table_storage(compress_cols, compress_relid);
    ts_hypertable_create_compressed(compress_relid, compress_hypertable_id);
    create_compressed_table_indexes(compress_relid, compress_cols);
    return compress_hypertable_id;
}
@ -566,6 +624,7 @@ tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
    compress_htid = create_compression_table(ownerid, &compress_cols);
    ts_hypertable_set_compressed_id(ht, compress_htid);
    compresscolinfo_add_catalog_entries(&compress_cols, ht->fd.id);
    /* do not release any locks, will get released by xact end */
    return true;
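
After compression is enabled, the new indexes can be inspected with a catalog
query; below is a minimal sketch using the standard pg_indexes view (the
internal table name is taken from the test output below; in practice it is
looked up via _timescaledb_catalog.hypertable.compressed_hypertable_id).

-- List the indexes created on the internal compressed table
SELECT indexname, indexdef
FROM pg_indexes
WHERE schemaname = '_timescaledb_internal'
  AND tablename = '_compressed_hypertable_2';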

View File

@ -38,6 +38,8 @@ insert into foo values( 10 , 10 , 20, 120);
insert into foo values( 20 , 11 , 20, 13);
insert into foo values( 30 , 12 , 20, 14);
alter table foo set (timescaledb.compress, timescaledb.compress_segmentby = 'a,b', timescaledb.compress_orderby = 'c desc, d asc nulls last');
NOTICE: adding index _compressed_hypertable_2_a__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(a, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_2_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(b, _ts_meta_sequence_num)
select id, schema_name, table_name, compressed, compressed_hypertable_id from
_timescaledb_catalog.hypertable order by id;
id | schema_name | table_name | compressed | compressed_hypertable_id
@ -80,9 +82,9 @@ uncompressed_index_bytes | 32 kB
uncompressed_toast_bytes | 0 bytes
uncompressed_total_bytes | 40 kB
compressed_heap_bytes | 8192 bytes
compressed_index_bytes | 0 bytes
compressed_index_bytes | 32 kB
compressed_toast_bytes | 8192 bytes
compressed_total_bytes | 16 kB
compressed_total_bytes | 48 kB
\x
select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
@ -102,7 +104,7 @@ uncompressed_toast_size | 0
uncompressed_index_size | 32768
compressed_heap_size | 8192
compressed_toast_size | 8192
compressed_index_size | 0
compressed_index_size | 32768
-[ RECORD 2 ]-----------+------
chunk_id | 2
compressed_chunk_id | 5
@ -111,7 +113,7 @@ uncompressed_toast_size | 0
uncompressed_index_size | 32768
compressed_heap_size | 8192
compressed_toast_size | 8192
compressed_index_size | 0
compressed_index_size | 32768
\x
select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table
@ -219,6 +221,7 @@ select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::
(1 row)
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
NOTICE: adding index _compressed_hypertable_6_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(location, _ts_meta_sequence_num)
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
insert into conditions
@ -332,9 +335,9 @@ uncompressed_index_bytes | 16 kB
uncompressed_toast_bytes | 8192 bytes
uncompressed_total_bytes | 32 kB
compressed_heap_bytes | 8192 bytes
compressed_index_bytes | 0 bytes
compressed_index_bytes | 16 kB
compressed_toast_bytes | 8192 bytes
compressed_total_bytes | 16 kB
compressed_total_bytes | 32 kB
-[ RECORD 2 ]------------+----------------------------------------
hypertable_name | conditions
chunk_name | _timescaledb_internal._hyper_5_13_chunk
@ -343,9 +346,9 @@ uncompressed_index_bytes | 16 kB
uncompressed_toast_bytes | 8192 bytes
uncompressed_total_bytes | 32 kB
compressed_heap_bytes | 8192 bytes
compressed_index_bytes | 0 bytes
compressed_index_bytes | 16 kB
compressed_toast_bytes | 8192 bytes
compressed_total_bytes | 16 kB
compressed_total_bytes | 32 kB
select * from timescaledb_information.compressed_hypertable_size
order by hypertable_name;
@ -356,9 +359,9 @@ uncompressed_index_bytes | 32 kB
uncompressed_toast_bytes | 0 bytes
uncompressed_total_bytes | 40 kB
compressed_heap_bytes | 8192 bytes
compressed_index_bytes | 0 bytes
compressed_index_bytes | 32 kB
compressed_toast_bytes | 8192 bytes
compressed_total_bytes | 16 kB
compressed_total_bytes | 48 kB
-[ RECORD 2 ]------------+-----------
hypertable_name | conditions
uncompressed_heap_bytes | 16 kB
@ -366,9 +369,9 @@ uncompressed_index_bytes | 32 kB
uncompressed_toast_bytes | 16 kB
uncompressed_total_bytes | 64 kB
compressed_heap_bytes | 16 kB
compressed_index_bytes | 0 bytes
compressed_index_bytes | 32 kB
compressed_toast_bytes | 16 kB
compressed_total_bytes | 32 kB
compressed_total_bytes | 64 kB
\x
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
@ -485,6 +488,8 @@ alter table test_collation set (timescaledb.compress, timescaledb.compress_segme
ERROR: unable to parse the compress_orderby option 'val_1 COLLATE "POSIX", val2, time'
\set ON_ERROR_STOP 1
alter table test_collation set (timescaledb.compress, timescaledb.compress_segmentby='device_id, device_id_2', timescaledb.compress_orderby = 'val_1, val_2, time');
NOTICE: adding index _compressed_hypertable_10_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_10 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_10_device_id_2__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_10 USING BTREE(device_id_2, _ts_meta_sequence_num)
insert into test_collation
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_1', 'device_3', gen_rand_minstd(), gen_rand_minstd();
insert into test_collation

View File

@ -41,6 +41,7 @@ ERROR: can add compress_chunks policy only on hypertables with compression enab
-- TEST2 --
--add a policy to compress chunks --
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
NOTICE: adding index _compressed_hypertable_2_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(location, _ts_meta_sequence_num)
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
select add_compress_chunks_policy('conditions', '60d'::interval);
@ -88,7 +89,7 @@ select test_compress_chunks_policy(:compressjob_id);
select hypertable_name, chunk_name, uncompressed_total_bytes, compressed_total_bytes from timescaledb_information.compressed_chunk_size order by chunk_name;
hypertable_name | chunk_name | uncompressed_total_bytes | compressed_total_bytes
-----------------+----------------------------------------+--------------------------+------------------------
conditions | _timescaledb_internal._hyper_1_1_chunk | 32 kB | 16 kB
conditions | _timescaledb_internal._hyper_1_1_chunk | 32 kB | 32 kB
(1 row)
-- TEST 4 --

View File

@ -33,7 +33,11 @@ ERROR: must set the 'compress' boolean option when setting compression options
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c');
ERROR: cannot use the same column c in compress_orderby and compress_segmentby
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd DESC');
NOTICE: adding index _compressed_hypertable_4_bacB toD__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_4 USING BTREE(bacB toD, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_4_c__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_4 USING BTREE(c, _ts_meta_sequence_num)
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd');
NOTICE: adding index _compressed_hypertable_5_bacB toD__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(bacB toD, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_5_c__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(c, _ts_meta_sequence_num)
--note that the time column "a" should be added to the end of the orderby list
select * from _timescaledb_catalog.hypertable_compression order by attname;
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
@ -45,6 +49,8 @@ select * from _timescaledb_catalog.hypertable_compression order by attname;
(4 rows)
ALTER TABLE foo3 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd DeSc NullS lAsT');
NOTICE: adding index _compressed_hypertable_6_bacB toD__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(bacB toD, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_6_c__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(c, _ts_meta_sequence_num)
-- Negative test cases ---
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c');
ERROR: need to specify both compress_orderby and compress_groupby if altering compression
@ -144,6 +150,7 @@ FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch
ERROR: chunk "_hyper_8_2_chunk" is not a compressed
--test changing the segment by columns
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b');
NOTICE: adding index _compressed_hypertable_10_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_10 USING BTREE(b, _ts_meta_sequence_num)
select ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_NAME"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1 \gset
--should succeed
@ -173,6 +180,7 @@ FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a', timescaledb.compress_segmentby = 'b');
NOTICE: adding index _compressed_hypertable_11_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_11 USING BTREE(b, _ts_meta_sequence_num)
select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _timescaledb_catalog.hypertable h on (h.id = hc.hypertable_id) where h.table_name = 'foo' order by attname;
hypertable_id | attname | compression_algorithm_id | segmentby_column_index | orderby_column_index | orderby_asc | orderby_nullsfirst
---------------+---------+--------------------------+------------------------+----------------------+-------------+--------------------

View File

@ -204,6 +204,7 @@ select create_hypertable( 'test4', 'timec', chunk_time_interval=> '1 year'::inte
(1 row)
alter table test4 set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec');
NOTICE: adding index _compressed_hypertable_6_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(location, _ts_meta_sequence_num)
insert into test4
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-31 00:00'::timestamp, '1 day'), 'NYC', 'klick', 55, 75;
insert into test4

View File

@ -34,6 +34,7 @@ ERROR: permission denied for relation conditions
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
--now owner tries and succeeds --
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec');
NOTICE: adding index _compressed_hypertable_2_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(location, _ts_meta_sequence_num)
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
--try modifying compress properties --

View File

@ -34,6 +34,7 @@ ERROR: permission denied for table conditions
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
--now owner tries and succeeds --
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec');
NOTICE: adding index _compressed_hypertable_2_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(location, _ts_meta_sequence_num)
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
--try modifying compress properties --

View File

@ -34,6 +34,7 @@ ERROR: permission denied for relation conditions
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
--now owner tries and succeeds --
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'timec');
NOTICE: adding index _compressed_hypertable_2_location__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(location, _ts_meta_sequence_num)
insert into conditions
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
--try modifying compress properties --

View File

@ -49,6 +49,8 @@ psql:include/transparent_decompression_query.sql:261: INFO: new materialization
psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-- compress first and last chunk on the hypertable
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_5_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');
compress_chunk
----------------
@ -64,6 +66,8 @@ SELECT compress_chunk('_timescaledb_internal._hyper_1_3_chunk');
-- compress some chunks on space partitioned hypertable
-- we compress all chunks of first time slice, none of second, and 2 of the last time slice
ALTER TABLE metrics_space SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_6_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_6_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_2_4_chunk');
compress_chunk
----------------
@ -5326,6 +5330,8 @@ SELECT create_hypertable('metrics_ordered','time');
(1 row)
ALTER TABLE metrics_ordered SET (timescaledb.compress, timescaledb.compress_orderby='time DESC',timescaledb.compress_segmentby='device_id,device_id_peer');
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id, _ts_meta_sequence_num)
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id_peer__ts_meta_sequence__idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id_peer, _ts_meta_sequence_num)
INSERT INTO metrics_ordered SELECT * FROM metrics;
CREATE INDEX ON metrics_ordered(device_id,device_id_peer,time);
CREATE INDEX ON metrics_ordered(device_id,time);
@ -5580,5 +5586,50 @@ EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id;
-> Seq Scan on _hyper_2_12_chunk
(17 rows)
SET enable_seqscan TO false;
-- should order compressed chunks using index
-- (we only EXPLAIN here b/c the resulting order is too inconsistent)
EXPLAIN (costs off) SELECT * FROM metrics WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Sort
   Sort Key: _hyper_1_2_chunk.device_id
   ->  Append
         ->  Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk
               Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
         ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(9 rows)
EXPLAIN (costs off) SELECT * FROM metrics_space WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Merge Append
   Sort Key: _hyper_2_8_chunk.device_id
   ->  Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_10_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_11_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(22 rows)
SET enable_seqscan TO true;
-- diff compressed and uncompressed results
:DIFF_CMD

View File

@ -49,6 +49,8 @@ psql:include/transparent_decompression_query.sql:261: INFO: new materialization
psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-- compress first and last chunk on the hypertable
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_5_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');
compress_chunk
----------------
@ -64,6 +66,8 @@ SELECT compress_chunk('_timescaledb_internal._hyper_1_3_chunk');
-- compress some chunks on space partitioned hypertable
-- we compress all chunks of first time slice, none of second, and 2 of the last time slice
ALTER TABLE metrics_space SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_6_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_6_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_2_4_chunk');
compress_chunk
----------------
@ -5430,6 +5434,8 @@ SELECT create_hypertable('metrics_ordered','time');
(1 row)
ALTER TABLE metrics_ordered SET (timescaledb.compress, timescaledb.compress_orderby='time DESC',timescaledb.compress_segmentby='device_id,device_id_peer');
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id, _ts_meta_sequence_num)
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id_peer__ts_meta_sequence__idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id_peer, _ts_meta_sequence_num)
INSERT INTO metrics_ordered SELECT * FROM metrics;
CREATE INDEX ON metrics_ordered(device_id,device_id_peer,time);
CREATE INDEX ON metrics_ordered(device_id,time);
@ -5684,5 +5690,50 @@ EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id;
-> Seq Scan on _hyper_2_12_chunk
(17 rows)
SET enable_seqscan TO false;
-- should order compressed chunks using index
-- (we only EXPLAIN here b/c the resulting order is too inconsistent)
EXPLAIN (costs off) SELECT * FROM metrics WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Sort
   Sort Key: _hyper_1_2_chunk.device_id
   ->  Append
         ->  Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk
               Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
         ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(9 rows)
EXPLAIN (costs off) SELECT * FROM metrics_space WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Merge Append
   Sort Key: _hyper_2_8_chunk.device_id
   ->  Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_10_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_11_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(22 rows)
SET enable_seqscan TO true;
-- diff compressed and uncompressed results
:DIFF_CMD

View File

@ -49,6 +49,8 @@ psql:include/transparent_decompression_query.sql:261: INFO: new materialization
psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-- compress first and last chunk on the hypertable
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_5_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');
compress_chunk
----------------
@ -64,6 +66,8 @@ SELECT compress_chunk('_timescaledb_internal._hyper_1_3_chunk');
-- compress some chunks on space partitioned hypertable
-- we compress all chunks of first time slice, none of second, and 2 of the last time slice
ALTER TABLE metrics_space SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_6_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_6_device_id_peer__ts_meta_sequence_n_idx ON _timescaledb_internal._compressed_hypertable_6 USING BTREE(device_id_peer, _ts_meta_sequence_num)
SELECT compress_chunk('_timescaledb_internal._hyper_2_4_chunk');
compress_chunk
----------------
@ -4805,6 +4809,8 @@ SELECT create_hypertable('metrics_ordered','time');
(1 row)
ALTER TABLE metrics_ordered SET (timescaledb.compress, timescaledb.compress_orderby='time DESC',timescaledb.compress_segmentby='device_id,device_id_peer');
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id, _ts_meta_sequence_num)
psql:include/transparent_decompression_ordered.sql:10: NOTICE: adding index _compressed_hypertable_12_device_id_peer__ts_meta_sequence__idx ON _timescaledb_internal._compressed_hypertable_12 USING BTREE(device_id_peer, _ts_meta_sequence_num)
INSERT INTO metrics_ordered SELECT * FROM metrics;
CREATE INDEX ON metrics_ordered(device_id,device_id_peer,time);
CREATE INDEX ON metrics_ordered(device_id,time);
@ -5043,5 +5049,50 @@ EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id;
-> Seq Scan on _hyper_2_12_chunk
(17 rows)
SET enable_seqscan TO false;
-- should order compressed chunks using index
-- (we only EXPLAIN here b/c the resulting order is too inconsistent)
EXPLAIN (costs off) SELECT * FROM metrics WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Sort
   Sort Key: _hyper_1_2_chunk.device_id
   ->  Append
         ->  Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk
               Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
         ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(9 rows)
EXPLAIN (costs off) SELECT * FROM metrics_space WHERE time > '2000-01-08' ORDER BY device_id;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Merge Append
   Sort Key: _hyper_2_8_chunk.device_id
   ->  Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_10_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Sort
         Sort Key: _hyper_2_11_chunk.device_id
         ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk
               Filter: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
               ->  Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk
                     Filter: (_timescaledb_internal.segment_meta_get_max(_ts_meta_min_max_3, NULL::timestamp with time zone) > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
   ->  Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk
         Index Cond: ("time" > 'Sat Jan 08 00:00:00 2000 PST'::timestamp with time zone)
(22 rows)
SET enable_seqscan TO true;
-- diff compressed and uncompressed results
:DIFF_CMD

View File

@ -136,5 +136,14 @@ SET max_parallel_workers_per_gather TO 4;
EXPLAIN (costs off) SELECT * FROM metrics ORDER BY time, device_id;
EXPLAIN (costs off) SELECT * FROM metrics_space ORDER BY time, device_id;
SET enable_seqscan TO false;
-- should order compressed chunks using index
-- (we only EXPLAIN here b/c the resulting order is too inconsistent)
EXPLAIN (costs off) SELECT * FROM metrics WHERE time > '2000-01-08' ORDER BY device_id;
EXPLAIN (costs off) SELECT * FROM metrics_space WHERE time > '2000-01-08' ORDER BY device_id;
SET enable_seqscan TO true;
-- diff compressed and uncompressed results
:DIFF_CMD