mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-15 18:13:18 +08:00
Disable default indexscan for compression
The current code would always prefer an indexscan over a tuplesort while scanning the rows of the chunk being compressed. The thinking was that the indexscan would let us avoid doing a sort. The theory looked good on paper, but various cloud customer reports have shown that the random access of heap pages via the indexscan is typically more expensive than doing the tuplesort. So we disable the default indexscan until we see better use cases that warrant enabling it again for all scenarios. Users with specific use cases can still enable the timescaledb.enable_compression_indexscan GUC manually if desired.
This commit is contained in:
parent
2265c18baf
commit
4f912f77ca
@ -71,7 +71,7 @@ TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge = true;
|
||||
bool ts_guc_enable_async_append = true;
|
||||
bool ts_guc_enable_chunkwise_aggregation = true;
|
||||
bool ts_guc_enable_vectorized_aggregation = true;
|
||||
TSDLLEXPORT bool ts_guc_enable_compression_indexscan = true;
|
||||
TSDLLEXPORT bool ts_guc_enable_compression_indexscan = false;
|
||||
TSDLLEXPORT bool ts_guc_enable_bulk_decompression = true;
|
||||
TSDLLEXPORT int ts_guc_bgw_log_level = WARNING;
|
||||
TSDLLEXPORT bool ts_guc_enable_skip_scan = true;
|
||||
@ -506,7 +506,7 @@ _guc_init(void)
|
||||
"Enable compression to take indexscan path",
|
||||
"Enable indexscan during compression, if matching index is found",
|
||||
&ts_guc_enable_compression_indexscan,
|
||||
true,
|
||||
false,
|
||||
PGC_USERSET,
|
||||
0,
|
||||
NULL,
|
||||
|
@ -347,7 +347,7 @@ LOG: statement: CALL run_job(1004);
|
||||
LOG: acquiring locks for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: locks acquired for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: new compressed chunk "_timescaledb_internal.compress_hyper_13_61_chunk" created
|
||||
LOG: using index "_hyper_11_40_chunk_conditions_time_idx" to scan rows for compression
|
||||
LOG: using tuplesort to scan rows from "_hyper_11_40_chunk" for compression
|
||||
LOG: finished compressing 144 rows from "_hyper_11_40_chunk"
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
|
@ -347,7 +347,7 @@ LOG: statement: CALL run_job(1004);
|
||||
LOG: acquiring locks for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: locks acquired for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: new compressed chunk "_timescaledb_internal.compress_hyper_13_61_chunk" created
|
||||
LOG: using index "_hyper_11_40_chunk_conditions_time_idx" to scan rows for compression
|
||||
LOG: using tuplesort to scan rows from "_hyper_11_40_chunk" for compression
|
||||
LOG: finished compressing 144 rows from "_hyper_11_40_chunk"
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
|
@ -347,7 +347,7 @@ LOG: statement: CALL run_job(1004);
|
||||
LOG: acquiring locks for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: locks acquired for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: new compressed chunk "_timescaledb_internal.compress_hyper_13_61_chunk" created
|
||||
LOG: using index "_hyper_11_40_chunk_conditions_time_idx" to scan rows for compression
|
||||
LOG: using tuplesort to scan rows from "_hyper_11_40_chunk" for compression
|
||||
LOG: finished compressing 144 rows from "_hyper_11_40_chunk"
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
|
@ -347,7 +347,7 @@ LOG: statement: CALL run_job(1004);
|
||||
LOG: acquiring locks for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: locks acquired for compressing "_timescaledb_internal._hyper_11_40_chunk"
|
||||
LOG: new compressed chunk "_timescaledb_internal.compress_hyper_13_61_chunk" created
|
||||
LOG: using index "_hyper_11_40_chunk_conditions_time_idx" to scan rows for compression
|
||||
LOG: using tuplesort to scan rows from "_hyper_11_40_chunk" for compression
|
||||
LOG: finished compressing 144 rows from "_hyper_11_40_chunk"
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
|
@ -37,6 +37,8 @@ generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-28 1:00', '1 hour') AS
|
||||
generate_series(1, 100, 1 ) AS g2(id)
|
||||
ORDER BY
|
||||
time;
|
||||
--Test with indexscan enabled
|
||||
SET timescaledb.enable_compression_indexscan = 'ON';
|
||||
--Test Set 1.1 [ Index(ASC, Null_First), Compression(ASC, Null_First) ]
|
||||
CREATE INDEX idx_asc_null_first ON tab1(id, time ASC NULLS FIRST);
|
||||
ALTER TABLE tab1 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
@ -584,8 +586,14 @@ SELECT decompress_chunk(show_chunks('tab1'));
|
||||
|
||||
DROP INDEX idx_desc_null_last;
|
||||
--Test Set 5 GUC SET timescaledb.enable_compression_indexscan
|
||||
-- Default this flag will be true.
|
||||
SET timescaledb.enable_compression_indexscan = 'OFF';
|
||||
-- Default this flag will be false.
|
||||
RESET timescaledb.enable_compression_indexscan;
|
||||
SHOW timescaledb.enable_compression_indexscan;
|
||||
timescaledb.enable_compression_indexscan
|
||||
------------------------------------------
|
||||
off
|
||||
(1 row)
|
||||
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
INFO: compress_chunk_tuplesort_start
|
||||
INFO: compress_chunk_tuplesort_start
|
||||
@ -608,6 +616,7 @@ SELECT decompress_chunk(show_chunks('tab1'));
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
--Test with this guc enabled
|
||||
SET timescaledb.enable_compression_indexscan = 'ON';
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
INFO: compress_chunk_indexscan_start matched index "_hyper_1_1_chunk_tab1_time_idx"
|
||||
@ -637,7 +646,7 @@ CREATE INDEX idx_asc_null_first ON tab1(id, time ASC NULLS FIRST);
|
||||
CREATE INDEX idx2_asc_null_first ON tab2(id, time ASC NULLS FIRST);
|
||||
ALTER TABLE tab1 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
ALTER TABLE tab2 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
SET timescaledb.enable_compression_indexscan = 'OFF';
|
||||
RESET timescaledb.enable_compression_indexscan;
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
INFO: compress_chunk_tuplesort_start
|
||||
INFO: compress_chunk_tuplesort_start
|
||||
|
@ -35,6 +35,8 @@ generate_series(1, 100, 1 ) AS g2(id)
|
||||
ORDER BY
|
||||
time;
|
||||
|
||||
--Test with indexscan enabled
|
||||
SET timescaledb.enable_compression_indexscan = 'ON';
|
||||
--Test Set 1.1 [ Index(ASC, Null_First), Compression(ASC, Null_First) ]
|
||||
CREATE INDEX idx_asc_null_first ON tab1(id, time ASC NULLS FIRST);
|
||||
ALTER TABLE tab1 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
@ -163,10 +165,12 @@ SELECT decompress_chunk(show_chunks('tab1'));
|
||||
DROP INDEX idx_desc_null_last;
|
||||
|
||||
--Test Set 5 GUC SET timescaledb.enable_compression_indexscan
|
||||
-- Default this flag will be true.
|
||||
SET timescaledb.enable_compression_indexscan = 'OFF';
|
||||
-- Default this flag will be false.
|
||||
RESET timescaledb.enable_compression_indexscan;
|
||||
SHOW timescaledb.enable_compression_indexscan;
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
SELECT decompress_chunk(show_chunks('tab1'));
|
||||
--Test with this guc enabled
|
||||
SET timescaledb.enable_compression_indexscan = 'ON';
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
SELECT decompress_chunk(show_chunks('tab1'));
|
||||
@ -177,7 +181,7 @@ CREATE INDEX idx_asc_null_first ON tab1(id, time ASC NULLS FIRST);
|
||||
CREATE INDEX idx2_asc_null_first ON tab2(id, time ASC NULLS FIRST);
|
||||
ALTER TABLE tab1 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
ALTER TABLE tab2 SET(timescaledb.compress, timescaledb.compress_segmentby = 'id', timescaledb.compress_orderby = 'time NULLS FIRST');
|
||||
SET timescaledb.enable_compression_indexscan = 'OFF';
|
||||
RESET timescaledb.enable_compression_indexscan;
|
||||
SELECT compress_chunk(show_chunks('tab1'));
|
||||
SET timescaledb.enable_compression_indexscan = 'ON';
|
||||
SELECT compress_chunk(show_chunks('tab2'));
|
||||
|
Loading…
x
Reference in New Issue
Block a user