Fix integer overflow in batch sorted merge costs

Also remove a leftover debug print.
Alexander Kuzmenkov 2023-12-13 18:25:06 +01:00
parent 06867af966
commit e0a3e309a7
3 changed files with 27 additions and 6 deletions
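Background on the bug: work_mem is an int GUC measured in kilobytes, so the old expression work_mem * 1024 was evaluated in int arithmetic and overflowed for settings above INT_MAX / 1024 kilobytes, and only the already-overflowed result was widened to double. A minimal standalone C demonstration of the before/after behavior (illustrative only, not code from this commit):

#include <stdio.h>

int
main(void)
{
	int			work_mem = 4194304; /* 4 GB expressed in kilobytes */

	/*
	 * Before the fix: int-by-int product 4194304 * 1024 = 2^32 does not fit
	 * in a 32-bit int. Signed overflow is undefined behavior; on typical
	 * platforms it wraps to 0, and only then is converted to double.
	 */
	double		before_fix = work_mem * 1024;

	/* After the fix: work_mem is widened first, multiplication is in double. */
	double		after_fix = work_mem * (double) 1024.0;

	printf("before: %.0f, after: %.0f\n", before_fix, after_fix);
	return 0;
}

On a typical platform this prints "before: 0, after: 4294967296", i.e. the planner previously saw a memory limit of zero bytes for such settings.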


@@ -411,15 +411,10 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
 	 * we often read a small subset of columns in analytical queries. The
 	 * compressed chunk is never projected so we can't use it for that.
 	 */
-	const double work_mem_bytes = work_mem * 1024;
+	const double work_mem_bytes = work_mem * (double) 1024.0;
 	const double needed_memory_bytes = open_batches_clamped * DECOMPRESS_CHUNK_BATCH_SIZE *
 									   dcpath->custom_path.path.pathtarget->width;
-	fprintf(stderr,
-			"open batches %lf, needed_bytes %lf\n",
-			open_batches_clamped,
-			needed_memory_bytes);
 	/*
 	 * Next, calculate the cost penalty. It is a smooth step, starting at 75% of
 	 * work_mem, and ending at 125%. We want to effectively disable this plan
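The context comment above describes a smooth-step cost penalty between 75% and 125% of work_mem, but the commit shows only the surrounding lines. As an illustration, here is a minimal sketch of what such a penalty can look like, using the common 3x^2 - 2x^3 smoothstep polynomial; the exact polynomial and how the penalty feeds into the path cost are assumptions, not code from this repository:

/*
 * Illustrative sketch, not the code from this commit: a smoothstep that is
 * 0 below 0.75 * work_mem_bytes and 1 above 1.25 * work_mem_bytes.
 */
static double
memory_penalty_fraction(double needed_memory_bytes, double work_mem_bytes)
{
	const double start = 0.75 * work_mem_bytes;
	const double end = 1.25 * work_mem_bytes;
	double		x;

	if (needed_memory_bytes <= start)
		return 0.0;
	if (needed_memory_bytes >= end)
		return 1.0;

	x = (needed_memory_bytes - start) / (end - start);
	return x * x * (3.0 - 2.0 * x);
}

With the pre-fix overflow wrapping work_mem_bytes to 0, any positive memory estimate would land past the end of such a step, which is consistent with the plan being unnecessarily penalized at very large work_mem settings.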


@@ -116,3 +116,20 @@ explain (costs off) select * from t where high_card < 10 order by ts;
 (5 rows)
 reset work_mem;
+-- Test for large values of memory limit bytes that don't fit into an int.
+-- Note that on i386 the max value is 2GB which is not enough to trigger the
+-- overflow we had on 64-bit systems, so we have to use different values based
+-- on the architecture.
+select least(4194304, max_val::bigint) large_work_mem from pg_settings where name = 'work_mem' \gset
+set work_mem to :large_work_mem;
+explain (costs off) select * from t where high_card < 10 order by ts;
+                                                         QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------
+ Custom Scan (DecompressChunk) on _hyper_1_1_chunk
+   ->  Sort
+         Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1
+         ->  Index Scan using compress_hyper_2_2_chunk__compressed_hypertable_2_low_card_high on compress_hyper_2_2_chunk
+               Index Cond: (high_card < 10)
+(5 rows)
+reset work_mem;
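The value 4194304 is chosen precisely to trigger the pre-fix overflow: 4194304 kB * 1024 = 4294967296 bytes = 2^32, which exceeds INT_MAX (2147483647). On i386 the maximum work_mem is about 2GB, as the comment above notes, which is too small to reach that product, so least(4194304, max_val::bigint) clamps the setting to whatever the platform allows while still exercising the overflow path on 64-bit systems.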


@@ -43,3 +43,12 @@ explain (costs off) select * from t where high_card < 500 order by ts;
 set work_mem to 64;
 explain (costs off) select * from t where high_card < 10 order by ts;
 reset work_mem;
+-- Test for large values of memory limit bytes that don't fit into an int.
+-- Note that on i386 the max value is 2GB which is not enough to trigger the
+-- overflow we had on 64-bit systems, so we have to use different values based
+-- on the architecture.
+select least(4194304, max_val::bigint) large_work_mem from pg_settings where name = 'work_mem' \gset
+set work_mem to :large_work_mem;
+explain (costs off) select * from t where high_card < 10 order by ts;
+reset work_mem;