From e0a3e309a77e0d4c50b051308ece4e7e6faa49f0 Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com>
Date: Wed, 13 Dec 2023 18:25:06 +0100
Subject: [PATCH] Fix integer overflow in batch sorted merge costs

Also remove a leftover debug print.
---
 .../nodes/decompress_chunk/decompress_chunk.c |  7 +------
 .../compression_sorted_merge_distinct.out     | 17 +++++++++++++++++
 .../sql/compression_sorted_merge_distinct.sql |  9 +++++++++
 3 files changed, 27 insertions(+), 6 deletions(-)

diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.c b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
index 94ba4bfd2..1e4f10b4a 100644
--- a/tsl/src/nodes/decompress_chunk/decompress_chunk.c
+++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -411,15 +411,10 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
 	 * we often read a small subset of columns in analytical queries. The
 	 * compressed chunk is never projected so we can't use it for that.
 	 */
-	const double work_mem_bytes = work_mem * 1024;
+	const double work_mem_bytes = work_mem * (double) 1024;
 	const double needed_memory_bytes = open_batches_clamped * DECOMPRESS_CHUNK_BATCH_SIZE *
 									   dcpath->custom_path.path.pathtarget->width;
 
-	fprintf(stderr,
-			"open batches %lf, needed_bytes %lf\n",
-			open_batches_clamped,
-			needed_memory_bytes);
-
 	/*
 	 * Next, calculate the cost penalty. It is a smooth step, starting at 75% of
 	 * work_mem, and ending at 125%. We want to effectively disable this plan
diff --git a/tsl/test/expected/compression_sorted_merge_distinct.out b/tsl/test/expected/compression_sorted_merge_distinct.out
index 39f6cf508..f98aeff70 100644
--- a/tsl/test/expected/compression_sorted_merge_distinct.out
+++ b/tsl/test/expected/compression_sorted_merge_distinct.out
@@ -116,3 +116,20 @@ explain (costs off) select * from t where high_card < 10 order by ts;
 (5 rows)
 
 reset work_mem;
+-- Test for large values of the memory limit in bytes that don't fit into an int.
+-- Note that on i386 the max value is 2GB, which is not enough to trigger the
+-- overflow we had on 64-bit systems, so we have to use different values based
+-- on the architecture.
+select least(4194304, max_val::bigint) large_work_mem from pg_settings where name = 'work_mem' \gset
+set work_mem to :large_work_mem;
+explain (costs off) select * from t where high_card < 10 order by ts;
+                                                         QUERY PLAN
+--------------------------------------------------------------------------------------------------------------------------
+ Custom Scan (DecompressChunk) on _hyper_1_1_chunk
+   ->  Sort
+         Sort Key: compress_hyper_2_2_chunk._ts_meta_min_1
+         ->  Index Scan using compress_hyper_2_2_chunk__compressed_hypertable_2_low_card_high on compress_hyper_2_2_chunk
+               Index Cond: (high_card < 10)
+(5 rows)
+
+reset work_mem;
diff --git a/tsl/test/sql/compression_sorted_merge_distinct.sql b/tsl/test/sql/compression_sorted_merge_distinct.sql
index c8db36d14..5e8ee2004 100644
--- a/tsl/test/sql/compression_sorted_merge_distinct.sql
+++ b/tsl/test/sql/compression_sorted_merge_distinct.sql
@@ -43,3 +43,12 @@ explain (costs off) select * from t where high_card < 500 order by ts;
 set work_mem to 64;
 explain (costs off) select * from t where high_card < 10 order by ts;
 reset work_mem;
+
+-- Test for large values of the memory limit in bytes that don't fit into an int.
+-- Note that on i386 the max value is 2GB, which is not enough to trigger the
+-- overflow we had on 64-bit systems, so we have to use different values based
+-- on the architecture.
+select least(4194304, max_val::bigint) large_work_mem from pg_settings where name = 'work_mem' \gset
+set work_mem to :large_work_mem;
+explain (costs off) select * from t where high_card < 10 order by ts;
+reset work_mem;
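
For context on the overflow itself: work_mem is an integer GUC measured in kilobytes,
so the old expression work_mem * 1024 multiplied two ints, and for settings above
2097151 kB (2 GB worth of bytes) the product overflowed before it was widened to
double. Casting one operand to double first performs the multiplication in floating
point, where these values are comfortably in range. The following is a minimal
standalone sketch of the difference; it is not part of the patch, and the local
work_mem variable merely simulates the GUC:

    #include <limits.h>
    #include <stdio.h>

    int
    main(void)
    {
    	/* Simulate work_mem set just past 2 GB worth of kilobytes. */
    	int work_mem = INT_MAX / 1024 + 1; /* 2097152 kB */

    	/* Old code: int * int overflows (undefined behavior in C); in
    	 * practice it typically wraps to a negative value before the
    	 * assignment widens the result to double. */
    	const double broken = work_mem * 1024;

    	/* Fixed code: the cast promotes the multiplication to double,
    	 * so no integer overflow can occur. */
    	const double fixed = work_mem * (double) 1024;

    	printf("broken: %.0f, fixed: %.0f\n", broken, fixed);
    	return 0;
    }

The new regression test exercises exactly this boundary: least(4194304, max_val::bigint)
sets work_mem to 4 GB where the platform allows it, and clamps to the platform maximum
on i386, where the GUC cannot exceed 2 GB in the first place.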