Do not produce partial aggregation plans for the only chunk

A Partial/Finalize aggregation pair over a single chunk only adds executor overhead: a plain Aggregate over that chunk computes the same result. Skip the per-chunk aggregation pushdown when the append has fewer than two subpaths.
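
A minimal way to see the effect (a sketch, assuming a hypothetical one-chunk
hypertable named `metrics`; the table and plan text are illustrative, only
create_hypertable() is real API — the expected-output diffs below show the
actual test cases):

    CREATE TABLE metrics(time timestamptz NOT NULL, value float);
    SELECT create_hypertable('metrics', 'time');
    INSERT INTO metrics VALUES ('2023-12-13 16:00:00+01', 1.0);

    EXPLAIN (costs off) SELECT sum(value) FROM metrics;
    -- Before this commit, pushdown produced a two-stage plan even here:
    --   Finalize Aggregate
    --     ->  Partial Aggregate
    --           ->  Seq Scan on _hyper_1_1_chunk
    -- After it, the single chunk gets a plain aggregate:
    --   Aggregate
    --     ->  Seq Scan on _hyper_1_1_chunk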
Alexander Kuzmenkov 2023-12-13 16:34:49 +01:00
parent e0a3e309a7
commit 9a4eb12eb6
16 changed files with 171 additions and 193 deletions


@@ -461,6 +461,15 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
+
 	/* Generate agg paths on top of the append children */
 	List *sorted_subpaths = NIL;
 	List *hashed_subpaths = NIL;
@@ -580,6 +589,14 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
 	/* Generate agg paths on top of the append children */
 	ListCell *lc;
 	List *sorted_subpaths = NIL;


@@ -77,116 +77,92 @@ SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.ena
 (5 rows)
 :PREFIX SELECT last(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 :PREFIX SELECT first(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 :PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
         Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 :PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
        Group Key: _hyper_1_1_chunk.gp
        Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 --check whole row
 :PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
        Group Key: _hyper_1_1_chunk.gp
        Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 --check toasted col
 :PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
        Group Key: _hyper_1_1_chunk.gp
        Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 :PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
        Group Key: _hyper_1_1_chunk.gp
        Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 :PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
 Sort (actual rows=2 loops=1)
   Sort Key: _hyper_1_1_chunk.gp
   Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
        Group Key: _hyper_1_1_chunk.gp
        Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 BEGIN;
 --check null value as last element
@@ -849,24 +825,18 @@ INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL);
 (5 rows)
 :PREFIX SELECT first(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 :PREFIX SELECT last(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 -- NULL values followed by non-NULL values
 INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1);


@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
 Finalize Aggregate
   ->  Gather
        Workers Planned: 2
-        ->  Result
-              One-Time Filter: (length(version()) > 0)
-              ->  Parallel Custom Scan (ChunkAppend) on test
-                    Chunks excluded during startup: 0
-                    ->  Partial Aggregate
-                          ->  Result
-                                One-Time Filter: (length(version()) > 0)
-                                ->  Parallel Seq Scan on _hyper_1_2_chunk
+        ->  Partial Aggregate
+              ->  Result
+                    One-Time Filter: (length(version()) > 0)
+                    ->  Parallel Custom Scan (ChunkAppend) on test
+                          Chunks excluded during startup: 0
+                          ->  Result
+                                One-Time Filter: (length(version()) > 0)
+                                ->  Parallel Seq Scan on _hyper_1_2_chunk


@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
 Finalize Aggregate
   ->  Gather
        Workers Planned: 2
-        ->  Result
-              One-Time Filter: (length(version()) > 0)
-              ->  Parallel Custom Scan (ChunkAppend) on test
-                    Chunks excluded during startup: 0
-                    ->  Partial Aggregate
-                          ->  Result
-                                One-Time Filter: (length(version()) > 0)
-                                ->  Parallel Seq Scan on _hyper_1_2_chunk
+        ->  Partial Aggregate
+              ->  Result
+                    One-Time Filter: (length(version()) > 0)
+                    ->  Parallel Custom Scan (ChunkAppend) on test
+                          Chunks excluded during startup: 0
+                          ->  Result
+                                One-Time Filter: (length(version()) > 0)
+                                ->  Parallel Seq Scan on _hyper_1_2_chunk


@@ -642,11 +642,11 @@ SET max_parallel_workers_per_gather TO 2;
 Finalize Aggregate
   ->  Gather
        Workers Planned: 2
-        ->  Result
-              One-Time Filter: (length(version()) > 0)
-              ->  Parallel Custom Scan (ChunkAppend) on test
-                    Chunks excluded during startup: 0
-                    ->  Partial Aggregate
-                          ->  Result
-                                One-Time Filter: (length(version()) > 0)
-                                ->  Parallel Seq Scan on _hyper_1_2_chunk
+        ->  Partial Aggregate
+              ->  Result
+                    One-Time Filter: (length(version()) > 0)
+                    ->  Parallel Custom Scan (ChunkAppend) on test
+                          Chunks excluded during startup: 0
+                          ->  Result
+                                One-Time Filter: (length(version()) > 0)
+                                ->  Parallel Seq Scan on _hyper_1_2_chunk


@@ -643,11 +643,11 @@ SET max_parallel_workers_per_gather TO 2;
 Finalize Aggregate
   ->  Gather
        Workers Planned: 2
-        ->  Result
-              One-Time Filter: (length(version()) > 0)
-              ->  Parallel Custom Scan (ChunkAppend) on test
-                    Chunks excluded during startup: 0
-                    ->  Partial Aggregate
-                          ->  Result
-                                One-Time Filter: (length(version()) > 0)
-                                ->  Parallel Seq Scan on _hyper_1_2_chunk
+        ->  Partial Aggregate
+              ->  Result
+                    One-Time Filter: (length(version()) > 0)
+                    ->  Parallel Custom Scan (ChunkAppend) on test
+                          Chunks excluded during startup: 0
+                          ->  Result
+                                One-Time Filter: (length(version()) > 0)
+                                ->  Parallel Seq Scan on _hyper_1_2_chunk


@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
 Limit
   Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
        Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
        Group Key: _hyper_3_8_chunk.device
-        ->  Partial GroupAggregate
-              Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-              Group Key: _hyper_3_8_chunk.device
-              ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                    Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                    Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+        ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+              Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+              Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)


@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
 Limit
   Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
        Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
        Group Key: _hyper_3_8_chunk.device
-        ->  Partial GroupAggregate
-              Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-              Group Key: _hyper_3_8_chunk.device
-              ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                    Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                    Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+        ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+              Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+              Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)


@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
 Limit
   Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
        Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
        Group Key: _hyper_3_8_chunk.device
-        ->  Partial GroupAggregate
-              Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-              Group Key: _hyper_3_8_chunk.device
-              ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                    Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                    Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+        ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+              Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+              Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)


@@ -889,16 +889,14 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
 Limit
   Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
        Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
-        ->  Partial GroupAggregate
-              Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-              ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                    Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                    Index Cond: (_hyper_3_8_chunk.device = 1)
-(9 rows)
+        ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+              Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+              Index Cond: (_hyper_3_8_chunk.device = 1)
+(7 rows)


@@ -291,7 +291,7 @@ SELECT compress_chunk(i) FROM show_chunks('deleteme') i;
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%';
                        QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
   ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
        ->  Seq Scan on compress_hyper_8_9_chunk
              Filter: ((segment)::text ~~ '%4%'::text)
@@ -300,7 +300,7 @@ EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE '4' = segment::text;
                        QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
   ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
        ->  Seq Scan on compress_hyper_8_9_chunk
              Filter: ('4'::text = (segment)::text)


@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                  QUERY PLAN
----------------------------------------------------------------------------------------------------------------
+                                               QUERY PLAN
+---------------------------------------------------------------------------------------------------------
 Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
        Group Key: test1."time", test1.x1, test1.x2
        ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
              Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
              ->  Sort (actual rows=4 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
              ->  Sort (actual rows=1 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                QUERY PLAN


@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                  QUERY PLAN
----------------------------------------------------------------------------------------------------------------
+                                               QUERY PLAN
+---------------------------------------------------------------------------------------------------------
 Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
        Group Key: test1."time", test1.x1, test1.x2
        ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
              Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
              ->  Sort (actual rows=4 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
              ->  Sort (actual rows=1 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                QUERY PLAN


@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                  QUERY PLAN
----------------------------------------------------------------------------------------------------------------
+                                               QUERY PLAN
+---------------------------------------------------------------------------------------------------------
 Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
        Group Key: test1."time", test1.x1, test1.x2
        ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
              Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
              ->  Sort (actual rows=4 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
              ->  Sort (actual rows=1 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                QUERY PLAN


@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                  QUERY PLAN
----------------------------------------------------------------------------------------------------------------
+                                               QUERY PLAN
+---------------------------------------------------------------------------------------------------------
 Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
        Group Key: test1."time", test1.x1, test1.x2
        ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
              Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
              ->  Sort (actual rows=4 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
              ->  Sort (actual rows=1 loops=1)
                    Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                    Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                QUERY PLAN


@@ -6,6 +6,8 @@
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
@@ -99,8 +101,12 @@ SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC N
 :PREFIX
 SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;