Mirror of https://github.com/timescale/timescaledb.git, synced 2025-05-18 11:45:11 +08:00
Do not produce partial aggregation plans for the only chunk

It doesn't make much sense: with a single chunk there is nothing to
combine across chunks, so splitting the aggregate into a Partial/Finalize
pair only adds planner and executor overhead.
This commit is contained in: parent e0a3e309a7, commit 9a4eb12eb6
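The effect is easiest to see with a small sketch. This assumes a single-chunk hypertable shaped like the btest table used in the regression tests below; the DDL and sample row here are illustrative, since only the queries and plans appear in this diff:

-- Illustrative setup; column names are taken from the test queries below,
-- but the actual test DDL is not part of this diff.
CREATE TABLE btest(time timestamptz, time_alt timestamptz, gp int, temp float, strid text);
SELECT create_hypertable('btest', 'time');
INSERT INTO btest VALUES ('2017-01-20 09:00:01', '2017-01-20 10:00:00', 1, 22.5, 'testing');

-- Before this commit the planner produced a per-chunk partial aggregation
-- plan even though only one chunk exists:
--   Finalize HashAggregate
--     ->  Partial HashAggregate
--           ->  Seq Scan on _hyper_1_1_chunk
-- With the guard added below, the single chunk is aggregated directly:
--   Aggregate
--     ->  Seq Scan on _hyper_1_1_chunk
EXPLAIN (costs off) SELECT last(temp, time_alt) FROM btest;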
@@ -461,6 +461,15 @@ generate_agg_pushdown_path(PlannerInfo *root, Path *cheapest_total_path, RelOptI
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
+
 	/* Generate agg paths on top of the append children */
 	List *sorted_subpaths = NIL;
 	List *hashed_subpaths = NIL;
@@ -580,6 +589,14 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat
 	if (subpaths == NIL)
 		return;
 
+	if (list_length(subpaths) < 2)
+	{
+		/*
+		 * Doesn't make sense to add per-chunk aggregation paths if there's
+		 * only one chunk.
+		 */
+		return;
+	}
 	/* Generate agg paths on top of the append children */
 	ListCell *lc;
 	List *sorted_subpaths = NIL;
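Both the full and the partial aggregation pushdown paths now share the same guard: unless the append has at least two child paths, no per-chunk aggregation paths are generated. A sketch of the multi-chunk case the guard still allows, using assumed table and column names that are not taken from this diff:

-- Hypothetical hypertable spanning two chunks; with two or more append
-- children, per-chunk partial aggregation paths can still be generated.
CREATE TABLE conditions(time timestamptz, device int, temp float);
SELECT create_hypertable('conditions', 'time', chunk_time_interval => interval '1 day');
INSERT INTO conditions
SELECT t, 1, 20.0
FROM generate_series('2024-01-01'::timestamptz, '2024-01-02', interval '1 hour') t;

-- Expected shape: a Finalize Aggregate combining per-chunk Partial
-- Aggregate nodes under the append.
EXPLAIN (costs off) SELECT avg(temp) FROM conditions;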
@@ -77,116 +77,92 @@ SELECT setting, current_setting(setting) AS value from (VALUES ('timescaledb.ena
 (5 rows)
 
 :PREFIX SELECT last(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 
 :PREFIX SELECT first(temp, time_alt) FROM btest;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(2 rows)
 
 :PREFIX SELECT gp, last(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
          Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, first(temp, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
          Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 --check whole row
 :PREFIX SELECT gp, first(btest, time) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
         Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 --check toasted col
 :PREFIX SELECT gp, left(last(strid, time), 10) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
         Group Key: _hyper_1_1_chunk.gp
+   ->  HashAggregate (actual rows=2 loops=1)
+        Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, last(temp, strid) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
         Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 :PREFIX SELECT gp, last(strid, temp) FROM btest GROUP BY gp ORDER BY gp;
-                               QUERY PLAN
-------------------------------------------------------------------------
+                            QUERY PLAN
+------------------------------------------------------------------
  Sort (actual rows=2 loops=1)
    Sort Key: _hyper_1_1_chunk.gp
    Sort Method: quicksort
-   ->  Finalize HashAggregate (actual rows=2 loops=1)
+   ->  HashAggregate (actual rows=2 loops=1)
         Group Key: _hyper_1_1_chunk.gp
         Batches: 1
-         ->  Partial HashAggregate (actual rows=2 loops=1)
-               Group Key: _hyper_1_1_chunk.gp
-               Batches: 1
-               ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
-(10 rows)
+         ->  Seq Scan on _hyper_1_1_chunk (actual rows=6 loops=1)
+(7 rows)
 
 BEGIN;
 --check null value as last element
@@ -849,24 +825,18 @@ INSERT INTO btest_numeric VALUES('2018-01-20T09:00:43', NULL);
 (5 rows)
 
 :PREFIX SELECT first(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 
 :PREFIX SELECT last(time, quantity) FROM btest_numeric;
-                            QUERY PLAN
-------------------------------------------------------------------
- Finalize HashAggregate (actual rows=1 loops=1)
-   Batches: 1
-   ->  Partial HashAggregate (actual rows=1 loops=1)
-         Batches: 1
-         ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
-(5 rows)
+                         QUERY PLAN
+------------------------------------------------------------
+ Aggregate (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_2_8_chunk (actual rows=2 loops=1)
+(2 rows)
 
 -- NULL values followed by non-NULL values
 INSERT INTO btest_numeric VALUES('2019-01-20T09:00:43', 1);
@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                            ->  Result
                                  One-Time Filter: (length(version()) > 0)
                                  ->  Parallel Seq Scan on _hyper_1_2_chunk
@@ -641,11 +641,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                            ->  Result
                                  One-Time Filter: (length(version()) > 0)
                                  ->  Parallel Seq Scan on _hyper_1_2_chunk
@@ -642,11 +642,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                            ->  Result
                                  One-Time Filter: (length(version()) > 0)
                                  ->  Parallel Seq Scan on _hyper_1_2_chunk
@@ -643,11 +643,11 @@ SET max_parallel_workers_per_gather TO 2;
  Finalize Aggregate
    ->  Gather
          Workers Planned: 2
-         ->  Result
-               One-Time Filter: (length(version()) > 0)
-               ->  Parallel Custom Scan (ChunkAppend) on test
-                     Chunks excluded during startup: 0
-                     ->  Partial Aggregate
+         ->  Partial Aggregate
+               ->  Result
+                     One-Time Filter: (length(version()) > 0)
+                     ->  Parallel Custom Scan (ChunkAppend) on test
+                           Chunks excluded during startup: 0
                            ->  Result
                                  One-Time Filter: (length(version()) > 0)
                                  ->  Parallel Seq Scan on _hyper_1_2_chunk
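The repeated hunks above (apparently one per version-specific expected file, given the nearly identical content) show the parallel case: the Partial Aggregate moves from inside the ChunkAppend, where it sat below the per-chunk Result, to directly under the Gather, above the append. A hedged reproduction, assuming the test table from the hunk context (its DDL is not in this diff, and the aggregate itself is assumed):

SET max_parallel_workers_per_gather TO 2;
-- The One-Time Filter in the plans suggests the test query carried a
-- startup-time predicate along these lines:
EXPLAIN (costs off)
SELECT count(*) FROM test WHERE length(version()) > 0;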
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
@@ -889,18 +889,15 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
          Group Key: _hyper_3_8_chunk.device
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               Group Key: _hyper_3_8_chunk.device
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(11 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(8 rows)
 
@@ -889,16 +889,14 @@ FROM hyper_timepart
 WHERE device = 1
 GROUP BY 1
 LIMIT 10;
-                                                          QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------------
+                                                       QUERY PLAN
+------------------------------------------------------------------------------------------------------------------------
  Limit
    Output: _hyper_3_8_chunk.device, (avg(_hyper_3_8_chunk.temp))
-   ->  Finalize GroupAggregate
+   ->  GroupAggregate
          Output: _hyper_3_8_chunk.device, avg(_hyper_3_8_chunk.temp)
-         ->  Partial GroupAggregate
-               Output: _hyper_3_8_chunk.device, PARTIAL avg(_hyper_3_8_chunk.temp)
-               ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
-                     Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
-                     Index Cond: (_hyper_3_8_chunk.device = 1)
-(9 rows)
+         ->  Index Scan using _hyper_3_8_chunk_hyper_timepart_device_expr_idx on _timescaledb_internal._hyper_3_8_chunk
+               Output: _hyper_3_8_chunk.device, _hyper_3_8_chunk.temp
+               Index Cond: (_hyper_3_8_chunk.device = 1)
+(7 rows)
 
@@ -291,7 +291,7 @@ SELECT compress_chunk(i) FROM show_chunks('deleteme') i;
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%';
                        QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
   ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
         ->  Seq Scan on compress_hyper_8_9_chunk
               Filter: ((segment)::text ~~ '%4%'::text)
@@ -300,7 +300,7 @@ EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text like '%4%
 EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE '4' = segment::text;
                        QUERY PLAN
 ---------------------------------------------------------
- Finalize Aggregate
+ Aggregate
   ->  Custom Scan (DecompressChunk) on _hyper_7_8_chunk
         ->  Seq Scan on compress_hyper_8_9_chunk
               Filter: ('4'::text = (segment)::text)
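These two hunks show the same simplification for a compressed single-chunk table. A sketch of the setup implied by the hunk context; the deleteme table's actual DDL and column types are not part of this diff:

-- Assumed setup; only the compress_chunk call and the EXPLAIN queries
-- appear in the hunks above.
CREATE TABLE deleteme(time timestamptz, segment int, data float);
SELECT create_hypertable('deleteme', 'time');
ALTER TABLE deleteme SET (timescaledb.compress, timescaledb.compress_segmentby = 'segment');
SELECT compress_chunk(i) FROM show_chunks('deleteme') i;

-- With a single compressed chunk, the plan now starts with a plain
-- Aggregate instead of a Finalize Aggregate:
EXPLAIN (costs off) SELECT sum(data) FROM deleteme WHERE segment::text LIKE '%4%';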
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                   QUERY PLAN
----------------------------------------------------------------------------------------------------------------
+                                                QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
         Group Key: test1."time", test1.x1, test1.x2
         ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
               Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
               ->  Sort (actual rows=4 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
               ->  Sort (actual rows=1 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                                  QUERY PLAN
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -690,12 +691,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                   QUERY PLAN
---------------------------------------------------------------------------------------------------------------
+                                                QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
         Group Key: test1."time", test1.x1, test1.x2
         ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
               Order: test1."time", test1.x1, test1.x2
@@ -704,20 +707,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
               ->  Sort (actual rows=4 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
               ->  Sort (actual rows=1 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                                  QUERY PLAN
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                   QUERY PLAN
---------------------------------------------------------------------------------------------------------------
+                                                QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
         Group Key: test1."time", test1.x1, test1.x2
         ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
               Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
               ->  Sort (actual rows=4 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
               ->  Sort (actual rows=1 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                                  QUERY PLAN
@@ -4,6 +4,7 @@
 -- this test checks the validity of the produced plans for partially compressed chunks
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
+set enable_parallel_append to off; -- for less flaky plans
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 CREATE TABLE ht_metrics_compressed(time timestamptz, device int, value float);
@@ -696,12 +697,14 @@ SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
               ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
 (13 rows)
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
-                                                   QUERY PLAN
---------------------------------------------------------------------------------------------------------------
+                                                QUERY PLAN
+---------------------------------------------------------------------------------------------------------
  Limit (actual rows=5 loops=1)
-   ->  Finalize GroupAggregate (actual rows=5 loops=1)
+   ->  GroupAggregate (actual rows=5 loops=1)
         Group Key: test1."time", test1.x1, test1.x2
         ->  Custom Scan (ChunkAppend) on test1 (actual rows=5 loops=1)
               Order: test1."time", test1.x1, test1.x2
@@ -710,20 +713,16 @@ SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10
               ->  Sort (actual rows=4 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=4 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
-                                ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
+                    ->  Custom Scan (DecompressChunk) on _hyper_3_7_chunk (actual rows=4 loops=1)
+                          ->  Seq Scan on compress_hyper_4_8_chunk (actual rows=3 loops=1)
               ->  Sort (actual rows=1 loops=1)
                     Sort Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
                     Sort Method: quicksort
-                    ->  Partial HashAggregate (actual rows=1 loops=1)
-                          Group Key: _hyper_3_7_chunk."time", _hyper_3_7_chunk.x1, _hyper_3_7_chunk.x2
-                          Batches: 1
-                          ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
-(22 rows)
+                    ->  Seq Scan on _hyper_3_7_chunk (actual rows=1 loops=1)
+(16 rows)
 
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;
                                                  QUERY PLAN
@@ -6,6 +6,8 @@
 -- when injecting query_pathkeys on top of the append
 -- path that combines the uncompressed and compressed parts of a chunk.
 
+set enable_parallel_append to off; -- for less flaky plans
+
 set timescaledb.enable_decompression_sorted_merge = off;
 \set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
 
@@ -99,8 +101,12 @@ SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC N
 :PREFIX
 SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
 
+set max_parallel_workers_per_gather = 0; -- parallel plan different on Windows
+set enable_hashagg to off; -- different on PG13
 :PREFIX
 SELECT x1, x2, max(time) FROM test1 GROUP BY x1, x2, time ORDER BY time limit 10;
+reset max_parallel_workers_per_gather;
+reset enable_hashagg;
 
 :PREFIX
 SELECT * FROM test1 ORDER BY x1, x2, x5, x4, time LIMIT 10;