-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
-- Parallel queries require big-ish tables, so collect them all here
-- so that we need to generate the test data only once.
-- Output with ANALYZE is not stable because it depends on worker assignment.
\set PREFIX 'EXPLAIN (costs off)'
\set CHUNK1 _timescaledb_internal._hyper_1_1_chunk
\set CHUNK2 _timescaledb_internal._hyper_1_2_chunk
CREATE TABLE test (i int, j double precision, ts timestamp);
SELECT create_hypertable('test','i',chunk_time_interval:=500000);
WARNING:  column type "timestamp without time zone" used for "ts" does not follow best practices
NOTICE:  adding not-null constraint to column "i"
 create_hypertable 
-------------------
 (1,public,test,t)
(1 row)

INSERT INTO test SELECT x, x+0.1, _timescaledb_functions.to_timestamp(x*1000) FROM generate_series(0,1000000-1,10) AS x;
ANALYZE test;
ALTER TABLE :CHUNK1 SET (parallel_workers=2);
ALTER TABLE :CHUNK2 SET (parallel_workers=2);
SET work_mem TO '50MB';
SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false);
 set_config 
------------
 on
(1 row)
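
-- The CASE in the set_config() call above handles the PostgreSQL 16 rename of
-- force_parallel_mode to debug_parallel_query; a minimal sketch of the version
-- check it relies on:
SELECT current_setting('server_version_num')::int >= 160000 AS has_debug_parallel_query;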

SET max_parallel_workers_per_gather = 4;
SET parallel_setup_cost TO 0;
EXPLAIN (costs off) SELECT first(i, j) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT first(i, j) FROM "test";
 first 
-------
     0
(1 row)

EXPLAIN (costs off) SELECT last(i, j) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT last(i, j) FROM "test";
  last  
--------
 999990
(1 row)
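
-- first(value, ord) returns the value at the smallest ord and last(value, ord)
-- the value at the largest; an illustrative plain-SQL equivalent (slower, and
-- not part of the captured run):
SELECT i FROM test ORDER BY j ASC LIMIT 1;   -- ~ first(i, j)
SELECT i FROM test ORDER BY j DESC LIMIT 1;  -- ~ last(i, j)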

EXPLAIN (costs off) SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
                                      QUERY PLAN                                      
--------------------------------------------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Limit
         ->  Sort
               Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
               ->  HashAggregate
                     Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)
                     ->  Result
                           ->  Append
                                 ->  Seq Scan on _hyper_1_1_chunk
                                 ->  Seq Scan on _hyper_1_2_chunk
(12 rows)

-- test single copy parallel plan with parallel chunk append
:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
WHERE length(version()) > 0
GROUP BY sec
ORDER BY sec
LIMIT 5;
                                   QUERY PLAN                                   
--------------------------------------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Limit
         ->  Sort
               Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
               ->  HashAggregate
                     Group Key: time_bucket('@ 1 sec'::interval, test.ts)
                     ->  Result
                           One-Time Filter: (length(version()) > 0)
                           ->  Custom Scan (ChunkAppend) on test
                                 Chunks excluded during startup: 0
                                 ->  Result
                                       One-Time Filter: (length(version()) > 0)
                                       ->  Seq Scan on _hyper_1_1_chunk
                                 ->  Result
                                       One-Time Filter: (length(version()) > 0)
                                       ->  Seq Scan on _hyper_1_2_chunk
(18 rows)

SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
           sec            | last 
--------------------------+------
 Wed Dec 31 16:00:00 1969 |  990
 Wed Dec 31 16:00:01 1969 | 1990
 Wed Dec 31 16:00:02 1969 | 2990
 Wed Dec 31 16:00:03 1969 | 3990
 Wed Dec 31 16:00:04 1969 | 4990
(5 rows)
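
-- time_bucket() truncates a timestamp to the start of its fixed-width bucket,
-- which is why each one-second bucket above reports the last value inserted
-- before the next full second; an illustrative call, not part of the captured run:
SELECT time_bucket('1 second', TIMESTAMP '1969-12-31 16:00:00.5');  -- Wed Dec 31 16:00:00 1969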

-- test variants of histogram
EXPLAIN (costs off) SELECT histogram(i, 1, 1000000, 2) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT histogram(i, 1, 1000000, 2) FROM "test";
     histogram     
-------------------
 {1,50000,49999,0}
(1 row)

EXPLAIN (costs off) SELECT histogram(i, 1,1000001,10) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT histogram(i, 1, 1000001, 10) FROM "test";
                            histogram                             
------------------------------------------------------------------
 {1,10000,10000,10000,10000,10000,10000,10000,10000,10000,9999,0}
(1 row)

EXPLAIN (costs off) SELECT histogram(i, 0,100000,5) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT histogram(i, 0, 100000, 5) FROM "test";
             histogram              
------------------------------------
 {0,2000,2000,2000,2000,2000,90000}
(1 row)

EXPLAIN (costs off) SELECT histogram(i, 10,100000,5) FROM "test";
                          QUERY PLAN                           
---------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
(8 rows)

SELECT histogram(i, 10, 100000, 5) FROM "test";
             histogram              
------------------------------------
 {1,2000,2000,2000,2000,1999,90000}
(1 row)

EXPLAIN (costs off) SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j);
                                     QUERY PLAN                                      
------------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Parallel Append
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_1_chunk
                           Filter: ((i)::double precision = '-1'::double precision)
               ->  Partial Aggregate
                     ->  Parallel Seq Scan on _hyper_1_2_chunk
                           Filter: ((i)::double precision = '-1'::double precision)
(10 rows)

SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j);
 histogram 
-----------
 
(1 row)
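
-- histogram(value, min, max, nbuckets) returns an int[] with nbuckets + 2
-- entries: an underflow bucket (value < min), nbuckets equal-width buckets,
-- and an overflow bucket (value >= max). In {0,2000,2000,2000,2000,2000,90000}
-- above, no values fall below 0, each 20000-wide bucket holds 2000 of the
-- step-10 values, and 90000 values are at or above 100000.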

-- test parallel ChunkAppend
:PREFIX SELECT i FROM "test" WHERE length(version()) > 0;
                         QUERY PLAN                           
--------------------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Result
         One-Time Filter: (length(version()) > 0)
         ->  Custom Scan (ChunkAppend) on test
               Chunks excluded during startup: 0
               ->  Result
                     One-Time Filter: (length(version()) > 0)
                     ->  Seq Scan on _hyper_1_1_chunk
               ->  Result
                     One-Time Filter: (length(version()) > 0)
                     ->  Seq Scan on _hyper_1_2_chunk
(13 rows)

:PREFIX SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
                                QUERY PLAN                                 
---------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_1_chunk
                                       Filter: (i > 1)
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_2_chunk
                                       Filter: (i > 1)
(17 rows)

SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
 count 
-------
 99999
(1 row)
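
-- length(version()) > 0 is always true, but version() is only stable, not
-- immutable, so the clause cannot be folded away at plan time; it surfaces as
-- a One-Time Filter and exercises ChunkAppend's startup-time exclusion path
-- without actually excluding any chunks.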

-- test parallel ChunkAppend with only work done in the parallel workers
SET parallel_leader_participation = off;
SELECT count(*) FROM "test" WHERE i > 1 AND length(version()) > 0;
 count 
-------
 99999
(1 row)

RESET parallel_leader_participation;
-- Test parallel chunk append is used (index scan is disabled to trigger a parallel chunk append)
SET parallel_tuple_cost = 0;
SET enable_indexscan = OFF;
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
                                          QUERY PLAN                                           
----------------------------------------------------------------------------------------------
 Sort
   Sort Key: test.i, _hyper_1_1_chunk_1.i
   ->  Merge Left Join
         Merge Cond: (test.i = _hyper_1_1_chunk_1.i)
         ->  Limit
               ->  Gather Merge
                     Workers Planned: 2
                     ->  Sort
                           Sort Key: test.i
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Custom Scan (ChunkAppend) on test
                                       Chunks excluded during startup: 0
                                       ->  Result
                                             One-Time Filter: (length(version()) > 0)
                                             ->  Parallel Seq Scan on _hyper_1_1_chunk
                                       ->  Result
                                             One-Time Filter: (length(version()) > 0)
                                             ->  Parallel Seq Scan on _hyper_1_2_chunk
         ->  Materialize
               ->  Limit
                     ->  Gather Merge
                           Workers Planned: 2
                           ->  Sort
                                 Sort Key: _hyper_1_1_chunk_1.i
                                 ->  Parallel Seq Scan on _hyper_1_1_chunk _hyper_1_1_chunk_1
                                       Filter: (i < 500000)
(27 rows)

SELECT * FROM (SELECT * FROM "test" WHERE length(version()) > 0 ORDER BY I LIMIT 10) AS t1 LEFT JOIN (SELECT * FROM "test" WHERE i < 500000 ORDER BY I LIMIT 10) AS t2 ON (t1.i = t2.i) ORDER BY t1.i, t2.i;
 i  |  j   |             ts              | i  |  j   |             ts              
----+------+-----------------------------+----+------+-----------------------------
  0 |  0.1 | Wed Dec 31 16:00:00 1969    |  0 |  0.1 | Wed Dec 31 16:00:00 1969
 10 | 10.1 | Wed Dec 31 16:00:00.01 1969 | 10 | 10.1 | Wed Dec 31 16:00:00.01 1969
 20 | 20.1 | Wed Dec 31 16:00:00.02 1969 | 20 | 20.1 | Wed Dec 31 16:00:00.02 1969
 30 | 30.1 | Wed Dec 31 16:00:00.03 1969 | 30 | 30.1 | Wed Dec 31 16:00:00.03 1969
 40 | 40.1 | Wed Dec 31 16:00:00.04 1969 | 40 | 40.1 | Wed Dec 31 16:00:00.04 1969
 50 | 50.1 | Wed Dec 31 16:00:00.05 1969 | 50 | 50.1 | Wed Dec 31 16:00:00.05 1969
 60 | 60.1 | Wed Dec 31 16:00:00.06 1969 | 60 | 60.1 | Wed Dec 31 16:00:00.06 1969
 70 | 70.1 | Wed Dec 31 16:00:00.07 1969 | 70 | 70.1 | Wed Dec 31 16:00:00.07 1969
 80 | 80.1 | Wed Dec 31 16:00:00.08 1969 | 80 | 80.1 | Wed Dec 31 16:00:00.08 1969
 90 | 90.1 | Wed Dec 31 16:00:00.09 1969 | 90 | 90.1 | Wed Dec 31 16:00:00.09 1969
(10 rows)
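
-- Gather Merge preserves the sort order coming out of each worker while
-- merging their streams, so the per-worker Sort feeding it satisfies the
-- ORDER BY ... LIMIT without a full re-sort in the leader.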

SET enable_indexscan = ON;
-- Test normal chunk append can be used in a parallel worker
:PREFIX SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
                                                             QUERY PLAN                                                             
------------------------------------------------------------------------------------------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Limit
         ->  Incremental Sort
               Sort Key: _hyper_1_2_chunk.i, test.i
               Presorted Key: _hyper_1_2_chunk.i
               ->  Nested Loop
                     ->  Index Scan Backward using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk
                           Index Cond: (i >= 999000)
                     ->  Materialize
                           ->  Custom Scan (ChunkAppend) on test
                                 Order: test.i
                                 ->  Index Scan Backward using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk
                                       Index Cond: (i >= 400000)
                                 ->  Index Scan Backward using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk _hyper_1_2_chunk_1
                                       Index Cond: (i >= 400000)
(17 rows)

SELECT * FROM (SELECT * FROM "test" WHERE i >= 999000 ORDER BY i) AS t1 JOIN (SELECT * FROM "test" WHERE i >= 400000 ORDER BY i) AS t2 ON (TRUE) ORDER BY t1.i, t2.i LIMIT 10;
   i    |    j     |            ts            |   i    |    j     |             ts              
--------+----------+--------------------------+--------+----------+-----------------------------
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400000 | 400000.1 | Wed Dec 31 16:06:40 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400010 | 400010.1 | Wed Dec 31 16:06:40.01 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400020 | 400020.1 | Wed Dec 31 16:06:40.02 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400030 | 400030.1 | Wed Dec 31 16:06:40.03 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400040 | 400040.1 | Wed Dec 31 16:06:40.04 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400050 | 400050.1 | Wed Dec 31 16:06:40.05 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400060 | 400060.1 | Wed Dec 31 16:06:40.06 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400070 | 400070.1 | Wed Dec 31 16:06:40.07 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400080 | 400080.1 | Wed Dec 31 16:06:40.08 1969
 999000 | 999000.1 | Wed Dec 31 16:16:39 1969 | 400090 | 400090.1 | Wed Dec 31 16:06:40.09 1969
(10 rows)

-- Test parallel ChunkAppend reinit
SET enable_material = off;
SET min_parallel_table_scan_size = 0;
SET min_parallel_index_scan_size = 0;
SET enable_hashjoin = 'off';
SET enable_nestloop = 'off';
CREATE TABLE sensor_data(
    time timestamptz NOT NULL,
    sensor_id integer NOT NULL);
SELECT FROM create_hypertable(relation=>'sensor_data', time_column_name=> 'time');
--
(1 row)
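
-- SELECT with an empty target list is valid SQL: the call above runs
-- create_hypertable() for its side effect and discards the result row, hence
-- the empty result set. Keeping the row would look like this (illustrative
-- only; re-running it against the existing hypertable would raise an error):
-- SELECT * FROM create_hypertable(relation=>'sensor_data', time_column_name=>'time');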

-- Sensors 1 and 2
INSERT INTO sensor_data
SELECT time, sensor_id
FROM
generate_series('2000-01-01 00:00:30', '2022-01-01 00:00:30', INTERVAL '3 months') AS g1(time),
generate_series(1, 2, 1) AS g2(sensor_id)
ORDER BY time;
-- Sensor 100
INSERT INTO sensor_data
SELECT time, 100 as sensor_id
FROM
generate_series('2000-01-01 00:00:30', '2022-01-01 00:00:30', INTERVAL '1 year') AS g1(time)
ORDER BY time;
:PREFIX SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id;
                                                                                     QUERY PLAN                                                                                      
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
 Gather Merge
   Workers Planned: 4
   ->  Sort
         Sort Key: s2."time", s1."time", s1.sensor_id, s2.sensor_id
         ->  Nested Loop
               ->  Parallel Custom Scan (ChunkAppend) on sensor_data s1
                     Chunks excluded during startup: 80
                     ->  Parallel Bitmap Heap Scan on _hyper_2_83_chunk s1_1
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_83_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_84_chunk s1_2
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_84_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_85_chunk s1_3
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_85_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_86_chunk s1_4
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_86_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_87_chunk s1_5
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_87_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_88_chunk s1_6
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_88_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_89_chunk s1_7
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_89_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_90_chunk s1_8
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_90_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                     ->  Parallel Bitmap Heap Scan on _hyper_2_91_chunk s1_9
                           Recheck Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
                           Filter: (sensor_id > 50)
                           ->  Bitmap Index Scan on _hyper_2_91_chunk_sensor_data_time_idx
                                 Index Cond: ("time" > ('2020-01-01 00:00:30'::cstring)::timestamp with time zone)
               ->  Custom Scan (ChunkAppend) on sensor_data s2
                     Order: s2."time"
                     ->  Index Scan Backward using _hyper_2_83_chunk_sensor_data_time_idx on _hyper_2_83_chunk s2_1
                           Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone))
                     ->  Index Scan Backward using _hyper_2_84_chunk_sensor_data_time_idx on _hyper_2_84_chunk s2_2
                           Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone))
                     ->  Index Scan Backward using _hyper_2_85_chunk_sensor_data_time_idx on _hyper_2_85_chunk s2_3
                           Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone))
                     ->  Index Scan Backward using _hyper_2_86_chunk_sensor_data_time_idx on _hyper_2_86_chunk s2_4
                           Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone))
                     ->  Index Scan Backward using _hyper_2_87_chunk_sensor_data_time_idx on _hyper_2_87_chunk s2_5
                           Index Cond: (("time" > 'Wed Jan 01 00:00:30 2020 PST'::timestamp with time zone) AND ("time" < 'Fri Jan 01 00:00:30 2021 PST'::timestamp with time zone))
(64 rows)
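
-- "Chunks excluded during startup: 80" is runtime exclusion at work: the
-- s1 predicate compares against '2020-01-01 00:00:30'::text::timestamptz,
-- which is stable rather than a plan-time constant, so ChunkAppend prunes the
-- pre-2020 chunks at executor startup instead of during planning.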

-- Check query result
SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id;
             time             | sensor_id |             time             | sensor_id 
------------------------------+-----------+------------------------------+-----------
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
(12 rows)

-- Ensure the same result is produced if only the parallel workers have to produce them (i.e., the pstate is reinitialized properly)
SET parallel_leader_participation = off;
SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id;
             time             | sensor_id |             time             | sensor_id 
------------------------------+-----------+------------------------------+-----------
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
(12 rows)

RESET parallel_leader_participation;
-- Ensure the same query result is produced by a sequential query
SET max_parallel_workers_per_gather TO 0;
SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'off', false);
 set_config 
------------
 off
(1 row)

SELECT * FROM sensor_data AS s1 JOIN sensor_data AS s2 ON (TRUE) WHERE s1.time > '2020-01-01 00:00:30'::text::timestamptz AND s2.time > '2020-01-01 00:00:30' AND s2.time < '2021-01-01 00:00:30' AND s1.sensor_id > 50 ORDER BY s2.time, s1.time, s1.sensor_id, s2.sensor_id;
             time             | sensor_id |             time             | sensor_id 
------------------------------+-----------+------------------------------+-----------
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Apr 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Wed Jul 01 00:00:30 2020 PDT |         2
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Fri Jan 01 00:00:30 2021 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         1
 Sat Jan 01 00:00:30 2022 PST |       100 | Thu Oct 01 00:00:30 2020 PDT |         2
(12 rows)

RESET enable_material;
RESET min_parallel_table_scan_size;
RESET min_parallel_index_scan_size;
RESET enable_hashjoin;
RESET enable_nestloop;
RESET parallel_tuple_cost;
SELECT set_config(CASE WHEN current_setting('server_version_num')::int < 160000 THEN 'force_parallel_mode' ELSE 'debug_parallel_query' END,'on', false);
 set_config 
------------
 on
(1 row)

-- test worker assignment
-- first chunk should have 1 worker and second chunk should have 2
SET max_parallel_workers_per_gather TO 2;
:PREFIX SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0;
                                                      QUERY PLAN                                                      
--------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk
                                       Index Cond: (i >= 400000)
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_2_chunk
                                       Filter: (i >= 400000)
(17 rows)

SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0;
 count 
-------
 60000
(1 row)
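
-- The uneven assignment follows the per-chunk cost: i >= 400000 matches only
-- the tail of the first chunk (a small index scan) but all of the second
-- chunk (a full scan), so the planner concentrates workers on the second.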

-- test worker assignment
-- first chunk should have 2 workers and second chunk should have 1
:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
                                                      QUERY PLAN                                                      
--------------------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk
                                       Index Cond: (i < 600000)
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_1_chunk
                                       Filter: (i < 600000)
(17 rows)

SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
 count 
-------
 60000
(1 row)

-- test ChunkAppend with # workers < # children
SET max_parallel_workers_per_gather TO 1;
:PREFIX SELECT count(*) FROM "test" WHERE length(version()) > 0;
                                QUERY PLAN                                 
---------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 1
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_1_chunk
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_2_chunk
(15 rows)

SELECT count(*) FROM "test" WHERE length(version()) > 0;
 count  
--------
 100000
(1 row)

-- test ChunkAppend with # workers > # children
SET max_parallel_workers_per_gather TO 2;
:PREFIX SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0;
                                QUERY PLAN                                 
---------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Partial Aggregate
               ->  Result
                     One-Time Filter: (length(version()) > 0)
                     ->  Parallel Custom Scan (ChunkAppend) on test
                           Chunks excluded during startup: 0
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_2_chunk
                                       Filter: (i >= 500000)
(12 rows)

SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0;
 count 
-------
 50000
(1 row)
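
-- With i >= 500000 the first chunk (covering i < 500000) is excluded at plan
-- time, so both planned workers cooperate on the single remaining child scan.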

RESET max_parallel_workers_per_gather;
-- test partial and non-partial plans
-- these will not be parallel on PG < 11
ALTER TABLE :CHUNK1 SET (parallel_workers=0);
ALTER TABLE :CHUNK2 SET (parallel_workers=2);
:PREFIX SELECT count(*) FROM "test" WHERE i > 400000 AND length(version()) > 0;
                                                 QUERY PLAN                                                  
-----------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk
                                       Index Cond: (i > 400000)
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_2_chunk
                                       Filter: (i > 400000)
(17 rows)

ALTER TABLE :CHUNK1 SET (parallel_workers=2);
ALTER TABLE :CHUNK2 SET (parallel_workers=0);
:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
                                                 QUERY PLAN                                                  
-----------------------------------------------------------------------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Result
               One-Time Filter: (length(version()) > 0)
               ->  Parallel Custom Scan (ChunkAppend) on test
                     Chunks excluded during startup: 0
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk
                                       Index Cond: (i < 600000)
                     ->  Partial Aggregate
                           ->  Result
                                 One-Time Filter: (length(version()) > 0)
                                 ->  Parallel Seq Scan on _hyper_1_1_chunk
                                       Filter: (i < 600000)
(17 rows)

ALTER TABLE :CHUNK1 RESET (parallel_workers);
ALTER TABLE :CHUNK2 RESET (parallel_workers);
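
-- Setting parallel_workers = 0 on a chunk suppresses partial paths for it, so
-- the Parallel ChunkAppend above mixes a plain Index Only Scan on that chunk
-- with a Parallel Seq Scan on the other.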

-- now() is not marked parallel safe in PostgreSQL < 12 so using now()
-- in a query will prevent parallelism but CURRENT_TIMESTAMP and
-- transaction_timestamp() are marked parallel safe
:PREFIX SELECT i FROM "test" WHERE ts < CURRENT_TIMESTAMP;
                   QUERY PLAN                   
------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Custom Scan (ChunkAppend) on test
         Chunks excluded during startup: 0
         ->  Seq Scan on _hyper_1_1_chunk
               Filter: (ts < CURRENT_TIMESTAMP)
         ->  Seq Scan on _hyper_1_2_chunk
               Filter: (ts < CURRENT_TIMESTAMP)
(9 rows)

:PREFIX SELECT i FROM "test" WHERE ts < transaction_timestamp();
                      QUERY PLAN                      
------------------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Custom Scan (ChunkAppend) on test
         Chunks excluded during startup: 0
         ->  Seq Scan on _hyper_1_1_chunk
               Filter: (ts < transaction_timestamp())
         ->  Seq Scan on _hyper_1_2_chunk
               Filter: (ts < transaction_timestamp())
(9 rows)

-- this won't be a parallel query because now() is parallel restricted in PG < 12
:PREFIX SELECT i FROM "test" WHERE ts < now();
                 QUERY PLAN                  
-------------------------------------------
 Gather
   Workers Planned: 1
   Single Copy: true
   ->  Custom Scan (ChunkAppend) on test
         Chunks excluded during startup: 0
         ->  Seq Scan on _hyper_1_1_chunk
               Filter: (ts < now())
         ->  Seq Scan on _hyper_1_2_chunk
               Filter: (ts < now())
(9 rows)
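
-- Parallel safety is recorded per function in pg_proc.proparallel
-- ('s' = safe, 'r' = restricted, 'u' = unsafe); a quick way to check the
-- functions used above on the server at hand:
SELECT proname, proparallel FROM pg_proc
WHERE proname IN ('now', 'transaction_timestamp', 'version');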