Merge gapfill tests into single test

This commit is contained in:
Sven Klemm 2020-08-29 17:41:02 +02:00 committed by Sven Klemm
parent 66f80d159b
commit 9ae409259a
6 changed files with 460 additions and 470 deletions

View File

@@ -1,6 +1,324 @@
-- This file and its contents are licensed under the Timescale License. -- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and -- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license. -- LICENSE-TIMESCALE for a copy of the license.
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
table_name
-------------------
gapfill_plan_test
(1 row)
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1;
QUERY PLAN
-----------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(6 rows)
-- test sorting
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2;
QUERY PLAN
-----------------------------------------------------------------------------------------------
Sort
Sort Key: (avg("*VALUES*".column2))
-> Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(8 rows)
-- test sort direction
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1 DESC;
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)) DESC
-> Custom Scan (GapFill)
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)) NULLS FIRST
-> HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)
-> Values Scan on "*VALUES*"
(8 rows)
-- test order by aggregate function
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2,1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------
Sort
Sort Key: (avg("*VALUES*".column2)), (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(8 rows)
-- test query without order by
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1;
QUERY PLAN
-----------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(6 rows)
-- test parallel query
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
avg(value)
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- test parallel query with locf
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- test parallel query with interpolate
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- make sure we can run gapfill in parallel workers
-- ensure this plan runs in parallel
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (interpolate(avg(value), NULL::record, NULL::record))
-> Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(18 rows)
-- actually run a parallel gapfill
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
time_bucket_gapfill | interpolate
------------------------------+-------------
Mon Jan 01 00:00:00 2018 PST | 1
(1 row)
-- test sort optimizations
-- test sort optimization with single member order by,
-- should use index scan (no GapFill node for this one since we're not gapfilling)
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", NULL::timestamp with time zone, NULL::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(6 rows)
SET max_parallel_workers_per_gather TO 0;
-- test sort optimizations with locf
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(9 rows)
-- test sort optimizations with interpolate
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(9 rows)
RESET max_parallel_workers_per_gather;
CREATE INDEX ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1,2;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_1_1_chunk.value
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
-> Seq Scan on _hyper_1_3_chunk
-> Seq Scan on _hyper_1_4_chunk
(8 rows)
-- test sort optimization with ordering by multiple columns and time_bucket as last member,
-- should use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 2,1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
-> Merge Append
Sort Key: _hyper_1_1_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone))
-> Index Only Scan using _hyper_1_1_chunk_gapfill_plan_test_value_time_idx on _hyper_1_1_chunk
-> Index Only Scan using _hyper_1_2_chunk_gapfill_plan_test_value_time_idx on _hyper_1_2_chunk
-> Index Only Scan using _hyper_1_3_chunk_gapfill_plan_test_value_time_idx on _hyper_1_3_chunk
-> Index Only Scan using _hyper_1_4_chunk_gapfill_plan_test_value_time_idx on _hyper_1_4_chunk
(7 rows)
CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float); CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float);
INSERT INTO metrics_int VALUES INSERT INTO metrics_int VALUES
(-100,1,1,0.0), (-100,1,1,0.0),
@@ -1581,11 +1899,11 @@ GROUP BY 1 ORDER BY 2 NULLS LAST,1;
-- test queries on hypertable -- test queries on hypertable
CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int); CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int);
SELECT create_hypertable('metrics_tstz','time'); SELECT table_name FROM create_hypertable('metrics_tstz','time');
NOTICE: adding not-null constraint to column "time" NOTICE: adding not-null constraint to column "time"
create_hypertable table_name
--------------------------- --------------
(1,public,metrics_tstz,t) metrics_tstz
(1 row) (1 row)
INSERT INTO metrics_tstz VALUES INSERT INTO metrics_tstz VALUES

View File

@@ -1,321 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
table_name
-------------------
gapfill_plan_test
(1 row)
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1;
QUERY PLAN
-----------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(6 rows)
-- test sorting
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2;
QUERY PLAN
-----------------------------------------------------------------------------------------------
Sort
Sort Key: (avg("*VALUES*".column2))
-> Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(8 rows)
-- test sort direction
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1 DESC;
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)) DESC
-> Custom Scan (GapFill)
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)) NULLS FIRST
-> HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1)
-> Values Scan on "*VALUES*"
(8 rows)
-- test order by aggregate function
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2,1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------
Sort
Sort Key: (avg("*VALUES*".column2)), (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(8 rows)
-- test query without order by
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1;
QUERY PLAN
-----------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, "*VALUES*".column1))
-> Values Scan on "*VALUES*"
(6 rows)
-- test parallel query
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
avg(value)
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- test parallel query with locf
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- test parallel query with interpolate
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(15 rows)
-- make sure we can run gapfill in parallel workers
-- ensure this plan runs in parallel
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (interpolate(avg(value), NULL::record, NULL::record))
-> Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
(18 rows)
-- actually run a parallel gapfill
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
time_bucket_gapfill | interpolate
------------------------------+-------------
Mon Jan 01 00:00:00 2018 PST | 1
(1 row)
-- test sort optimizations
-- test sort optimization with single member order by,
-- should use index scan (no GapFill node for this one since we're not gapfilling)
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", NULL::timestamp with time zone, NULL::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(6 rows)
SET max_parallel_workers_per_gather TO 0;
-- test sort optimizations with locf
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(9 rows)
-- test sort optimizations with interpolate
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> GroupAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
(9 rows)
RESET max_parallel_workers_per_gather;
CREATE INDEX ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1,2;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_1_1_chunk.value
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
-> Seq Scan on _hyper_1_3_chunk
-> Seq Scan on _hyper_1_4_chunk
(8 rows)
-- test sort optimization with ordering by multiple columns and time_bucket as last member,
-- should use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 2,1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
-> Merge Append
Sort Key: _hyper_1_1_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone))
-> Index Only Scan using _hyper_1_1_chunk_gapfill_plan_test_value_time_idx on _hyper_1_1_chunk
-> Index Only Scan using _hyper_1_2_chunk_gapfill_plan_test_value_time_idx on _hyper_1_2_chunk
-> Index Only Scan using _hyper_1_3_chunk_gapfill_plan_test_value_time_idx on _hyper_1_3_chunk
-> Index Only Scan using _hyper_1_4_chunk_gapfill_plan_test_value_time_idx on _hyper_1_4_chunk
(7 rows)

View File

@@ -3,7 +3,6 @@
/continuous_aggs_permissions-*.sql /continuous_aggs_permissions-*.sql
/continuous_aggs_query-*.sql /continuous_aggs_query-*.sql
/continuous_aggs_union_view-*.sql /continuous_aggs_union_view-*.sql
/plan_gapfill-*.sql
/transparent_decompression-*.sql /transparent_decompression-*.sql
/transparent_decompression_ordered_index-*.sql /transparent_decompression_ordered_index-*.sql
/compression_permissions-*.sql /compression_permissions-*.sql

View File

@@ -12,7 +12,6 @@ set(TEST_FILES
edition.sql edition.sql
gapfill.sql gapfill.sql
partialize_finalize.sql partialize_finalize.sql
plan_gapfill.sql
) )
set(TEST_FILES_DEBUG set(TEST_FILES_DEBUG

View File

@@ -2,6 +2,142 @@
-- Please see the included NOTICE for copyright information and -- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license. -- LICENSE-TIMESCALE for a copy of the license.
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1;
-- test sorting
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2;
-- test sort direction
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1 DESC;
-- test order by aggregate function
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2,1;
-- test query without order by
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,now(),now()),
avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1;
-- test parallel query
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
avg(value)
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test parallel query with locf
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test parallel query with interpolate
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- make sure we can run gapfill in parallel workers
-- ensure this plan runs in parallel
:EXPLAIN
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
-- actually run a parallel gapfill
SELECT
time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
-- test sort optimizations
-- test sort optimization with single member order by,
-- should use index scan (no GapFill node for this one since we're not gapfilling)
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1;
SET max_parallel_workers_per_gather TO 0;
-- test sort optimizations with locf
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test sort optimizations with interpolate
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
RESET max_parallel_workers_per_gather;
CREATE INDEX ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1,2;
-- test sort optimization with ordering by multiple columns and time_bucket as last member,
-- should use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 2,1;
CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float); CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float);
INSERT INTO metrics_int VALUES INSERT INTO metrics_int VALUES
@@ -932,7 +1068,7 @@ GROUP BY 1 ORDER BY 2 NULLS LAST,1;
-- test queries on hypertable -- test queries on hypertable
CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int); CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int);
SELECT create_hypertable('metrics_tstz','time'); SELECT table_name FROM create_hypertable('metrics_tstz','time');
INSERT INTO metrics_tstz VALUES INSERT INTO metrics_tstz VALUES
(timestamptz '2018-01-01 05:00:00 PST', 1, 0.5, 10), (timestamptz '2018-01-01 05:00:00 PST', 1, 0.5, 10),
(timestamptz '2018-01-01 05:00:00 PST', 2, 0.7, 20), (timestamptz '2018-01-01 05:00:00 PST', 2, 0.7, 20),
@@ -1497,9 +1633,6 @@ SELECT
FROM (VALUES (1,1),(2,2)) v(time,device_id) FROM (VALUES (1,1),(2,2)) v(time,device_id)
GROUP BY 1,device_id; GROUP BY 1,device_id;
--test interpolation with big diifferences in values (test overflows in calculations) --test interpolation with big diifferences in values (test overflows in calculations)
--we use the biggest possible difference in time(x) and the value(y). --we use the biggest possible difference in time(x) and the value(y).
--For bigints we also test values of smaller than bigintmax/min to avoid --For bigints we also test values of smaller than bigintmax/min to avoid
@ -1517,3 +1650,4 @@ SELECT
FROM (values (:big_int_min,(-32768)::smallint,(-2147483648)::int,:big_int_min,-2147483648::bigint, '-Infinity'::double precision), FROM (values (:big_int_min,(-32768)::smallint,(-2147483648)::int,:big_int_min,-2147483648::bigint, '-Infinity'::double precision),
(:big_int_max, 32767::smallint, 2147483647::int,:big_int_max, 2147483647::bigint, 'Infinity'::double precision)) v(time,s,i,b,b2,d) (:big_int_max, 32767::smallint, 2147483647::int,:big_int_max, 2147483647::bigint, 'Infinity'::double precision)) v(time,s,i,b,b2,d)
GROUP BY 1 ORDER BY 1; GROUP BY 1 ORDER BY 1;

View File (deleted file — all 139 lines removed; its content, reproduced below, was merged into the combined gapfill test)

@ -1,139 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- psql variable so every plan check below uses identical EXPLAIN options
-- (COSTS OFF keeps the expected output stable across machines)
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
-- three months of minutely data so the hypertable spans several 4-week chunks
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
-- expect a GapFill custom scan node directly above the grouping aggregate
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,now(),now()),
  avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1;
-- test sorting
-- ordering by the aggregate: the final Sort must sit above the GapFill node,
-- since gapfill output rows only exist after the GapFill step
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,now(),now()),
  avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2;
-- test sort direction
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,now(),now()),
  avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 1 DESC;
-- test order by aggregate function
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,now(),now()),
  avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1
ORDER BY 2,1;
-- test query without order by
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,now(),now()),
  avg(c2)
FROM (VALUES (now(),1),(now(),NULL),(now(),NULL)) as t(time,c2)
GROUP BY 1;
-- test parallel query
-- plain aggregate over the hypertable should still produce a parallel plan
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
  avg(value)
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test parallel query with locf
-- locf() wrapping the aggregate must not disable parallelism
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
  locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test parallel query with interpolate
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
  interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- make sure we can run gapfill in parallel workers
-- ensure this plan runs in parallel
:EXPLAIN
SELECT
  time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
  interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
-- actually run a parallel gapfill
-- (executed, not just EXPLAINed, to verify gapfill produces correct
-- results when driven by parallel workers)
SELECT
  time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)),
  interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
-- test sort optimizations
-- test sort optimization with single member order by,
-- should use index scan (no GapFill node for this one since we're not gapfilling)
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1;
-- disable parallel workers so the following plans are deterministic
SET max_parallel_workers_per_gather TO 0;
-- test sort optimizations with locf
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), locf(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
-- test sort optimizations with interpolate
:EXPLAIN SELECT time_bucket_gapfill('5m',time,to_timestamp(0),to_timestamp(0)), interpolate(avg(value))
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
RESET max_parallel_workers_per_gather;
-- composite index so the planner has the option of an index scan for ORDER BY value, time
CREATE INDEX ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1,2;
-- test sort optimization with ordering by multiple columns and time_bucket as last member,
-- should use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 2,1;