Move gapfill tests to the shared database

Ruslan Fomkin 2020-12-09 20:50:59 +01:00 committed by Ruslan Fomkin
parent 2e352664ba
commit 3448bcf2af
10 changed files with 369 additions and 523 deletions
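
The diffs below replace hard-coded table names in the gapfill query files with psql variables, so the same include files can run against plain tables, regular hypertables, and distributed hypertables in the shared test database. A minimal sketch of the substitution mechanism the tests rely on (demo_table is illustrative only):

-- bind the table name once; psql expands :CONDITIONS before sending the query
\set CONDITIONS demo_table
SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-02 18:00'),
       avg(value)
FROM :CONDITIONS
GROUP BY 1;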

View File

@@ -2,11 +2,6 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
1 | public | conditions | t
(1 row)
\ir :TEST_QUERY_NAME
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@@ -14,7 +9,7 @@
SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-02 18:00'),
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 1;
time_bucket_gapfill | first | avg
------------------------------+-------+---------
@@ -40,7 +35,7 @@ SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:0
device,
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 1,2;
time_bucket_gapfill | device | first | avg
------------------------------+--------+----------+----------
@@ -90,7 +85,7 @@ SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:0
device,
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 2,1;
time_bucket_gapfill | device | first | avg
------------------------------+--------+----------+----------
@@ -139,7 +134,7 @@ GROUP BY 2,1;
SELECT
time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:00'),
lag(min(time)) OVER ()
FROM conditions
FROM :CONDITIONS
GROUP BY 1;
time_bucket_gapfill | lag
------------------------------+------------------------------
@@ -155,63 +150,10 @@ GROUP BY 1;
(9 rows)
\set ECHO errors
node_name | host | port | database | node_created | database_created | extension_created
----------------+-----------+-------+----------------+--------------+------------------+-------------------
dist_gapfill_1 | localhost | 55432 | dist_gapfill_1 | t | t | t
(1 row)
node_name | host | port | database | node_created | database_created | extension_created
----------------+-----------+-------+----------------+--------------+------------------+-------------------
dist_gapfill_2 | localhost | 55432 | dist_gapfill_2 | t | t | t
(1 row)
node_name | host | port | database | node_created | database_created | extension_created
----------------+-----------+-------+----------------+--------------+------------------+-------------------
dist_gapfill_3 | localhost | 55432 | dist_gapfill_3 | t | t | t
(1 row)
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
2 | public | conditions | t
(1 row)
create_distributed_hypertable
-------------------------------
(3,public,metrics_int,t)
(1 row)
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
-- Distributed hypertables with one data node
\set ECHO errors
WARNING: only one data node was assigned to the hypertable
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
4 | public | conditions | t
(1 row)
WARNING: only one data node was assigned to the hypertable
create_distributed_hypertable
-------------------------------
(5,public,metrics_int,t)
(1 row)
delete_data_node
------------------
t
(1 row)
delete_data_node
------------------
t
(1 row)
delete_data_node
------------------
t
(1 row)
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
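
The :DIFF_CMD_* lines expand to \! diff shell commands assembled in the test preamble; they print nothing when the captured result files are identical, which is how the test asserts that distributed runs return the same rows as the single-node run. A minimal sketch of the pattern, with illustrative file names:

-- build the shell command as a string and store it in a psql variable
SELECT format('\! diff %s %s', '/tmp/a.out', '/tmp/b.out') AS "DIFF_CMD" \gset
-- interpolating the variable executes the diff; empty output means a match
:DIFF_CMD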

View File

@@ -2,14 +2,6 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
table_name
-------------------
gapfill_plan_test
(1 row)
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
:EXPLAIN
SELECT
@@ -113,23 +105,23 @@ SELECT
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
-> Parallel Seq Scan on _hyper_8_47_chunk
-> Parallel Seq Scan on _hyper_8_48_chunk
-> Parallel Seq Scan on _hyper_8_46_chunk
-> Parallel Seq Scan on _hyper_8_49_chunk
(15 rows)
-- test parallel query with locf
@@ -140,23 +132,23 @@ SELECT
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
-> Parallel Seq Scan on _hyper_8_47_chunk
-> Parallel Seq Scan on _hyper_8_48_chunk
-> Parallel Seq Scan on _hyper_8_46_chunk
-> Parallel Seq Scan on _hyper_8_49_chunk
(15 rows)
-- test parallel query with interpolate
@@ -167,23 +159,23 @@ SELECT
FROM gapfill_plan_test
GROUP BY 1
ORDER BY 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
-> Parallel Seq Scan on _hyper_8_47_chunk
-> Parallel Seq Scan on _hyper_8_48_chunk
-> Parallel Seq Scan on _hyper_8_46_chunk
-> Parallel Seq Scan on _hyper_8_49_chunk
(15 rows)
-- make sure we can run gapfill in parallel workers
@@ -196,26 +188,26 @@ FROM gapfill_plan_test
GROUP BY 1
ORDER BY 2
LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (interpolate(avg(value), NULL::record, NULL::record))
-> Custom Scan (GapFill)
-> Finalize GroupAggregate
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Group Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time"))
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time"))
-> Partial HashAggregate
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_2_chunk."time")
Group Key: time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_47_chunk."time")
-> Result
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_2_chunk
-> Parallel Seq Scan on _hyper_1_3_chunk
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_4_chunk
-> Parallel Seq Scan on _hyper_8_47_chunk
-> Parallel Seq Scan on _hyper_8_48_chunk
-> Parallel Seq Scan on _hyper_8_46_chunk
-> Parallel Seq Scan on _hyper_8_49_chunk
(18 rows)
-- actually run a parallel gapfill
@@ -241,10 +233,10 @@ ORDER BY 1;
----------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", NULL::timestamp with time zone, NULL::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
-> Index Scan Backward using _hyper_8_46_chunk_gapfill_plan_test_time_idx on _hyper_8_46_chunk
-> Index Scan Backward using _hyper_8_47_chunk_gapfill_plan_test_time_idx on _hyper_8_47_chunk
-> Index Scan Backward using _hyper_8_48_chunk_gapfill_plan_test_time_idx on _hyper_8_48_chunk
-> Index Scan Backward using _hyper_8_49_chunk_gapfill_plan_test_time_idx on _hyper_8_49_chunk
(6 rows)
SET max_parallel_workers_per_gather TO 0;
@@ -260,10 +252,10 @@ ORDER BY 1;
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
-> Index Scan Backward using _hyper_8_46_chunk_gapfill_plan_test_time_idx on _hyper_8_46_chunk
-> Index Scan Backward using _hyper_8_47_chunk_gapfill_plan_test_time_idx on _hyper_8_47_chunk
-> Index Scan Backward using _hyper_8_48_chunk_gapfill_plan_test_time_idx on _hyper_8_48_chunk
-> Index Scan Backward using _hyper_8_49_chunk_gapfill_plan_test_time_idx on _hyper_8_49_chunk
(9 rows)
-- test sort optimizations with interpolate
@@ -278,29 +270,29 @@ ORDER BY 1;
Group Key: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time")
-> Custom Scan (ChunkAppend) on gapfill_plan_test
Order: time_bucket_gapfill('@ 5 mins'::interval, gapfill_plan_test."time", 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone, 'Wed Dec 31 16:00:00 1969 PST'::timestamp with time zone)
-> Index Scan Backward using _hyper_1_1_chunk_gapfill_plan_test_time_idx on _hyper_1_1_chunk
-> Index Scan Backward using _hyper_1_2_chunk_gapfill_plan_test_time_idx on _hyper_1_2_chunk
-> Index Scan Backward using _hyper_1_3_chunk_gapfill_plan_test_time_idx on _hyper_1_3_chunk
-> Index Scan Backward using _hyper_1_4_chunk_gapfill_plan_test_time_idx on _hyper_1_4_chunk
-> Index Scan Backward using _hyper_8_46_chunk_gapfill_plan_test_time_idx on _hyper_8_46_chunk
-> Index Scan Backward using _hyper_8_47_chunk_gapfill_plan_test_time_idx on _hyper_8_47_chunk
-> Index Scan Backward using _hyper_8_48_chunk_gapfill_plan_test_time_idx on _hyper_8_48_chunk
-> Index Scan Backward using _hyper_8_49_chunk_gapfill_plan_test_time_idx on _hyper_8_49_chunk
(9 rows)
RESET max_parallel_workers_per_gather;
CREATE INDEX ON gapfill_plan_test(value, time);
CREATE INDEX gapfill_plan_test_indx ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 1,2;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_1_1_chunk.value
Sort Key: (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_46_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone)), _hyper_8_46_chunk.value
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
-> Seq Scan on _hyper_1_3_chunk
-> Seq Scan on _hyper_1_4_chunk
-> Seq Scan on _hyper_8_46_chunk
-> Seq Scan on _hyper_8_47_chunk
-> Seq Scan on _hyper_8_48_chunk
-> Seq Scan on _hyper_8_49_chunk
(8 rows)
-- test sort optimization with ordering by multiple columns and time_bucket as last member,
@@ -308,39 +300,27 @@ ORDER BY 1,2;
:EXPLAIN SELECT time_bucket_gapfill('5m',time),value
FROM gapfill_plan_test
ORDER BY 2,1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
-> Merge Append
Sort Key: _hyper_1_1_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_1_1_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone))
-> Index Only Scan using _hyper_1_1_chunk_gapfill_plan_test_value_time_idx on _hyper_1_1_chunk
-> Index Only Scan using _hyper_1_2_chunk_gapfill_plan_test_value_time_idx on _hyper_1_2_chunk
-> Index Only Scan using _hyper_1_3_chunk_gapfill_plan_test_value_time_idx on _hyper_1_3_chunk
-> Index Only Scan using _hyper_1_4_chunk_gapfill_plan_test_value_time_idx on _hyper_1_4_chunk
Sort Key: _hyper_8_46_chunk.value, (time_bucket_gapfill('@ 5 mins'::interval, _hyper_8_46_chunk."time", NULL::timestamp with time zone, NULL::timestamp with time zone))
-> Index Only Scan using _hyper_8_46_chunk_gapfill_plan_test_indx on _hyper_8_46_chunk
-> Index Only Scan using _hyper_8_47_chunk_gapfill_plan_test_indx on _hyper_8_47_chunk
-> Index Only Scan using _hyper_8_48_chunk_gapfill_plan_test_indx on _hyper_8_48_chunk
-> Index Only Scan using _hyper_8_49_chunk_gapfill_plan_test_indx on _hyper_8_49_chunk
(7 rows)
CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float);
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0)
;
CREATE TABLE devices(device_id INT, name TEXT);
INSERT INTO devices VALUES (1,'Device 1'),(2,'Device 2'),(3,'Device 3');
CREATE TABLE sensors(sensor_id INT, name TEXT);
INSERT INTO sensors VALUES (1,'Sensor 1'),(2,'Sensor 2'),(3,'Sensor 3');
-- All tests against table metrics_int first
\set METRICS metrics_int
-- All tests against table :METRICS first
\set ON_ERROR_STOP 0
-- inverse of previous test query to confirm an error is actually thrown
SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time = 5
GROUP BY 1,2,3 ORDER BY 2,3,1;
ERROR: division by zero
@@ -348,27 +328,27 @@ ERROR: division by zero
SELECT
time_bucket_gapfill(1,time,1,2),
first(min(time),min(time)) OVER ()
FROM metrics_int
FROM :METRICS
GROUP BY 1;
ERROR: window functions with multiple column references not supported
-- test with unsupported operator
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int
FROM :METRICS
WHERE time =0 AND time < 2
GROUP BY 1;
ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause
-- test with 2 tables and where clause doesn't match gapfill argument
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m, metrics_int m2
FROM :METRICS m, :METRICS m2
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause
-- test inner join and where clause doesn't match gapfill argument
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m1 INNER JOIN metrics_int m2 ON m1.time=m2.time
FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time
WHERE m1.time >=0 AND m1.time < 2
GROUP BY 1;
ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause
@@ -376,7 +356,7 @@ ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE c
-- not usable as start/stop
SELECT
time_bucket_gapfill(1,m1.time)
FROM metrics_int m1 LEFT OUTER JOIN metrics_int m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2
FROM :METRICS m1 LEFT OUTER JOIN :METRICS m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2
GROUP BY 1;
ERROR: missing time_bucket_gapfill argument: could not infer start from WHERE clause
\set ON_ERROR_STOP 1
@@ -393,8 +373,8 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 5
GROUP BY 1,2,3 ORDER BY 2,3,1;
time | device_id | sensor_id | locf3
@@ -413,8 +393,8 @@ SELECT
locf(min(value)) AS locf,
locf(min(value)::int,23) AS locf1,
locf(min(value)::int,(SELECT 42)) AS locf2,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3
@@ -436,8 +416,8 @@ SELECT
locf(min(value)) AS locf,
locf(min(value),23::float) AS locf1,
locf(min(value),(SELECT 42::float)) AS locf2,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 1,2,3;
time | device_id | sensor_id | avg | locf | locf1 | locf2 | locf3
@@ -455,9 +435,9 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)),
sum(locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)),
sum(locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3;
time | device_id | sensor_id | locf | sum
@@ -478,7 +458,7 @@ SELECT
sensor_id,
s.name,
avg(m.value)
FROM metrics_int m
FROM :METRICS m
INNER JOIN devices d USING(device_id)
INNER JOIN sensors s USING(sensor_id)
WHERE time BETWEEN 0 AND 5
@@ -509,14 +489,14 @@ SELECT
interpolate(min(value),(SELECT (-10,-10.0::float)),(SELECT (15,20.0::float))) AS ip2,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
) AS ip3
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
time | device_id | sensor_id | avg | ip | ip1 | ip2 | ip3
@@ -536,23 +516,23 @@ SELECT
sensor_id,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
),
sum(interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
)) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
time | device_id | sensor_id | interpolate | sum
@@ -570,8 +550,8 @@ GROUP BY 1,2,3 ORDER BY 2,3,1;
-- might not end up in top-level of jointree
SELECT
time_bucket_gapfill(1,m1.time)
FROM metrics_int m1
WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM metrics_int)
FROM :METRICS m1
WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM :METRICS)
GROUP BY 1;
time_bucket_gapfill
---------------------
@@ -582,7 +562,7 @@ GROUP BY 1;
-- test inner join with constraints in join condition
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m1 INNER JOIN metrics_int m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2
FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2
GROUP BY 1;
time_bucket_gapfill
---------------------
@@ -593,7 +573,7 @@ GROUP BY 1;
-- test actual table
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int
FROM :METRICS
WHERE time >=0 AND time < 2
GROUP BY 1;
time_bucket_gapfill
@@ -605,7 +585,7 @@ GROUP BY 1;
-- test with table alias
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int m
FROM :METRICS m
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
time_bucket_gapfill
@@ -617,7 +597,7 @@ GROUP BY 1;
-- test with 2 tables
SELECT
time_bucket_gapfill(1,m.time)
FROM metrics_int m, metrics_int m2
FROM :METRICS m, :METRICS m2
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
time_bucket_gapfill
@@ -632,8 +612,8 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))
FROM :METRICS m1
WHERE time >= 0 AND time < 5
GROUP BY 1,2,3;
-- execute 10 times to make sure turning it into generic plan works
@@ -726,14 +706,14 @@ SELECT
sensor_id,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
-- execute 10 times to make sure turning it into generic plan works
@@ -855,7 +835,7 @@ SELECT
device_id,
sensor_id,
min(value)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= $2 AND time < $3 AND device_id=1 AND sensor_id=1
GROUP BY 1,2,3 ORDER BY 2,3,1;
-- execute 10 times to make sure turning it into generic plan works
@@ -1566,8 +1546,6 @@ GROUP BY 1,color ORDER BY 2,1;
(0 rows)
-- test insert into SELECT
CREATE TABLE insert_test(id INT);
INSERT INTO insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1;
SELECT * FROM insert_test;
id
----
@@ -2302,22 +2280,6 @@ GROUP BY 1 ORDER BY 2 NULLS LAST,1;
(5 rows)
-- test queries on hypertable
CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int);
SELECT table_name FROM create_hypertable('metrics_tstz','time');
NOTICE: adding not-null constraint to column "time"
table_name
--------------
metrics_tstz
(1 row)
INSERT INTO metrics_tstz VALUES
(timestamptz '2018-01-01 05:00:00 PST', 1, 0.5, 10),
(timestamptz '2018-01-01 05:00:00 PST', 2, 0.7, 20),
(timestamptz '2018-01-01 05:00:00 PST', 3, 0.9, 30),
(timestamptz '2018-01-01 07:00:00 PST', 1, 0.0, 0),
(timestamptz '2018-01-01 07:00:00 PST', 2, 1.4, 40),
(timestamptz '2018-01-01 07:00:00 PST', 3, 0.9, 30)
;
-- test locf and interpolate together
SELECT
time_bucket_gapfill(interval '1h',time,timestamptz '2018-01-01 05:00:00-8', timestamptz '2018-01-01 07:00:00-8'),

View File

@@ -1,7 +1,9 @@
set(TEST_FILES_SHARED
constraint_exclusion_prepared.sql
decompress_placeholdervar.sql
dist_gapfill.sql
dist_insert.sql
gapfill.sql
ordered_append_join.sql
)

View File

@@ -0,0 +1,101 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
\set TEST_BASE_NAME dist_gapfill
\set TEST_METRICS_NAME gapfill_metrics
SELECT format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME",
format('%s/results/%s_singlenode.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_SINGLENODE",
format('%s/results/%s_partitionwise_off.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_PARTITIONWISE_OFF",
format('%s/results/%s_partitionwise_on.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_PARTITIONWISE_ON",
format('include/%s_query.sql', :'TEST_METRICS_NAME') AS "TEST_METRICS_QUERY_NAME",
format('%s/results/%s_nohyper.out', :'TEST_OUTPUT_DIR', :'TEST_METRICS_NAME') AS "TEST_METRICS_NOHYPER",
format('%s/results/%s_partitionwise_off.out', :'TEST_OUTPUT_DIR', :'TEST_METRICS_NAME') AS "TEST_METRICS_PARTITIONWISE_OFF" \gset
SELECT format('\! diff %s %s', :'TEST_SINGLENODE', :'TEST_PARTITIONWISE_OFF') AS "DIFF_CMD_PARTITIONWISE_OFF",
format('\! diff %s %s', :'TEST_SINGLENODE', :'TEST_PARTITIONWISE_ON') AS "DIFF_CMD_PARTITIONWISE_ON",
format('\! diff %s %s', :'TEST_METRICS_NOHYPER', :'TEST_METRICS_PARTITIONWISE_OFF') AS "DIFF_CMD_METRICS_PARTITIONWISE_OFF" \gset
-- Non-distributed hypertables
-- dist_gapfill_query
\set CONDITIONS conditions
\set ECHO all
\ir :TEST_QUERY_NAME
\set ECHO errors
\o :TEST_SINGLENODE
\ir :TEST_QUERY_NAME
\o
-- Run gapfill on a table as in gapfill.sql, where the result is verified
\set METRICS metrics_int
\o :TEST_METRICS_NOHYPER
\ir :TEST_METRICS_QUERY_NAME
\o
-- Distributed hypertables with three data nodes
-- dist_gapfill_query
\set CONDITIONS conditions_dist
SET enable_partitionwise_aggregate = 'off';
\o :TEST_PARTITIONWISE_OFF
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'on';
\o :TEST_PARTITIONWISE_ON
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'off';
-- gapfill_metrics_query
\set METRICS metrics_int_dist
\o :TEST_METRICS_PARTITIONWISE_OFF
\ir :TEST_METRICS_QUERY_NAME
\o
\set ECHO all
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
\set ECHO errors
-- Distributed hypertables with one data node
-- dist_gapfill_query
\set CONDITIONS conditions_dist1
SET enable_partitionwise_aggregate = 'off';
\o :TEST_PARTITIONWISE_OFF
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'on';
\o :TEST_PARTITIONWISE_ON
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'off';
-- gapfill_metrics_query
\set METRICS metrics_int_dist1
\o :TEST_METRICS_PARTITIONWISE_OFF
\ir :TEST_METRICS_QUERY_NAME
\o
\set ECHO all
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
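
Each block above follows the same capture pattern: redirect output to a results file, replay the shared query file, then compare. In isolation (output path illustrative):

-- redirect query output into a results file
\o /tmp/singlenode.out
-- replay the shared query file against whatever :CONDITIONS points at
\ir include/dist_gapfill_query.sql
-- restore output to stdout
\o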

View File

@@ -4,11 +4,6 @@
\set EXPLAIN 'EXPLAIN (COSTS OFF)'
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT table_name FROM create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
-- simple example
:EXPLAIN
SELECT
@@ -124,7 +119,7 @@ ORDER BY 1;
RESET max_parallel_workers_per_gather;
CREATE INDEX ON gapfill_plan_test(value, time);
CREATE INDEX gapfill_plan_test_indx ON gapfill_plan_test(value, time);
-- test sort optimization with ordering by multiple columns and time_bucket_gapfill not last,
-- must not use index scan
@@ -138,24 +133,9 @@ ORDER BY 1,2;
FROM gapfill_plan_test
ORDER BY 2,1;
CREATE TABLE metrics_int(time int,device_id int, sensor_id int, value float);
\set METRICS metrics_int
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0)
;
CREATE TABLE devices(device_id INT, name TEXT);
INSERT INTO devices VALUES (1,'Device 1'),(2,'Device 2'),(3,'Device 3');
CREATE TABLE sensors(sensor_id INT, name TEXT);
INSERT INTO sensors VALUES (1,'Sensor 1'),(2,'Sensor 2'),(3,'Sensor 3');
-- All tests against table metrics_int first
-- All tests against table :METRICS first
\set ON_ERROR_STOP 0
-- inverse of previous test query to confirm an error is actually thrown
@@ -163,8 +143,8 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time = 5
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -172,27 +152,27 @@ GROUP BY 1,2,3 ORDER BY 2,3,1;
SELECT
time_bucket_gapfill(1,time,1,2),
first(min(time),min(time)) OVER ()
FROM metrics_int
FROM :METRICS
GROUP BY 1;
-- test with unsupported operator
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int
FROM :METRICS
WHERE time =0 AND time < 2
GROUP BY 1;
-- test with 2 tables and where clause doesn't match gapfill argument
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m, metrics_int m2
FROM :METRICS m, :METRICS m2
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
-- test inner join and where clause doesn't match gapfill argument
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m1 INNER JOIN metrics_int m2 ON m1.time=m2.time
FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time
WHERE m1.time >=0 AND m1.time < 2
GROUP BY 1;
@@ -200,7 +180,7 @@ GROUP BY 1;
-- not usable as start/stop
SELECT
time_bucket_gapfill(1,m1.time)
FROM metrics_int m1 LEFT OUTER JOIN metrics_int m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2
FROM :METRICS m1 LEFT OUTER JOIN :METRICS m2 ON m1.time=m2.time AND m1.time >=0 AND m1.time < 2
GROUP BY 1;
\set ON_ERROR_STOP 1
@@ -618,8 +598,6 @@ WHERE false
GROUP BY 1,color ORDER BY 2,1;
-- test insert into SELECT
CREATE TABLE insert_test(id INT);
INSERT INTO insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1;
SELECT * FROM insert_test;
-- test join
@@ -985,16 +963,6 @@ FROM
GROUP BY 1 ORDER BY 2 NULLS LAST,1;
-- test queries on hypertable
CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int);
SELECT table_name FROM create_hypertable('metrics_tstz','time');
INSERT INTO metrics_tstz VALUES
(timestamptz '2018-01-01 05:00:00 PST', 1, 0.5, 10),
(timestamptz '2018-01-01 05:00:00 PST', 2, 0.7, 20),
(timestamptz '2018-01-01 05:00:00 PST', 3, 0.9, 30),
(timestamptz '2018-01-01 07:00:00 PST', 1, 0.0, 0),
(timestamptz '2018-01-01 07:00:00 PST', 2, 1.4, 40),
(timestamptz '2018-01-01 07:00:00 PST', 3, 0.9, 30)
;
-- test locf and interpolate together
SELECT

View File

@@ -5,25 +5,25 @@
SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-02 18:00'),
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 1;
SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:00'),
device,
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 1,2;
SELECT time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:00'),
device,
first(value, time),
avg(value)
FROM conditions
FROM :CONDITIONS
GROUP BY 2,1;
SELECT
time_bucket_gapfill('3 hours', time, '2017-01-01 06:00', '2017-01-01 18:00'),
lag(min(time)) OVER ()
FROM conditions
FROM :CONDITIONS
GROUP BY 1;

View File

@@ -11,8 +11,8 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 5
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -25,8 +25,8 @@ SELECT
locf(min(value)) AS locf,
locf(min(value)::int,23) AS locf1,
locf(min(value)::int,(SELECT 42)) AS locf2,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -39,8 +39,8 @@ SELECT
locf(min(value)) AS locf,
locf(min(value),23::float) AS locf1,
locf(min(value),(SELECT 42::float)) AS locf2,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)) AS locf3
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 1,2,3;
@@ -49,9 +49,9 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)),
sum(locf(min(value),(SELECT value FROM metrics_int m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM metrics_int m1
locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1)),
sum(locf(min(value),(SELECT value FROM :METRICS m2 WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3;
@@ -63,7 +63,7 @@ SELECT
sensor_id,
s.name,
avg(m.value)
FROM metrics_int m
FROM :METRICS m
INNER JOIN devices d USING(device_id)
INNER JOIN sensors s USING(sensor_id)
WHERE time BETWEEN 0 AND 5
@@ -80,14 +80,14 @@ SELECT
interpolate(min(value),(SELECT (-10,-10.0::float)),(SELECT (15,20.0::float))) AS ip2,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
) AS ip3
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -98,23 +98,23 @@ SELECT
sensor_id,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
),
sum(interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
)) OVER (PARTITION BY device_id, sensor_id ROWS 1 PRECEDING)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -123,34 +123,34 @@ GROUP BY 1,2,3 ORDER BY 2,3,1;
-- might not end up in top-level of jointree
SELECT
time_bucket_gapfill(1,m1.time)
FROM metrics_int m1
WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM metrics_int)
FROM :METRICS m1
WHERE m1.time >=0 AND m1.time < 2 AND device_id IN (SELECT device_id FROM :METRICS)
GROUP BY 1;
-- test inner join with constraints in join condition
SELECT
time_bucket_gapfill(1,m2.time)
FROM metrics_int m1 INNER JOIN metrics_int m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2
FROM :METRICS m1 INNER JOIN :METRICS m2 ON m1.time=m2.time AND m2.time >=0 AND m2.time < 2
GROUP BY 1;
-- test actual table
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int
FROM :METRICS
WHERE time >=0 AND time < 2
GROUP BY 1;
-- test with table alias
SELECT
time_bucket_gapfill(1,time)
FROM metrics_int m
FROM :METRICS m
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
-- test with 2 tables
SELECT
time_bucket_gapfill(1,m.time)
FROM metrics_int m, metrics_int m2
FROM :METRICS m, :METRICS m2
WHERE m.time >=0 AND m.time < 2
GROUP BY 1;
@@ -160,8 +160,8 @@ SELECT
time_bucket_gapfill(5,time,0,11) AS time,
device_id,
sensor_id,
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM metrics_int m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))
FROM metrics_int m1
locf(min(value)::int,(SELECT 1/(SELECT 0) FROM :METRICS m2 WHERE m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id ORDER BY time DESC LIMIT 1))
FROM :METRICS m1
WHERE time >= 0 AND time < 5
GROUP BY 1,2,3;
@@ -187,14 +187,14 @@ SELECT
sensor_id,
interpolate(
min(value),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time<0 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time DESC LIMIT 1),
(SELECT (time,value) FROM metrics_int m2
(SELECT (time,value) FROM :METRICS m2
WHERE time>10 AND m2.device_id=m1.device_id AND m2.sensor_id=m1.sensor_id
ORDER BY time LIMIT 1)
)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= 0 AND time < 10
GROUP BY 1,2,3 ORDER BY 2,3,1;
@@ -220,7 +220,7 @@ SELECT
device_id,
sensor_id,
min(value)
FROM metrics_int m1
FROM :METRICS m1
WHERE time >= $2 AND time < $3 AND device_id=1 AND sensor_id=1
GROUP BY 1,2,3 ORDER BY 2,3,1;
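
For reference, the locf semantics these queries exercise can be reproduced in a self-contained form (values illustrative): locf carries the last seen bucket value forward into gap rows.

SELECT time_bucket_gapfill(5, time, 0, 15) AS time,
       locf(min(value))
FROM (VALUES (0, 1.0), (10, 3.0)) v(time, value)
GROUP BY 1 ORDER BY 1;
-- buckets 0, 5, 10 yield 1.0, 1.0 (carried forward), 3.0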

View File

@@ -95,3 +95,119 @@ ALTER TABLE metrics_dist DROP COLUMN filler_3;
INSERT INTO metrics_dist(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ANALYZE metrics_dist;
-- Tables for gapfill and distributed gapfill tests
CREATE TABLE gapfill_plan_test(time timestamptz NOT NULL, value float);
SELECT create_hypertable('gapfill_plan_test','time',chunk_time_interval=>'4 weeks'::interval);
INSERT INTO gapfill_plan_test SELECT generate_series('2018-01-01'::timestamptz,'2018-04-01'::timestamptz,'1m'::interval), 1.0;
CREATE TABLE metrics_int(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
CREATE TABLE devices(device_id INT, name TEXT);
INSERT INTO devices VALUES (1,'Device 1'),(2,'Device 2'),(3,'Device 3');
CREATE TABLE sensors(sensor_id INT, name TEXT);
INSERT INTO sensors VALUES (1,'Sensor 1'),(2,'Sensor 2'),(3,'Sensor 3');
CREATE TABLE insert_test(id INT);
INSERT INTO insert_test SELECT time_bucket_gapfill(1,time,1,5) FROM (VALUES (1),(2)) v(time) GROUP BY 1 ORDER BY 1;
CREATE TABLE metrics_tstz(time timestamptz, device_id INT, v1 float, v2 int);
SELECT create_hypertable('metrics_tstz','time');
INSERT INTO metrics_tstz VALUES
(timestamptz '2018-01-01 05:00:00 PST', 1, 0.5, 10),
(timestamptz '2018-01-01 05:00:00 PST', 2, 0.7, 20),
(timestamptz '2018-01-01 05:00:00 PST', 3, 0.9, 30),
(timestamptz '2018-01-01 07:00:00 PST', 1, 0.0, 0),
(timestamptz '2018-01-01 07:00:00 PST', 2, 1.4, 40),
(timestamptz '2018-01-01 07:00:00 PST', 3, 0.9, 30)
;
CREATE TABLE conditions(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_hypertable('conditions', 'time');
INSERT INTO conditions VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
CREATE TABLE conditions_dist(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_distributed_hypertable('conditions_dist', 'time', 'device', 3);
INSERT INTO conditions_dist VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
CREATE TABLE metrics_int_dist(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
SELECT create_distributed_hypertable('metrics_int_dist','time','device_id',chunk_time_interval => 50);
INSERT INTO metrics_int_dist VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
CREATE TABLE conditions_dist1(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_distributed_hypertable('conditions_dist1', 'time', 'device', 1,
data_nodes => '{"data_node_1"}');
INSERT INTO conditions_dist1 VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
CREATE TABLE metrics_int_dist1(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
SELECT create_distributed_hypertable('metrics_int_dist1', 'time', 'device_id', chunk_time_interval => 50,
data_nodes => '{"data_node_1"}');
INSERT INTO metrics_int_dist1 VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
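
With all of these tables created once in the shared setup, an individual test only needs to bind the psql variables and replay the shared query files, for example:

\set CONDITIONS conditions_dist
\set METRICS metrics_int_dist
\ir include/dist_gapfill_query.sql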

View File

@@ -10,8 +10,6 @@ set(TEST_FILES
continuous_aggs_refresh.sql
continuous_aggs_watermark.sql
dist_views.sql
dist_gapfill.sql
gapfill.sql
partialize_finalize.sql
)

View File

@@ -1,243 +0,0 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
SET ROLE :ROLE_1;
\set TEST_BASE_NAME dist_gapfill
\set TEST_METRICS_NAME gapfill_metrics
\set DATA_NODE_1 :TEST_BASE_NAME _1
\set DATA_NODE_2 :TEST_BASE_NAME _2
\set DATA_NODE_3 :TEST_BASE_NAME _3
SELECT format('include/%s_query.sql', :'TEST_BASE_NAME') AS "TEST_QUERY_NAME",
format('%s/results/%s_singlenode.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_SINGLENODE",
format('%s/results/%s_partitionwise_off.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_PARTITIONWISE_OFF",
format('%s/results/%s_partitionwise_on.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') AS "TEST_PARTITIONWISE_ON",
format('include/%s_query.sql', :'TEST_METRICS_NAME') AS "TEST_METRICS_QUERY_NAME",
format('%s/results/%s_nohyper.out', :'TEST_OUTPUT_DIR', :'TEST_METRICS_NAME') AS "TEST_METRICS_NOHYPER",
format('%s/results/%s_partitionwise_off.out', :'TEST_OUTPUT_DIR', :'TEST_METRICS_NAME') AS "TEST_METRICS_PARTITIONWISE_OFF" \gset
SELECT format('\! diff %s %s', :'TEST_SINGLENODE', :'TEST_PARTITIONWISE_OFF') AS "DIFF_CMD_PARTITIONWISE_OFF",
format('\! diff %s %s', :'TEST_SINGLENODE', :'TEST_PARTITIONWISE_ON') AS "DIFF_CMD_PARTITIONWISE_ON",
format('\! diff %s %s', :'TEST_METRICS_NOHYPER', :'TEST_METRICS_PARTITIONWISE_OFF') AS "DIFF_CMD_METRICS_PARTITIONWISE_OFF" \gset
SET client_min_messages TO ERROR;
DROP TABLE IF EXISTS metrics_int;
DROP TABLE IF EXISTS conditions;
DROP TABLE IF EXISTS devices;
DROP TABLE IF EXISTS sensors;
SET client_min_messages TO NOTICE;
-- Non-distributed hypertables
-- dist_gapfill_query
CREATE TABLE conditions(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_hypertable('conditions', 'time');
INSERT INTO conditions VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
\set ECHO all
\ir :TEST_QUERY_NAME
\set ECHO errors
\o :TEST_SINGLENODE
\ir :TEST_QUERY_NAME
\o
DROP TABLE conditions CASCADE;
-- Run gapfill on a table as in gapfill.sql, where the result is verified
CREATE TABLE metrics_int(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
CREATE TABLE devices(device_id INT, name TEXT);
INSERT INTO devices VALUES (1,'Device 1'),(2,'Device 2'),(3,'Device 3');
CREATE TABLE sensors(sensor_id INT, name TEXT);
INSERT INTO sensors VALUES (1,'Sensor 1'),(2,'Sensor 2'),(3,'Sensor 3');
\o :TEST_METRICS_NOHYPER
\ir :TEST_METRICS_QUERY_NAME
\o
DROP TABLE metrics_int CASCADE;
-- Distributed hypertables with three data nodes
-- dist_gapfill_query
SET ROLE :ROLE_CLUSTER_SUPERUSER;
SET client_min_messages TO ERROR;
DROP DATABASE IF EXISTS :DATA_NODE_1;
DROP DATABASE IF EXISTS :DATA_NODE_2;
DROP DATABASE IF EXISTS :DATA_NODE_3;
SELECT * FROM add_data_node(:'DATA_NODE_1', host => 'localhost',
database => :'DATA_NODE_1');
SELECT * FROM add_data_node(:'DATA_NODE_2', host => 'localhost',
database => :'DATA_NODE_2');
SELECT * FROM add_data_node(:'DATA_NODE_3', host => 'localhost',
database => :'DATA_NODE_3');
GRANT USAGE ON FOREIGN SERVER :DATA_NODE_1, :DATA_NODE_2, :DATA_NODE_3 TO PUBLIC;
SET ROLE :ROLE_1;
SET client_min_messages TO NOTICE;
CREATE TABLE conditions(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 3);
INSERT INTO conditions VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
SET enable_partitionwise_aggregate = 'off';
\o :TEST_PARTITIONWISE_OFF
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'on';
\o :TEST_PARTITIONWISE_ON
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'off';
-- gapfill_metrics_query
CREATE TABLE metrics_int(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
SELECT create_distributed_hypertable('metrics_int','time','device_id',chunk_time_interval => 50);
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
\o :TEST_METRICS_PARTITIONWISE_OFF
\ir :TEST_METRICS_QUERY_NAME
\o
SET client_min_messages TO ERROR;
DROP TABLE conditions CASCADE;
DROP TABLE metrics_int CASCADE;
SET client_min_messages TO NOTICE;
\set ECHO all
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
-- Distributed hypertables with one data node
\set ECHO errors
-- dist_gapfill_query
CREATE TABLE conditions(
time timestamptz NOT NULL,
device int,
value float
);
SELECT * FROM create_distributed_hypertable('conditions', 'time', 'device', 1,
data_nodes => ARRAY[:'DATA_NODE_1']);
INSERT INTO conditions VALUES
('2017-01-01 06:01', 1, 1.2),
('2017-01-01 09:11', 3, 4.3),
('2017-01-01 08:01', 1, 7.3),
('2017-01-02 08:01', 2, 0.23),
('2018-07-02 08:01', 87, 0.0),
('2018-07-01 06:01', 13, 3.1),
('2018-07-01 09:11', 90, 10303.12),
('2018-07-01 08:01', 29, 64);
SET enable_partitionwise_aggregate = 'off';
\o :TEST_PARTITIONWISE_OFF
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'on';
\o :TEST_PARTITIONWISE_ON
\ir :TEST_QUERY_NAME
\o
SET enable_partitionwise_aggregate = 'off';
-- gapfill_metrics_query
CREATE TABLE metrics_int(
time int NOT NULL,
device_id int,
sensor_id int,
value float);
SELECT create_distributed_hypertable('metrics_int', 'time', 'device_id', chunk_time_interval => 50,
data_nodes => ARRAY[:'DATA_NODE_1']);
INSERT INTO metrics_int VALUES
(-100,1,1,0.0),
(-100,1,2,-100.0),
(0,1,1,5.0),
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
\o :TEST_METRICS_PARTITIONWISE_OFF
\ir :TEST_METRICS_QUERY_NAME
\o
SET client_min_messages TO ERROR;
DROP TABLE conditions CASCADE;
DROP TABLE metrics_int CASCADE;
DROP TABLE devices;
DROP TABLE sensors;
SET ROLE :ROLE_CLUSTER_SUPERUSER;
SELECT delete_data_node(:'DATA_NODE_1');
SELECT delete_data_node(:'DATA_NODE_2');
SELECT delete_data_node(:'DATA_NODE_3');
DROP DATABASE :DATA_NODE_1;
DROP DATABASE :DATA_NODE_2;
DROP DATABASE :DATA_NODE_3;
SET ROLE :ROLE_1;
SET client_min_messages TO NOTICE;
\set ECHO all
:DIFF_CMD_PARTITIONWISE_OFF
:DIFF_CMD_PARTITIONWISE_ON
:DIFF_CMD_METRICS_PARTITIONWISE_OFF
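
The per-test data-node lifecycle that this deleted file managed inline, and that the shared setup now owns, reduces to the following pair of calls (node name taken from the test output above):

SELECT * FROM add_data_node('dist_gapfill_1', host => 'localhost', database => 'dist_gapfill_1');
-- ... run the distributed gapfill queries ...
SELECT delete_data_node('dist_gapfill_1');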