Run regresscheck on PG13

Sven Klemm authored 2021-01-18 09:02:02 +01:00 (committed by Sven Klemm)
parent b2849d8f34
commit 04d614e32e
15 changed files with 10139 additions and 0 deletions


@@ -105,6 +105,7 @@ jobs:
set -o pipefail
if [[ "${{ matrix.pg }}" == "13.1" ]]; then
make -k -C build isolationcheck isolationcheck-t ${{ matrix.installcheck_args }} | tee installcheck.log
make -k -C build regresscheck | tee -a installcheck.log
else
make -k -C build installcheck ${{ matrix.installcheck_args }} | tee installcheck.log
fi

File diff suppressed because it is too large.

test/expected/append-13.out (new file, 2252 lines)

File diff suppressed because it is too large.


@@ -0,0 +1,766 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
-- Valid chunk sizing function for testing
CREATE OR REPLACE FUNCTION calculate_chunk_interval(
dimension_id INTEGER,
dimension_coord BIGINT,
chunk_target_size BIGINT
)
RETURNS BIGINT LANGUAGE PLPGSQL AS
$BODY$
DECLARE
BEGIN
RETURN -1;
END
$BODY$;
-- Chunk sizing function with bad signature
CREATE OR REPLACE FUNCTION bad_calculate_chunk_interval(
dimension_id INTEGER
)
RETURNS BIGINT LANGUAGE PLPGSQL AS
$BODY$
DECLARE
BEGIN
RETURN -1;
END
$BODY$;
-- Set a fixed memory cache size to make tests deterministic
-- (independent of available machine memory)
SELECT * FROM test.set_memory_cache_size('2GB');
set_memory_cache_size
-----------------------
2147483648
(1 row)
-- test NULL handling
\set ON_ERROR_STOP 0
SELECT * FROM set_adaptive_chunking(NULL,NULL);
ERROR: invalid hypertable: cannot be NULL
\set ON_ERROR_STOP 1
CREATE TABLE test_adaptive(time timestamptz, temp float, location int);
\set ON_ERROR_STOP 0
-- Bad signature of sizing func should fail
SELECT create_hypertable('test_adaptive', 'time',
chunk_target_size => '1MB',
chunk_sizing_func => 'bad_calculate_chunk_interval');
ERROR: invalid function signature
\set ON_ERROR_STOP 1
-- Setting sizing func with correct signature should work
SELECT create_hypertable('test_adaptive', 'time',
chunk_target_size => '1MB',
chunk_sizing_func => 'calculate_chunk_interval');
WARNING: target chunk size for adaptive chunking is less than 10 MB
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------
(1,public,test_adaptive,t)
(1 row)
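-- (For reference, the create_hypertable return tuple is
-- (hypertable_id, schema_name, table_name, created).)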
DROP TABLE test_adaptive;
CREATE TABLE test_adaptive(time timestamptz, temp float, location int);
-- Size but no explicit func should use default func
SELECT create_hypertable('test_adaptive', 'time',
chunk_target_size => '1MB',
create_default_indexes => true);
WARNING: target chunk size for adaptive chunking is less than 10 MB
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------
(2,public,test_adaptive,t)
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 1048576
(1 row)
-- Check that adaptive chunking sets a 1 day default chunk time
-- interval => 86400000000 microseconds
SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func
----+---------------+-------------+--------------------------+---------+------------+--------------------------+-------------------+-----------------+-------------------------+------------------
2 | 2 | time | timestamp with time zone | t | | | | 86400000000 | |
(1 row)
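-- Illustrative check, not part of the original test output: one day is
-- 86400 seconds, i.e. 86400000000 microseconds, matching the
-- interval_length above.
SELECT (extract(epoch FROM INTERVAL '1 day') * 1000000)::bigint AS one_day_in_usec;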
-- Change the target size
SELECT * FROM set_adaptive_chunking('test_adaptive', '2MB');
WARNING: target chunk size for adaptive chunking is less than 10 MB
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 2097152
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 2097152
(1 row)
\set ON_ERROR_STOP 0
-- Setting NULL func should fail
SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB', NULL);
ERROR: invalid chunk sizing function
\set ON_ERROR_STOP 1
-- Setting NULL size disables adaptive chunking
SELECT * FROM set_adaptive_chunking('test_adaptive', NULL);
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 0
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
(1 row)
SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
WARNING: target chunk size for adaptive chunking is less than 10 MB
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 1048576
(1 row)
-- Setting size to 'off' should also disable
SELECT * FROM set_adaptive_chunking('test_adaptive', 'off');
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 0
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
(1 row)
-- Setting 0 size should also disable
SELECT * FROM set_adaptive_chunking('test_adaptive', '0MB');
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 0
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
(1 row)
SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
WARNING: target chunk size for adaptive chunking is less than 10 MB
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 1048576
(1 row)
-- No warning about small target size if > 10MB
SELECT * FROM set_adaptive_chunking('test_adaptive', '11MB');
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 11534336
(1 row)
-- Setting size to 'estimate' should compute an estimated target size
SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 1932735283
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 1932735283
(1 row)
-- Use a lower memory setting to test that the calculated chunk_target_size is reduced
SELECT * FROM test.set_memory_cache_size('512MB');
set_memory_cache_size
-----------------------
536870912
(1 row)
SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 483183820
(1 row)
SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
FROM _timescaledb_catalog.hypertable;
table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
---------------+--------------------------+--------------------------+-------------------
test_adaptive | _timescaledb_internal | calculate_chunk_interval | 483183820
(1 row)
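-- Illustrative check, not part of the original test output: in both runs
-- above, the estimated target appears to be the configured memory cache
-- size scaled by 0.9 and floored.
SELECT floor(2147483648 * 0.9)::bigint AS est_for_2gb,
       floor(536870912 * 0.9)::bigint AS est_for_512mb;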
-- Reset memory settings
SELECT * FROM test.set_memory_cache_size('2GB');
set_memory_cache_size
-----------------------
2147483648
(1 row)
-- Set a reasonable test value
SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
WARNING: target chunk size for adaptive chunking is less than 10 MB
chunk_sizing_func | chunk_target_size
------------------------------------------------+-------------------
_timescaledb_internal.calculate_chunk_interval | 1048576
(1 row)
-- Show the interval length before and after adaptation
SELECT id, hypertable_id, interval_length FROM _timescaledb_catalog.dimension;
id | hypertable_id | interval_length
----+---------------+-----------------
2 | 2 | 86400000000
(1 row)
-- Generate data to create chunks. We use the hash of the time value
-- to get deterministic location IDs so that we always spread these
-- values the same way across space partitions
INSERT INTO test_adaptive
SELECT time, random() * 35, _timescaledb_internal.get_partition_hash(time) FROM
generate_series('2017-03-07T18:18:03+00'::timestamptz - interval '175 days',
'2017-03-07T18:18:03+00'::timestamptz,
'2 minutes') as time;
SELECT chunk_name, primary_dimension, range_start, range_end
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_adaptive' ORDER BY chunk_name;
chunk_name | primary_dimension | range_start | range_end
-------------------+-------------------+-------------------------------------+-------------------------------------
_hyper_2_10_chunk | time | Fri Sep 23 22:08:15.728855 2016 PDT | Sat Oct 01 13:16:09.024252 2016 PDT
_hyper_2_11_chunk | time | Sat Oct 01 13:16:09.024252 2016 PDT | Fri Oct 14 03:19:44.231212 2016 PDT
_hyper_2_12_chunk | time | Fri Oct 14 03:19:44.231212 2016 PDT | Wed Oct 26 19:20:54.4938 2016 PDT
_hyper_2_13_chunk | time | Wed Oct 26 19:20:54.4938 2016 PDT | Fri Nov 04 04:03:56.248528 2016 PDT
_hyper_2_14_chunk | time | Fri Nov 04 04:03:56.248528 2016 PDT | Fri Nov 18 21:58:20.411232 2016 PST
_hyper_2_15_chunk | time | Fri Nov 18 21:58:20.411232 2016 PST | Sat Dec 03 16:52:44.573936 2016 PST
_hyper_2_16_chunk | time | Sat Dec 03 16:52:44.573936 2016 PST | Sun Dec 18 11:47:08.73664 2016 PST
_hyper_2_17_chunk | time | Sun Dec 18 11:47:08.73664 2016 PST | Mon Jan 02 06:41:32.899344 2017 PST
_hyper_2_18_chunk | time | Mon Jan 02 06:41:32.899344 2017 PST | Tue Jan 17 01:35:57.062048 2017 PST
_hyper_2_19_chunk | time | Tue Jan 17 01:35:57.062048 2017 PST | Tue Jan 31 20:30:21.224752 2017 PST
_hyper_2_1_chunk | time | Mon Sep 12 17:00:00 2016 PDT | Tue Sep 13 17:00:00 2016 PDT
_hyper_2_20_chunk | time | Tue Jan 31 20:30:21.224752 2017 PST | Wed Feb 15 15:24:45.387456 2017 PST
_hyper_2_21_chunk | time | Wed Feb 15 15:24:45.387456 2017 PST | Thu Mar 02 10:19:09.55016 2017 PST
_hyper_2_22_chunk | time | Thu Mar 02 10:19:09.55016 2017 PST | Fri Mar 17 06:13:33.712864 2017 PDT
_hyper_2_2_chunk | time | Tue Sep 13 17:00:00 2016 PDT | Wed Sep 14 17:00:00 2016 PDT
_hyper_2_3_chunk | time | Wed Sep 14 17:00:00 2016 PDT | Thu Sep 15 17:00:00 2016 PDT
_hyper_2_4_chunk | time | Thu Sep 15 17:00:00 2016 PDT | Fri Sep 16 15:02:54.2208 2016 PDT
_hyper_2_5_chunk | time | Fri Sep 16 15:02:54.2208 2016 PDT | Sun Sep 18 03:12:14.342144 2016 PDT
_hyper_2_6_chunk | time | Sun Sep 18 03:12:14.342144 2016 PDT | Mon Sep 19 15:21:34.463488 2016 PDT
_hyper_2_7_chunk | time | Mon Sep 19 15:21:34.463488 2016 PDT | Wed Sep 21 03:30:54.584832 2016 PDT
_hyper_2_8_chunk | time | Wed Sep 21 03:30:54.584832 2016 PDT | Thu Sep 22 03:45:14.901568 2016 PDT
_hyper_2_9_chunk | time | Thu Sep 22 03:45:14.901568 2016 PDT | Fri Sep 23 22:08:15.728855 2016 PDT
(22 rows)
-- Do the same thing without an index on the time column. This affects
-- both the calculation of the chunk's fill-factor and its size
CREATE TABLE test_adaptive_no_index(time timestamptz, temp float, location int);
-- Size but no explicit func should use default func
-- No default indexes should warn and use heap scan for min and max
SELECT create_hypertable('test_adaptive_no_index', 'time',
chunk_target_size => '1MB',
create_default_indexes => false);
WARNING: target chunk size for adaptive chunking is less than 10 MB
WARNING: no index on "time" found for adaptive chunking on hypertable "test_adaptive_no_index"
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------------------
(3,public,test_adaptive_no_index,t)
(1 row)
SELECT id, hypertable_id, interval_length FROM _timescaledb_catalog.dimension;
id | hypertable_id | interval_length
----+---------------+-----------------
2 | 2 | 1277664162704
3 | 3 | 86400000000
(2 rows)
INSERT INTO test_adaptive_no_index
SELECT time, random() * 35, _timescaledb_internal.get_partition_hash(time) FROM
generate_series('2017-03-07T18:18:03+00'::timestamptz - interval '175 days',
'2017-03-07T18:18:03+00'::timestamptz,
'2 minutes') as time;
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_23_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_23_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_24_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_23_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_24_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_25_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_24_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_25_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_26_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_25_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_26_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_27_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_26_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_27_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_28_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_27_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_28_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_29_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_28_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_29_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_30_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_29_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_30_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_31_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_30_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_31_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_32_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_31_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_32_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_33_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_32_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_33_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_34_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_33_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_34_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_35_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_34_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_35_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_36_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_35_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_36_chunk"
WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_37_chunk"
SELECT chunk_name, primary_dimension, range_start, range_end
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_adaptive_no_index' ORDER BY chunk_name;
chunk_name | primary_dimension | range_start | range_end
-------------------+-------------------+-------------------------------------+-------------------------------------
_hyper_3_23_chunk | time | Mon Sep 12 17:00:00 2016 PDT | Tue Sep 13 17:00:00 2016 PDT
_hyper_3_24_chunk | time | Tue Sep 13 17:00:00 2016 PDT | Wed Sep 14 17:00:00 2016 PDT
_hyper_3_25_chunk | time | Wed Sep 14 17:00:00 2016 PDT | Thu Sep 15 17:00:00 2016 PDT
_hyper_3_26_chunk | time | Thu Sep 15 17:00:00 2016 PDT | Sun Sep 18 02:18:45.310968 2016 PDT
_hyper_3_27_chunk | time | Sun Sep 18 02:18:45.310968 2016 PDT | Sun Sep 18 06:20:21.359312 2016 PDT
_hyper_3_28_chunk | time | Sun Sep 18 06:20:21.359312 2016 PDT | Wed Sep 21 08:25:00.957966 2016 PDT
_hyper_3_29_chunk | time | Wed Sep 21 08:25:00.957966 2016 PDT | Thu Sep 22 03:26:42.599807 2016 PDT
_hyper_3_30_chunk | time | Thu Sep 22 03:26:42.599807 2016 PDT | Sun Sep 25 18:03:30.59359 2016 PDT
_hyper_3_31_chunk | time | Sun Sep 25 18:03:30.59359 2016 PDT | Sat Oct 08 05:32:02.75732 2016 PDT
_hyper_3_32_chunk | time | Sat Oct 08 05:32:02.75732 2016 PDT | Mon Oct 31 07:33:42.652938 2016 PDT
_hyper_3_33_chunk | time | Mon Oct 31 07:33:42.652938 2016 PDT | Wed Nov 23 08:35:22.548556 2016 PST
_hyper_3_34_chunk | time | Wed Nov 23 08:35:22.548556 2016 PST | Thu Dec 15 09:48:28.1888 2016 PST
_hyper_3_35_chunk | time | Thu Dec 15 09:48:28.1888 2016 PST | Wed Jan 11 04:57:38.357845 2017 PST
_hyper_3_36_chunk | time | Wed Jan 11 04:57:38.357845 2017 PST | Tue Feb 07 00:06:48.52689 2017 PST
_hyper_3_37_chunk | time | Tue Feb 07 00:06:48.52689 2017 PST | Sun Mar 05 19:15:58.695935 2017 PST
_hyper_3_38_chunk | time | Sun Mar 05 19:15:58.695935 2017 PST | Sat Apr 01 15:25:08.86498 2017 PDT
(16 rows)
-- Test added to check that the correct index (i.e. time index) is being used
-- to find the min and max. Previously a bug selected the first index listed,
-- which in this case is location rather than time and therefore could return
-- the wrong min and max if items at the start and end of the index did not have
-- the correct min and max timestamps.
--
-- In this test, we create chunks with a lot of locations with only one reading
-- that is at the beginning of the time frame, and then one location in the middle
-- of the range that has two readings, one that is the same as the others and one
-- that is larger. The algorithm should use these two readings for min & max; however,
-- if it's broken (as it was before), it would choose just the reading that is common
-- to all the locations.
CREATE TABLE test_adaptive_correct_index(time timestamptz, temp float, location int);
SELECT create_hypertable('test_adaptive_correct_index', 'time',
chunk_target_size => '100MB',
chunk_time_interval => 86400000000,
create_default_indexes => false);
WARNING: no index on "time" found for adaptive chunking on hypertable "test_adaptive_correct_index"
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
------------------------------------------
(4,public,test_adaptive_correct_index,t)
(1 row)
CREATE INDEX ON test_adaptive_correct_index(location);
CREATE INDEX ON test_adaptive_correct_index(time DESC);
-- First chunk
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-01T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(1, 1000) as val;
INSERT INTO test_adaptive_correct_index
SELECT time, 0.0, '1500' FROM
generate_series('2018-01-01T00:00:00+00'::timestamptz,
'2018-01-01T20:00:00+00'::timestamptz,
'10 hours') as time;
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-01T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(2001, 3000) as val;
-- Second chunk
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-02T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(1, 1000) as val;
INSERT INTO test_adaptive_correct_index
SELECT time, 0.0, '1500' FROM
generate_series('2018-01-02T00:00:00+00'::timestamptz,
'2018-01-02T20:00:00+00'::timestamptz,
'10 hours') as time;
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-02T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(2001, 3000) as val;
-- Third chunk
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-03T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(1, 1000) as val;
INSERT INTO test_adaptive_correct_index
SELECT time, 0.0, '1500' FROM
generate_series('2018-01-03T00:00:00+00'::timestamptz,
'2018-01-03T20:00:00+00'::timestamptz,
'10 hours') as time;
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-03T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(2001, 3000) as val;
-- This should be the start of the fourth chunk
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-04T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(1, 1000) as val;
INSERT INTO test_adaptive_correct_index
SELECT time, 0.0, '1500' FROM
generate_series('2018-01-04T00:00:00+00'::timestamptz,
'2018-01-04T20:00:00+00'::timestamptz,
'10 hours') as time;
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-04T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(2001, 3000) as val;
-- If working correctly, this goes in the 4th chunk; otherwise it's a separate 5th chunk
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-05T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(1, 1000) as val;
INSERT INTO test_adaptive_correct_index
SELECT time, 0.0, '1500' FROM
generate_series('2018-01-05T00:00:00+00'::timestamptz,
'2018-01-05T20:00:00+00'::timestamptz,
'10 hours') as time;
INSERT INTO test_adaptive_correct_index
SELECT '2018-01-05T00:00:00+00'::timestamptz, val, val + 1 FROM
generate_series(2001, 3000) as val;
-- This should show 4 chunks, rather than 5
SELECT count(*)
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_adaptive_correct_index';
count
-------
4
(1 row)
-- The interval_length should no longer be 86400000000 for our hypertable, so the 3rd column should be true.
-- Note: the exact interval_length is non-deterministic, so we can't use its actual value for tests
SELECT id, hypertable_id, interval_length > 86400000000 FROM _timescaledb_catalog.dimension;
id | hypertable_id | ?column?
----+---------------+----------
2 | 2 | t
3 | 3 | t
4 | 4 | t
(3 rows)
-- Drop because its size and estimated chunk_interval are non-deterministic, so
-- we don't want to make other tests flaky.
DROP TABLE test_adaptive_correct_index;
-- Test with space partitioning. This might affect the estimation
-- since there are more chunks in the same time interval and space
-- chunks might be unevenly filled.
CREATE TABLE test_adaptive_space(time timestamptz, temp float, location int);
SELECT create_hypertable('test_adaptive_space', 'time', 'location', 2,
chunk_target_size => '1MB',
create_default_indexes => true);
WARNING: target chunk size for adaptive chunking is less than 10 MB
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------------
(5,public,test_adaptive_space,t)
(1 row)
SELECT id, hypertable_id, interval_length FROM _timescaledb_catalog.dimension;
id | hypertable_id | interval_length
----+---------------+-----------------
2 | 2 | 1277664162704
3 | 3 | 2315350169045
5 | 5 | 86400000000
6 | 5 |
(4 rows)
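-- Note: dimension id 6 is the space dimension on "location"; space
-- dimensions have num_slices (2 here) instead of an interval_length,
-- hence the empty interval_length above.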
INSERT INTO test_adaptive_space
SELECT time, random() * 35, _timescaledb_internal.get_partition_hash(time) FROM
generate_series('2017-03-07T18:18:03+00'::timestamptz - interval '175 days',
'2017-03-07T18:18:03+00'::timestamptz,
'2 minutes') as time;
\x
SELECT chunk_name, range_start, range_end
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_adaptive_space' ORDER BY chunk_name;
-[ RECORD 1 ]------------------------------------
chunk_name | _hyper_5_43_chunk
range_start | Mon Sep 12 17:00:00 2016 PDT
range_end | Tue Sep 13 17:00:00 2016 PDT
-[ RECORD 2 ]------------------------------------
chunk_name | _hyper_5_44_chunk
range_start | Mon Sep 12 17:00:00 2016 PDT
range_end | Tue Sep 13 17:00:00 2016 PDT
-[ RECORD 3 ]------------------------------------
chunk_name | _hyper_5_45_chunk
range_start | Tue Sep 13 17:00:00 2016 PDT
range_end | Wed Sep 14 17:00:00 2016 PDT
-[ RECORD 4 ]------------------------------------
chunk_name | _hyper_5_46_chunk
range_start | Tue Sep 13 17:00:00 2016 PDT
range_end | Wed Sep 14 17:00:00 2016 PDT
-[ RECORD 5 ]------------------------------------
chunk_name | _hyper_5_47_chunk
range_start | Wed Sep 14 17:00:00 2016 PDT
range_end | Thu Sep 15 11:47:51.47376 2016 PDT
-[ RECORD 6 ]------------------------------------
chunk_name | _hyper_5_48_chunk
range_start | Wed Sep 14 17:00:00 2016 PDT
range_end | Thu Sep 15 11:47:51.47376 2016 PDT
-[ RECORD 7 ]------------------------------------
chunk_name | _hyper_5_49_chunk
range_start | Thu Sep 15 11:47:51.47376 2016 PDT
range_end | Sat Sep 17 02:40:49.182352 2016 PDT
-[ RECORD 8 ]------------------------------------
chunk_name | _hyper_5_50_chunk
range_start | Thu Sep 15 11:47:51.47376 2016 PDT
range_end | Sat Sep 17 02:40:49.182352 2016 PDT
-[ RECORD 9 ]------------------------------------
chunk_name | _hyper_5_51_chunk
range_start | Sat Sep 17 02:40:49.182352 2016 PDT
range_end | Sun Sep 18 17:33:46.890944 2016 PDT
-[ RECORD 10 ]-----------------------------------
chunk_name | _hyper_5_52_chunk
range_start | Sat Sep 17 02:40:49.182352 2016 PDT
range_end | Sun Sep 18 17:33:46.890944 2016 PDT
-[ RECORD 11 ]-----------------------------------
chunk_name | _hyper_5_53_chunk
range_start | Sun Sep 18 17:33:46.890944 2016 PDT
range_end | Sun Sep 18 20:35:55.67676 2016 PDT
-[ RECORD 12 ]-----------------------------------
chunk_name | _hyper_5_54_chunk
range_start | Sun Sep 18 17:33:46.890944 2016 PDT
range_end | Sun Sep 18 20:35:55.67676 2016 PDT
-[ RECORD 13 ]-----------------------------------
chunk_name | _hyper_5_55_chunk
range_start | Sun Sep 18 20:35:55.67676 2016 PDT
range_end | Tue Sep 20 18:46:40.16883 2016 PDT
-[ RECORD 14 ]-----------------------------------
chunk_name | _hyper_5_56_chunk
range_start | Sun Sep 18 20:35:55.67676 2016 PDT
range_end | Tue Sep 20 18:46:40.16883 2016 PDT
-[ RECORD 15 ]-----------------------------------
chunk_name | _hyper_5_57_chunk
range_start | Tue Sep 20 18:46:40.16883 2016 PDT
range_end | Sun Oct 02 16:44:29.071032 2016 PDT
-[ RECORD 16 ]-----------------------------------
chunk_name | _hyper_5_58_chunk
range_start | Tue Sep 20 18:46:40.16883 2016 PDT
range_end | Sun Oct 02 16:44:29.071032 2016 PDT
-[ RECORD 17 ]-----------------------------------
chunk_name | _hyper_5_59_chunk
range_start | Sun Oct 02 16:44:29.071032 2016 PDT
range_end | Tue Oct 11 00:37:03.738979 2016 PDT
-[ RECORD 18 ]-----------------------------------
chunk_name | _hyper_5_60_chunk
range_start | Sun Oct 02 16:44:29.071032 2016 PDT
range_end | Tue Oct 11 00:37:03.738979 2016 PDT
-[ RECORD 19 ]-----------------------------------
chunk_name | _hyper_5_61_chunk
range_start | Tue Oct 11 00:37:03.738979 2016 PDT
range_end | Thu Oct 27 03:05:25.740618 2016 PDT
-[ RECORD 20 ]-----------------------------------
chunk_name | _hyper_5_62_chunk
range_start | Tue Oct 11 00:37:03.738979 2016 PDT
range_end | Thu Oct 27 03:05:25.740618 2016 PDT
-[ RECORD 21 ]-----------------------------------
chunk_name | _hyper_5_63_chunk
range_start | Thu Oct 27 03:05:25.740618 2016 PDT
range_end | Sun Nov 13 12:38:49.541703 2016 PST
-[ RECORD 22 ]-----------------------------------
chunk_name | _hyper_5_64_chunk
range_start | Thu Oct 27 03:05:25.740618 2016 PDT
range_end | Sun Nov 13 12:38:49.541703 2016 PST
-[ RECORD 23 ]-----------------------------------
chunk_name | _hyper_5_65_chunk
range_start | Sun Nov 13 12:38:49.541703 2016 PST
range_end | Fri Dec 02 17:45:40.237036 2016 PST
-[ RECORD 24 ]-----------------------------------
chunk_name | _hyper_5_66_chunk
range_start | Sun Nov 13 12:38:49.541703 2016 PST
range_end | Fri Dec 02 17:45:40.237036 2016 PST
-[ RECORD 25 ]-----------------------------------
chunk_name | _hyper_5_67_chunk
range_start | Fri Dec 02 17:45:40.237036 2016 PST
range_end | Wed Dec 21 22:52:30.932369 2016 PST
-[ RECORD 26 ]-----------------------------------
chunk_name | _hyper_5_68_chunk
range_start | Fri Dec 02 17:45:40.237036 2016 PST
range_end | Wed Dec 21 22:52:30.932369 2016 PST
-[ RECORD 27 ]-----------------------------------
chunk_name | _hyper_5_69_chunk
range_start | Wed Dec 21 22:52:30.932369 2016 PST
range_end | Tue Jan 10 03:59:21.627702 2017 PST
-[ RECORD 28 ]-----------------------------------
chunk_name | _hyper_5_70_chunk
range_start | Wed Dec 21 22:52:30.932369 2016 PST
range_end | Tue Jan 10 03:59:21.627702 2017 PST
-[ RECORD 29 ]-----------------------------------
chunk_name | _hyper_5_71_chunk
range_start | Tue Jan 10 03:59:21.627702 2017 PST
range_end | Sun Jan 29 09:06:12.323035 2017 PST
-[ RECORD 30 ]-----------------------------------
chunk_name | _hyper_5_72_chunk
range_start | Tue Jan 10 03:59:21.627702 2017 PST
range_end | Sun Jan 29 09:06:12.323035 2017 PST
-[ RECORD 31 ]-----------------------------------
chunk_name | _hyper_5_73_chunk
range_start | Sun Jan 29 09:06:12.323035 2017 PST
range_end | Fri Feb 17 14:13:03.018368 2017 PST
-[ RECORD 32 ]-----------------------------------
chunk_name | _hyper_5_74_chunk
range_start | Sun Jan 29 09:06:12.323035 2017 PST
range_end | Fri Feb 17 14:13:03.018368 2017 PST
-[ RECORD 33 ]-----------------------------------
chunk_name | _hyper_5_75_chunk
range_start | Fri Feb 17 14:13:03.018368 2017 PST
range_end | Wed Mar 08 19:19:53.713701 2017 PST
-[ RECORD 34 ]-----------------------------------
chunk_name | _hyper_5_76_chunk
range_start | Fri Feb 17 14:13:03.018368 2017 PST
range_end | Wed Mar 08 19:19:53.713701 2017 PST
SELECT *
FROM timescaledb_information.dimensions
WHERE hypertable_name = 'test_adaptive_space' ORDER BY dimension_number;
-[ RECORD 1 ]-----+----------------------------------------
hypertable_schema | public
hypertable_name | test_adaptive_space
dimension_number | 1
column_name | time
column_type | timestamp with time zone
dimension_type | Time
time_interval | @ 19 days 5 hours 6 mins 50.695333 secs
integer_interval |
integer_now_func |
num_partitions |
-[ RECORD 2 ]-----+----------------------------------------
hypertable_schema | public
hypertable_name | test_adaptive_space
dimension_number | 2
column_name | location
column_type | integer
dimension_type | Space
time_interval |
integer_interval |
integer_now_func |
num_partitions | 2
\x
SELECT *
FROM chunks_detailed_size('test_adaptive_space') ORDER BY chunk_name;
chunk_schema | chunk_name | table_bytes | index_bytes | toast_bytes | total_bytes | node_name
-----------------------+-------------------+-------------+-------------+-------------+-------------+-----------
_timescaledb_internal | _hyper_5_43_chunk | 8192 | 32768 | 0 | 40960 |
_timescaledb_internal | _hyper_5_44_chunk | 8192 | 32768 | 0 | 40960 |
_timescaledb_internal | _hyper_5_45_chunk | 49152 | 57344 | 0 | 106496 |
_timescaledb_internal | _hyper_5_46_chunk | 49152 | 57344 | 0 | 106496 |
_timescaledb_internal | _hyper_5_47_chunk | 40960 | 49152 | 0 | 90112 |
_timescaledb_internal | _hyper_5_48_chunk | 40960 | 32768 | 0 | 73728 |
_timescaledb_internal | _hyper_5_49_chunk | 57344 | 81920 | 0 | 139264 |
_timescaledb_internal | _hyper_5_50_chunk | 57344 | 81920 | 0 | 139264 |
_timescaledb_internal | _hyper_5_51_chunk | 57344 | 81920 | 0 | 139264 |
_timescaledb_internal | _hyper_5_52_chunk | 57344 | 81920 | 0 | 139264 |
_timescaledb_internal | _hyper_5_53_chunk | 8192 | 32768 | 0 | 40960 |
_timescaledb_internal | _hyper_5_54_chunk | 8192 | 32768 | 0 | 40960 |
_timescaledb_internal | _hyper_5_55_chunk | 65536 | 106496 | 0 | 172032 |
_timescaledb_internal | _hyper_5_56_chunk | 65536 | 98304 | 0 | 163840 |
_timescaledb_internal | _hyper_5_57_chunk | 253952 | 360448 | 0 | 614400 |
_timescaledb_internal | _hyper_5_58_chunk | 253952 | 368640 | 0 | 622592 |
_timescaledb_internal | _hyper_5_59_chunk | 180224 | 303104 | 0 | 483328 |
_timescaledb_internal | _hyper_5_60_chunk | 188416 | 303104 | 0 | 491520 |
_timescaledb_internal | _hyper_5_61_chunk | 327680 | 540672 | 0 | 868352 |
_timescaledb_internal | _hyper_5_62_chunk | 327680 | 532480 | 0 | 860160 |
_timescaledb_internal | _hyper_5_63_chunk | 360448 | 581632 | 0 | 942080 |
_timescaledb_internal | _hyper_5_64_chunk | 352256 | 589824 | 0 | 942080 |
_timescaledb_internal | _hyper_5_65_chunk | 385024 | 598016 | 0 | 983040 |
_timescaledb_internal | _hyper_5_66_chunk | 393216 | 614400 | 0 | 1007616 |
_timescaledb_internal | _hyper_5_67_chunk | 385024 | 598016 | 0 | 983040 |
_timescaledb_internal | _hyper_5_68_chunk | 393216 | 598016 | 0 | 991232 |
_timescaledb_internal | _hyper_5_69_chunk | 393216 | 622592 | 0 | 1015808 |
_timescaledb_internal | _hyper_5_70_chunk | 385024 | 606208 | 0 | 991232 |
_timescaledb_internal | _hyper_5_71_chunk | 385024 | 614400 | 0 | 999424 |
_timescaledb_internal | _hyper_5_72_chunk | 393216 | 622592 | 0 | 1015808 |
_timescaledb_internal | _hyper_5_73_chunk | 393216 | 614400 | 0 | 1007616 |
_timescaledb_internal | _hyper_5_74_chunk | 385024 | 614400 | 0 | 999424 |
_timescaledb_internal | _hyper_5_75_chunk | 360448 | 581632 | 0 | 942080 |
_timescaledb_internal | _hyper_5_76_chunk | 368640 | 598016 | 0 | 966656 |
(34 rows)
SELECT id, hypertable_id, interval_length FROM _timescaledb_catalog.dimension;
id | hypertable_id | interval_length
----+---------------+-----------------
2 | 2 | 1277664162704
3 | 3 | 2315350169045
6 | 5 |
5 | 5 | 1660010695333
(4 rows)
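-- Illustrative check, not part of the original test output: interval_length
-- is stored in microseconds, so dimension 5 above corresponds to the
-- "@ 19 days 5 hours 6 mins 50.695333 secs" time_interval reported by
-- timescaledb_information.dimensions earlier.
SELECT (1660010695333 / 1e6) * INTERVAL '1 second' AS time_interval;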
-- A previous version stopped working as soon as hypertable_id stopped being
-- equal to dimension_id (i.e., there was a hypertable with more than 1 dimension).
-- This test comes after test_adaptive_space, which has 2 dimensions, and makes
-- sure that it still works.
CREATE TABLE test_adaptive_after_multiple_dims(time timestamptz, temp float, location int);
SELECT create_hypertable('test_adaptive_after_multiple_dims', 'time',
chunk_target_size => '100MB',
create_default_indexes => true);
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
------------------------------------------------
(6,public,test_adaptive_after_multiple_dims,t)
(1 row)
INSERT INTO test_adaptive_after_multiple_dims VALUES('2018-01-01T00:00:00+00'::timestamptz, 0.0, 5);
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
\set ON_ERROR_STOP 0
SELECT * FROM set_adaptive_chunking('test_adaptive', '2MB');
ERROR: must be owner of hypertable "test_adaptive"
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
-- Now make sure renaming the schema gets propagated to the func_schema
DROP TABLE test_adaptive;
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA IF NOT EXISTS my_chunk_func_schema;
CREATE OR REPLACE FUNCTION my_chunk_func_schema.calculate_chunk_interval(
dimension_id INTEGER,
dimension_coord BIGINT,
chunk_target_size BIGINT
)
RETURNS BIGINT LANGUAGE PLPGSQL AS
$BODY$
DECLARE
BEGIN
RETURN 2;
END
$BODY$;
CREATE TABLE test_adaptive(time timestamptz, temp float, location int);
SELECT create_hypertable('test_adaptive', 'time',
chunk_target_size => '1MB',
chunk_sizing_func => 'my_chunk_func_schema.calculate_chunk_interval');
WARNING: target chunk size for adaptive chunking is less than 10 MB
NOTICE: adaptive chunking is a BETA feature and is not recommended for production deployments
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------
(7,public,test_adaptive,t)
(1 row)
ALTER SCHEMA my_chunk_func_schema RENAME TO new_chunk_func_schema;
INSERT INTO test_adaptive VALUES (now(), 1.0, 1);
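-- Illustrative follow-up, not part of the diff shown: the schema rename
-- should now be reflected in the catalog's sizing-function schema.
SELECT chunk_sizing_func_schema, chunk_sizing_func_name
FROM _timescaledb_catalog.hypertable
WHERE table_name = 'test_adaptive';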


@@ -0,0 +1,100 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE OR REPLACE FUNCTION customtype_in(cstring) RETURNS customtype AS
'timestamptz_in'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: type "customtype" is not yet defined
CREATE OR REPLACE FUNCTION customtype_out(customtype) RETURNS cstring AS
'timestamptz_out'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: argument type customtype is only a shell
CREATE OR REPLACE FUNCTION customtype_recv(internal) RETURNS customtype AS
'timestamptz_recv'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: return type customtype is only a shell
CREATE OR REPLACE FUNCTION customtype_send(customtype) RETURNS bytea AS
'timestamptz_send'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: argument type customtype is only a shell
CREATE TYPE customtype (
INPUT = customtype_in,
OUTPUT = customtype_out,
RECEIVE = customtype_recv,
SEND = customtype_send,
LIKE = TIMESTAMPTZ
);
CREATE CAST (customtype AS bigint)
WITHOUT FUNCTION AS ASSIGNMENT;
CREATE CAST (bigint AS customtype)
WITHOUT FUNCTION AS IMPLICIT;
CREATE CAST (customtype AS timestamptz)
WITHOUT FUNCTION AS ASSIGNMENT;
CREATE CAST (timestamptz AS customtype)
WITHOUT FUNCTION AS ASSIGNMENT;
CREATE OR REPLACE FUNCTION customtype_lt(customtype, customtype) RETURNS bool AS
'timestamp_lt'
LANGUAGE internal IMMUTABLE STRICT;
CREATE OPERATOR < (
LEFTARG = customtype,
RIGHTARG = customtype,
PROCEDURE = customtype_lt,
COMMUTATOR = >,
NEGATOR = >=,
RESTRICT = scalarltsel,
JOIN = scalarltjoinsel
);
CREATE OR REPLACE FUNCTION customtype_ge(customtype, customtype) RETURNS bool AS
'timestamp_ge'
LANGUAGE internal IMMUTABLE STRICT;
CREATE OPERATOR >= (
LEFTARG = customtype,
RIGHTARG = customtype,
PROCEDURE = customtype_ge,
COMMUTATOR = <=,
NEGATOR = <,
RESTRICT = scalargtsel,
JOIN = scalargtjoinsel
);
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE customtype_test(time_custom customtype, val int);
\set ON_ERROR_STOP 0
-- Using interval type for chunk time interval should fail with custom time type
SELECT create_hypertable('customtype_test', 'time_custom', chunk_time_interval => INTERVAL '1 day', create_default_indexes=>false);
ERROR: invalid interval type for customtype dimension
\set ON_ERROR_STOP 1
SELECT create_hypertable('customtype_test', 'time_custom', chunk_time_interval => 10e6::bigint, create_default_indexes=>false);
NOTICE: adding not-null constraint to column "time_custom"
create_hypertable
------------------------------
(1,public,customtype_test,t)
(1 row)
INSERT INTO customtype_test VALUES ('2001-01-01 01:02:03'::customtype, 10);
INSERT INTO customtype_test VALUES ('2001-01-01 01:02:03'::customtype, 10);
INSERT INTO customtype_test VALUES ('2001-01-01 01:02:03'::customtype, 10);
EXPLAIN (costs off) SELECT * FROM customtype_test;
QUERY PLAN
------------------------------
Seq Scan on _hyper_1_1_chunk
(1 row)
INSERT INTO customtype_test VALUES ('2001-01-01 01:02:23'::customtype, 11);
EXPLAIN (costs off) SELECT * FROM customtype_test;
QUERY PLAN
------------------------------------
Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
(3 rows)
SELECT * FROM customtype_test;
time_custom | val
------------------------------+-----
Mon Jan 01 01:02:03 2001 PST | 10
Mon Jan 01 01:02:03 2001 PST | 10
Mon Jan 01 01:02:03 2001 PST | 10
Mon Jan 01 01:02:23 2001 PST | 11
(4 rows)
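-- Illustrative check, not part of the original test output: the
-- chunk_time_interval of 10e6 microseconds is 10 seconds, and the two
-- distinct timestamps above are 20 seconds apart, which is why the second
-- value opened a new chunk and the plan became an Append over two chunks.
SELECT extract(epoch FROM ('2001-01-01 01:02:23'::timestamptz
                           - '2001-01-01 01:02:03'::timestamptz)) AS seconds_apart;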

test/expected/insert-13.out (new file, 661 lines)

@@ -0,0 +1,661 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\ir include/insert_two_partitions.sql
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE PUBLIC."two_Partitions" (
"timeCustom" BIGINT NOT NULL,
device_id TEXT NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL,
series_bool BOOLEAN NULL
);
CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id);
SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_internal.interval_to_usec('1 month'));
hypertable_id | schema_name | table_name | created
---------------+-------------+----------------+---------
1 | public | two_Partitions | t
(1 row)
\set QUIET off
BEGIN;
BEGIN
\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS '';
COPY 7
COMMIT;
COMMIT
INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES
(1257987600000000000, 'dev1', 1.5, 1),
(1257987600000000000, 'dev1', 1.5, 2),
(1257894000000000000, 'dev2', 1.5, 1),
(1257894002000000000, 'dev1', 2.5, 3);
INSERT 0 4
INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES
(1257894000000000000, 'dev2', 1.5, 2);
INSERT 0 1
\set QUIET on
SELECT * FROM test.show_columnsp('_timescaledb_internal.%_hyper%');
Relation | Kind | Column | Column type | NotNull
------------------------------------------------------------------------------------+------+-------------+------------------+---------
_timescaledb_internal._hyper_1_1_chunk | r | timeCustom | bigint | t
_timescaledb_internal._hyper_1_1_chunk | r | device_id | text | t
_timescaledb_internal._hyper_1_1_chunk | r | series_0 | double precision | f
_timescaledb_internal._hyper_1_1_chunk | r | series_1 | double precision | f
_timescaledb_internal._hyper_1_1_chunk | r | series_2 | double precision | f
_timescaledb_internal._hyper_1_1_chunk | r | series_bool | boolean | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f
_timescaledb_internal._hyper_1_2_chunk | r | timeCustom | bigint | t
_timescaledb_internal._hyper_1_2_chunk | r | device_id | text | t
_timescaledb_internal._hyper_1_2_chunk | r | series_0 | double precision | f
_timescaledb_internal._hyper_1_2_chunk | r | series_1 | double precision | f
_timescaledb_internal._hyper_1_2_chunk | r | series_2 | double precision | f
_timescaledb_internal._hyper_1_2_chunk | r | series_bool | boolean | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f
_timescaledb_internal._hyper_1_3_chunk | r | timeCustom | bigint | t
_timescaledb_internal._hyper_1_3_chunk | r | device_id | text | t
_timescaledb_internal._hyper_1_3_chunk | r | series_0 | double precision | f
_timescaledb_internal._hyper_1_3_chunk | r | series_1 | double precision | f
_timescaledb_internal._hyper_1_3_chunk | r | series_2 | double precision | f
_timescaledb_internal._hyper_1_3_chunk | r | series_bool | boolean | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f
_timescaledb_internal._hyper_1_4_chunk | r | timeCustom | bigint | t
_timescaledb_internal._hyper_1_4_chunk | r | device_id | text | t
_timescaledb_internal._hyper_1_4_chunk | r | series_0 | double precision | f
_timescaledb_internal._hyper_1_4_chunk | r | series_1 | double precision | f
_timescaledb_internal._hyper_1_4_chunk | r | series_2 | double precision | f
_timescaledb_internal._hyper_1_4_chunk | r | series_bool | boolean | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | i | device_id | text | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | i | series_0 | double precision | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | i | series_1 | double precision | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | i | series_2 | double precision | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | timeCustom | bigint | f
_timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | i | series_bool | boolean | f
(76 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+------------------------------------------------------------------------------------+--------------------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."_hyper_1_1_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."_hyper_1_2_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f |
_timescaledb_internal._hyper_1_3_chunk | _timescaledb_internal."_hyper_1_3_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" | {device_id,timeCustom} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_0_idx" | {timeCustom,series_0} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" | {timeCustom,series_1} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_2_idx" | {timeCustom,series_2} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_series_bool_idx" | {timeCustom,series_bool} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" | {timeCustom,device_id} | | f | f | f |
_timescaledb_internal._hyper_1_4_chunk | _timescaledb_internal."_hyper_1_4_chunk_two_Partitions_timeCustom_idx" | {timeCustom} | | f | f | f |
(28 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped
----+---------------+-----------------------+------------------+---------------------+---------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | | f
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | | f
(4 rows)
SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1;
timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
---------------------+-----------+----------+----------+----------+-------------
1257894000000000000 | dev1 | 1.5 | 1 | 2 | t
1257894000000000000 | dev1 | 1.5 | 2 | |
1257894000000000000 | dev2 | 1.5 | 1 | |
1257894000000000000 | dev2 | 1.5 | 2 | |
1257894000000001000 | dev1 | 2.5 | 3 | |
1257894001000000000 | dev1 | 3.5 | 4 | |
1257894002000000000 | dev1 | 2.5 | 3 | |
1257894002000000000 | dev1 | 5.5 | 6 | | t
1257894002000000000 | dev1 | 5.5 | 7 | | f
1257897600000000000 | dev1 | 4.5 | 5 | | f
1257987600000000000 | dev1 | 1.5 | 1 | |
1257987600000000000 | dev1 | 1.5 | 2 | |
(12 rows)
SELECT * FROM ONLY "two_Partitions";
timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
------------+-----------+----------+----------+----------+-------------
(0 rows)
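-- Hedged aside (not part of the recorded run): tableoid maps each row back
-- to the chunk it is stored in, confirming the parent table itself is empty.
-- Output omitted.
SELECT tableoid::regclass AS chunk, count(*)
FROM "two_Partitions" GROUP BY 1 ORDER BY 1;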
CREATE TABLE error_test(time timestamp, temp float8, device text NOT NULL);
SELECT create_hypertable('error_test', 'time', 'device', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------
(2,public,error_test,t)
(1 row)
\set QUIET off
INSERT INTO error_test VALUES ('Mon Mar 20 09:18:20.1 2017', 21.3, 'dev1');
INSERT 0 1
\set ON_ERROR_STOP 0
-- generate insert error
INSERT INTO error_test VALUES ('Mon Mar 20 09:18:22.3 2017', 21.1, NULL);
ERROR: null value in column "device" of relation "_hyper_2_6_chunk" violates not-null constraint
\set ON_ERROR_STOP 1
INSERT INTO error_test VALUES ('Mon Mar 20 09:18:25.7 2017', 22.4, 'dev2');
INSERT 0 1
\set QUIET on
SELECT * FROM error_test;
time | temp | device
----------------------------+------+--------
Mon Mar 20 09:18:20.1 2017 | 21.3 | dev1
Mon Mar 20 09:18:25.7 2017 | 22.4 | dev2
(2 rows)
--test character(9) partition keys since there were issues with padding causing partitioning errors
CREATE TABLE tick_character (
symbol character(9) NOT NULL,
mid REAL NOT NULL,
spread REAL NOT NULL,
time TIMESTAMPTZ NOT NULL
);
SELECT create_hypertable ('tick_character', 'time', 'symbol', 2);
create_hypertable
-----------------------------
(3,public,tick_character,t)
(1 row)
INSERT INTO tick_character ( symbol, mid, spread, time ) VALUES ( 'GBPJPY', 142.639000, 5.80, 'Mon Mar 20 09:18:22.3 2017') RETURNING time, symbol, mid;
time | symbol | mid
--------------------------------+-----------+---------
Mon Mar 20 09:18:22.3 2017 PDT | GBPJPY | 142.639
(1 row)
SELECT * FROM tick_character;
symbol | mid | spread | time
-----------+---------+--------+--------------------------------
GBPJPY | 142.639 | 5.8 | Mon Mar 20 09:18:22.3 2017 PDT
(1 row)
CREATE TABLE date_col_test(time date, temp float8, device text NOT NULL);
SELECT create_hypertable('date_col_test', 'time', 'device', 1000, chunk_time_interval => INTERVAL '1 Day');
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------
(4,public,date_col_test,t)
(1 row)
INSERT INTO date_col_test
VALUES ('2001-02-01', 98, 'dev1'),
('2001-03-02', 98, 'dev1');
SELECT * FROM date_col_test WHERE time > '2001-01-01';
time | temp | device
------------+------+--------
03-02-2001 | 98 | dev1
02-01-2001 | 98 | dev1
(2 rows)
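-- Hedged aside: with a 1 day chunk_time_interval the two rows above should
-- land in separate chunks, which show_chunks() can confirm (output omitted).
SELECT show_chunks('date_col_test');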
-- Out-of-order insertion regression test.
-- this used to trip an assert in subspace_store.c checking that
-- max_open_chunks_per_insert was obeyed
set timescaledb.max_open_chunks_per_insert=1;
CREATE TABLE chunk_assert_fail(i bigint, j bigint);
SELECT create_hypertable('chunk_assert_fail', 'i', 'j', 1000, chunk_time_interval=>1);
NOTICE: adding not-null constraint to column "i"
create_hypertable
--------------------------------
(5,public,chunk_assert_fail,t)
(1 row)
insert into chunk_assert_fail values (1, 1), (1, 2), (2,1);
select * from chunk_assert_fail;
i | j
---+---
1 | 1
1 | 2
2 | 1
(3 rows)
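-- Hedged aside: the low GUC setting above persists for the rest of the
-- session in the recorded run; a sketch of restoring the default, were that
-- not desired:
RESET timescaledb.max_open_chunks_per_insert;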
CREATE TABLE one_space_test(time timestamp, temp float8, device text NOT NULL);
SELECT create_hypertable('one_space_test', 'time', 'device', 1);
NOTICE: adding not-null constraint to column "time"
create_hypertable
-----------------------------
(6,public,one_space_test,t)
(1 row)
INSERT INTO one_space_test VALUES
('2001-01-01 01:01:01', 1.0, 'device'),
('2002-01-01 01:02:01', 1.0, 'device');
SELECT * FROM one_space_test;
time | temp | device
--------------------------+------+--------
Mon Jan 01 01:01:01 2001 | 1 | device
Tue Jan 01 01:02:01 2002 | 1 | device
(2 rows)
--CTE & EXPLAIN ANALYZE TESTS
WITH insert_cte as (
INSERT INTO one_space_test VALUES
('2001-01-01 01:02:01', 1.0, 'device')
RETURNING *)
SELECT * FROM insert_cte;
time | temp | device
--------------------------+------+--------
Mon Jan 01 01:02:01 2001 | 1 | device
(1 row)
EXPLAIN (analyze, costs off, timing off) --can't turn summary off in 9.6 so instead grep it away at end.
WITH insert_cte as (
INSERT INTO one_space_test VALUES
('2001-01-01 01:03:01', 1.0, 'device')
)
SELECT 1 \g | grep -v "Planning" | grep -v "Execution"
QUERY PLAN
-------------------------------------------------------------------------
Result (actual rows=1 loops=1)
CTE insert_cte
-> Custom Scan (HypertableInsert) (never executed)
-> Insert on one_space_test (actual rows=0 loops=1)
-> Custom Scan (ChunkDispatch) (actual rows=1 loops=1)
-> Result (actual rows=1 loops=1)
(8 rows)
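-- Hedged aside: on PostgreSQL 10+ the grep workaround is unnecessary because
-- EXPLAIN accepts a summary option directly. A sketch (note that ANALYZE
-- still executes the INSERT; output omitted):
EXPLAIN (analyze, costs off, timing off, summary off)
WITH insert_cte as (
    INSERT INTO one_space_test VALUES
    ('2001-01-01 01:03:01', 1.0, 'device')
)
SELECT 1;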
-- INSERTs can exclude chunks based on constraints
EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail;
QUERY PLAN
-------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on chunk_assert_fail
-> Custom Scan (ChunkDispatch)
-> Append
-> Seq Scan on _hyper_5_11_chunk
-> Seq Scan on _hyper_5_12_chunk
-> Seq Scan on _hyper_5_13_chunk
(7 rows)
EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i < 1;
QUERY PLAN
--------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on chunk_assert_fail
-> Custom Scan (ChunkDispatch)
-> Result
One-Time Filter: false
(5 rows)
EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i = 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on chunk_assert_fail
-> Custom Scan (ChunkDispatch)
-> Append
-> Index Scan using _hyper_5_12_chunk_chunk_assert_fail_i_idx on _hyper_5_12_chunk
Index Cond: (i = 1)
-> Index Scan using _hyper_5_11_chunk_chunk_assert_fail_i_idx on _hyper_5_11_chunk
Index Cond: (i = 1)
(8 rows)
EXPLAIN (costs off) INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i > 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on chunk_assert_fail
-> Custom Scan (ChunkDispatch)
-> Index Scan using _hyper_5_13_chunk_chunk_assert_fail_i_idx on _hyper_5_13_chunk
Index Cond: (i > 1)
(5 rows)
INSERT INTO chunk_assert_fail SELECT i, j FROM chunk_assert_fail WHERE i > 1;
EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on one_space_test
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk
Index Cond: ("time" < 'infinity'::timestamp without time zone)
-> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk
Index Cond: ("time" < 'infinity'::timestamp without time zone)
(9 rows)
EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on one_space_test
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on one_space_test
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on one_space_test
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Scan using _hyper_6_14_chunk_one_space_test_time_idx on _hyper_6_14_chunk
Index Cond: ("time" > '-infinity'::timestamp without time zone)
-> Index Scan using _hyper_6_15_chunk_one_space_test_time_idx on _hyper_6_15_chunk
Index Cond: ("time" > '-infinity'::timestamp without time zone)
(9 rows)
INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time < 'infinity' LIMIT 1;
INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time >= 'infinity' LIMIT 1;
INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time <= '-infinity' LIMIT 1;
INSERT INTO one_space_test SELECT * FROM one_space_test WHERE time > '-infinity' LIMIT 1;
CREATE TABLE timestamp_inf(time TIMESTAMP);
SELECT create_hypertable('timestamp_inf', 'time');
NOTICE: adding not-null constraint to column "time"
create_hypertable
----------------------------
(7,public,timestamp_inf,t)
(1 row)
INSERT INTO timestamp_inf VALUES ('2018/01/02'), ('2019/01/02');
EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf
WHERE time < 'infinity' LIMIT 1;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on timestamp_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk
Index Cond: ("time" < 'infinity'::timestamp without time zone)
-> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk
Index Cond: ("time" < 'infinity'::timestamp without time zone)
(9 rows)
EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf
WHERE time >= 'infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on timestamp_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf
WHERE time <= '-infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on timestamp_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO timestamp_inf SELECT * FROM timestamp_inf
WHERE time > '-infinity' LIMIT 1;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on timestamp_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Only Scan using _hyper_7_17_chunk_timestamp_inf_time_idx on _hyper_7_17_chunk
Index Cond: ("time" > '-infinity'::timestamp without time zone)
-> Index Only Scan using _hyper_7_16_chunk_timestamp_inf_time_idx on _hyper_7_16_chunk
Index Cond: ("time" > '-infinity'::timestamp without time zone)
(9 rows)
CREATE TABLE date_inf(time DATE);
SELECT create_hypertable('date_inf', 'time');
NOTICE: adding not-null constraint to column "time"
create_hypertable
-----------------------
(8,public,date_inf,t)
(1 row)
INSERT INTO date_inf VALUES ('2018/01/02'), ('2019/01/02');
EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf
WHERE time < 'infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on date_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk
Index Cond: ("time" < 'infinity'::date)
-> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk
Index Cond: ("time" < 'infinity'::date)
(9 rows)
EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf
WHERE time >= 'infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on date_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf
WHERE time <= '-infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on date_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Result
One-Time Filter: false
(6 rows)
EXPLAIN (costs off) INSERT INTO date_inf SELECT * FROM date_inf
WHERE time > '-infinity' LIMIT 1;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------
Custom Scan (HypertableInsert)
-> Insert on date_inf
-> Custom Scan (ChunkDispatch)
-> Limit
-> Append
-> Index Only Scan using _hyper_8_18_chunk_date_inf_time_idx on _hyper_8_18_chunk
Index Cond: ("time" > '-infinity'::date)
-> Index Only Scan using _hyper_8_19_chunk_date_inf_time_idx on _hyper_8_19_chunk
Index Cond: ("time" > '-infinity'::date)
(9 rows)
-- test INSERT with cached plans / plpgsql functions
-- https://github.com/timescale/timescaledb/issues/1809
CREATE TABLE status_table(a int, b int, last_ts timestamptz, UNIQUE(a,b));
CREATE TABLE metrics(time timestamptz NOT NULL, value float);
CREATE TABLE metrics2(time timestamptz NOT NULL, value float);
SELECT (create_hypertable(t,'time')).table_name FROM (VALUES ('metrics'),('metrics2')) v(t);
table_name
------------
metrics
metrics2
(2 rows)
INSERT INTO metrics VALUES ('2000-01-01',random()), ('2000-02-01',random()), ('2000-03-01',random());
CREATE OR REPLACE FUNCTION insert_test() RETURNS VOID LANGUAGE plpgsql AS
$$
DECLARE
r RECORD;
BEGIN
FOR r IN
SELECT * FROM metrics
LOOP
WITH foo AS (
INSERT INTO metrics2 SELECT * FROM metrics RETURNING *
)
INSERT INTO status_table (a,b, last_ts)
VALUES (1,1, now())
ON CONFLICT (a,b) DO UPDATE SET last_ts=(SELECT max(time) FROM metrics);
END LOOP;
END;
$$;
SELECT insert_test(), insert_test(), insert_test();
insert_test | insert_test | insert_test
-------------+-------------+-------------
| |
(1 row)
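-- Hedged aside: the loop re-executes identical statements, so plpgsql caches
-- their plans, which is what issue #1809 exercised. Cached plans can be
-- flushed manually between runs (not done in the recorded run):
DISCARD PLANS;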
-- test Postgres crashes on INSERT ... SELECT ... WHERE NOT EXISTS with empty table
-- https://github.com/timescale/timescaledb/issues/1883
CREATE TABLE readings (
toe TIMESTAMPTZ NOT NULL,
sensor_id INT NOT NULL,
value INT NOT NULL
);
SELECT create_hypertable(
'readings',
'toe',
chunk_time_interval => interval '1 day',
if_not_exists => TRUE,
migrate_data => TRUE
);
create_hypertable
------------------------
(11,public,readings,t)
(1 row)
EXPLAIN (costs off)
INSERT INTO readings
SELECT '2020-05-09 10:34:35.296288+00', 1, 0
WHERE NOT EXISTS (
SELECT 1
FROM readings
WHERE sensor_id = 1
AND toe = '2020-05-09 10:34:35.296288+00'
);
QUERY PLAN
-----------------------------------------------------
Custom Scan (HypertableInsert)
InitPlan 1 (returns $0)
-> Result
One-Time Filter: false
-> Insert on readings
-> Result
One-Time Filter: (NOT $0)
-> Custom Scan (ChunkDispatch)
-> Result
One-Time Filter: (NOT $0)
(10 rows)
INSERT INTO readings
SELECT '2020-05-09 10:34:35.296288+00', 1, 0
WHERE NOT EXISTS (
SELECT 1
FROM readings
WHERE sensor_id = 1
AND toe = '2020-05-09 10:34:35.296288+00'
);
DROP TABLE readings;
CREATE TABLE sample_table (
sequence INTEGER NOT NULL,
time TIMESTAMP WITHOUT TIME ZONE NOT NULL,
value NUMERIC NOT NULL,
UNIQUE (sequence, time)
);
SELECT * FROM create_hypertable('sample_table', 'time',
chunk_time_interval => INTERVAL '1 day');
hypertable_id | schema_name | table_name | created
---------------+-------------+--------------+---------
12 | public | sample_table | t
(1 row)
INSERT INTO sample_table (sequence,time,value) VALUES
(7, generate_series(TIMESTAMP '2019-08-01', TIMESTAMP '2019-08-10', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int);
\set ON_ERROR_STOP 0
INSERT INTO sample_table (sequence,time,value) VALUES
(7, generate_series(TIMESTAMP '2019-07-21', TIMESTAMP '2019-08-01', INTERVAL '10 minutes'), ROUND(RANDOM()*10)::int);
ERROR: duplicate key value violates unique constraint "27_1_sample_table_sequence_time_key"
\set ON_ERROR_STOP 1
INSERT INTO sample_table (sequence,time,value) VALUES
(7,generate_series(TIMESTAMP '2019-01-01', TIMESTAMP '2019-07-01', '10 minutes'), ROUND(RANDOM()*10)::int);
DROP TABLE sample_table;


@ -0,0 +1,404 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE index_test(id serial, time timestamptz, device integer, temp float);
SELECT * FROM test.show_columns('index_test');
Column | Type | NotNull
--------+--------------------------+---------
id | integer | t
time | timestamp with time zone | f
device | integer | f
temp | double precision | f
(4 rows)
-- Test that we can handle difference in attnos across hypertable and
-- chunks by dropping the ID column
ALTER TABLE index_test DROP COLUMN id;
SELECT * FROM test.show_columns('index_test');
Column | Type | NotNull
--------+--------------------------+---------
time | timestamp with time zone | f
device | integer | f
temp | double precision | f
(3 rows)
-- No pre-existing UNIQUE index, so partitioning on two columns should work
SELECT create_hypertable('index_test', 'time', 'device', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------
(1,public,index_test,t)
(1 row)
INSERT INTO index_test VALUES ('2017-01-20T09:00:01', 1, 17.5);
\set ON_ERROR_STOP 0
-- cannot create a UNIQUE index with transaction_per_chunk
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time) WITH (timescaledb.transaction_per_chunk);
ERROR: cannot use timescaledb.transaction_per_chunk with UNIQUE or PRIMARY KEY
CREATE UNIQUE INDEX index_test_time_device_idx ON index_test (time, device) WITH(timescaledb.transaction_per_chunk);
ERROR: cannot use timescaledb.transaction_per_chunk with UNIQUE or PRIMARY KEY
\set ON_ERROR_STOP 1
CREATE INDEX index_test_time_device_idx ON index_test (time, device) WITH (timescaledb.transaction_per_chunk);
-- Regular indexes need not cover all partitioning columns
CREATE INDEX ON index_test (time, temp) WITH (timescaledb.transaction_per_chunk);
-- Create another chunk
INSERT INTO index_test VALUES ('2017-04-20T09:00:01', 1, 17.5);
-- New index should have been recursed to chunks
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_device_idx | {time,device} | | f | f | f |
index_test_time_idx | {time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk') ORDER BY 1,2;
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_idx | {time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_device_idx | {time,device} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_idx | {time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_device_idx | {time,device} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
(8 rows)
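-- Hedged aside: the same check is possible with only the vanilla catalogs,
-- without the test helpers (output omitted):
SELECT tablename, indexname
FROM pg_indexes
WHERE schemaname = '_timescaledb_internal'
ORDER BY 1, 2;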
SELECT * FROM _timescaledb_catalog.chunk_index ORDER BY index_name;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+---------------------------------------------+---------------+----------------------------
1 | _hyper_1_1_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
1 | _hyper_1_1_chunk_index_test_time_device_idx | 1 | index_test_time_device_idx
1 | _hyper_1_1_chunk_index_test_time_idx | 1 | index_test_time_idx
1 | _hyper_1_1_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
2 | _hyper_1_2_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
2 | _hyper_1_2_chunk_index_test_time_device_idx | 1 | index_test_time_device_idx
2 | _hyper_1_2_chunk_index_test_time_idx | 1 | index_test_time_idx
2 | _hyper_1_2_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
(8 rows)
ALTER INDEX index_test_time_idx RENAME TO index_test_time_idx2;
-- Metadata and index should have changed name
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_device_idx | {time,device} | | f | f | f |
index_test_time_idx2 | {time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk') ORDER BY 1,2;
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_idx2 | {time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_device_idx | {time,device} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_idx2 | {time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_device_idx | {time,device} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
(8 rows)
SELECT * FROM _timescaledb_catalog.chunk_index ORDER BY index_name;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+---------------------------------------------+---------------+----------------------------
1 | _hyper_1_1_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
1 | _hyper_1_1_chunk_index_test_time_device_idx | 1 | index_test_time_device_idx
1 | _hyper_1_1_chunk_index_test_time_idx2 | 1 | index_test_time_idx2
1 | _hyper_1_1_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
2 | _hyper_1_2_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
2 | _hyper_1_2_chunk_index_test_time_device_idx | 1 | index_test_time_device_idx
2 | _hyper_1_2_chunk_index_test_time_idx2 | 1 | index_test_time_idx2
2 | _hyper_1_2_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
(8 rows)
DROP INDEX index_test_time_idx2;
DROP INDEX index_test_time_device_idx;
-- Index should have been dropped
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(2 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+---------------------------------------------+---------------+----------------------------
1 | _hyper_1_1_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
1 | _hyper_1_1_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
2 | _hyper_1_2_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
2 | _hyper_1_2_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
(4 rows)
-- Create index with long name to see how this is handled on chunks
CREATE INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates ON index_test (time, temp) WITH (timescaledb.transaction_per_chunk);
CREATE INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates_2 ON index_test (time, temp) WITH (timescaledb.transaction_per_chunk);
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
a_hypertable_index_with_a_very_very_long_name_that_truncates | {time,temp} | | f | f | f |
a_hypertable_index_with_a_very_very_long_name_that_truncates_2 | {time,temp} | | f | f | f |
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+---------------------------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_a_hypertable_index_with_a_very_very_long_name_ | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_a_hypertable_index_with_a_very_very_long_nam_1 | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_a_hypertable_index_with_a_very_very_long_name_ | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_a_hypertable_index_with_a_very_very_long_nam_1 | {time,temp} | | f | f | f |
(8 rows)
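-- Hedged aside: the truncation above follows PostgreSQL's identifier limit
-- of NAMEDATALEN - 1 bytes (63 by default), with a numeric suffix appended
-- on the chunks to keep the names unique. The limit can be inspected with:
SHOW max_identifier_length;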
DROP INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates;
DROP INDEX a_hypertable_index_with_a_very_very_long_name_that_truncates_2;
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(2 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(2 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
(4 rows)
-- Add constraint index
ALTER TABLE index_test ADD UNIQUE (time, device);
SELECT * FROM test.show_indexes('index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------+---------------+------+--------+---------+-----------+------------
index_test_device_time_idx | {device,time} | | f | f | f |
index_test_time_device_key | {time,device} | | t | f | f |
index_test_time_temp_idx | {time,temp} | | f | f | f |
(3 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+-------------------------------------------------------------------+---------------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal._hyper_1_1_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_1_chunk | _timescaledb_internal."1_1_index_test_time_device_key" | {time,device} | | t | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_device_time_idx | {device,time} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal._hyper_1_2_chunk_index_test_time_temp_idx | {time,temp} | | f | f | f |
_timescaledb_internal._hyper_1_2_chunk | _timescaledb_internal."2_2_index_test_time_device_key" | {time,device} | | t | f | f |
(6 rows)
-- Constraint indexes are added to chunk_index table.
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+---------------------------------------------+---------------+----------------------------
1 | _hyper_1_1_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
1 | _hyper_1_1_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
2 | _hyper_1_2_chunk_index_test_device_time_idx | 1 | index_test_device_time_idx
2 | _hyper_1_2_chunk_index_test_time_temp_idx | 1 | index_test_time_temp_idx
1 | 1_1_index_test_time_device_key | 1 | index_test_time_device_key
2 | 2_2_index_test_time_device_key | 1 | index_test_time_device_key
(6 rows)
SELECT * FROM _timescaledb_catalog.chunk_constraint;
chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name
----------+--------------------+--------------------------------+----------------------------
1 | 1 | constraint_1 |
1 | 2 | constraint_2 |
2 | 3 | constraint_3 |
2 | 2 | constraint_2 |
1 | | 1_1_index_test_time_device_key | index_test_time_device_key
2 | | 2_2_index_test_time_device_key | index_test_time_device_key
(6 rows)
DROP TABLE index_test;
-- Metadata removed
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+------------+---------------+-----------------------
(0 rows)
-- Test that indexes are planned correctly
CREATE TABLE index_expr_test(id serial, time timestamptz, temp float, meta int);
select create_hypertable('index_expr_test', 'time');
NOTICE: adding not-null constraint to column "time"
create_hypertable
------------------------------
(2,public,index_expr_test,t)
(1 row)
-- Screw up the attribute numbers
ALTER TABLE index_expr_test DROP COLUMN id;
CREATE INDEX ON index_expr_test (meta) WITH (timescaledb.transaction_per_chunk);
INSERT INTO index_expr_test VALUES ('2017-01-20T09:00:01', 17.5, 1);
INSERT INTO index_expr_test VALUES ('2017-01-20T09:00:01', 17.5, 2);
SET enable_seqscan TO false;
SET enable_bitmapscan TO false;
EXPLAIN (verbose, costs off)
SELECT * FROM index_expr_test WHERE meta = 1;
QUERY PLAN
------------------------------------------------------------------------------------------------------
Index Scan using _hyper_2_3_chunk_index_expr_test_meta_idx on _timescaledb_internal._hyper_2_3_chunk
Output: _hyper_2_3_chunk."time", _hyper_2_3_chunk.temp, _hyper_2_3_chunk.meta
Index Cond: (_hyper_2_3_chunk.meta = 1)
(3 rows)
SELECT * FROM index_expr_test WHERE meta = 1;
time | temp | meta
------------------------------+------+------
Fri Jan 20 09:00:01 2017 PST | 17.5 | 1
(1 row)
SET enable_seqscan TO default;
SET enable_bitmapscan TO default;
\set ON_ERROR_STOP 0
-- cannot create a transaction_per_chunk index within a transaction block
BEGIN;
CREATE INDEX ON index_expr_test (temp) WITH (timescaledb.transaction_per_chunk);
ERROR: CREATE INDEX ... WITH (timescaledb.transaction_per_chunk) cannot run inside a transaction block
ROLLBACK;
\set ON_ERROR_STOP 1
DROP TABLE index_expr_test CASCADE;
CREATE TABLE partial_index_test(time INTEGER);
SELECT create_hypertable('partial_index_test', 'time', chunk_time_interval => 1, create_default_indexes => false);
NOTICE: adding not-null constraint to column "time"
create_hypertable
---------------------------------
(3,public,partial_index_test,t)
(1 row)
-- create 3 chunks
INSERT INTO partial_index_test(time) SELECT generate_series(0, 2);
select * from partial_index_test order by 1;
time
------
0
1
2
(3 rows)
-- create indexes on only 1 of the chunks
CREATE INDEX ON partial_index_test (time) WITH (timescaledb.transaction_per_chunk, timescaledb.max_chunks='1');
SELECT * FROM test.show_indexes('partial_index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
-----------------------------+---------+------+--------+---------+-----------+------------
partial_index_test_time_idx | {time} | | f | f | f |
(1 row)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
----------------------------------------+--------------------------------------------------------------------+---------+------+--------+---------+-----------+------------
_timescaledb_internal._hyper_3_4_chunk | _timescaledb_internal._hyper_3_4_chunk_partial_index_test_time_idx | {time} | | f | f | f |
(1 row)
-- regression test for bug fixed by PR #1008.
-- this caused an assertion failure when a MergeAppend node contained unsorted children
SET enable_seqscan TO false;
SET enable_bitmapscan TO false;
EXPLAIN (verbose, costs off) SELECT * FROM partial_index_test WHERE time < 2 ORDER BY time LIMIT 2;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------
Limit
Output: partial_index_test."time"
-> Custom Scan (ChunkAppend) on public.partial_index_test
Output: partial_index_test."time"
Order: partial_index_test."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _hyper_3_4_chunk_partial_index_test_time_idx on _timescaledb_internal._hyper_3_4_chunk
Output: _hyper_3_4_chunk."time"
Index Cond: (_hyper_3_4_chunk."time" < 2)
-> Sort
Output: _hyper_3_5_chunk."time"
Sort Key: _hyper_3_5_chunk."time"
-> Seq Scan on _timescaledb_internal._hyper_3_5_chunk
Output: _hyper_3_5_chunk."time"
Filter: (_hyper_3_5_chunk."time" < 2)
(16 rows)
SELECT * FROM partial_index_test WHERE time < 2 ORDER BY time LIMIT 2;
time
------
0
1
(2 rows)
-- we can drop the partially created index
DROP INDEX partial_index_test_time_idx;
SELECT * FROM test.show_indexes('partial_index_test');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
-------+---------+------+--------+---------+-----------+------------
(0 rows)
SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%_chunk');
Table | Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
-------+-------+---------+------+--------+---------+-----------+------------
(0 rows)
EXPLAIN (verbose, costs off) SELECT * FROM partial_index_test WHERE time < 2 ORDER BY time LIMIT 2;
QUERY PLAN
----------------------------------------------------------------------
Limit
Output: _hyper_3_4_chunk."time"
-> Sort
Output: _hyper_3_4_chunk."time"
Sort Key: _hyper_3_4_chunk."time"
-> Append
-> Seq Scan on _timescaledb_internal._hyper_3_4_chunk
Output: _hyper_3_4_chunk."time"
Filter: (_hyper_3_4_chunk."time" < 2)
-> Seq Scan on _timescaledb_internal._hyper_3_5_chunk
Output: _hyper_3_5_chunk."time"
Filter: (_hyper_3_5_chunk."time" < 2)
(12 rows)
SELECT * FROM partial_index_test WHERE time < 2 ORDER BY time LIMIT 2;
time
------
0
1
(2 rows)
SET enable_seqscan TO true;
SET enable_bitmapscan TO true;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER_2
\set ON_ERROR_STOP 0
CREATE INDEX ON partial_index_test (time) WITH (timescaledb.transaction_per_chunk, timescaledb.max_chunks='1');
ERROR: must be owner of hypertable "partial_index_test"
\set ON_ERROR_STOP 1


@ -0,0 +1,445 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
--parallel queries require big-ish tables so collect them all here
--so that we need to generate queries only once.
-- output with analyze is not stable because it depends on worker assignment
\set PREFIX 'EXPLAIN (costs off)'
\set CHUNK1 _timescaledb_internal._hyper_1_1_chunk
\set CHUNK2 _timescaledb_internal._hyper_1_2_chunk
CREATE TABLE test (i int, j double precision, ts timestamp);
SELECT create_hypertable('test','i',chunk_time_interval:=500000);
NOTICE: adding not-null constraint to column "i"
create_hypertable
-------------------
(1,public,test,t)
(1 row)
INSERT INTO test SELECT x, x+0.1, _timescaledb_internal.to_timestamp(x*1000) FROM generate_series(0,1000000-1,10) AS x;
ANALYZE test;
ALTER TABLE :CHUNK1 SET (parallel_workers=2);
ALTER TABLE :CHUNK2 SET (parallel_workers=2);
SET work_mem TO '50MB';
SET force_parallel_mode = 'on';
SET max_parallel_workers_per_gather = 4;
SET parallel_setup_cost TO 0;
EXPLAIN (costs off) SELECT first(i, j) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT first(i, j) FROM "test";
first
-------
0
(1 row)
EXPLAIN (costs off) SELECT last(i, j) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT last(i, j) FROM "test";
last
--------
999990
(1 row)
EXPLAIN (costs off) SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk.ts)
-> Result
-> Append
-> Seq Scan on _hyper_1_1_chunk
-> Seq Scan on _hyper_1_2_chunk
(12 rows)
-- test single copy parallel plan with parallel chunk append
:PREFIX SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
WHERE length(version()) > 0
GROUP BY sec
ORDER BY sec
LIMIT 5;
QUERY PLAN
--------------------------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Limit
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, test.ts))
-> HashAggregate
Group Key: time_bucket('@ 1 sec'::interval, test.ts)
-> Result
One-Time Filter: (length(version()) > 0)
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_2_chunk
(18 rows)
SELECT time_bucket('1 second', ts) sec, last(i, j)
FROM "test"
GROUP BY sec
ORDER BY sec
LIMIT 5;
sec | last
--------------------------+------
Wed Dec 31 16:00:00 1969 | 990
Wed Dec 31 16:00:01 1969 | 1990
Wed Dec 31 16:00:02 1969 | 2990
Wed Dec 31 16:00:03 1969 | 3990
Wed Dec 31 16:00:04 1969 | 4990
(5 rows)
--test variants of histogram
EXPLAIN (costs off) SELECT histogram(i, 1, 1000000, 2) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT histogram(i, 1, 1000000, 2) FROM "test";
histogram
-------------------
{1,50000,49999,0}
(1 row)
EXPLAIN (costs off) SELECT histogram(i, 1,1000001,10) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT histogram(i, 1, 1000001, 10) FROM "test";
histogram
------------------------------------------------------------------
{1,10000,10000,10000,10000,10000,10000,10000,10000,10000,9999,0}
(1 row)
EXPLAIN (costs off) SELECT histogram(i, 0,100000,5) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT histogram(i, 0, 100000, 5) FROM "test";
histogram
------------------------------------
{0,2000,2000,2000,2000,2000,90000}
(1 row)
EXPLAIN (costs off) SELECT histogram(i, 10,100000,5) FROM "test";
QUERY PLAN
---------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Parallel Seq Scan on _hyper_1_2_chunk
(7 rows)
SELECT histogram(i, 10, 100000, 5) FROM "test";
histogram
------------------------------------
{1,2000,2000,2000,2000,1999,90000}
(1 row)
EXPLAIN (costs off) SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j);
QUERY PLAN
------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Parallel Append
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: ((i)::double precision = '-1'::double precision)
-> Parallel Seq Scan on _hyper_1_2_chunk
Filter: ((i)::double precision = '-1'::double precision)
(9 rows)
SELECT histogram(NULL, 10,100000,5) FROM "test" WHERE i = coalesce(-1,j);
histogram
-----------
(1 row)
-- test parallel ChunkAppend
:PREFIX SELECT i FROM "test" WHERE length(version()) > 0;
QUERY PLAN
--------------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Result
One-Time Filter: (length(version()) > 0)
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Seq Scan on _hyper_1_2_chunk
(13 rows)
-- test worker assignment
-- first chunk should have 1 worker and second chunk should have 2
SET max_parallel_workers_per_gather TO 2;
:PREFIX SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk
Index Cond: (i >= 400000)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
Filter: (i >= 400000)
(16 rows)
SELECT count(*) FROM "test" WHERE i >= 400000 AND length(version()) > 0;
count
-------
60000
(1 row)
-- test worker assignment
-- first chunk should have 2 workers and second chunk should have 1
:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk
Index Cond: (i < 600000)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 600000)
(16 rows)
SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
count
-------
60000
(1 row)
-- test ChunkAppend with # workers < # children
SET max_parallel_workers_per_gather TO 1;
:PREFIX SELECT count(*) FROM "test" WHERE length(version()) > 0;
QUERY PLAN
---------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 1
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
(14 rows)
SELECT count(*) FROM "test" WHERE length(version()) > 0;
count
--------
100000
(1 row)
-- test ChunkAppend with # workers > # children
SET max_parallel_workers_per_gather TO 2;
:PREFIX SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0;
QUERY PLAN
---------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
Filter: (i >= 500000)
(12 rows)
SELECT count(*) FROM "test" WHERE i >= 500000 AND length(version()) > 0;
count
-------
50000
(1 row)
RESET max_parallel_workers_per_gather;
-- test partial and non-partial plans
-- these will not be parallel on PG < 11
ALTER TABLE :CHUNK1 SET (parallel_workers=0);
ALTER TABLE :CHUNK2 SET (parallel_workers=2);
:PREFIX SELECT count(*) FROM "test" WHERE i > 400000 AND length(version()) > 0;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Index Only Scan using _hyper_1_1_chunk_test_i_idx on _hyper_1_1_chunk
Index Cond: (i > 400000)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_2_chunk
Filter: (i > 400000)
(16 rows)
ALTER TABLE :CHUNK1 SET (parallel_workers=2);
ALTER TABLE :CHUNK2 SET (parallel_workers=0);
:PREFIX SELECT count(*) FROM "test" WHERE i < 600000 AND length(version()) > 0;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Finalize Aggregate
-> Gather
Workers Planned: 2
-> Partial Aggregate
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Result
One-Time Filter: (length(version()) > 0)
-> Index Only Scan using _hyper_1_2_chunk_test_i_idx on _hyper_1_2_chunk
Index Cond: (i < 600000)
-> Result
One-Time Filter: (length(version()) > 0)
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (i < 600000)
(16 rows)
ALTER TABLE :CHUNK1 RESET (parallel_workers);
ALTER TABLE :CHUNK2 RESET (parallel_workers);
-- now() is not marked parallel safe in PostgreSQL < 12, so using now()
-- in a query will prevent parallelism, but CURRENT_TIMESTAMP and
-- transaction_timestamp() are marked parallel safe
:PREFIX SELECT i FROM "test" WHERE ts < CURRENT_TIMESTAMP;
QUERY PLAN
------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Seq Scan on _hyper_1_1_chunk
Filter: (ts < CURRENT_TIMESTAMP)
-> Seq Scan on _hyper_1_2_chunk
Filter: (ts < CURRENT_TIMESTAMP)
(9 rows)
:PREFIX SELECT i FROM "test" WHERE ts < transaction_timestamp();
QUERY PLAN
------------------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Seq Scan on _hyper_1_1_chunk
Filter: (ts < transaction_timestamp())
-> Seq Scan on _hyper_1_2_chunk
Filter: (ts < transaction_timestamp())
(9 rows)
-- this won't be a parallel query because now() is parallel restricted in PG < 12
:PREFIX SELECT i FROM "test" WHERE ts < now();
QUERY PLAN
-------------------------------------------
Gather
Workers Planned: 1
Single Copy: true
-> Custom Scan (ChunkAppend) on test
Chunks excluded during startup: 0
-> Seq Scan on _hyper_1_1_chunk
Filter: (ts < now())
-> Seq Scan on _hyper_1_2_chunk
Filter: (ts < now())
(9 rows)
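-- Hedged aside: these markings can be checked directly in the catalog; per
-- the comment above, both functions should report 's' (parallel safe) on
-- PG 12+. Output omitted.
SELECT proname, proparallel
FROM pg_proc
WHERE proname IN ('now', 'transaction_timestamp')
ORDER BY 1;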


@ -0,0 +1,521 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE part_legacy(time timestamptz, temp float, device int);
SELECT create_hypertable('part_legacy', 'time', 'device', 2, partitioning_func => '_timescaledb_internal.get_partition_for_key');
NOTICE: adding not-null constraint to column "time"
create_hypertable
--------------------------
(1,public,part_legacy,t)
(1 row)
-- Show legacy partitioning function is used
SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func
----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | |
2 | 1 | device | integer | f | 2 | _timescaledb_internal | get_partition_for_key | | |
(2 rows)
INSERT INTO part_legacy VALUES ('2017-03-22T09:18:23', 23.4, 1);
INSERT INTO part_legacy VALUES ('2017-03-22T09:18:23', 23.4, 76);
VACUUM part_legacy;
-- Show two chunks and CHECK constraint with cast
SELECT * FROM test.show_constraintsp('_timescaledb_internal._hyper_1_%_chunk');
Table | Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
----------------------------------------+--------------+------+----------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+-----------
_timescaledb_internal._hyper_1_1_chunk | constraint_1 | c | {time} | - | (("time" >= 'Wed Mar 15 17:00:00 2017 PDT'::timestamp with time zone) AND ("time" < 'Wed Mar 22 17:00:00 2017 PDT'::timestamp with time zone)) | f | f | t
_timescaledb_internal._hyper_1_1_chunk | constraint_2 | c | {device} | - | (_timescaledb_internal.get_partition_for_key(device) >= 1073741823) | f | f | t
_timescaledb_internal._hyper_1_2_chunk | constraint_1 | c | {time} | - | (("time" >= 'Wed Mar 15 17:00:00 2017 PDT'::timestamp with time zone) AND ("time" < 'Wed Mar 22 17:00:00 2017 PDT'::timestamp with time zone)) | f | f | t
_timescaledb_internal._hyper_1_2_chunk | constraint_3 | c | {device} | - | (_timescaledb_internal.get_partition_for_key(device) < 1073741823) | f | f | t
(4 rows)
-- Make sure constraint exclusion works on device column
BEGIN;
-- For plan stability between versions
SET LOCAL enable_bitmapscan = false;
SET LOCAL enable_indexscan = false;
EXPLAIN (verbose, costs off)
SELECT * FROM part_legacy WHERE device = 1;
QUERY PLAN
-----------------------------------------------------------------------------------
Seq Scan on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."time", _hyper_1_1_chunk.temp, _hyper_1_1_chunk.device
Filter: (_hyper_1_1_chunk.device = 1)
(3 rows)
COMMIT;
CREATE TABLE part_new(time timestamptz, temp float, device int);
SELECT create_hypertable('part_new', 'time', 'device', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
-----------------------
(2,public,part_new,t)
(1 row)
SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func
----+---------------+-------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | |
2 | 1 | device | integer | f | 2 | _timescaledb_internal | get_partition_for_key | | |
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | |
4 | 2 | device | integer | f | 2 | _timescaledb_internal | get_partition_hash | | |
(4 rows)
INSERT INTO part_new VALUES ('2017-03-22T09:18:23', 23.4, 1);
INSERT INTO part_new VALUES ('2017-03-22T09:18:23', 23.4, 2);
VACUUM part_new;
-- Show two chunks and CHECK constraint without cast
SELECT * FROM test.show_constraintsp('_timescaledb_internal._hyper_2_%_chunk');
Table | Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
----------------------------------------+--------------+------+----------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+-----------
_timescaledb_internal._hyper_2_3_chunk | constraint_4 | c | {time} | - | (("time" >= 'Wed Mar 15 17:00:00 2017 PDT'::timestamp with time zone) AND ("time" < 'Wed Mar 22 17:00:00 2017 PDT'::timestamp with time zone)) | f | f | t
_timescaledb_internal._hyper_2_3_chunk | constraint_5 | c | {device} | - | (_timescaledb_internal.get_partition_hash(device) < 1073741823) | f | f | t
_timescaledb_internal._hyper_2_4_chunk | constraint_4 | c | {time} | - | (("time" >= 'Wed Mar 15 17:00:00 2017 PDT'::timestamp with time zone) AND ("time" < 'Wed Mar 22 17:00:00 2017 PDT'::timestamp with time zone)) | f | f | t
_timescaledb_internal._hyper_2_4_chunk | constraint_6 | c | {device} | - | (_timescaledb_internal.get_partition_hash(device) >= 1073741823) | f | f | t
(4 rows)
-- Make sure constraint exclusion works on device column
BEGIN;
-- For plan stability between versions
SET LOCAL enable_bitmapscan = false;
SET LOCAL enable_indexscan = false;
EXPLAIN (verbose, costs off)
SELECT * FROM part_new WHERE device = 1;
QUERY PLAN
-----------------------------------------------------------------------------------
Seq Scan on _timescaledb_internal._hyper_2_3_chunk
Output: _hyper_2_3_chunk."time", _hyper_2_3_chunk.temp, _hyper_2_3_chunk.device
Filter: (_hyper_2_3_chunk.device = 1)
(3 rows)
COMMIT;
CREATE TABLE part_new_convert1(time timestamptz, temp float8, device int);
SELECT create_hypertable('part_new_convert1', 'time', 'temp', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
--------------------------------
(3,public,part_new_convert1,t)
(1 row)
INSERT INTO part_new_convert1 VALUES ('2017-03-22T09:18:23', 1.0, 2);
\set ON_ERROR_STOP 0
-- Changing the type of a hash-partitioned column should not be supported
ALTER TABLE part_new_convert1 ALTER COLUMN temp TYPE numeric;
ERROR: cannot change the type of a hash-partitioned column
\set ON_ERROR_STOP 1
-- Should be able to change if not hash partitioned though
ALTER TABLE part_new_convert1 ALTER COLUMN time TYPE timestamp;
SELECT * FROM test.show_columnsp('_timescaledb_internal._hyper_3_%_chunk');
Relation | Kind | Column | Column type | NotNull
----------------------------------------+------+--------+-----------------------------+---------
_timescaledb_internal._hyper_3_5_chunk | r | time | timestamp without time zone | t
_timescaledb_internal._hyper_3_5_chunk | r | temp | double precision | f
_timescaledb_internal._hyper_3_5_chunk | r | device | integer | f
(3 rows)
CREATE TABLE part_add_dim(time timestamptz, temp float8, device int, location int);
SELECT create_hypertable('part_add_dim', 'time', 'temp', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
---------------------------
(4,public,part_add_dim,t)
(1 row)
\set ON_ERROR_STOP 0
SELECT add_dimension('part_add_dim', 'location', 2, partitioning_func => 'bad_func');
ERROR: function "bad_func" does not exist at character 74
\set ON_ERROR_STOP 1
SELECT add_dimension('part_add_dim', 'location', 2, partitioning_func => '_timescaledb_internal.get_partition_for_key');
add_dimension
------------------------------------
(9,public,part_add_dim,location,t)
(1 row)
SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func
----+---------------+-------------+-----------------------------+---------+------------+--------------------------+-----------------------+-----------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | |
2 | 1 | device | integer | f | 2 | _timescaledb_internal | get_partition_for_key | | |
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | |
4 | 2 | device | integer | f | 2 | _timescaledb_internal | get_partition_hash | | |
6 | 3 | temp | double precision | f | 2 | _timescaledb_internal | get_partition_hash | | |
5 | 3 | time | timestamp without time zone | t | | | | 604800000000 | |
7 | 4 | time | timestamp with time zone | t | | | | 604800000000 | |
8 | 4 | temp | double precision | f | 2 | _timescaledb_internal | get_partition_hash | | |
9 | 4 | location | integer | f | 2 | _timescaledb_internal | get_partition_for_key | | |
(9 rows)
-- Test that we support custom SQL-based partitioning functions and
-- that our native partitioning function handles function expressions
-- as arguments
CREATE OR REPLACE FUNCTION custom_partfunc(source anyelement)
RETURNS INTEGER LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
DECLARE
retval INTEGER;
BEGIN
retval = _timescaledb_internal.get_partition_hash(substring(source::text FROM '[A-za-z0-9 ]+'));
RAISE NOTICE 'hash value for % is %', source, retval;
RETURN retval;
END
$BODY$;
CREATE TABLE part_custom_func(time timestamptz, temp float8, device text);
SELECT create_hypertable('part_custom_func', 'time', 'device', 2, partitioning_func => 'custom_partfunc');
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------------
(5,public,part_custom_func,t)
(1 row)
SELECT _timescaledb_internal.get_partition_hash(substring('dev1' FROM '[A-za-z0-9 ]+'));
get_partition_hash
--------------------
1129986420
(1 row)
SELECT _timescaledb_internal.get_partition_hash('dev1'::text);
get_partition_hash
--------------------
1129986420
(1 row)
SELECT _timescaledb_internal.get_partition_hash('dev7'::text);
get_partition_hash
--------------------
449729092
(1 row)
INSERT INTO part_custom_func VALUES ('2017-03-22T09:18:23', 23.4, 'dev1'),
('2017-03-22T09:18:23', 23.4, 'dev7');
NOTICE: hash value for dev1 is 1129986420
NOTICE: hash value for dev1 is 1129986420
NOTICE: hash value for dev7 is 449729092
NOTICE: hash value for dev7 is 449729092
SELECT * FROM test.show_subtables('part_custom_func');
Child | Tablespace
----------------------------------------+------------
_timescaledb_internal._hyper_5_6_chunk |
_timescaledb_internal._hyper_5_7_chunk |
(2 rows)
-- This first test is slightly trivial, but segfaulted in old versions
CREATE TYPE simpl AS (val1 int4);
CREATE OR REPLACE FUNCTION simpl_type_hash(ANYELEMENT) RETURNS int4 AS $$
SELECT $1.val1;
$$ LANGUAGE SQL IMMUTABLE;
CREATE TABLE simpl_partition ("timestamp" TIMESTAMPTZ, object simpl);
SELECT create_hypertable(
'simpl_partition',
'timestamp',
'object',
1000,
chunk_time_interval => interval '1 day',
partitioning_func=>'simpl_type_hash');
NOTICE: adding not-null constraint to column "timestamp"
create_hypertable
------------------------------
(6,public,simpl_partition,t)
(1 row)
INSERT INTO simpl_partition VALUES ('2017-03-22T09:18:23', ROW(1)::simpl);
SELECT * from simpl_partition;
timestamp | object
------------------------------+--------
Wed Mar 22 09:18:23 2017 PDT | (1)
(1 row)
-- Also test that the fix works when we have more chunks than allowed open at once
SET timescaledb.max_open_chunks_per_insert=1;
INSERT INTO simpl_partition VALUES
('2017-03-22T10:18:23', ROW(0)::simpl),
('2017-03-22T10:18:23', ROW(1)::simpl),
('2017-03-22T10:18:23', ROW(2)::simpl),
('2017-03-22T10:18:23', ROW(3)::simpl),
('2017-03-22T10:18:23', ROW(4)::simpl),
('2017-03-22T10:18:23', ROW(5)::simpl);
SET timescaledb.max_open_chunks_per_insert=default;
SELECT * from simpl_partition;
timestamp | object
------------------------------+--------
Wed Mar 22 09:18:23 2017 PDT | (1)
Wed Mar 22 10:18:23 2017 PDT | (0)
Wed Mar 22 10:18:23 2017 PDT | (1)
Wed Mar 22 10:18:23 2017 PDT | (2)
Wed Mar 22 10:18:23 2017 PDT | (3)
Wed Mar 22 10:18:23 2017 PDT | (4)
Wed Mar 22 10:18:23 2017 PDT | (5)
(7 rows)
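-- Illustrative check (editorial, not captured output): confirm the GUC
-- was restored to its default after the test above.
SHOW timescaledb.max_open_chunks_per_insert;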
-- Test that index creation is handled correctly.
CREATE TABLE hyper_with_index(time timestamptz, temp float, device int);
CREATE UNIQUE INDEX temp_index ON hyper_with_index(temp);
\set ON_ERROR_STOP 0
SELECT create_hypertable('hyper_with_index', 'time');
NOTICE: adding not-null constraint to column "time"
ERROR: cannot create a unique index without the column "time" (used in partitioning)
SELECT create_hypertable('hyper_with_index', 'time', 'device', 2);
NOTICE: adding not-null constraint to column "time"
ERROR: cannot create a unique index without the column "time" (used in partitioning)
SELECT create_hypertable('hyper_with_index', 'time', 'temp', 2);
NOTICE: adding not-null constraint to column "time"
ERROR: cannot create a unique index without the column "time" (used in partitioning)
\set ON_ERROR_STOP 1
DROP INDEX temp_index;
CREATE UNIQUE INDEX time_index ON hyper_with_index(time);
\set ON_ERROR_STOP 0
-- should error because device is not in the index
SELECT create_hypertable('hyper_with_index', 'time', 'device', 4);
NOTICE: adding not-null constraint to column "time"
ERROR: cannot create a unique index without the column "device" (used in partitioning)
\set ON_ERROR_STOP 1
SELECT create_hypertable('hyper_with_index', 'time');
NOTICE: adding not-null constraint to column "time"
create_hypertable
--------------------------------
(11,public,hyper_with_index,t)
(1 row)
-- make sure the user-created index is used.
-- not using \d or \d+ because output syntax differs
-- between PostgreSQL 9 and PostgreSQL 10.
SELECT indexname FROM pg_indexes WHERE tablename = 'hyper_with_index';
indexname
------------
time_index
(1 row)
\set ON_ERROR_STOP 0
SELECT add_dimension('hyper_with_index', 'device', 4);
ERROR: cannot create a unique index without the column "device" (used in partitioning)
\set ON_ERROR_STOP 1
DROP INDEX time_index;
CREATE UNIQUE INDEX time_space_index ON hyper_with_index(time, device);
SELECT add_dimension('hyper_with_index', 'device', 4);
add_dimension
---------------------------------------
(23,public,hyper_with_index,device,t)
(1 row)
CREATE TABLE hyper_with_primary(time TIMESTAMPTZ PRIMARY KEY, temp float, device int);
\set ON_ERROR_STOP 0
SELECT create_hypertable('hyper_with_primary', 'time', 'device', 4);
ERROR: cannot create a unique index without the column "device" (used in partitioning)
\set ON_ERROR_STOP 1
SELECT create_hypertable('hyper_with_primary', 'time');
create_hypertable
----------------------------------
(13,public,hyper_with_primary,t)
(1 row)
\set ON_ERROR_STOP 0
SELECT add_dimension('hyper_with_primary', 'device', 4);
ERROR: cannot create a unique index without the column "device" (used in partitioning)
\set ON_ERROR_STOP 1
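-- Illustrative workaround (hypothetical table, not part of the test): a
-- unique constraint is accepted when it includes every partitioning
-- column, e.g. a composite primary key over (time, device).
CREATE TABLE hyper_with_composite_pk(time TIMESTAMPTZ, device int, temp float,
    PRIMARY KEY (time, device));
SELECT create_hypertable('hyper_with_composite_pk', 'time', 'device', 4);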
-- NON-unique indexes can still be created
CREATE INDEX temp_index ON hyper_with_index(temp);
-- Make sure custom composite types are supported as dimensions
CREATE TYPE TUPLE as (val1 int4, val2 int4);
CREATE FUNCTION tuple_hash(value ANYELEMENT) RETURNS INT4
LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
BEGIN
RAISE NOTICE 'custom hash value is: %', value.val1+value.val2;
RETURN value.val1+value.val2;
END
$BODY$;
CREATE TABLE part_custom_dim (time TIMESTAMPTZ, combo TUPLE, device TEXT);
\set ON_ERROR_STOP 0
-- should fail because no partitioning function supplied and the given custom type
-- has no default hash function
SELECT create_hypertable('part_custom_dim', 'time', 'combo', 4);
NOTICE: adding not-null constraint to column "time"
ERROR: could not find hash function for type tuple
\set ON_ERROR_STOP 1
SELECT create_hypertable('part_custom_dim', 'time', 'combo', 4, partitioning_func=>'tuple_hash');
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------------
(15,public,part_custom_dim,t)
(1 row)
INSERT INTO part_custom_dim(time, combo) VALUES (now(), (1,2));
NOTICE: custom hash value is: 3
NOTICE: custom hash value is: 3
DROP TABLE part_custom_dim;
-- Now make sure that partitioning_func_schema is updated properly when the schema is renamed
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE SCHEMA IF NOT EXISTS my_partitioning_schema;
CREATE FUNCTION my_partitioning_schema.tuple_hash(value ANYELEMENT) RETURNS INT4
LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
BEGIN
RAISE NOTICE 'custom hash value is: %', value.val1+value.val2;
RETURN value.val1+value.val2;
END
$BODY$;
CREATE TABLE part_custom_dim (time TIMESTAMPTZ, combo TUPLE, device TEXT);
SELECT create_hypertable('part_custom_dim', 'time', 'combo', 4, partitioning_func=>'my_partitioning_schema.tuple_hash');
NOTICE: adding not-null constraint to column "time"
create_hypertable
-------------------------------
(16,public,part_custom_dim,t)
(1 row)
INSERT INTO part_custom_dim(time, combo) VALUES (now(), (1,2));
NOTICE: custom hash value is: 3
NOTICE: custom hash value is: 3
ALTER SCHEMA my_partitioning_schema RENAME TO new_partitioning_schema;
-- Inserts should work even after we rename the schema
INSERT INTO part_custom_dim(time, combo) VALUES (now(), (3,4));
NOTICE: custom hash value is: 7
NOTICE: custom hash value is: 7
-- Test partitioning function on an open (time) dimension
CREATE OR REPLACE FUNCTION time_partfunc(unixtime float8)
RETURNS TIMESTAMPTZ LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
DECLARE
retval TIMESTAMPTZ;
BEGIN
retval := to_timestamp(unixtime);
RAISE NOTICE 'time value for % is %', unixtime, timezone('UTC', retval);
RETURN retval;
END
$BODY$;
CREATE OR REPLACE FUNCTION time_partfunc_bad_parameters(unixtime float8, extra text)
RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE AS
$BODY$
SELECT to_timestamp(unixtime);
$BODY$;
CREATE OR REPLACE FUNCTION time_partfunc_bad_return_type(unixtime float8)
RETURNS FLOAT8 LANGUAGE SQL IMMUTABLE AS
$BODY$
SELECT unixtime;
$BODY$;
CREATE TABLE part_time_func(time float8, temp float8, device text);
\set ON_ERROR_STOP 0
-- Should fail due to invalid time column
SELECT create_hypertable('part_time_func', 'time');
ERROR: invalid type for dimension "time"
-- Should fail due to bad signature of time partitioning function
SELECT create_hypertable('part_time_func', 'time', time_partitioning_func => 'time_partfunc_bad_parameters');
ERROR: invalid partitioning function
SELECT create_hypertable('part_time_func', 'time', time_partitioning_func => 'time_partfunc_bad_return_type');
ERROR: invalid partitioning function
\set ON_ERROR_STOP 1
-- Should work with time partitioning function that returns a valid time type
SELECT create_hypertable('part_time_func', 'time', time_partitioning_func => 'time_partfunc');
NOTICE: adding not-null constraint to column "time"
create_hypertable
------------------------------
(17,public,part_time_func,t)
(1 row)
INSERT INTO part_time_func VALUES (1530214157.134, 23.4, 'dev1'),
(1533214157.8734, 22.3, 'dev7');
NOTICE: time value for 1530214157.134 is Thu Jun 28 19:29:17.134 2018
NOTICE: time value for 1530214157.134 is Thu Jun 28 19:29:17.134 2018
NOTICE: time value for 1530214157.134 is Thu Jun 28 19:29:17.134 2018
NOTICE: time value for 1530214157.134 is Thu Jun 28 19:29:17.134 2018
NOTICE: time value for 1533214157.8734 is Thu Aug 02 12:49:17.8734 2018
NOTICE: time value for 1533214157.8734 is Thu Aug 02 12:49:17.8734 2018
NOTICE: time value for 1533214157.8734 is Thu Aug 02 12:49:17.8734 2018
NOTICE: time value for 1533214157.8734 is Thu Aug 02 12:49:17.8734 2018
SELECT time, temp, device FROM part_time_func;
time | temp | device
-----------------+------+--------
1530214157.134 | 23.4 | dev1
1533214157.8734 | 22.3 | dev7
(2 rows)
SELECT time_partfunc(time) at time zone 'UTC', temp, device FROM part_time_func;
NOTICE: time value for 1530214157.134 is Thu Jun 28 19:29:17.134 2018
NOTICE: time value for 1533214157.8734 is Thu Aug 02 12:49:17.8734 2018
timezone | temp | device
-------------------------------+------+--------
Thu Jun 28 19:29:17.134 2018 | 23.4 | dev1
Thu Aug 02 12:49:17.8734 2018 | 22.3 | dev7
(2 rows)
SELECT * FROM test.show_subtables('part_time_func');
Child | Tablespace
------------------------------------------+------------
_timescaledb_internal._hyper_17_11_chunk |
_timescaledb_internal._hyper_17_12_chunk |
(2 rows)
SELECT (test.show_constraints("Child")).*
FROM test.show_subtables('part_time_func');
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
---------------+------+---------+-------+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+-----------
constraint_18 | c | {time} | - | ((time_partfunc("time") >= 'Wed Jun 27 17:00:00 2018 PDT'::timestamp with time zone) AND (time_partfunc("time") < 'Wed Jul 04 17:00:00 2018 PDT'::timestamp with time zone)) | f | f | t
constraint_19 | c | {time} | - | ((time_partfunc("time") >= 'Wed Aug 01 17:00:00 2018 PDT'::timestamp with time zone) AND (time_partfunc("time") < 'Wed Aug 08 17:00:00 2018 PDT'::timestamp with time zone)) | f | f | t
(2 rows)
SELECT (test.show_indexes("Child")).*
FROM test.show_subtables('part_time_func');
Index | Columns | Expr | Unique | Primary | Exclusion | Tablespace
------------------------------------------------------------------+---------+---------------------+--------+---------+-----------+------------
_timescaledb_internal._hyper_17_11_chunk_part_time_func_expr_idx | {expr} | time_partfunc(expr) | f | f | f |
_timescaledb_internal._hyper_17_12_chunk_part_time_func_expr_idx | {expr} | time_partfunc(expr) | f | f | f |
(2 rows)
-- Check that constraint exclusion works with time partitioning
-- function (scan only one chunk)
-- No exclusion
EXPLAIN (verbose, costs off)
SELECT * FROM part_time_func;
QUERY PLAN
-----------------------------------------------------------------------------------------------
Append
-> Seq Scan on _timescaledb_internal._hyper_17_11_chunk
Output: _hyper_17_11_chunk."time", _hyper_17_11_chunk.temp, _hyper_17_11_chunk.device
-> Seq Scan on _timescaledb_internal._hyper_17_12_chunk
Output: _hyper_17_12_chunk."time", _hyper_17_12_chunk.temp, _hyper_17_12_chunk.device
(5 rows)
-- Exclude using the function on time
EXPLAIN (verbose, costs off)
SELECT * FROM part_time_func WHERE time_partfunc(time) < '2018-07-01';
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------
Index Scan using _hyper_17_11_chunk_part_time_func_expr_idx on _timescaledb_internal._hyper_17_11_chunk
Output: _hyper_17_11_chunk."time", _hyper_17_11_chunk.temp, _hyper_17_11_chunk.device
Index Cond: (time_partfunc(_hyper_17_11_chunk."time") < 'Sun Jul 01 00:00:00 2018 PDT'::timestamp with time zone)
(3 rows)
-- Exclude using the same date but as a UNIX timestamp. Won't do an
-- index scan since the index is on the time function expression
EXPLAIN (verbose, costs off)
SELECT * FROM part_time_func WHERE time < 1530403200.0;
NOTICE: time value for 1530403200 is Sun Jul 01 00:00:00 2018
QUERY PLAN
-----------------------------------------------------------------------------------------
Seq Scan on _timescaledb_internal._hyper_17_11_chunk
Output: _hyper_17_11_chunk."time", _hyper_17_11_chunk.temp, _hyper_17_11_chunk.device
Filter: (_hyper_17_11_chunk."time" < '1530403200'::double precision)
(3 rows)
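-- Illustrative sketch (hypothetical index, not created by the test): a
-- plain btree on the raw time column would let the numeric comparison
-- above use an index scan, at the cost of maintaining a second index.
CREATE INDEX part_time_func_raw_time_idx ON part_time_func ("time");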
-- Check that inserts will fail if we use a time partitioning function
-- that returns NULL
CREATE OR REPLACE FUNCTION time_partfunc_null_ret(unixtime float8)
RETURNS TIMESTAMPTZ LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
BEGIN
RETURN NULL;
END
$BODY$;
CREATE TABLE part_time_func_null_ret(time float8, temp float8, device text);
SELECT create_hypertable('part_time_func_null_ret', 'time', time_partitioning_func => 'time_partfunc_null_ret');
NOTICE: adding not-null constraint to column "time"
create_hypertable
---------------------------------------
(18,public,part_time_func_null_ret,t)
(1 row)
\set ON_ERROR_STOP 0
INSERT INTO part_time_func_null_ret VALUES (1530214157.134, 23.4, 'dev1'),
(1533214157.8734, 22.3, 'dev7');
ERROR: partitioning function "public.time_partfunc_null_ret" returned NULL
\set ON_ERROR_STOP 1
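-- Illustrative sketch (hypothetical function, not part of the test): a
-- time partitioning function should be total over its input domain; a
-- defensive variant can raise a descriptive error instead of silently
-- returning NULL for values it cannot map.
CREATE OR REPLACE FUNCTION time_partfunc_strict(unixtime float8)
RETURNS TIMESTAMPTZ LANGUAGE PLPGSQL IMMUTABLE AS
$BODY$
BEGIN
    IF unixtime IS NULL THEN
        RAISE EXCEPTION 'cannot map NULL time value';
    END IF;
    RETURN to_timestamp(unixtime);
END
$BODY$;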

File diff suppressed because it is too large

@ -0,0 +1,329 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set PREFIX 'EXPLAIN (costs off) '
\ir include/plan_hashagg_load.sql
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE metric (id SERIAL PRIMARY KEY, value INT);
CREATE TABLE hyper(time TIMESTAMP NOT NULL, time_int BIGINT, time_broken DATE, metricid int, value double precision);
CREATE TABLE regular(time TIMESTAMP NOT NULL, time_int BIGINT, time_date DATE, metricid int, value double precision);
SELECT create_hypertable('hyper', 'time', chunk_time_interval => interval '20 day', create_default_indexes=>FALSE);
create_hypertable
--------------------
(1,public,hyper,t)
(1 row)
ALTER TABLE hyper
DROP COLUMN time_broken,
ADD COLUMN time_date DATE;
INSERT INTO metric(value) SELECT random()*100 FROM generate_series(0,10);
INSERT INTO hyper SELECT t, EXTRACT(EPOCH FROM t), (EXTRACT(EPOCH FROM t)::int % 10)+1, 1.0, t::date FROM generate_series('2001-01-01', '2001-01-10', INTERVAL '1 second') t;
INSERT INTO regular(time, time_int, time_date, metricid, value)
SELECT t, EXTRACT(EPOCH FROM t), t::date, (EXTRACT(EPOCH FROM t)::int % 10) + 1, 1.0 FROM generate_series('2001-01-01', '2001-01-02', INTERVAL '1 second') t;
--test some queries before ANALYZE
EXPLAIN (costs off) SELECT time_bucket('1 minute', time) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time"))
-> Sort
Sort Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) DESC
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
EXPLAIN (costs off) SELECT date_trunc('minute', time) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time"))
-> Sort
Sort Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) DESC
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
-- Test partitioning function on an open (time) dimension
CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8)
RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE AS
$BODY$
SELECT to_timestamp(unixtime);
$BODY$;
CREATE TABLE hyper_timefunc(time float8 NOT NULL, metricid int, VALUE double precision, time_date DATE);
SELECT create_hypertable('hyper_timefunc', 'time', chunk_time_interval => interval '20 day', create_default_indexes=>FALSE, time_partitioning_func => 'unix_to_timestamp');
create_hypertable
-----------------------------
(2,public,hyper_timefunc,t)
(1 row)
INSERT INTO hyper_timefunc SELECT time_int, metricid, VALUE, time_date FROM hyper;
ANALYZE metric;
ANALYZE hyper;
ANALYZE regular;
ANALYZE hyper_timefunc;
\ir include/plan_hashagg_query.sql
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
:PREFIX SELECT time_bucket('1 minute', time) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")) DESC
-> HashAggregate
Group Key: time_bucket('@ 1 min'::interval, _hyper_1_1_chunk."time")
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
:PREFIX SELECT time_bucket('1 hour', time) AS MetricMinuteTs, metricid, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs, metricid
ORDER BY MetricMinuteTs DESC, metricid;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket('@ 1 hour'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.metricid
-> HashAggregate
Group Key: time_bucket('@ 1 hour'::interval, _hyper_1_1_chunk."time"), _hyper_1_1_chunk.metricid
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
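-- Illustrative sketch (editorial, not captured output): the HashAggregate
-- above is a cost-based choice; disabling it should fall back to a sorted
-- GroupAggregate for the same query.
SET enable_hashagg TO off;
:PREFIX SELECT time_bucket('1 hour', time) AS MetricMinuteTs, metricid, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs, metricid
ORDER BY MetricMinuteTs DESC, metricid;
RESET enable_hashagg;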
--there should be too many groups, so hash aggregation will not be used
:PREFIX SELECT time_bucket('1 second', time) AS MetricMinuteTs, metricid, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs, metricid
ORDER BY MetricMinuteTs DESC, metricid;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")), _hyper_1_1_chunk.metricid
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")), _hyper_1_1_chunk.metricid
-> Sort
Sort Key: (time_bucket('@ 1 sec'::interval, _hyper_1_1_chunk."time")) DESC, _hyper_1_1_chunk.metricid
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
:PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: ((time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval)) DESC
-> HashAggregate
Group Key: (time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval)
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
:PREFIX SELECT time_bucket(60, time_int) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket('60'::bigint, _hyper_1_1_chunk.time_int)) DESC
-> HashAggregate
Group Key: time_bucket('60'::bigint, _hyper_1_1_chunk.time_int)
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
:PREFIX SELECT time_bucket(60, time_int, 10) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket('60'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint)) DESC
-> HashAggregate
Group Key: time_bucket('60'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint)
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
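-- Worked example (editorial): with an integer width and offset, buckets
-- begin at offset + k*width, so time_bucket(60, 125, 10) returns 70
-- because 125 falls in the bucket [70, 130).
SELECT time_bucket(60, 125, 10);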
:PREFIX SELECT time_bucket('1 day', time_date) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date)) DESC
-> Partial HashAggregate
Group Key: time_bucket('@ 1 day'::interval, _hyper_1_1_chunk.time_date)
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
:PREFIX SELECT date_trunc('minute', time) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (date_trunc('minute'::text, _hyper_1_1_chunk."time")) DESC
-> HashAggregate
Group Key: date_trunc('minute'::text, _hyper_1_1_chunk."time")
-> Result
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(7 rows)
\set ON_ERROR_STOP 0
--can't optimize invalid time unit
:PREFIX SELECT date_trunc('invalid', time) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time"))
-> Gather Merge
Workers Planned: 2
-> Partial GroupAggregate
Group Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time"))
-> Sort
Sort Key: (date_trunc('invalid'::text, _hyper_1_1_chunk."time")) DESC
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
\set ON_ERROR_STOP 1
:PREFIX SELECT date_trunc('day', time_date) AS MetricMinuteTs, AVG(value) as avg
FROM hyper
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Finalize GroupAggregate
Group Key: (date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone))
-> Gather Merge
Workers Planned: 2
-> Sort
Sort Key: (date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone)) DESC
-> Partial HashAggregate
Group Key: date_trunc('day'::text, (_hyper_1_1_chunk.time_date)::timestamp with time zone)
-> Result
-> Parallel Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
(11 rows)
--joins
--with hypertable, optimize
:PREFIX SELECT time_bucket(3600, time_int, 10) AS MetricMinuteTs, metric.value, AVG(hyper.value) as avg
FROM hyper
JOIN metric ON (hyper.metricid = metric.id)
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs, metric.id
ORDER BY MetricMinuteTs DESC, metric.id;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket('3600'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint)) DESC, metric.id
-> HashAggregate
Group Key: time_bucket('3600'::bigint, _hyper_1_1_chunk.time_int, '10'::bigint), metric.id
-> Hash Join
Hash Cond: (_hyper_1_1_chunk.metricid = metric.id)
-> Seq Scan on _hyper_1_1_chunk
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
-> Hash
-> Seq Scan on metric
(10 rows)
--no hypertable involved, no optimization
:PREFIX SELECT time_bucket(3600, time_int, 10) AS MetricMinuteTs, metric.value, AVG(regular.value) as avg
FROM regular
JOIN metric ON (regular.metricid = metric.id)
WHERE time >= '2001-01-04T00:00:00' AND time <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs, metric.id
ORDER BY MetricMinuteTs DESC, metric.id;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate
Group Key: (time_bucket('3600'::bigint, regular.time_int, '10'::bigint)), metric.id
-> Sort
Sort Key: (time_bucket('3600'::bigint, regular.time_int, '10'::bigint)) DESC, metric.id
-> Nested Loop
Join Filter: (regular.metricid = metric.id)
-> Seq Scan on regular
Filter: (("time" >= 'Thu Jan 04 00:00:00 2001'::timestamp without time zone) AND ("time" <= 'Fri Jan 05 01:00:00 2001'::timestamp without time zone))
-> Seq Scan on metric
(9 rows)
-- Try with time partitioning function. Currently not optimized for hash aggregates
:PREFIX SELECT time_bucket('1 minute', unix_to_timestamp(time)) AS MetricMinuteTs, AVG(value) as avg
FROM hyper_timefunc
WHERE unix_to_timestamp(time) >= '2001-01-04T00:00:00' AND unix_to_timestamp(time) <= '2001-01-05T01:00:00'
GROUP BY MetricMinuteTs
ORDER BY MetricMinuteTs DESC;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, to_timestamp(_hyper_2_2_chunk."time")))
-> Sort
Sort Key: (time_bucket('@ 1 min'::interval, to_timestamp(_hyper_2_2_chunk."time"))) DESC
-> Result
-> Seq Scan on _hyper_2_2_chunk
Filter: ((to_timestamp("time") >= 'Thu Jan 04 00:00:00 2001 PST'::timestamp with time zone) AND (to_timestamp("time") <= 'Fri Jan 05 01:00:00 2001 PST'::timestamp with time zone))
(7 rows)
\set ECHO none
psql:include/plan_hashagg_query.sql:60: ERROR: timestamp units "invalid" not recognized
psql:include/plan_hashagg_query.sql:60: ERROR: timestamp units "invalid" not recognized

408
test/expected/query-13.out Normal file

@ -0,0 +1,408 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set TEST_BASE_NAME query
SELECT format('include/%s_load.sql', :'TEST_BASE_NAME') as "TEST_LOAD_NAME",
format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME",
format('%s/results/%s_results_optimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_OPTIMIZED",
format('%s/results/%s_results_unoptimized.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNOPTIMIZED"
\gset
SELECT format('\! diff -u --label "Unoptimized result" --label "Optimized result" %s %s', :'TEST_RESULTS_UNOPTIMIZED', :'TEST_RESULTS_OPTIMIZED') as "DIFF_CMD"
\gset
\set PREFIX 'EXPLAIN (costs OFF)'
\ir :TEST_LOAD_NAME
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE PUBLIC.hyper_1 (
time TIMESTAMP NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE INDEX "time_plain" ON PUBLIC.hyper_1 (time DESC, series_0);
SELECT * FROM create_hypertable('"public"."hyper_1"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false);
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
1 | public | hyper_1 | t
(1 row)
INSERT INTO hyper_1 SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO hyper_1 SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
CREATE TABLE PUBLIC.hyper_1_tz (
time TIMESTAMPTZ NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE INDEX "time_plain_tz" ON PUBLIC.hyper_1_tz (time DESC, series_0);
SELECT * FROM create_hypertable('"public"."hyper_1_tz"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false);
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
2 | public | hyper_1_tz | t
(1 row)
INSERT INTO hyper_1_tz SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO hyper_1_tz SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
CREATE TABLE PUBLIC.hyper_1_int (
time int NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE INDEX "time_plain_int" ON PUBLIC.hyper_1_int (time DESC, series_0);
SELECT * FROM create_hypertable('"public"."hyper_1_int"'::regclass, 'time'::name, number_partitions => 1, chunk_time_interval=>10000, create_default_indexes=>FALSE);
hypertable_id | schema_name | table_name | created
---------------+-------------+-------------+---------
3 | public | hyper_1_int | t
(1 row)
INSERT INTO hyper_1_int SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO hyper_1_int SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
CREATE TABLE PUBLIC.hyper_1_date (
time date NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE INDEX "time_plain_date" ON PUBLIC.hyper_1_date (time DESC, series_0);
SELECT * FROM create_hypertable('"public"."hyper_1_date"'::regclass, 'time'::name, number_partitions => 1, chunk_time_interval=>86400000000, create_default_indexes=>FALSE);
hypertable_id | schema_name | table_name | created
---------------+-------------+--------------+---------
4 | public | hyper_1_date | t
(1 row)
INSERT INTO hyper_1_date SELECT to_timestamp(ser)::date, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO hyper_1_date SELECT to_timestamp(ser)::date, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
--the insert below is needed to create enough unique dates to trigger an index scan
INSERT INTO hyper_1_date SELECT to_timestamp(ser*100)::date, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
CREATE TABLE PUBLIC.plain_table (
time TIMESTAMPTZ NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE INDEX "time_plain_plain_table" ON PUBLIC.plain_table (time DESC, series_0);
INSERT INTO plain_table SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO plain_table SELECT to_timestamp(ser), ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
-- Table with a time partitioning function
CREATE TABLE PUBLIC.hyper_timefunc (
time float8 NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL
);
CREATE OR REPLACE FUNCTION unix_to_timestamp(unixtime float8)
RETURNS TIMESTAMPTZ LANGUAGE SQL IMMUTABLE AS
$BODY$
SELECT to_timestamp(unixtime);
$BODY$;
CREATE INDEX "time_plain_timefunc" ON PUBLIC.hyper_timefunc (to_timestamp(time) DESC, series_0);
SELECT * FROM create_hypertable('"public"."hyper_timefunc"'::regclass, 'time'::name, number_partitions => 1, create_default_indexes=>false, time_partitioning_func => 'unix_to_timestamp');
hypertable_id | schema_name | table_name | created
---------------+-------------+----------------+---------
5 | public | hyper_timefunc | t
(1 row)
INSERT INTO hyper_timefunc SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(0,10000) ser;
INSERT INTO hyper_timefunc SELECT ser, ser, ser+10000, sqrt(ser::numeric) FROM generate_series(10001,20000) ser;
ANALYZE plain_table;
ANALYZE hyper_timefunc;
ANALYZE hyper_1;
ANALYZE hyper_1_tz;
ANALYZE hyper_1_int;
ANALYZE hyper_1_date;
\ir :TEST_QUERY_NAME
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
SHOW timescaledb.enable_optimizations;
timescaledb.enable_optimizations
----------------------------------
on
(1 row)
--non-aggregates use MergeAppend in both optimized and non-optimized
:PREFIX SELECT * FROM hyper_1 ORDER BY "time" DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------
Limit
-> Custom Scan (ChunkAppend) on hyper_1
Order: hyper_1."time" DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(4 rows)
:PREFIX SELECT * FROM hyper_timefunc ORDER BY unix_to_timestamp("time") DESC limit 2;
QUERY PLAN
-----------------------------------------------------------------------------------
Limit
-> Index Scan using _hyper_5_19_chunk_time_plain_timefunc on _hyper_5_19_chunk
(2 rows)
--Aggregates use MergeAppend only in optimized
:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('minute'::text, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: date_trunc('minute'::text, hyper_1."time") DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(6 rows)
:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1_date GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('minute'::text, (_hyper_4_6_chunk."time")::timestamp with time zone))
-> Result
-> Merge Append
Sort Key: (date_trunc('minute'::text, (_hyper_4_6_chunk."time")::timestamp with time zone)) DESC
-> Index Scan using _hyper_4_6_chunk_time_plain_date on _hyper_4_6_chunk
-> Index Scan using _hyper_4_7_chunk_time_plain_date on _hyper_4_7_chunk
-> Index Scan using _hyper_4_8_chunk_time_plain_date on _hyper_4_8_chunk
-> Index Scan using _hyper_4_9_chunk_time_plain_date on _hyper_4_9_chunk
-> Index Scan using _hyper_4_10_chunk_time_plain_date on _hyper_4_10_chunk
-> Index Scan using _hyper_4_11_chunk_time_plain_date on _hyper_4_11_chunk
-> Index Scan using _hyper_4_12_chunk_time_plain_date on _hyper_4_12_chunk
-> Index Scan using _hyper_4_13_chunk_time_plain_date on _hyper_4_13_chunk
-> Index Scan using _hyper_4_14_chunk_time_plain_date on _hyper_4_14_chunk
-> Index Scan using _hyper_4_15_chunk_time_plain_date on _hyper_4_15_chunk
-> Index Scan using _hyper_4_16_chunk_time_plain_date on _hyper_4_16_chunk
-> Index Scan using _hyper_4_17_chunk_time_plain_date on _hyper_4_17_chunk
-> Index Scan using _hyper_4_18_chunk_time_plain_date on _hyper_4_18_chunk
(19 rows)
--the minute and second results should differ
:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('minute'::text, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: date_trunc('minute'::text, hyper_1."time") DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(6 rows)
:PREFIX SELECT date_trunc('second', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('second'::text, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: date_trunc('second'::text, hyper_1."time") DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(6 rows)
--test that it still works correctly when the index on time is used by a constraint
:PREFIX
SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_1
WHERE time < to_timestamp(900)
GROUP BY t
ORDER BY t DESC
LIMIT 2;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (date_trunc('minute'::text, hyper_1."time")) DESC
-> HashAggregate
Group Key: date_trunc('minute'::text, hyper_1."time")
-> Custom Scan (ChunkAppend) on hyper_1
Chunks excluded during startup: 0
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
Index Cond: ("time" < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone)
(9 rows)
--test on table with time partitioning function. Currently not
--optimized to use index for ordering since the index is an expression
--on time (e.g., timefunc(time)), and we currently don't handle that
--case.
:PREFIX
SELECT date_trunc('minute', to_timestamp(time)) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_timefunc
WHERE to_timestamp(time) < to_timestamp(900)
GROUP BY t
ORDER BY t DESC
LIMIT 2;
QUERY PLAN
-------------------------------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (date_trunc('minute'::text, to_timestamp(_hyper_5_19_chunk."time"))) DESC
-> HashAggregate
Group Key: date_trunc('minute'::text, to_timestamp(_hyper_5_19_chunk."time"))
-> Result
-> Index Scan using _hyper_5_19_chunk_time_plain_timefunc on _hyper_5_19_chunk
Index Cond: (to_timestamp("time") < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone)
(8 rows)
BEGIN;
--test that it still works with an expression index on date_trunc.
DROP INDEX "time_plain";
CREATE INDEX "time_trunc" ON PUBLIC.hyper_1 (date_trunc('minute', time));
ANALYZE hyper_1;
:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('minute'::text, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: date_trunc('minute'::text, hyper_1."time") DESC
-> Index Scan Backward using _hyper_1_1_chunk_time_trunc on _hyper_1_1_chunk
(6 rows)
--test that it works with both indexes
CREATE INDEX "time_plain" ON PUBLIC.hyper_1 (time DESC, series_0);
ANALYZE hyper_1;
:PREFIX SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2) FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
---------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (date_trunc('minute'::text, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: date_trunc('minute'::text, hyper_1."time") DESC
-> Index Scan Backward using _hyper_1_1_chunk_time_trunc on _hyper_1_1_chunk
(6 rows)
:PREFIX SELECT time_bucket('1 minute', time) t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric, 5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, hyper_1."time"))
-> Custom Scan (ChunkAppend) on hyper_1
Order: time_bucket('@ 1 min'::interval, hyper_1."time") DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(6 rows)
:PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: ((time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval))
-> Result
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(5 rows)
:PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, (hyper_1."time" - '@ 30 secs'::interval)))
-> Custom Scan (ChunkAppend) on hyper_1
Order: time_bucket('@ 1 min'::interval, (hyper_1."time" - '@ 30 secs'::interval)) DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(6 rows)
:PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') + INTERVAL '30 seconds' t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: ((time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval))
-> Result
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(5 rows)
:PREFIX SELECT time_bucket('1 minute', time) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_1_tz GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
---------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, hyper_1_tz."time"))
-> Custom Scan (ChunkAppend) on hyper_1_tz
Order: time_bucket('@ 1 min'::interval, hyper_1_tz."time") DESC
-> Index Scan using _hyper_2_2_chunk_time_plain_tz on _hyper_2_2_chunk
(6 rows)
:PREFIX SELECT time_bucket('1 minute', time::timestamp) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_1_tz GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket('@ 1 min'::interval, (_hyper_2_2_chunk."time")::timestamp without time zone))
-> Result
-> Index Scan using _hyper_2_2_chunk_time_plain_tz on _hyper_2_2_chunk
(5 rows)
:PREFIX SELECT time_bucket(10, time) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_1_int GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
----------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket(10, hyper_1_int."time"))
-> Custom Scan (ChunkAppend) on hyper_1_int
Order: time_bucket(10, hyper_1_int."time") DESC
-> Index Scan using _hyper_3_5_chunk_time_plain_int on _hyper_3_5_chunk
-> Index Scan using _hyper_3_4_chunk_time_plain_int on _hyper_3_4_chunk
-> Index Scan using _hyper_3_3_chunk_time_plain_int on _hyper_3_3_chunk
(8 rows)
:PREFIX SELECT time_bucket(10, time, 2) t, avg(series_0), min(series_1), avg(series_2)
FROM hyper_1_int GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN
----------------------------------------------------------------------------------------
Limit
-> GroupAggregate
Group Key: (time_bucket(10, hyper_1_int."time", 2))
-> Custom Scan (ChunkAppend) on hyper_1_int
Order: time_bucket(10, hyper_1_int."time", 2) DESC
-> Index Scan using _hyper_3_5_chunk_time_plain_int on _hyper_3_5_chunk
-> Index Scan using _hyper_3_4_chunk_time_plain_int on _hyper_3_4_chunk
-> Index Scan using _hyper_3_3_chunk_time_plain_int on _hyper_3_3_chunk
(8 rows)
ROLLBACK;
-- sort order optimization should not be applied to non-hypertables
:PREFIX
SELECT date_trunc('minute', time) t, avg(series_0), min(series_1), avg(series_2)
FROM plain_table
WHERE time < to_timestamp(900)
GROUP BY t
ORDER BY t DESC
LIMIT 2;
QUERY PLAN
-----------------------------------------------------------------------------------------------------
Limit
-> Sort
Sort Key: (date_trunc('minute'::text, "time")) DESC
-> HashAggregate
Group Key: date_trunc('minute'::text, "time")
-> Index Scan using time_plain_plain_table on plain_table
Index Cond: ("time" < 'Wed Dec 31 16:15:00 1969 PST'::timestamp with time zone)
(7 rows)
--generate the results into two different files
\set ECHO errors
--- Unoptimized result
+++ Optimized result
@@ -1,6 +1,6 @@
timescaledb.enable_optimizations
----------------------------------
- off
+ on
(1 row)
time | series_0 | series_1 | series_2
?column?
----------
Done
(1 row)


@ -0,0 +1,73 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\set PREFIX 'EXPLAIN (COSTS OFF) '
CREATE TABLE order_test(time int NOT NULL, device_id int, value float);
CREATE INDEX ON order_test(time,device_id);
CREATE INDEX ON order_test(device_id,time);
SELECT create_hypertable('order_test','time',chunk_time_interval:=1000);
create_hypertable
-------------------------
(1,public,order_test,t)
(1 row)
INSERT INTO order_test SELECT 0,10,0.5;
INSERT INTO order_test SELECT 1,9,0.5;
INSERT INTO order_test SELECT 2,8,0.5;
-- we want to see here that index scans are possible for the chosen expressions
-- so we disable seqscan so we don't need to worry about other factors that would
-- make PostgreSQL prefer seqscan over index scan
SET enable_seqscan TO off;
-- test sort optimization with a single-member ORDER BY
SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 1;
time_bucket | device_id | value
-------------+-----------+-------
0 | 10 | 0.5
0 | 9 | 0.5
0 | 8 | 0.5
(3 rows)
-- should use index scan
:PREFIX SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 1;
QUERY PLAN
------------------------------------------------------------------------------------------
Result
-> Index Scan Backward using _hyper_1_1_chunk_order_test_time_idx on _hyper_1_1_chunk
(2 rows)
-- test sort optimization with ordering by multiple columns and time_bucket not last
SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 1,2;
time_bucket | device_id | value
-------------+-----------+-------
0 | 8 | 0.5
0 | 9 | 0.5
0 | 10 | 0.5
(3 rows)
-- must not use index scan
:PREFIX SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 1,2;
QUERY PLAN
------------------------------------------------------------------------------------
Sort
Sort Key: (time_bucket(10, _hyper_1_1_chunk."time")), _hyper_1_1_chunk.device_id
-> Result
-> Seq Scan on _hyper_1_1_chunk
(4 rows)
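-- Illustrative sketch (hypothetical index, not part of the test): an
-- expression index matching the full sort key could serve ORDER BY 1,2
-- directly, since time_bucket on integers is immutable and indexable.
CREATE INDEX ON order_test (time_bucket(10, time), device_id);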
-- test sort optimization with ordering by multiple columns and time_bucket as last member
SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 2,1;
time_bucket | device_id | value
-------------+-----------+-------
0 | 8 | 0.5
0 | 9 | 0.5
0 | 10 | 0.5
(3 rows)
-- should use index scan
:PREFIX SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 2,1;
QUERY PLAN
-------------------------------------------------------------------------------------------
Result
-> Index Scan using _hyper_1_1_chunk_order_test_device_id_time_idx on _hyper_1_1_chunk
(2 rows)
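-- A hedged extrapolation of the rule above (hypothetical index, not part of the
-- original test): ORDER BY members are matched left-to-right against an index,
-- with time_bucket usable only as the last member, so an index on (value, time)
-- should make the following eligible for an index scan as well.
CREATE INDEX ON order_test(value, time);
:PREFIX SELECT time_bucket(10,time),device_id,value FROM order_test ORDER BY 3,1;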
@@ -0,0 +1,279 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
\o /dev/null
\ir include/insert_two_partitions.sql
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
CREATE TABLE PUBLIC."two_Partitions" (
"timeCustom" BIGINT NOT NULL,
device_id TEXT NOT NULL,
series_0 DOUBLE PRECISION NULL,
series_1 DOUBLE PRECISION NULL,
series_2 DOUBLE PRECISION NULL,
series_bool BOOLEAN NULL
);
CREATE INDEX ON PUBLIC."two_Partitions" (device_id, "timeCustom" DESC NULLS LAST) WHERE device_id IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_0) WHERE series_0 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_1) WHERE series_1 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_2) WHERE series_2 IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, series_bool) WHERE series_bool IS NOT NULL;
CREATE INDEX ON PUBLIC."two_Partitions" ("timeCustom" DESC NULLS LAST, device_id);
SELECT * FROM create_hypertable('"public"."two_Partitions"'::regclass, 'timeCustom'::name, 'device_id'::name, associated_schema_name=>'_timescaledb_internal'::text, number_partitions => 2, chunk_time_interval=>_timescaledb_internal.interval_to_usec('1 month'));
\set QUIET off
BEGIN;
\COPY public."two_Partitions" FROM 'data/ds1_dev1_1.tsv' NULL AS '';
COMMIT;
INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES
(1257987600000000000, 'dev1', 1.5, 1),
(1257987600000000000, 'dev1', 1.5, 2),
(1257894000000000000, 'dev2', 1.5, 1),
(1257894002000000000, 'dev1', 2.5, 3);
INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES
(1257894000000000000, 'dev2', 1.5, 2);
\set QUIET on
\o
SELECT * FROM PUBLIC."two_Partitions";
timeCustom | device_id | series_0 | series_1 | series_2 | series_bool
---------------------+-----------+----------+----------+----------+-------------
1257894000000000000 | dev1 | 1.5 | 1 | 2 | t
1257894000000000000 | dev1 | 1.5 | 2 | |
1257894000000001000 | dev1 | 2.5 | 3 | |
1257894001000000000 | dev1 | 3.5 | 4 | |
1257894002000000000 | dev1 | 5.5 | 6 | | t
1257894002000000000 | dev1 | 5.5 | 7 | | f
1257894002000000000 | dev1 | 2.5 | 3 | |
1257897600000000000 | dev1 | 4.5 | 5 | | f
1257987600000000000 | dev1 | 1.5 | 1 | |
1257987600000000000 | dev1 | 1.5 | 2 | |
1257894000000000000 | dev2 | 1.5 | 1 | |
1257894000000000000 | dev2 | 1.5 | 2 | |
(12 rows)
EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions";
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Append
-> Seq Scan on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.series_0, _hyper_1_1_chunk.series_1, _hyper_1_1_chunk.series_2, _hyper_1_1_chunk.series_bool
-> Seq Scan on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.series_0, _hyper_1_2_chunk.series_1, _hyper_1_2_chunk.series_2, _hyper_1_2_chunk.series_bool
-> Seq Scan on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.series_0, _hyper_1_3_chunk.series_1, _hyper_1_3_chunk.series_2, _hyper_1_3_chunk.series_bool
-> Seq Scan on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
(9 rows)
\echo "The following queries should NOT scan two_Partitions._hyper_1_1_chunk"
"The following queries should NOT scan two_Partitions._hyper_1_1_chunk"
EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE device_id = 'dev2';
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Index Scan using "_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
Index Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text)
(3 rows)
EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE device_id = 'dev'||'2';
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Index Scan using "_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
Index Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text)
(3 rows)
EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE 'dev'||'2' = device_id;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Index Scan using "_hyper_1_4_chunk_two_Partitions_device_id_timeCustom_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
Index Cond: (_hyper_1_4_chunk.device_id = 'dev2'::text)
(3 rows)
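-- A hedged counter-example (not part of the original test): a predicate that
-- cannot be reduced to a constant at plan time, e.g. one involving a volatile
-- function, prevents plan-time chunk exclusion, so both space partitions would
-- remain in the plan.
EXPLAIN (verbose ON, costs off) SELECT * FROM PUBLIC."two_Partitions" WHERE device_id = substr(md5(random()::text), 1, 4);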
--test integer partition key
CREATE TABLE "int_part"(time timestamp, object_id int, temp float);
SELECT create_hypertable('"int_part"', 'time', 'object_id', 2);
NOTICE: adding not-null constraint to column "time"
create_hypertable
-----------------------
(2,public,int_part,t)
(1 row)
INSERT INTO "int_part" VALUES('2017-01-20T09:00:01', 1, 22.5);
INSERT INTO "int_part" VALUES('2017-01-20T09:00:01', 2, 22.5);
--check that there are two chunks
SELECT * FROM test.show_subtables('int_part');
Child | Tablespace
----------------------------------------+------------
_timescaledb_internal._hyper_2_5_chunk |
_timescaledb_internal._hyper_2_6_chunk |
(2 rows)
SELECT * FROM "int_part" WHERE object_id = 1;
time | object_id | temp
--------------------------+-----------+------
Fri Jan 20 09:00:01 2017 | 1 | 22.5
(1 row)
--make sure this touches only one partition
EXPLAIN (verbose ON, costs off) SELECT * FROM "int_part" WHERE object_id = 1;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Index Scan using _hyper_2_5_chunk_int_part_object_id_time_idx on _timescaledb_internal._hyper_2_5_chunk
Output: _hyper_2_5_chunk."time", _hyper_2_5_chunk.object_id, _hyper_2_5_chunk.temp
Index Cond: (_hyper_2_5_chunk.object_id = 1)
(3 rows)
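-- A hedged sketch (not part of the original test): an IN list covering values
-- that hash to both space partitions should touch both chunks.
EXPLAIN (verbose ON, costs off) SELECT * FROM "int_part" WHERE object_id IN (1, 2);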
--Need to verify that space partitions are currently pruned in this query
--EXPLAIN (verbose ON, costs off) SELECT * FROM "two_Partitions" WHERE device_id IN ('dev2', 'dev21');
\echo "The following shows non-aggregated queries with time desc using merge append"
"The following shows non-aggregated queries with time desc using merge append"
EXPLAIN (verbose ON, costs off)SELECT * FROM PUBLIC."two_Partitions" ORDER BY "timeCustom" DESC NULLS LAST limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
-> Custom Scan (ChunkAppend) on public."two_Partitions"
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
Order: "two_Partitions"."timeCustom" DESC NULLS LAST
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.series_0, _hyper_1_3_chunk.series_1, _hyper_1_3_chunk.series_2, _hyper_1_3_chunk.series_bool
-> Index Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.series_0, _hyper_1_2_chunk.series_1, _hyper_1_2_chunk.series_2, _hyper_1_2_chunk.series_bool
-> Merge Append
Sort Key: _hyper_1_4_chunk."timeCustom" DESC NULLS LAST
-> Index Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
-> Index Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.series_0, _hyper_1_1_chunk.series_1, _hyper_1_1_chunk.series_2, _hyper_1_1_chunk.series_bool
(17 rows)
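-- A hedged variation (not part of the original test): the ordered ChunkAppend
-- should apply in the ascending direction as well, with the chunk order and the
-- index scan direction reversed.
EXPLAIN (verbose ON, costs off)SELECT * FROM PUBLIC."two_Partitions" ORDER BY "timeCustom" ASC NULLS FIRST limit 2;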
--shows that more specific indexes are used if the WHERE clause "matches"; the series_1 index is used here.
EXPLAIN (verbose ON, costs off)SELECT * FROM PUBLIC."two_Partitions" WHERE series_1 IS NOT NULL ORDER BY "timeCustom" DESC NULLS LAST limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
-> Custom Scan (ChunkAppend) on public."two_Partitions"
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
Order: "two_Partitions"."timeCustom" DESC NULLS LAST
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.series_0, _hyper_1_3_chunk.series_1, _hyper_1_3_chunk.series_2, _hyper_1_3_chunk.series_bool
-> Index Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.series_0, _hyper_1_2_chunk.series_1, _hyper_1_2_chunk.series_2, _hyper_1_2_chunk.series_bool
-> Merge Append
Sort Key: _hyper_1_4_chunk."timeCustom" DESC NULLS LAST
-> Index Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
-> Index Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.series_0, _hyper_1_1_chunk.series_1, _hyper_1_1_chunk.series_2, _hyper_1_1_chunk.series_bool
(17 rows)
--here the "match" is the implication series_1 > 1 => series_1 IS NOT NULL
EXPLAIN (verbose ON, costs off)SELECT * FROM PUBLIC."two_Partitions" WHERE series_1 > 1 ORDER BY "timeCustom" DESC NULLS LAST limit 2;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Limit
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
-> Custom Scan (ChunkAppend) on public."two_Partitions"
Output: "two_Partitions"."timeCustom", "two_Partitions".device_id, "two_Partitions".series_0, "two_Partitions".series_1, "two_Partitions".series_2, "two_Partitions".series_bool
Order: "two_Partitions"."timeCustom" DESC NULLS LAST
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.device_id, _hyper_1_3_chunk.series_0, _hyper_1_3_chunk.series_1, _hyper_1_3_chunk.series_2, _hyper_1_3_chunk.series_bool
Index Cond: (_hyper_1_3_chunk.series_1 > '1'::double precision)
-> Index Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.device_id, _hyper_1_2_chunk.series_0, _hyper_1_2_chunk.series_1, _hyper_1_2_chunk.series_2, _hyper_1_2_chunk.series_bool
Index Cond: (_hyper_1_2_chunk.series_1 > '1'::double precision)
-> Merge Append
Sort Key: _hyper_1_4_chunk."timeCustom" DESC NULLS LAST
-> Index Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.device_id, _hyper_1_4_chunk.series_0, _hyper_1_4_chunk.series_1, _hyper_1_4_chunk.series_2, _hyper_1_4_chunk.series_bool
Index Cond: (_hyper_1_4_chunk.series_1 > '1'::double precision)
-> Index Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_series_1_idx" on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.device_id, _hyper_1_1_chunk.series_0, _hyper_1_1_chunk.series_1, _hyper_1_1_chunk.series_2, _hyper_1_1_chunk.series_bool
Index Cond: (_hyper_1_1_chunk.series_1 > '1'::double precision)
(21 rows)
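-- A hedged counter-example (not part of the original test): a predicate that
-- does not imply series_1 IS NOT NULL, such as an IS NULL test, cannot use the
-- partial series_1 index and should fall back to another index.
EXPLAIN (verbose ON, costs off)SELECT * FROM PUBLIC."two_Partitions" WHERE series_1 IS NULL ORDER BY "timeCustom" DESC NULLS LAST limit 2;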
--note that without a transform on the time column the ordered scan works too
EXPLAIN (verbose ON, costs off)SELECT "timeCustom" t, min(series_0) FROM PUBLIC."two_Partitions" GROUP BY t ORDER BY t DESC NULLS LAST limit 2;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------------------------------
Limit
Output: "two_Partitions"."timeCustom", (min("two_Partitions".series_0))
-> GroupAggregate
Output: "two_Partitions"."timeCustom", min("two_Partitions".series_0)
Group Key: "two_Partitions"."timeCustom"
-> Custom Scan (ChunkAppend) on public."two_Partitions"
Output: "two_Partitions"."timeCustom", "two_Partitions".series_0
Order: "two_Partitions"."timeCustom" DESC NULLS LAST
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan using "_hyper_1_3_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.series_0
-> Index Scan using "_hyper_1_2_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.series_0
-> Merge Append
Sort Key: _hyper_1_4_chunk."timeCustom" DESC NULLS LAST
-> Index Scan using "_hyper_1_4_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.series_0
-> Index Scan using "_hyper_1_1_chunk_two_Partitions_timeCustom_device_id_idx" on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.series_0
(20 rows)
--The query should still use the index on timeCustom, even though the GROUP BY/ORDER BY is on the transformed time 't'.
--However, current query plans show that it does not.
EXPLAIN (verbose ON, costs off)SELECT "timeCustom"/10 t, min(series_0) FROM PUBLIC."two_Partitions" GROUP BY t ORDER BY t DESC NULLS LAST limit 2;
QUERY PLAN
--------------------------------------------------------------------------------------------------
Limit
Output: ((_hyper_1_1_chunk."timeCustom" / 10)), (min(_hyper_1_1_chunk.series_0))
-> Sort
Output: ((_hyper_1_1_chunk."timeCustom" / 10)), (min(_hyper_1_1_chunk.series_0))
Sort Key: ((_hyper_1_1_chunk."timeCustom" / 10)) DESC NULLS LAST
-> HashAggregate
Output: ((_hyper_1_1_chunk."timeCustom" / 10)), min(_hyper_1_1_chunk.series_0)
Group Key: (_hyper_1_1_chunk."timeCustom" / 10)
-> Result
Output: (_hyper_1_1_chunk."timeCustom" / 10), _hyper_1_1_chunk.series_0
-> Append
-> Seq Scan on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.series_0
(19 rows)
EXPLAIN (verbose ON, costs off)SELECT "timeCustom"%10 t, min(series_0) FROM PUBLIC."two_Partitions" GROUP BY t ORDER BY t DESC NULLS LAST limit 2;
QUERY PLAN
--------------------------------------------------------------------------------------------------------
Limit
Output: ((_hyper_1_1_chunk."timeCustom" % '10'::bigint)), (min(_hyper_1_1_chunk.series_0))
-> Sort
Output: ((_hyper_1_1_chunk."timeCustom" % '10'::bigint)), (min(_hyper_1_1_chunk.series_0))
Sort Key: ((_hyper_1_1_chunk."timeCustom" % '10'::bigint)) DESC NULLS LAST
-> HashAggregate
Output: ((_hyper_1_1_chunk."timeCustom" % '10'::bigint)), min(_hyper_1_1_chunk.series_0)
Group Key: (_hyper_1_1_chunk."timeCustom" % '10'::bigint)
-> Result
Output: (_hyper_1_1_chunk."timeCustom" % '10'::bigint), _hyper_1_1_chunk.series_0
-> Append
-> Seq Scan on _timescaledb_internal._hyper_1_1_chunk
Output: _hyper_1_1_chunk."timeCustom", _hyper_1_1_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_2_chunk
Output: _hyper_1_2_chunk."timeCustom", _hyper_1_2_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_3_chunk
Output: _hyper_1_3_chunk."timeCustom", _hyper_1_3_chunk.series_0
-> Seq Scan on _timescaledb_internal._hyper_1_4_chunk
Output: _hyper_1_4_chunk."timeCustom", _hyper_1_4_chunk.series_0
(19 rows)
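-- A hedged observation (not part of the original test): "timeCustom"/10 is
-- monotonic in "timeCustom", so the index order could in principle be reused,
-- while "timeCustom"%10 is not order-preserving and genuinely needs the Sort.
-- Expressing the same bucketing via time_bucket, which the planner does
-- recognize, should restore the ordered scan:
EXPLAIN (verbose ON, costs off)SELECT time_bucket(10,"timeCustom") t, min(series_0) FROM PUBLIC."two_Partitions" GROUP BY t ORDER BY t DESC NULLS LAST limit 2;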
@@ -2160,6 +2160,7 @@ rollback to settings;
savepoint settings;
set local max_parallel_workers_per_gather = 0;
set local work_mem = '128kB';
set local enable_mergejoin to false;
:PREFIX
select count(*) from simple r join extremely_skewed s using (id);
select count(*) from simple r join extremely_skewed s using (id);