INSERT .. SELECT on distributed hypertable fails on PG15

An INSERT .. SELECT query containing distributed hypertables generates a plan
with a DataNodeCopy node, which is not supported. The issue is in the function
tsl_create_distributed_insert_path(), where we decide whether to
generate a DataNodeCopy or a DataNodeDispatch node based on the kind of
query. On PG15, for an INSERT .. SELECT query, the timescaledb planner generates
DataNodeCopy because rte->subquery is set to NULL. This is due to a commit
in PG15 where rte->subquery is set to NULL as part of a fix.

This patch checks whether the SELECT subquery contains distributed hypertables
by looking into root->parse->jointree, which represents the subquery.

Fixes #4983
This commit is contained in:
Bharathy 2022-11-17 18:44:09 +05:30
parent 1e3200be7d
commit bfa641a81c
11 changed files with 921 additions and 185 deletions

View File

@ -153,9 +153,9 @@ m["include"].append(
"snapshot": "snapshot", "snapshot": "snapshot",
"tsdb_build_args": "-DASSERTIONS=ON -DREQUIRE_ALL_TESTS=ON -DEXPERIMENTAL=ON", "tsdb_build_args": "-DASSERTIONS=ON -DREQUIRE_ALL_TESTS=ON -DEXPERIMENTAL=ON",
# below tests are tracked as part of #4838 # below tests are tracked as part of #4838
"installcheck_args": "SKIPS='003_connections_privs 001_simple_multinode 004_multinode_rdwr_1pc dist_hypertable-15 bgw_custom cagg_dump dist_move_chunk' " "installcheck_args": "SKIPS='003_connections_privs 001_simple_multinode 004_multinode_rdwr_1pc bgw_custom cagg_dump dist_move_chunk' "
# below tests are tracked as part of #4835 # below tests are tracked as part of #4835
"IGNORES='telemetry_stats dist_query dist_partial_agg plan_hashagg partialize_finalize dist_fetcher_type dist_remote_error jit-15 " "IGNORES='telemetry_stats dist_query dist_partial_agg plan_hashagg partialize_finalize dist_fetcher_type "
# below tests are tracked as part of #4837 # below tests are tracked as part of #4837
"remote_txn'", "remote_txn'",
} }

View File

@ -294,13 +294,15 @@ BEGIN;
:PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5) :PREFIX SELECT time_bucket('1 minute', time, INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;
QUERY PLAN QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------ ---------------------------------------------------------------------------------------------------------
Limit Limit
-> GroupAggregate -> GroupAggregate
Group Key: ((time_bucket('@ 1 min'::interval, (_hyper_1_1_chunk."time" - '@ 30 secs'::interval)) + '@ 30 secs'::interval)) Group Key: (time_bucket('@ 1 min'::interval, hyper_1."time", '@ 30 secs'::interval))
-> Result -> Result
-> Custom Scan (ChunkAppend) on hyper_1
Order: time_bucket('@ 1 min'::interval, hyper_1."time", '@ 30 secs'::interval) DESC
-> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk -> Index Scan using _hyper_1_1_chunk_time_plain on _hyper_1_1_chunk
(5 rows) (7 rows)
:PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5) :PREFIX SELECT time_bucket('1 minute', time - INTERVAL '30 seconds') t, avg(series_0), min(series_1), trunc(avg(series_2)::numeric,5)
FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2; FROM hyper_1 GROUP BY t ORDER BY t DESC limit 2;

View File

@ -315,8 +315,31 @@ tsl_create_distributed_insert_path(PlannerInfo *root, ModifyTablePath *mtpath, I
if (rte->rtekind == RTE_SUBQUERY) if (rte->rtekind == RTE_SUBQUERY)
{ {
distributed = false; distributed = false;
if (distributed_rtes_walker((Node *) rte->subquery, &distributed) && Node *jtnode = (Node *) root->parse->jointree;
distributed) if (IsA(jtnode, FromExpr))
{
FromExpr *f = (FromExpr *) jtnode;
ListCell *l;
foreach (l, f->fromlist)
{
Node *n = (Node *) lfirst(l);
if (IsA(n, RangeTblRef))
{
RangeTblEntry *r =
planner_rt_fetch(((RangeTblRef *) n)->rtindex, root);
switch (r->rtekind)
{
case RTE_RELATION:
distributed_rtes_walker((Node *) r, &distributed);
break;
case RTE_SUBQUERY:
distributed_rtes_walker((Node *) r->subquery,
&distributed);
break;
default:
break;
}
if (distributed)
{ {
copy_possible = false; copy_possible = false;
break; break;
@ -326,6 +349,9 @@ tsl_create_distributed_insert_path(PlannerInfo *root, ModifyTablePath *mtpath, I
} }
} }
} }
}
}
}
if (copy_possible) if (copy_possible)
return data_node_copy_path_create(root, mtpath, hypertable_rti, subplan_index); return data_node_copy_path_create(root, mtpath, hypertable_rti, subplan_index);

View File

@ -610,7 +610,7 @@ SELECT time_bucket('3 hours', time) AS time, device, avg(temp) AS avg_temp
FROM disttable GROUP BY 1, 2 FROM disttable GROUP BY 1, 2
ORDER BY 1; ORDER BY 1;
QUERY PLAN QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate GroupAggregate
Output: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, avg(disttable.temp) Output: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, avg(disttable.temp)
Group Key: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device Group Key: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device
@ -618,22 +618,28 @@ ORDER BY 1;
Output: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, disttable.temp Output: (time_bucket('@ 3 hours'::interval, disttable."time")), disttable.device, disttable.temp
-> Merge Append -> Merge Append
Sort Key: (time_bucket('@ 3 hours'::interval, disttable_1."time")), disttable_1.device Sort Key: (time_bucket('@ 3 hours'::interval, disttable_1."time")), disttable_1.device
-> Custom Scan (DataNodeScan) on public.disttable disttable_1 -> Result
Output: time_bucket('@ 3 hours'::interval, disttable_1."time"), disttable_1.device, disttable_1.temp Output: time_bucket('@ 3 hours'::interval, disttable_1."time"), disttable_1.device, disttable_1.temp
-> Custom Scan (DataNodeScan) on public.disttable disttable_1
Output: disttable_1."time", disttable_1.device, disttable_1.temp
Data node: db_dist_hypertable_1 Data node: db_dist_hypertable_1
Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk Chunks: _dist_hyper_1_1_chunk, _dist_hyper_1_4_chunk
Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST
-> Custom Scan (DataNodeScan) on public.disttable disttable_2 -> Result
Output: time_bucket('@ 3 hours'::interval, disttable_2."time"), disttable_2.device, disttable_2.temp Output: time_bucket('@ 3 hours'::interval, disttable_2."time"), disttable_2.device, disttable_2.temp
-> Custom Scan (DataNodeScan) on public.disttable disttable_2
Output: disttable_2."time", disttable_2.device, disttable_2.temp
Data node: db_dist_hypertable_2 Data node: db_dist_hypertable_2
Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk Chunks: _dist_hyper_1_3_chunk, _dist_hyper_1_5_chunk
Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST
-> Custom Scan (DataNodeScan) on public.disttable disttable_3 -> Result
Output: time_bucket('@ 3 hours'::interval, disttable_3."time"), disttable_3.device, disttable_3.temp Output: time_bucket('@ 3 hours'::interval, disttable_3."time"), disttable_3.device, disttable_3.temp
-> Custom Scan (DataNodeScan) on public.disttable disttable_3
Output: disttable_3."time", disttable_3.device, disttable_3.temp
Data node: db_dist_hypertable_3 Data node: db_dist_hypertable_3
Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk Chunks: _dist_hyper_1_2_chunk, _dist_hyper_1_6_chunk
Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST Remote SQL: SELECT "time", device, temp FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[1, 2]) ORDER BY public.time_bucket('03:00:00'::interval, "time") ASC NULLS LAST, device ASC NULLS LAST
(22 rows) (28 rows)
-- Execute some queries on the frontend and return the results -- Execute some queries on the frontend and return the results
SELECT * FROM disttable; SELECT * FROM disttable;
@ -2143,13 +2149,13 @@ SELECT * FROM _timescaledb_catalog.hypertable;
(3 rows) (3 rows)
SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func
----+---------------+------------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+-------------------------+------------------ ----+---------------+------------------+--------------------------+---------+------------+--------------------------+--------------------+-----------------+--------------------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | |
2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | 2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | |
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | |
5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | |
6 | 4 | __region | text | f | 4 | _timescaledb_internal | get_partition_hash | | | 6 | 4 | __region | text | f | 4 | _timescaledb_internal | get_partition_hash | | | |
(5 rows) (5 rows)
SELECT * FROM test.show_triggers('"Table\\Schema"."Param_Table"'); SELECT * FROM test.show_triggers('"Table\\Schema"."Param_Table"');
@ -2178,13 +2184,13 @@ id|schema_name |table_name |associated_schema_name|associated_table_prefix|
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | 5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | |
(5 rows) (5 rows)
@ -2212,13 +2218,13 @@ id|schema_name |table_name |associated_schema_name|associated_table_prefix|
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
7| 4|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | 7| 4|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | |
(5 rows) (5 rows)
@ -2246,13 +2252,13 @@ id|schema_name |table_name |associated_schema_name|associated_table_prefix|
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | 5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | |
(5 rows) (5 rows)
@ -2374,17 +2380,17 @@ SELECT * FROM dimented_table ORDER BY time;
(1 row) (1 row)
SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func
----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+-------------------------+------------------ ----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | |
2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | 2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | |
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | |
5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | |
6 | 4 | __region | text | f | 2 | _timescaledb_internal | get_partition_hash | | | 6 | 4 | __region | text | f | 2 | _timescaledb_internal | get_partition_hash | | | |
7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | 7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | |
8 | 5 | column1 | integer | f | 4 | _timescaledb_internal | get_partition_hash | | | 8 | 5 | column1 | integer | f | 4 | _timescaledb_internal | get_partition_hash | | | |
9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | 9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | |
10 | 5 | column3 | integer | f | 4 | _timescaledb_internal | get_partition_for_key | | | 10 | 5 | column3 | integer | f | 4 | _timescaledb_internal | get_partition_for_key | | | |
(9 rows) (9 rows)
SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table'); SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table');
@ -2394,17 +2400,17 @@ SELECT * FROM attach_data_node(:'DATA_NODE_2', 'dimented_table');
(1 row) (1 row)
SELECT * FROM _timescaledb_catalog.dimension; SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | integer_now_func_schema | integer_now_func id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | compress_interval_length | integer_now_func_schema | integer_now_func
----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+-------------------------+------------------ ----+---------------+------------------+--------------------------+---------+------------+--------------------------+-----------------------+-----------------+--------------------------+-------------------------+------------------
1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | 1 | 1 | time | timestamp with time zone | t | | | | 604800000000 | | |
2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | 2 | 1 | device | integer | f | 3 | _timescaledb_internal | get_partition_hash | | | |
3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | 3 | 2 | time | timestamp with time zone | t | | | | 604800000000 | | |
5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | 5 | 4 | time Col %#^#@$# | timestamp with time zone | t | | | | 604800000000 | | |
6 | 4 | __region | text | f | 2 | _timescaledb_internal | get_partition_hash | | | 6 | 4 | __region | text | f | 2 | _timescaledb_internal | get_partition_hash | | | |
7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | 7 | 5 | time | timestamp with time zone | t | | | | 604800000000 | | |
8 | 5 | column1 | integer | f | 4 | _timescaledb_internal | get_partition_hash | | | 8 | 5 | column1 | integer | f | 4 | _timescaledb_internal | get_partition_hash | | | |
9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | 9 | 5 | column2 | timestamp with time zone | t | | | | 604800000000 | | |
10 | 5 | column3 | integer | f | 4 | _timescaledb_internal | get_partition_for_key | | | 10 | 5 | column3 | integer | f | 4 | _timescaledb_internal | get_partition_for_key | | | |
(9 rows) (9 rows)
-- ensure data node has new dimensions -- ensure data node has new dimensions
@ -2414,47 +2420,47 @@ $$);
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash | | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash | | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash | | | 5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash | | | |
6| 4|time |timestamp with time zone|t | | | | 604800000000| | 6| 4|time |timestamp with time zone|t | | | | 604800000000| | |
7| 4|column1 |integer |f | 4|_timescaledb_internal |get_partition_hash | | | 7| 4|column1 |integer |f | 4|_timescaledb_internal |get_partition_hash | | | |
8| 4|column2 |timestamp with time zone|t | | | | 604800000000| | 8| 4|column2 |timestamp with time zone|t | | | | 604800000000| | |
9| 4|column3 |integer |f | 4|_timescaledb_internal |get_partition_for_key| | | 9| 4|column3 |integer |f | 4|_timescaledb_internal |get_partition_for_key| | | |
(9 rows) (9 rows)
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+---------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash | | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash | | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 6| 4|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
7| 4|__region |text |f | 4|_timescaledb_internal |get_partition_hash | | | 7| 4|__region |text |f | 4|_timescaledb_internal |get_partition_hash | | | |
8| 5|time |timestamp with time zone|t | | | | 604800000000| | 8| 5|time |timestamp with time zone|t | | | | 604800000000| | |
9| 5|column1 |integer |f | 4|_timescaledb_internal |get_partition_hash | | | 9| 5|column1 |integer |f | 4|_timescaledb_internal |get_partition_hash | | | |
10| 5|column2 |timestamp with time zone|t | | | | 604800000000| | 10| 5|column2 |timestamp with time zone|t | | | | 604800000000| | |
11| 5|column3 |integer |f | 4|_timescaledb_internal |get_partition_for_key| | | 11| 5|column3 |integer |f | 4|_timescaledb_internal |get_partition_for_key| | | |
(9 rows) (9 rows)
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
SELECT * FROM _timescaledb_catalog.dimension SELECT * FROM _timescaledb_catalog.dimension
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name |column_type |aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+----------------+------------------------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
1| 1|time |timestamp with time zone|t | | | | 604800000000| | 1| 1|time |timestamp with time zone|t | | | | 604800000000| | |
2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 2| 1|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
3| 2|time |timestamp with time zone|t | | | | 604800000000| | 3| 2|time |timestamp with time zone|t | | | | 604800000000| | |
4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | 4| 3|time Col %#^#@$#|timestamp with time zone|t | | | | 604800000000| | |
5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | 5| 3|__region |text |f | 4|_timescaledb_internal |get_partition_hash| | | |
(5 rows) (5 rows)
@ -3427,9 +3433,9 @@ NOTICE: [db_dist_hypertable_1]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+----------------
20| 11|time |bigint |t | | | | 1000000| | 20| 11|time |bigint |t | | | | 1000000| | |
(1 row) (1 row)
@ -3437,9 +3443,9 @@ NOTICE: [db_dist_hypertable_2]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+----------------
20| 11|time |bigint |t | | | | 1000000| | 20| 11|time |bigint |t | | | | 1000000| | |
(1 row) (1 row)
@ -3447,9 +3453,9 @@ NOTICE: [db_dist_hypertable_3]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func|interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+-----------------+---------------+------------------------+-----------------------+----------------
14| 9|time |bigint |t | | | | 1000000| | 14| 9|time |bigint |t | | | | 1000000| | |
(1 row) (1 row)
@ -3477,10 +3483,10 @@ NOTICE: [db_dist_hypertable_1]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
21| 11|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | 21| 11|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | |
20| 11|time |bigint |t | | | | 1000000| | 20| 11|time |bigint |t | | | | 1000000| | |
(2 rows) (2 rows)
@ -3488,10 +3494,10 @@ NOTICE: [db_dist_hypertable_2]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
21| 11|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | 21| 11|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | |
20| 11|time |bigint |t | | | | 1000000| | 20| 11|time |bigint |t | | | | 1000000| | |
(2 rows) (2 rows)
@ -3499,10 +3505,10 @@ NOTICE: [db_dist_hypertable_3]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
15| 9|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | 15| 9|device |integer |f | 1|_timescaledb_internal |get_partition_hash| | | |
14| 9|time |bigint |t | | | | 1000000| | 14| 9|time |bigint |t | | | | 1000000| | |
(2 rows) (2 rows)
@ -3543,10 +3549,10 @@ NOTICE: [db_dist_hypertable_1]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_1]: NOTICE: [db_dist_hypertable_1]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
21| 11|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 21| 11|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
20| 11|time |bigint |t | | | | 2000000000|public |dummy_now 20| 11|time |bigint |t | | | | 2000000000| |public |dummy_now
(2 rows) (2 rows)
@ -3554,10 +3560,10 @@ NOTICE: [db_dist_hypertable_2]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_2]: NOTICE: [db_dist_hypertable_2]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
21| 11|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 21| 11|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
20| 11|time |bigint |t | | | | 2000000000|public |dummy_now 20| 11|time |bigint |t | | | | 2000000000| |public |dummy_now
(2 rows) (2 rows)
@ -3565,10 +3571,10 @@ NOTICE: [db_dist_hypertable_3]:
SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d SELECT d.* FROM _timescaledb_catalog.hypertable h, _timescaledb_catalog.dimension d
WHERE h.id = d.hypertable_id AND h.table_name = 'disttable' WHERE h.id = d.hypertable_id AND h.table_name = 'disttable'
NOTICE: [db_dist_hypertable_3]: NOTICE: [db_dist_hypertable_3]:
id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|integer_now_func_schema|integer_now_func id|hypertable_id|column_name|column_type|aligned|num_slices|partitioning_func_schema|partitioning_func |interval_length|compress_interval_length|integer_now_func_schema|integer_now_func
--+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+-----------------------+---------------- --+-------------+-----------+-----------+-------+----------+------------------------+------------------+---------------+------------------------+-----------------------+----------------
15| 9|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | 15| 9|device |integer |f | 3|_timescaledb_internal |get_partition_hash| | | |
14| 9|time |bigint |t | | | | 2000000000|public |dummy_now 14| 9|time |bigint |t | | | | 2000000000| |public |dummy_now
(2 rows) (2 rows)
@ -3876,8 +3882,8 @@ ORDER BY 1;
------------------------------+--------+---------- ------------------------------+--------+----------
Sun Jan 01 07:00:00 2017 PST | 1 | 1.65 Sun Jan 01 07:00:00 2017 PST | 1 | 1.65
Mon Jan 02 04:00:00 2017 PST | 2 | 1.4 Mon Jan 02 04:00:00 2017 PST | 2 | 1.4
Mon Jan 02 07:00:00 2017 PST | 1 | 1.3
Mon Jan 02 07:00:00 2017 PST | 2 | 1.6 Mon Jan 02 07:00:00 2017 PST | 2 | 1.6
Mon Jan 02 07:00:00 2017 PST | 1 | 1.3
Tue Jan 03 01:00:00 2017 PST | 3 | 3 Tue Jan 03 01:00:00 2017 PST | 3 | 3
Sat Jan 13 01:00:00 2018 PST | 2 | 1.4 Sat Jan 13 01:00:00 2018 PST | 2 | 1.4
Sat Jan 13 04:00:00 2018 PST | 2 | 3 Sat Jan 13 04:00:00 2018 PST | 2 | 3
@ -5314,7 +5320,7 @@ EXPLAIN (COSTS OFF)
INSERT INTO disttable (time, device, temp_c) INSERT INTO disttable (time, device, temp_c)
SELECT time, device, temp_c FROM disttable; SELECT time, device, temp_c FROM disttable;
QUERY PLAN QUERY PLAN
----------------------------------------------------------------------------------- -----------------------------------------------------------------------------------------
Custom Scan (HypertableModify) Custom Scan (HypertableModify)
Insert on distributed hypertable disttable Insert on distributed hypertable disttable
-> Insert on disttable -> Insert on disttable
@ -5322,10 +5328,13 @@ SELECT time, device, temp_c FROM disttable;
Batch size: 1000 Batch size: 1000
-> Custom Scan (ChunkDispatch) -> Custom Scan (ChunkDispatch)
-> Append -> Append
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_2 -> Custom Scan (DataNodeScan) on disttable disttable_2
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_3 -> Custom Scan (DataNodeScan) on disttable disttable_3
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_4 -> Custom Scan (DataNodeScan) on disttable disttable_4
(10 rows) (13 rows)
EXPLAIN (COSTS OFF) EXPLAIN (COSTS OFF)
INSERT INTO disttable (time, device, temp_c) INSERT INTO disttable (time, device, temp_c)
@ -5351,7 +5360,7 @@ EXPLAIN (COSTS OFF)
INSERT INTO disttable (time, device, temp_c) INSERT INTO disttable (time, device, temp_c)
SELECT time, device, temp_c FROM disttable RETURNING *; SELECT time, device, temp_c FROM disttable RETURNING *;
QUERY PLAN QUERY PLAN
----------------------------------------------------------------------------------- -----------------------------------------------------------------------------------------
Custom Scan (HypertableModify) Custom Scan (HypertableModify)
Insert on distributed hypertable disttable Insert on distributed hypertable disttable
-> Insert on disttable -> Insert on disttable
@ -5359,10 +5368,13 @@ SELECT time, device, temp_c FROM disttable RETURNING *;
Batch size: 1000 Batch size: 1000
-> Custom Scan (ChunkDispatch) -> Custom Scan (ChunkDispatch)
-> Append -> Append
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_2 -> Custom Scan (DataNodeScan) on disttable disttable_2
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_3 -> Custom Scan (DataNodeScan) on disttable disttable_3
-> Result
-> Custom Scan (DataNodeScan) on disttable disttable_4 -> Custom Scan (DataNodeScan) on disttable disttable_4
(10 rows) (13 rows)
INSERT INTO disttable (time, device, temp_c) INSERT INTO disttable (time, device, temp_c)
SELECT time, device, temp_c FROM disttable; SELECT time, device, temp_c FROM disttable;
@ -5842,12 +5854,14 @@ WHERE time > x
* (x = '2018-01-02 11:00:00'::timestamp)::int -- immutable * (x = '2018-01-02 11:00:00'::timestamp)::int -- immutable
* INTERVAL '1 hour'; * INTERVAL '1 hour';
QUERY PLAN QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (AsyncAppend) Custom Scan (AsyncAppend)
Output: test_tz."time", test_tz.v, (('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone) Output: test_tz."time", test_tz.v, (('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone)
-> Append -> Append
-> Custom Scan (DataNodeScan) on public.test_tz test_tz_1 -> Result
Output: test_tz_1."time", test_tz_1.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone Output: test_tz_1."time", test_tz_1.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone
-> Custom Scan (DataNodeScan) on public.test_tz test_tz_1
Output: test_tz_1."time", test_tz_1.v
Data node: db_dist_hypertable_1 Data node: db_dist_hypertable_1
Chunks: _dist_hyper_31_97_chunk Chunks: _dist_hyper_31_97_chunk
Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_internal.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_internal.chunks_in(public.test_tz.*, ARRAY[45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone))
@ -5856,8 +5870,10 @@ WHERE time > x
Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v Output: _dist_hyper_31_97_chunk."time", _dist_hyper_31_97_chunk.v
Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) Index Cond: (_dist_hyper_31_97_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone)
-> Custom Scan (DataNodeScan) on public.test_tz test_tz_2 -> Result
Output: test_tz_2."time", test_tz_2.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone Output: test_tz_2."time", test_tz_2.v, ('Tue Jan 02 11:00:00 2018 -01'::timestamp with time zone)::timestamp without time zone
-> Custom Scan (DataNodeScan) on public.test_tz test_tz_2
Output: test_tz_2."time", test_tz_2.v
Data node: db_dist_hypertable_2 Data node: db_dist_hypertable_2
Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk Chunks: _dist_hyper_31_95_chunk, _dist_hyper_31_96_chunk, _dist_hyper_31_98_chunk
Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_internal.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone)) Remote SQL: SELECT "time", v FROM public.test_tz WHERE _timescaledb_internal.chunks_in(public.test_tz.*, ARRAY[43, 44, 45]) AND (("time" > '2018-01-02 12:00:00'::timestamp without time zone))
@ -5870,7 +5886,7 @@ WHERE time > x
Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v Output: _dist_hyper_31_98_chunk."time", _dist_hyper_31_98_chunk.v
Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone) Index Cond: (_dist_hyper_31_98_chunk."time" > '2018-01-02 12:00:00'::timestamp without time zone)
(27 rows) (31 rows)
-- Reference value for the above test. -- Reference value for the above test.
WITH dummy AS (SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp x) WITH dummy AS (SELECT '2018-01-02 12:00:00 +00'::timestamptz::timestamp x)

View File

@ -206,15 +206,17 @@ SELECT * FROM jit_device_summary WHERE metric_spread = 1800 ORDER BY bucket DESC
Output: (time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time)), jit_test_contagg.device_id, avg(jit_test_contagg.metric), (max(jit_test_contagg.metric) - min(jit_test_contagg.metric)) Output: (time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time)), jit_test_contagg.device_id, avg(jit_test_contagg.metric), (max(jit_test_contagg.metric) - min(jit_test_contagg.metric))
Group Key: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id Group Key: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id
Filter: ((max(jit_test_contagg.metric) - min(jit_test_contagg.metric)) = '1800'::double precision) Filter: ((max(jit_test_contagg.metric) - min(jit_test_contagg.metric)) = '1800'::double precision)
-> Custom Scan (ChunkAppend) on public.jit_test_contagg -> Result
Output: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id, jit_test_contagg.metric Output: time_bucket('@ 1 hour'::interval, jit_test_contagg.observation_time), jit_test_contagg.device_id, jit_test_contagg.metric
-> Custom Scan (ChunkAppend) on public.jit_test_contagg
Output: jit_test_contagg.observation_time, jit_test_contagg.device_id, jit_test_contagg.metric
Startup Exclusion: true Startup Exclusion: true
Runtime Exclusion: false Runtime Exclusion: false
Chunks excluded during startup: 4 Chunks excluded during startup: 4
-> Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk -> Index Scan using _hyper_3_5_chunk_jit_test_contagg_observation_time_idx on _timescaledb_internal._hyper_3_5_chunk
Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric Output: _hyper_3_5_chunk.observation_time, _hyper_3_5_chunk.device_id, _hyper_3_5_chunk.metric
Index Cond: (_hyper_3_5_chunk.observation_time >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(4)), '-infinity'::timestamp with time zone)) Index Cond: (_hyper_3_5_chunk.observation_time >= COALESCE(_timescaledb_internal.to_timestamp(_timescaledb_internal.cagg_watermark(4)), '-infinity'::timestamp with time zone))
(27 rows) (29 rows)
-- generate the results into two different files -- generate the results into two different files
\set ECHO errors \set ECHO errors

View File

@ -0,0 +1,229 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Import setup file to data nodes.
\unset ECHO
-- Disable SSL to get stable error output across versions. SSL adds some output
-- that changed in PG 14.
set timescaledb.debug_enable_ssl to off;
set client_min_messages to error;
SET timescaledb.hide_data_node_name_in_errors = 'on';
-- A relatively big table on one data node
create table metrics_dist_remote_error(like metrics_dist);
select table_name from create_distributed_hypertable('metrics_dist_remote_error', 'time', 'device_id',
data_nodes => '{"data_node_1"}');
table_name
metrics_dist_remote_error
(1 row)
insert into metrics_dist_remote_error select * from metrics_dist order by metrics_dist limit 20000;
-- The error messages vary wildly between the Postgres versions, dependent on
-- the particular behavior of libqp in this or that case. The purpose of this
-- test is not to solidify this accidental behavior, but to merely exercise the
-- error handling code to make sure it doesn't have fatal errors. Unfortunately,
-- there is no way to suppress error output from a psql script.
set client_min_messages to ERROR;
\set ON_ERROR_STOP off
set timescaledb.remote_data_fetcher = 'copy';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(16384, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 16384 rows, 16384 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Output: 1
Data node: data_node_1
Fetcher Type: COPY
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(6 rows)
-- We don't test fatal errors here, because PG versions before 14 are unable to
-- report them properly to the access node, so we get different errors in these
-- versions.
-- Now test the same with the cursor fetcher.
set timescaledb.remote_data_fetcher = 'cursor';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Output: 1
Data node: data_node_1
Fetcher Type: Cursor
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(6 rows)
-- Table with broken send for a data type.
create table metrics_dist_bs(like metrics_dist);
alter table metrics_dist_bs alter column v0 type bs;
select table_name from create_distributed_hypertable('metrics_dist_bs',
'time', 'device_id');
table_name
metrics_dist_bs
(1 row)
set timescaledb.enable_connection_binary_data to off;
insert into metrics_dist_bs
select * from metrics_dist_remote_error;
set timescaledb.enable_connection_binary_data to on;
explain (analyze, verbose, costs off, timing off, summary off)
select * from metrics_dist_bs;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
drop table metrics_dist_bs;
-- Table with broken receive for a data type.
create table metrics_dist_br(like metrics_dist);
alter table metrics_dist_br alter column v0 type br;
select table_name from create_distributed_hypertable('metrics_dist_br',
'time', 'device_id');
table_name
metrics_dist_br
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 1
(1 row)
-- Test that INSERT and COPY fail on data nodes.
-- Note that we use the text format for the COPY input, so that the access node
-- doesn't call `recv` and fail by itself. It's going to use binary format for
-- transfer to data nodes regardless of the input format.
set timescaledb.dist_copy_transfer_format = 'binary';
-- First, create the reference.
\copy (select * from metrics_dist_remote_error) to 'dist_remote_error.text' with (format text);
-- We have to test various interleavings of COPY and INSERT to check that
-- one can recover from connection failure states introduced by another.
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
-- Fail at different points
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
reset timescaledb.debug_broken_sendrecv_throw_after;
-- Same with different replication factor
truncate metrics_dist_br;
select set_replication_factor('metrics_dist_br', 2);
set_replication_factor
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 2
(1 row)
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
-- Should succeed with text format for data transfer.
set timescaledb.dist_copy_transfer_format = 'text';
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
-- Final check.
set timescaledb.enable_connection_binary_data = false;
select count(*) from metrics_dist_br;
count
20000
(1 row)
set timescaledb.enable_connection_binary_data = true;
reset timescaledb.debug_broken_sendrecv_throw_after;
drop table metrics_dist_br;
-- Table with sleepy receive for a data type, to improve coverage of the waiting
-- code on the access node.
create table metrics_dist_bl(like metrics_dist);
alter table metrics_dist_bl alter column v0 type bl;
select table_name from create_distributed_hypertable('metrics_dist_bl',
'time', 'device_id');
table_name
metrics_dist_bl
(1 row)
-- We're using sleepy recv function, so need the binary transfer format for it
-- to be called on the data nodes.
set timescaledb.dist_copy_transfer_format = 'binary';
-- Test INSERT and COPY with slow data node.
\copy metrics_dist_bl from 'dist_remote_error.text' with (format text);
insert into metrics_dist_bl select * from metrics_dist_remote_error;
select count(*) from metrics_dist_bl;
count
40000
(1 row)
drop table metrics_dist_bl;
drop table metrics_dist_remote_error;

View File

@ -0,0 +1,229 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Import setup file to data nodes.
\unset ECHO
-- Disable SSL to get stable error output across versions. SSL adds some output
-- that changed in PG 14.
set timescaledb.debug_enable_ssl to off;
set client_min_messages to error;
SET timescaledb.hide_data_node_name_in_errors = 'on';
-- A relatively big table on one data node
create table metrics_dist_remote_error(like metrics_dist);
select table_name from create_distributed_hypertable('metrics_dist_remote_error', 'time', 'device_id',
data_nodes => '{"data_node_1"}');
table_name
metrics_dist_remote_error
(1 row)
insert into metrics_dist_remote_error select * from metrics_dist order by metrics_dist limit 20000;
-- The error messages vary wildly between the Postgres versions, dependent on
-- the particular behavior of libpq in this or that case. The purpose of this
-- test is not to solidify this accidental behavior, but to merely exercise the
-- error handling code to make sure it doesn't have fatal errors. Unfortunately,
-- there is no way to suppress error output from a psql script.
set client_min_messages to ERROR;
\set ON_ERROR_STOP off
set timescaledb.remote_data_fetcher = 'copy';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(16384, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 16384 rows, 16384 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Output: 1
Data node: data_node_1
Fetcher Type: COPY
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(6 rows)
-- We don't test fatal errors here, because PG versions before 14 are unable to
-- report them properly to the access node, so we get different errors in these
-- versions.
-- Now test the same with the cursor fetcher.
set timescaledb.remote_data_fetcher = 'cursor';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Output: 1
Data node: data_node_1
Fetcher Type: Cursor
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(6 rows)
-- Table with broken send for a data type.
create table metrics_dist_bs(like metrics_dist);
alter table metrics_dist_bs alter column v0 type bs;
select table_name from create_distributed_hypertable('metrics_dist_bs',
'time', 'device_id');
table_name
metrics_dist_bs
(1 row)
set timescaledb.enable_connection_binary_data to off;
insert into metrics_dist_bs
select * from metrics_dist_remote_error;
set timescaledb.enable_connection_binary_data to on;
explain (analyze, verbose, costs off, timing off, summary off)
select * from metrics_dist_bs;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
drop table metrics_dist_bs;
-- Table with broken receive for a data type.
create table metrics_dist_br(like metrics_dist);
alter table metrics_dist_br alter column v0 type br;
select table_name from create_distributed_hypertable('metrics_dist_br',
'time', 'device_id');
table_name
metrics_dist_br
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 1
(1 row)
-- Test that INSERT and COPY fail on data nodes.
-- Note that we use the text format for the COPY input, so that the access node
-- doesn't call `recv` and fail by itself. It's going to use binary format for
-- transfer to data nodes regardless of the input format.
set timescaledb.dist_copy_transfer_format = 'binary';
-- First, create the reference.
\copy (select * from metrics_dist_remote_error) to 'dist_remote_error.text' with (format text);
-- We have to test various interleavings of COPY and INSERT to check that
-- one can recover from connection failure states introduced by another.
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
-- Fail at different points
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
reset timescaledb.debug_broken_sendrecv_throw_after;
-- Same with different replication factor
truncate metrics_dist_br;
select set_replication_factor('metrics_dist_br', 2);
set_replication_factor
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 2
(1 row)
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
-- Should succeed with text format for data transfer.
set timescaledb.dist_copy_transfer_format = 'text';
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
-- Final check.
set timescaledb.enable_connection_binary_data = false;
select count(*) from metrics_dist_br;
count
20000
(1 row)
set timescaledb.enable_connection_binary_data = true;
reset timescaledb.debug_broken_sendrecv_throw_after;
drop table metrics_dist_br;
-- Table with sleepy receive for a data type, to improve coverage of the waiting
-- code on the access node.
create table metrics_dist_bl(like metrics_dist);
alter table metrics_dist_bl alter column v0 type bl;
select table_name from create_distributed_hypertable('metrics_dist_bl',
'time', 'device_id');
table_name
metrics_dist_bl
(1 row)
-- We're using sleepy recv function, so need the binary transfer format for it
-- to be called on the data nodes.
set timescaledb.dist_copy_transfer_format = 'binary';
-- Test INSERT and COPY with slow data node.
\copy metrics_dist_bl from 'dist_remote_error.text' with (format text);
insert into metrics_dist_bl select * from metrics_dist_remote_error;
select count(*) from metrics_dist_bl;
count
40000
(1 row)
drop table metrics_dist_bl;
drop table metrics_dist_remote_error;

View File

@ -0,0 +1,231 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Import setup file to data nodes.
\unset ECHO
-- Disable SSL to get stable error output across versions. SSL adds some output
-- that changed in PG 14.
set timescaledb.debug_enable_ssl to off;
set client_min_messages to error;
SET timescaledb.hide_data_node_name_in_errors = 'on';
-- A relatively big table on one data node
create table metrics_dist_remote_error(like metrics_dist);
select table_name from create_distributed_hypertable('metrics_dist_remote_error', 'time', 'device_id',
data_nodes => '{"data_node_1"}');
table_name
metrics_dist_remote_error
(1 row)
insert into metrics_dist_remote_error select * from metrics_dist order by metrics_dist limit 20000;
-- The error messages vary wildly between the Postgres versions, dependent on
-- the particular behavior of libpq in this or that case. The purpose of this
-- test is not to solidify this accidental behavior, but to merely exercise the
-- error handling code to make sure it doesn't have fatal errors. Unfortunately,
-- there is no way to suppress error output from a psql script.
set client_min_messages to ERROR;
\set ON_ERROR_STOP off
set timescaledb.remote_data_fetcher = 'copy';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(16384, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 16384 rows, 16384 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Result (actual rows=20000 loops=1)
Output: 1
-> Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Data node: data_node_1
Fetcher Type: COPY
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(7 rows)
-- We don't test fatal errors here, because PG versions before 14 are unable to
-- report them properly to the access node, so we get different errors in these
-- versions.
-- Now test the same with the cursor fetcher.
set timescaledb.remote_data_fetcher = 'cursor';
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(0, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 0 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(1, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(2, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(701, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 701 rows, 701 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000, device_id)::int != 0;
ERROR: [<hidden node name>]: debug point: requested to error out after 10000 rows, 10000 rows seen
explain (analyze, verbose, costs off, timing off, summary off)
select 1 from metrics_dist_remote_error where ts_debug_shippable_error_after_n_rows(10000000, device_id)::int != 0;
QUERY PLAN
Result (actual rows=20000 loops=1)
Output: 1
-> Custom Scan (DataNodeScan) on public.metrics_dist_remote_error (actual rows=20000 loops=1)
Data node: data_node_1
Fetcher Type: Cursor
Chunks: _dist_hyper_X_X_chunk, _dist_hyper_X_X_chunk
Remote SQL: SELECT NULL FROM public.metrics_dist_remote_error WHERE _timescaledb_internal.chunks_in(public.metrics_dist_remote_error.*, ARRAY[..]) AND ((public.ts_debug_shippable_error_after_n_rows(10000000, device_id) <> 0))
(7 rows)
-- Table with broken send for a data type.
create table metrics_dist_bs(like metrics_dist);
alter table metrics_dist_bs alter column v0 type bs;
select table_name from create_distributed_hypertable('metrics_dist_bs',
'time', 'device_id');
table_name
metrics_dist_bs
(1 row)
set timescaledb.enable_connection_binary_data to off;
insert into metrics_dist_bs
select * from metrics_dist_remote_error;
set timescaledb.enable_connection_binary_data to on;
explain (analyze, verbose, costs off, timing off, summary off)
select * from metrics_dist_bs;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
drop table metrics_dist_bs;
-- Table with broken receive for a data type.
create table metrics_dist_br(like metrics_dist);
alter table metrics_dist_br alter column v0 type br;
select table_name from create_distributed_hypertable('metrics_dist_br',
'time', 'device_id');
table_name
metrics_dist_br
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 1
(1 row)
-- Test that INSERT and COPY fail on data nodes.
-- Note that we use the text format for the COPY input, so that the access node
-- doesn't call `recv` and fail by itself. It's going to use binary format for
-- transfer to data nodes regardless of the input format.
set timescaledb.dist_copy_transfer_format = 'binary';
-- First, create the reference.
\copy (select * from metrics_dist_remote_error) to 'dist_remote_error.text' with (format text);
-- We have to test various interleavings of COPY and INSERT to check that
-- one can recover from connection failure states introduced by another.
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
-- Fail at different points
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
reset timescaledb.debug_broken_sendrecv_throw_after;
-- Same with different replication factor
truncate metrics_dist_br;
select set_replication_factor('metrics_dist_br', 2);
set_replication_factor
(1 row)
select hypertable_name, replication_factor from timescaledb_information.hypertables
where hypertable_name = 'metrics_dist_br';
hypertable_name | replication_factor
-----------------+--------------------
metrics_dist_br | 2
(1 row)
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
insert into metrics_dist_br select * from metrics_dist_remote_error;
ERROR: [<hidden node name>]: debug point: requested to error out after 7103 rows, 7103 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1 rows, 1 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 2;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 2 rows, 2 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1023;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1023 rows, 1023 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1024;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1024 rows, 1024 rows seen
set timescaledb.debug_broken_sendrecv_throw_after = 1025;
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
ERROR: [<hidden node name>]: debug point: requested to error out after 1025 rows, 1025 rows seen
-- Should succeed with text format for data transfer.
set timescaledb.dist_copy_transfer_format = 'text';
\copy metrics_dist_br from 'dist_remote_error.text' with (format text);
-- Final check.
set timescaledb.enable_connection_binary_data = false;
select count(*) from metrics_dist_br;
count
20000
(1 row)
set timescaledb.enable_connection_binary_data = true;
reset timescaledb.debug_broken_sendrecv_throw_after;
drop table metrics_dist_br;
-- Table with sleepy receive for a data type, to improve coverage of the waiting
-- code on the access node.
create table metrics_dist_bl(like metrics_dist);
alter table metrics_dist_bl alter column v0 type bl;
select table_name from create_distributed_hypertable('metrics_dist_bl',
'time', 'device_id');
table_name
metrics_dist_bl
(1 row)
-- We're using sleepy recv function, so need the binary transfer format for it
-- to be called on the data nodes.
set timescaledb.dist_copy_transfer_format = 'binary';
-- Test INSERT and COPY with slow data node.
\copy metrics_dist_bl from 'dist_remote_error.text' with (format text);
insert into metrics_dist_bl select * from metrics_dist_remote_error;
select count(*) from metrics_dist_bl;
count
40000
(1 row)
drop table metrics_dist_bl;
drop table metrics_dist_remote_error;

View File

@ -27,9 +27,10 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "14"))
endif() endif()
if(CMAKE_BUILD_TYPE MATCHES Debug) if(CMAKE_BUILD_TYPE MATCHES Debug)
list(APPEND TEST_FILES_SHARED dist_parallel_agg.sql dist_remote_error.sql list(APPEND TEST_FILES_SHARED dist_parallel_agg.sql timestamp_limits.sql
timestamp_limits.sql with_clause_parser.sql) with_clause_parser.sql)
list(APPEND TEST_TEMPLATES_SHARED constify_now.sql.in space_constraint.sql.in) list(APPEND TEST_TEMPLATES_SHARED constify_now.sql.in space_constraint.sql.in
dist_remote_error.sql.in)
endif(CMAKE_BUILD_TYPE MATCHES Debug) endif(CMAKE_BUILD_TYPE MATCHES Debug)
# Regression tests that vary with PostgreSQL version. Generated test files are # Regression tests that vary with PostgreSQL version. Generated test files are