Fix chunks_in queries to use ChunkAppend plans

It was observed on data nodes (DNs) that "ORDER BY time" queries were
running slower than expected when the "chunks_in" exclusion function
was specified.

Examination of the EXPLAIN output revealed that when "chunks_in" was
specified, the ChunkAppend plan, which provides the exclusion of
unwanted chunks, was not being picked up for execution.

The logic in the get_explicit_chunk_oids() function has been improved
to populate the fdw_private structure appropriately when ordering info
is present. Care has been taken to also handle the case where multiple
dimensions are present.
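
In essence, the new handling looks like the following condensed sketch
(taken from the planner diff below; error handling and the surrounding
function body are omitted, so treat it as illustrative rather than the
authoritative change):

/* after resolving the chunks named in chunks_in(), check whether an
 * ordered (ChunkAppend) plan applies and record that in fdw_private */
if (rel->fdw_private != NULL &&
    should_order_append(root, rel, ht, ctx->join_conditions, &order_attno, &reverse))
{
    TimescaleDBPrivate *priv = ts_get_private_reloptinfo(rel);
    List **nested_oids = NULL;

    priv->appends_ordered = true;
    priv->order_attno = order_attno;

    /* for space partitioning we also need the time slices of the chunks */
    if (ht->space->num_dimensions > 1)
        nested_oids = &priv->nested_oids;

    /* "hri" is not needed since the chunks are already known */
    return ts_hypertable_restrict_info_get_chunk_oids_ordered(NULL, ht,
                                                              chunks, num_chunks,
                                                              AccessShareLock,
                                                              nested_oids, reverse);
}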

Fixes #2733
Nikhil 2021-03-29 16:33:09 +05:30 committed by Nikhils
parent 574ef87426
commit 3c17802485
8 changed files with 623 additions and 7 deletions

@ -9,12 +9,15 @@ accidentally triggering the load of a previous DB version.**
**Bugfixes**
* #2989 Refactor and harden size and stats functions
* #3058 Reduce memory usage for distributed inserts
* #3067 Fix extremely slow multi-node order by queries
**Thanks**
* @pedrokost and @RobAtticus for reporting an issue with size
functions on empty hypertables
* @stephane-moreau for reporting an issue with high memory usage during
single-transaction inserts on a distributed hypertable.
* @phemmer and @ryanbooz for reporting issues with slow
multi-node order by queries
## 2.1.1 (2021-03-29)

@ -607,6 +607,9 @@ chunk_cmp_reverse(const void *c1, const void *c2)
/*
* get chunk oids ordered by time dimension
*
* if "chunks" is NULL, we get all the chunks from the catalog. Otherwise we
* restrict ourselves to the passed in chunks list.
*
* nested_oids is a list of lists, chunks that occupy the same time slice will be
* in the same list. In the list [[1,2,3],[4,5,6]] chunks 1, 2 and 3 are space partitions of
* the same time slice and 4, 5 and 6 are space partitions of the next time slice.
@ -614,16 +617,18 @@ chunk_cmp_reverse(const void *c1, const void *c2)
*/
List *
ts_hypertable_restrict_info_get_chunk_oids_ordered(HypertableRestrictInfo *hri, Hypertable *ht,
Chunk **chunks, unsigned int num_chunks,
LOCKMODE lockmode, List **nested_oids,
bool reverse)
{
unsigned num_chunks;
Chunk **chunks = hypertable_restrict_info_get_chunks(hri, ht, lockmode, &num_chunks);
List *chunk_oids = NIL;
List *slot_chunk_oids = NIL;
DimensionSlice *slice = NULL;
unsigned int i;
if (chunks == NULL)
chunks = hypertable_restrict_info_get_chunks(hri, ht, lockmode, &num_chunks);
if (num_chunks == 0)
return NIL;

@ -26,7 +26,9 @@ extern List *ts_hypertable_restrict_info_get_chunk_oids(HypertableRestrictInfo *
LOCKMODE lockmode);
extern List *ts_hypertable_restrict_info_get_chunk_oids_ordered(HypertableRestrictInfo *hri,
Hypertable *ht, LOCKMODE lockmode,
Hypertable *ht, Chunk **chunks,
unsigned int num_chunks,
LOCKMODE lockmode,
List **nested_oids, bool reverse);
#endif /* TIMESCALEDB_HYPERTABLE_RESTRICT_INFO_H */
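
The updated signature supports two call patterns; a minimal sketch of
both, mirroring the call sites in the planner diff below (variable
names are taken from that diff, the surrounding context is assumed):

/* regular path: pass NULL/0 and let the function look up the chunks
 * via the restriction info */
chunk_oids = ts_hypertable_restrict_info_get_chunk_oids_ordered(hri, ht,
                                                                NULL, 0,
                                                                AccessShareLock,
                                                                nested_oids, reverse);

/* chunks_in() path: pass the explicitly resolved chunks; "hri" is not
 * needed since the chunks are already known */
chunk_oids = ts_hypertable_restrict_info_get_chunk_oids_ordered(NULL, ht,
                                                                chunks, num_chunks,
                                                                AccessShareLock,
                                                                nested_oids, reverse);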

@ -902,16 +902,26 @@ should_order_append(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht, List *jo
return ts_ordered_append_should_optimize(root, rel, ht, join_conditions, order_attno, reverse);
}
/* get chunk oids specified by explicit chunk exclusion function */
/*
* get chunk oids specified by explicit chunk exclusion function
*
* Similar to the regular get_chunk_oids, we also populate the fdw_private
* structure appropriately if ordering info is present.
*/
static List *
get_explicit_chunk_oids(CollectQualCtx *ctx, Hypertable *ht)
get_explicit_chunk_oids(CollectQualCtx *ctx, PlannerInfo *root, RelOptInfo *rel, Hypertable *ht)
{
List *chunk_oids = NIL;
Const *chunks_arg;
ArrayIterator chunk_id_iterator;
ArrayType *chunk_id_arr;
Datum elem = (Datum) NULL;
bool isnull;
Expr *expr;
bool reverse;
int order_attno;
Chunk **chunks = NULL;
unsigned int num_chunks = 0;
Assert(ctx->chunk_exclusion_func->args->length == 2);
expr = lsecond(ctx->chunk_exclusion_func->args);
@ -925,8 +935,16 @@ get_explicit_chunk_oids(CollectQualCtx *ctx, Hypertable *ht)
/* function marked as STRICT so argument can't be NULL */
Assert(!chunks_arg->constisnull);
chunk_id_iterator = array_create_iterator(DatumGetArrayTypeP(chunks_arg->constvalue), 0, NULL);
chunk_id_arr = DatumGetArrayTypeP(chunks_arg->constvalue);
if (ARR_NDIM(chunk_id_arr) != 1)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid number of array dimensions for chunks_in")));
/* allocate an array of "Chunk *" and set it up below */
chunks = (Chunk **) palloc0(sizeof(Chunk *) *
ArrayGetNItems(ARR_NDIM(chunk_id_arr), ARR_DIMS(chunk_id_arr)));
chunk_id_iterator = array_create_iterator(chunk_id_arr, 0, NULL);
while (array_iterate(chunk_id_iterator, &elem, &isnull))
{
if (!isnull)
@ -945,11 +963,46 @@ get_explicit_chunk_oids(CollectQualCtx *ctx, Hypertable *ht)
NameStr(ht->fd.table_name))));
chunk_oids = lappend_int(chunk_oids, chunk->table_id);
chunks[num_chunks++] = chunk;
}
else
elog(ERROR, "chunk id can't be NULL");
}
array_free_iterator(chunk_id_iterator);
/*
* If fdw_private has not been setup by caller there is no point checking
* for ordered append as we can't pass the required metadata in fdw_private
* to signal that this is safe to transform in ordered append plan in
* set_rel_pathlist.
*/
if (rel->fdw_private != NULL &&
should_order_append(root, rel, ht, ctx->join_conditions, &order_attno, &reverse))
{
TimescaleDBPrivate *priv = ts_get_private_reloptinfo(rel);
List **nested_oids = NULL;
priv->appends_ordered = true;
priv->order_attno = order_attno;
/*
* for space partitioning we need extra information about the
* time slices of the chunks
*/
if (ht->space->num_dimensions > 1)
nested_oids = &priv->nested_oids;
/* we don't need "hri" here since we already have the chunks */
return ts_hypertable_restrict_info_get_chunk_oids_ordered(NULL,
ht,
chunks,
num_chunks,
AccessShareLock,
nested_oids,
reverse);
}
return chunk_oids;
}
@ -1004,6 +1057,8 @@ get_chunk_oids(CollectQualCtx *ctx, PlannerInfo *root, RelOptInfo *rel, Hypertab
return ts_hypertable_restrict_info_get_chunk_oids_ordered(hri,
ht,
NULL,
0,
AccessShareLock,
nested_oids,
reverse);
@ -1011,7 +1066,7 @@ get_chunk_oids(CollectQualCtx *ctx, PlannerInfo *root, RelOptInfo *rel, Hypertab
return find_children_oids(hri, ht, AccessShareLock);
}
else
return get_explicit_chunk_oids(ctx, ht);
return get_explicit_chunk_oids(ctx, root, rel, ht);
}
/*

@ -668,7 +668,91 @@ ORDER BY device, temp;
(9 rows)
-- Test remote explain
-- Make sure that chunks_in function only expects one-dimensional integer arrays
\set ON_ERROR_STOP 0
SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[[2], [1]])
ORDER BY "time" DESC NULLS FIRST LIMIT 1;
ERROR: invalid number of array dimensions for chunks_in
\set ON_ERROR_STOP 1
SET timescaledb.enable_remote_explain = ON;
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when using max(time).
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(time)
FROM disttable;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
Output: $0
InitPlan 1 (returns $0)
-> Limit
Output: disttable."time"
-> Custom Scan (AsyncAppend)
Output: disttable."time"
-> Merge Append
Sort Key: disttable_1."time" DESC
-> Custom Scan (DataNodeScan) on public.disttable disttable_1
Output: disttable_1."time"
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_1_4_chunk, _dist_hyper_1_1_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_4_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_4_chunk
Output: _dist_hyper_1_4_chunk."time"
Index Cond: (_dist_hyper_1_4_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_1_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_1_chunk
Output: _dist_hyper_1_1_chunk."time"
Index Cond: (_dist_hyper_1_1_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_2
Output: disttable_2."time"
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_3_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_5_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_5_chunk
Output: _dist_hyper_1_5_chunk."time"
Index Cond: (_dist_hyper_1_5_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_3_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_3_chunk
Output: _dist_hyper_1_3_chunk."time"
Index Cond: (_dist_hyper_1_3_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_3
Output: disttable_3."time"
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_1_6_chunk, _dist_hyper_1_2_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_6_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_6_chunk
Output: _dist_hyper_1_6_chunk."time"
Index Cond: (_dist_hyper_1_6_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_2_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_2_chunk
Output: _dist_hyper_1_2_chunk."time"
Index Cond: (_dist_hyper_1_2_chunk."time" IS NOT NULL)
(69 rows)
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(temp)
FROM disttable;
@ -2503,6 +2587,73 @@ INSERT INTO twodim VALUES
INSERT INTO twodim VALUES
('2019-02-10 16:23', 5, 7.1),
('2019-02-10 17:11', 7, 3.2);
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when multiple dimensions are involved.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT * FROM twodim
ORDER BY time;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (AsyncAppend)
Output: twodim."time", twodim."Color", twodim.temp
-> Merge Append
Sort Key: twodim_1."time"
-> Custom Scan (DataNodeScan) on public.twodim twodim_1
Output: twodim_1."time", twodim_1."Color", twodim_1.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_7_18_chunk, _dist_hyper_7_22_chunk, _dist_hyper_7_25_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12, 14]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_18_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_18_chunk
Output: _dist_hyper_7_18_chunk."time", _dist_hyper_7_18_chunk."Color", _dist_hyper_7_18_chunk.temp
-> Index Scan Backward using _dist_hyper_7_22_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_22_chunk
Output: _dist_hyper_7_22_chunk."time", _dist_hyper_7_22_chunk."Color", _dist_hyper_7_22_chunk.temp
-> Index Scan Backward using _dist_hyper_7_25_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_25_chunk
Output: _dist_hyper_7_25_chunk."time", _dist_hyper_7_25_chunk."Color", _dist_hyper_7_25_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_2
Output: twodim_2."time", twodim_2."Color", twodim_2.temp
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_7_19_chunk, _dist_hyper_7_21_chunk, _dist_hyper_7_24_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 11, 13]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_19_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_19_chunk
Output: _dist_hyper_7_19_chunk."time", _dist_hyper_7_19_chunk."Color", _dist_hyper_7_19_chunk.temp
-> Index Scan Backward using _dist_hyper_7_21_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_21_chunk
Output: _dist_hyper_7_21_chunk."time", _dist_hyper_7_21_chunk."Color", _dist_hyper_7_21_chunk.temp
-> Index Scan Backward using _dist_hyper_7_24_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_24_chunk
Output: _dist_hyper_7_24_chunk."time", _dist_hyper_7_24_chunk."Color", _dist_hyper_7_24_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_3
Output: twodim_3."time", twodim_3."Color", twodim_3.temp
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_7_20_chunk, _dist_hyper_7_23_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_20_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_20_chunk
Output: _dist_hyper_7_20_chunk."time", _dist_hyper_7_20_chunk."Color", _dist_hyper_7_20_chunk.temp
-> Index Scan Backward using _dist_hyper_7_23_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_23_chunk
Output: _dist_hyper_7_23_chunk."time", _dist_hyper_7_23_chunk."Color", _dist_hyper_7_23_chunk.temp
(56 rows)
SET timescaledb.enable_remote_explain = OFF;
-- Check results
SELECT * FROM twodim
ORDER BY time;
@ -3243,6 +3394,31 @@ SELECT * FROM dist_device;
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24])
(6 rows)
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when only time partitioning is being used.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT "time", dist_device, temp FROM public.dist_device ORDER BY "time" ASC NULLS LAST;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: dist_device."time"
-> Custom Scan (DataNodeScan) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_15_37_chunk
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Order: dist_device."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_15_37_chunk_dist_device_time_idx on _timescaledb_internal._dist_hyper_15_37_chunk
Output: _dist_hyper_15_37_chunk."time", _dist_hyper_15_37_chunk.dist_device, _dist_hyper_15_37_chunk.temp
(16 rows)
SELECT * FROM dist_device;
time | dist_device | temp
------------------------------+-------------+------

@ -668,7 +668,91 @@ ORDER BY device, temp;
(9 rows)
-- Test remote explain
-- Make sure that chunks_in function only expects one-dimensional integer arrays
\set ON_ERROR_STOP 0
SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[[2], [1]])
ORDER BY "time" DESC NULLS FIRST LIMIT 1;
ERROR: invalid number of array dimensions for chunks_in
\set ON_ERROR_STOP 1
SET timescaledb.enable_remote_explain = ON;
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when using max(time).
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(time)
FROM disttable;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
Output: $0
InitPlan 1 (returns $0)
-> Limit
Output: disttable."time"
-> Custom Scan (AsyncAppend)
Output: disttable."time"
-> Merge Append
Sort Key: disttable_1."time" DESC
-> Custom Scan (DataNodeScan) on public.disttable disttable_1
Output: disttable_1."time"
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_1_4_chunk, _dist_hyper_1_1_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_4_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_4_chunk
Output: _dist_hyper_1_4_chunk."time"
Index Cond: (_dist_hyper_1_4_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_1_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_1_chunk
Output: _dist_hyper_1_1_chunk."time"
Index Cond: (_dist_hyper_1_1_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_2
Output: disttable_2."time"
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_3_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_5_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_5_chunk
Output: _dist_hyper_1_5_chunk."time"
Index Cond: (_dist_hyper_1_5_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_3_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_3_chunk
Output: _dist_hyper_1_3_chunk."time"
Index Cond: (_dist_hyper_1_3_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_3
Output: disttable_3."time"
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_1_6_chunk, _dist_hyper_1_2_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_6_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_6_chunk
Output: _dist_hyper_1_6_chunk."time"
Index Cond: (_dist_hyper_1_6_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_2_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_2_chunk
Output: _dist_hyper_1_2_chunk."time"
Index Cond: (_dist_hyper_1_2_chunk."time" IS NOT NULL)
(69 rows)
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(temp)
FROM disttable;
@ -2484,6 +2568,73 @@ INSERT INTO twodim VALUES
INSERT INTO twodim VALUES
('2019-02-10 16:23', 5, 7.1),
('2019-02-10 17:11', 7, 3.2);
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when multiple dimensions are involved.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT * FROM twodim
ORDER BY time;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (AsyncAppend)
Output: twodim."time", twodim."Color", twodim.temp
-> Merge Append
Sort Key: twodim_1."time"
-> Custom Scan (DataNodeScan) on public.twodim twodim_1
Output: twodim_1."time", twodim_1."Color", twodim_1.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_7_18_chunk, _dist_hyper_7_22_chunk, _dist_hyper_7_25_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12, 14]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_18_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_18_chunk
Output: _dist_hyper_7_18_chunk."time", _dist_hyper_7_18_chunk."Color", _dist_hyper_7_18_chunk.temp
-> Index Scan Backward using _dist_hyper_7_22_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_22_chunk
Output: _dist_hyper_7_22_chunk."time", _dist_hyper_7_22_chunk."Color", _dist_hyper_7_22_chunk.temp
-> Index Scan Backward using _dist_hyper_7_25_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_25_chunk
Output: _dist_hyper_7_25_chunk."time", _dist_hyper_7_25_chunk."Color", _dist_hyper_7_25_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_2
Output: twodim_2."time", twodim_2."Color", twodim_2.temp
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_7_19_chunk, _dist_hyper_7_21_chunk, _dist_hyper_7_24_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 11, 13]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_19_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_19_chunk
Output: _dist_hyper_7_19_chunk."time", _dist_hyper_7_19_chunk."Color", _dist_hyper_7_19_chunk.temp
-> Index Scan Backward using _dist_hyper_7_21_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_21_chunk
Output: _dist_hyper_7_21_chunk."time", _dist_hyper_7_21_chunk."Color", _dist_hyper_7_21_chunk.temp
-> Index Scan Backward using _dist_hyper_7_24_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_24_chunk
Output: _dist_hyper_7_24_chunk."time", _dist_hyper_7_24_chunk."Color", _dist_hyper_7_24_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_3
Output: twodim_3."time", twodim_3."Color", twodim_3.temp
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_7_20_chunk, _dist_hyper_7_23_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_20_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_20_chunk
Output: _dist_hyper_7_20_chunk."time", _dist_hyper_7_20_chunk."Color", _dist_hyper_7_20_chunk.temp
-> Index Scan Backward using _dist_hyper_7_23_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_23_chunk
Output: _dist_hyper_7_23_chunk."time", _dist_hyper_7_23_chunk."Color", _dist_hyper_7_23_chunk.temp
(56 rows)
SET timescaledb.enable_remote_explain = OFF;
-- Check results
SELECT * FROM twodim
ORDER BY time;
@ -3223,6 +3374,29 @@ SELECT * FROM dist_device;
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24])
(5 rows)
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when only time partitioning is being used.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT "time", dist_device, temp FROM public.dist_device ORDER BY "time" ASC NULLS LAST;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (DataNodeScan) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_15_37_chunk
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Order: dist_device."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_15_37_chunk_dist_device_time_idx on _timescaledb_internal._dist_hyper_15_37_chunk
Output: _dist_hyper_15_37_chunk."time", _dist_hyper_15_37_chunk.dist_device, _dist_hyper_15_37_chunk.temp
(14 rows)
SELECT * FROM dist_device;
time | dist_device | temp
------------------------------+-------------+------

@ -667,7 +667,91 @@ ORDER BY device, temp;
(9 rows)
-- Test remote explain
-- Make sure that chunks_in function only expects one-dimensional integer arrays
\set ON_ERROR_STOP 0
SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[[2], [1]])
ORDER BY "time" DESC NULLS FIRST LIMIT 1;
ERROR: invalid number of array dimensions for chunks_in
\set ON_ERROR_STOP 1
SET timescaledb.enable_remote_explain = ON;
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when using max(time).
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(time)
FROM disttable;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Result
Output: $0
InitPlan 1 (returns $0)
-> Limit
Output: disttable."time"
-> Custom Scan (AsyncAppend)
Output: disttable."time"
-> Merge Append
Sort Key: disttable_1."time" DESC
-> Custom Scan (DataNodeScan) on public.disttable disttable_1
Output: disttable_1."time"
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_1_4_chunk, _dist_hyper_1_1_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_4_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_4_chunk
Output: _dist_hyper_1_4_chunk."time"
Index Cond: (_dist_hyper_1_4_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_1_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_1_chunk
Output: _dist_hyper_1_1_chunk."time"
Index Cond: (_dist_hyper_1_1_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_2
Output: disttable_2."time"
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_1_5_chunk, _dist_hyper_1_3_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_5_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_5_chunk
Output: _dist_hyper_1_5_chunk."time"
Index Cond: (_dist_hyper_1_5_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_3_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_3_chunk
Output: _dist_hyper_1_3_chunk."time"
Index Cond: (_dist_hyper_1_3_chunk."time" IS NOT NULL)
-> Custom Scan (DataNodeScan) on public.disttable disttable_3
Output: disttable_3."time"
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_1_6_chunk, _dist_hyper_1_2_chunk
Remote SQL: SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[2, 1]) AND (("time" IS NOT NULL)) ORDER BY "time" DESC NULLS FIRST LIMIT 1
Remote EXPLAIN:
Limit
Output: disttable."time"
-> Custom Scan (ChunkAppend) on public.disttable
Output: disttable."time"
Order: disttable."time" DESC
Startup Exclusion: false
Runtime Exclusion: false
-> Index Only Scan using _dist_hyper_1_6_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_6_chunk
Output: _dist_hyper_1_6_chunk."time"
Index Cond: (_dist_hyper_1_6_chunk."time" IS NOT NULL)
-> Index Only Scan using _dist_hyper_1_2_chunk_disttable_time_idx on _timescaledb_internal._dist_hyper_1_2_chunk
Output: _dist_hyper_1_2_chunk."time"
Index Cond: (_dist_hyper_1_2_chunk."time" IS NOT NULL)
(69 rows)
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(temp)
FROM disttable;
@ -2483,6 +2567,73 @@ INSERT INTO twodim VALUES
INSERT INTO twodim VALUES
('2019-02-10 16:23', 5, 7.1),
('2019-02-10 17:11', 7, 3.2);
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when multiple dimensions are involved.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT * FROM twodim
ORDER BY time;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (AsyncAppend)
Output: twodim."time", twodim."Color", twodim.temp
-> Merge Append
Sort Key: twodim_1."time"
-> Custom Scan (DataNodeScan) on public.twodim twodim_1
Output: twodim_1."time", twodim_1."Color", twodim_1.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_7_18_chunk, _dist_hyper_7_22_chunk, _dist_hyper_7_25_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12, 14]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_18_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_18_chunk
Output: _dist_hyper_7_18_chunk."time", _dist_hyper_7_18_chunk."Color", _dist_hyper_7_18_chunk.temp
-> Index Scan Backward using _dist_hyper_7_22_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_22_chunk
Output: _dist_hyper_7_22_chunk."time", _dist_hyper_7_22_chunk."Color", _dist_hyper_7_22_chunk.temp
-> Index Scan Backward using _dist_hyper_7_25_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_25_chunk
Output: _dist_hyper_7_25_chunk."time", _dist_hyper_7_25_chunk."Color", _dist_hyper_7_25_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_2
Output: twodim_2."time", twodim_2."Color", twodim_2.temp
Data node: db_dist_hypertable_2
Chunks: _dist_hyper_7_19_chunk, _dist_hyper_7_21_chunk, _dist_hyper_7_24_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 11, 13]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_19_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_19_chunk
Output: _dist_hyper_7_19_chunk."time", _dist_hyper_7_19_chunk."Color", _dist_hyper_7_19_chunk.temp
-> Index Scan Backward using _dist_hyper_7_21_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_21_chunk
Output: _dist_hyper_7_21_chunk."time", _dist_hyper_7_21_chunk."Color", _dist_hyper_7_21_chunk.temp
-> Index Scan Backward using _dist_hyper_7_24_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_24_chunk
Output: _dist_hyper_7_24_chunk."time", _dist_hyper_7_24_chunk."Color", _dist_hyper_7_24_chunk.temp
-> Custom Scan (DataNodeScan) on public.twodim twodim_3
Output: twodim_3."time", twodim_3."Color", twodim_3.temp
Data node: db_dist_hypertable_3
Chunks: _dist_hyper_7_20_chunk, _dist_hyper_7_23_chunk
Remote SQL: SELECT "time", "Color", temp FROM public.twodim WHERE _timescaledb_internal.chunks_in(public.twodim.*, ARRAY[10, 12]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.twodim
Output: twodim."time", twodim."Color", twodim.temp
Order: twodim."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_7_20_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_20_chunk
Output: _dist_hyper_7_20_chunk."time", _dist_hyper_7_20_chunk."Color", _dist_hyper_7_20_chunk.temp
-> Index Scan Backward using _dist_hyper_7_23_chunk_twodim_time_idx on _timescaledb_internal._dist_hyper_7_23_chunk
Output: _dist_hyper_7_23_chunk."time", _dist_hyper_7_23_chunk."Color", _dist_hyper_7_23_chunk.temp
(56 rows)
SET timescaledb.enable_remote_explain = OFF;
-- Check results
SELECT * FROM twodim
ORDER BY time;
@ -3222,6 +3373,29 @@ SELECT * FROM dist_device;
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24])
(5 rows)
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when only time partitioning is being used.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT "time", dist_device, temp FROM public.dist_device ORDER BY "time" ASC NULLS LAST;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
Custom Scan (DataNodeScan) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Data node: db_dist_hypertable_1
Chunks: _dist_hyper_15_37_chunk
Remote SQL: SELECT "time", dist_device, temp FROM public.dist_device WHERE _timescaledb_internal.chunks_in(public.dist_device.*, ARRAY[24]) ORDER BY "time" ASC NULLS LAST
Remote EXPLAIN:
Custom Scan (ChunkAppend) on public.dist_device
Output: dist_device."time", dist_device.dist_device, dist_device.temp
Order: dist_device."time"
Startup Exclusion: false
Runtime Exclusion: false
-> Index Scan Backward using _dist_hyper_15_37_chunk_dist_device_time_idx on _timescaledb_internal._dist_hyper_15_37_chunk
Output: _dist_hyper_15_37_chunk."time", _dist_hyper_15_37_chunk.dist_device, _dist_hyper_15_37_chunk.temp
(14 rows)
SELECT * FROM dist_device;
time | dist_device | temp
------------------------------+-------------+------

@ -263,8 +263,20 @@ ORDER BY device, temp;
-- Test remote explain
-- Make sure that chunks_in function only expects one-dimensional integer arrays
\set ON_ERROR_STOP 0
SELECT "time" FROM public.disttable WHERE _timescaledb_internal.chunks_in(public.disttable.*, ARRAY[[2], [1]])
ORDER BY "time" DESC NULLS FIRST LIMIT 1;
\set ON_ERROR_STOP 1
SET timescaledb.enable_remote_explain = ON;
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when using max(time).
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(time)
FROM disttable;
EXPLAIN (VERBOSE, COSTS FALSE)
SELECT max(temp)
FROM disttable;
@ -765,6 +777,14 @@ INSERT INTO twodim VALUES
('2019-02-10 16:23', 5, 7.1),
('2019-02-10 17:11', 7, 3.2);
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when multiple dimensions are involved.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT * FROM twodim
ORDER BY time;
SET timescaledb.enable_remote_explain = OFF;
-- Check results
SELECT * FROM twodim
ORDER BY time;
@ -994,7 +1014,14 @@ INSERT INTO dist_device VALUES
EXPLAIN (VERBOSE, COSTS OFF)
SELECT * FROM dist_device;
-- Check that datanodes use ChunkAppend plans with chunks_in function in the
-- "Remote SQL" when only time partitioning is being used.
SET timescaledb.enable_remote_explain = ON;
EXPLAIN (VERBOSE, COSTS OFF, TIMING OFF, SUMMARY OFF)
SELECT "time", dist_device, temp FROM public.dist_device ORDER BY "time" ASC NULLS LAST;
SELECT * FROM dist_device;
-- Test estimating relation size without stats
CREATE TABLE hyper_estimate(time timestamptz, device int, temp float);
SELECT * FROM create_distributed_hypertable('hyper_estimate', 'time', 'device', number_partitions => 3, replication_factor => 1, chunk_time_interval => INTERVAL '7 days');