Mirror of https://github.com/timescale/timescaledb.git, synced 2025-05-17 19:13:16 +08:00
Add parallel support for partialize_agg()
Make `partialize_agg()` support parallel query execution. To make this work, the finalize node needs to combine the individual partials from each parallel worker, but the final step that turns the resulting partial into the finished aggregate should not happen. Thus, in the case of distributed hypertables, each data node can run a parallel query to compute a partial, and the access node can later combine and finalize these partials into the final aggregate. Essentially, there is one combine step (minus final) on each data node, and then another combine plus final on the access node.

To implement this, the finalize aggregate plan is simply modified to elide the final step and to re-serialize the partial. This is only possible at the plan stage; if done at the path stage, the PostgreSQL planner hits assertions that assume the node has certain values (e.g., it does not expect combine Paths to skip the final step).
This commit is contained in: parent c13ed17fbc, commit c76a0cff68
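The end-to-end flow this enables is, roughly, the two-step pattern exercised by the tests below. A minimal SQL sketch, assuming a hypothetical hypertable "metrics" with an integer "value" column and a hypothetical relation "node_partials" holding the collected per-node partials (the finalize_agg() calls use the same signatures as the tests in this commit):

-- Step 1 (can now run as a parallel plan, e.g. on each data node):
-- emit serialized partials instead of finished aggregates.
SELECT _timescaledb_internal.partialize_agg(sum(value))  AS partial_sum,
       _timescaledb_internal.partialize_agg(count(*))    AS partial_count
FROM metrics;  -- placeholder hypertable, not part of this change

-- Step 2 (e.g. on the access node): combine the collected partials and
-- apply the final step to produce the finished aggregates.
SELECT _timescaledb_internal.finalize_agg('pg_catalog.sum(integer)'::text,
           NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[],
           partial_sum, NULL::bigint)   AS sum,
       _timescaledb_internal.finalize_agg('pg_catalog.count()'::text,
           NULL::name, NULL::name, '{}'::name[],
           partial_count, NULL::bigint) AS count
FROM node_partials;  -- placeholder relation with the per-node partials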
@@ -9,6 +9,7 @@ accidentally triggering the load of a previous DB version.**
 **Features**
 * #5212 Allow pushdown of reference table joins
 * #5312 Add timeout support to the ping_data_node()
+* #5361 Add parallel support for partialize_agg()
 
 **Bugfixes**
@@ -3,7 +3,7 @@
 -- LICENSE-APACHE for a copy of the license.
 
 CREATE OR REPLACE FUNCTION _timescaledb_internal.partialize_agg(arg ANYELEMENT)
-RETURNS BYTEA AS '@MODULE_PATHNAME@', 'ts_partialize_agg' LANGUAGE C VOLATILE;
+RETURNS BYTEA AS '@MODULE_PATHNAME@', 'ts_partialize_agg' LANGUAGE C STABLE PARALLEL SAFE;
 
 CREATE OR REPLACE FUNCTION _timescaledb_internal.finalize_agg_sfunc(
     tstate internal, aggfn TEXT, inner_agg_collation_schema NAME, inner_agg_collation_name NAME, inner_agg_input_types NAME[][], inner_agg_serialized_state BYTEA, return_type_dummy_val ANYELEMENT)
@@ -1416,7 +1416,7 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
     * children as part of a partitioned relation. This will enable
     * partitionwise aggregation. */
    if ((enable_partitionwise_aggregate &&
-        !has_partialize_function(root->parse, TS_DO_NOT_FIX_AGGREF)) ||
+        !has_partialize_function((Node *) root->parse->targetList, TS_DO_NOT_FIX_AGGSPLIT)) ||
        hypertable_is_distributed(ht))
    {
        build_hypertable_partition_info(ht, root, rel, list_length(inh_oids));
@@ -64,19 +64,25 @@ check_for_partialize_function_call(Node *node, PartializeWalkerState *state)
        if (state->looking_for_agg)
        {
            state->looking_for_agg = false;
-           if (state->fix_aggref == TS_FIX_AGGREF)
-           {
-               aggref->aggsplit = AGGSPLIT_INITIAL_SERIAL;
-
-               if (aggref->aggtranstype == INTERNALOID &&
-                   DO_AGGSPLIT_SERIALIZE(AGGSPLIT_INITIAL_SERIAL))
-               {
-                   aggref->aggtype = BYTEAOID;
-               }
-               else
-               {
-                   aggref->aggtype = aggref->aggtranstype;
-               }
-           }
+
+           if (state->fix_aggref != TS_DO_NOT_FIX_AGGSPLIT)
+           {
+               if (state->fix_aggref == TS_FIX_AGGSPLIT_SIMPLE &&
+                   aggref->aggsplit == AGGSPLIT_SIMPLE)
+               {
+                   aggref->aggsplit = AGGSPLIT_INITIAL_SERIAL;
+               }
+               else if (state->fix_aggref == TS_FIX_AGGSPLIT_FINAL &&
+                        aggref->aggsplit == AGGSPLIT_FINAL_DESERIAL)
+               {
+                   aggref->aggsplit = AGGSPLITOP_COMBINE | AGGSPLITOP_DESERIALIZE |
+                                      AGGSPLITOP_SERIALIZE | AGGSPLITOP_SKIPFINAL;
+               }
+
+               if (aggref->aggtranstype == INTERNALOID)
+                   aggref->aggtype = BYTEAOID;
+               else
+                   aggref->aggtype = aggref->aggtranstype;
+           }
        }
@@ -99,7 +105,7 @@ check_for_partialize_function_call(Node *node, PartializeWalkerState *state)
 }
 
 bool
-has_partialize_function(Query *parse, PartializeAggFixAggref fix_aggref)
+has_partialize_function(Node *node, PartializeAggFixAggref fix_aggref)
 {
    Oid partialfnoid = InvalidOid;
    Oid argtyp[] = { ANYELEMENTOID };
@@ -114,7 +120,7 @@ has_partialize_function(Query *parse, PartializeAggFixAggref fix_aggref)
    partialfnoid = LookupFuncName(name, lengthof(argtyp), argtyp, false);
    Assert(OidIsValid(partialfnoid));
    state.fnoid = partialfnoid;
-   check_for_partialize_function_call((Node *) parse->targetList, &state);
+   check_for_partialize_function_call(node, &state);
 
    if (state.found_partialize && state.found_non_partial_agg)
        elog(ERROR, "cannot mix partialized and non-partialized aggregates in the same statement");
@@ -124,19 +130,59 @@ has_partialize_function(Query *parse, PartializeAggFixAggref fix_aggref)
 
 /*
  * Modify all AggPaths in relation to use partial aggregation.
+ *
+ * Note that there can be both parallel (split) paths and non-parallel
+ * (non-split) paths suggested at this stage, but all of them refer to the
+ * same Aggrefs. Depending on the Path picked, the Aggrefs are "fixed up" by
+ * the PostgreSQL planner at a later stage in planner (in setrefs.c) to match
+ * the choice of Path. For this reason, it is not possible to modify Aggrefs
+ * at this stage AND keep both type of Paths. Therefore, if a split Path is
+ * found, then prune the non-split path.
  */
-static void
+static bool
 partialize_agg_paths(RelOptInfo *rel)
 {
    ListCell *lc;
+   bool has_combine = false;
+   List *aggsplit_simple_paths = NIL;
+   List *aggsplit_final_paths = NIL;
+   List *other_paths = NIL;
 
    foreach (lc, rel->pathlist)
    {
        Path *path = lfirst(lc);
 
        if (IsA(path, AggPath))
-           castNode(AggPath, path)->aggsplit = AGGSPLIT_INITIAL_SERIAL;
+       {
+           AggPath *agg = castNode(AggPath, path);
+
+           if (agg->aggsplit == AGGSPLIT_SIMPLE)
+           {
+               agg->aggsplit = AGGSPLIT_INITIAL_SERIAL;
+               aggsplit_simple_paths = lappend(aggsplit_simple_paths, path);
+           }
+           else if (agg->aggsplit == AGGSPLIT_FINAL_DESERIAL)
+           {
+               has_combine = true;
+               aggsplit_final_paths = lappend(aggsplit_final_paths, path);
+           }
+           else
+           {
+               other_paths = lappend(other_paths, path);
+           }
+       }
+       else
+       {
+           other_paths = lappend(other_paths, path);
+       }
    }
+
+   if (aggsplit_final_paths != NIL)
+       rel->pathlist = list_concat(other_paths, aggsplit_final_paths);
+   else
+       rel->pathlist = list_concat(other_paths, aggsplit_simple_paths);
+
+   return has_combine;
 }
 
 /*
@@ -182,14 +228,30 @@ bool
 ts_plan_process_partialize_agg(PlannerInfo *root, RelOptInfo *output_rel)
 {
    Query *parse = root->parse;
+   bool found_partialize_agg_func;
 
    Assert(IS_UPPER_REL(output_rel));
 
    if (CMD_SELECT != parse->commandType || !parse->hasAggs)
        return false;
 
-   if (!has_partialize_function(parse, TS_FIX_AGGREF))
+   found_partialize_agg_func =
+       has_partialize_function((Node *) parse->targetList, TS_DO_NOT_FIX_AGGSPLIT);
+
+   if (!found_partialize_agg_func)
        return false;
 
+   /* partialize_agg() function found. Now turn simple (non-partial) aggs
+    * (AGGSPLIT_SIMPLE) into partials. If the Agg is a combine/final we want
+    * to do the combine but not the final step. However, it is not possible
+    * to change that here at the Path stage because the PostgreSQL planner
+    * will hit an assertion, so we defer that to the plan stage in planner.c.
+    */
+   bool is_combine = partialize_agg_paths(output_rel);
+
+   if (!is_combine)
+       has_partialize_function((Node *) parse->targetList, TS_FIX_AGGSPLIT_SIMPLE);
+
    /* We cannot check root->hasHavingqual here because sometimes the
     * planner can replace the HAVING clause with a simple filter. But
     * root->hashavingqual stays true to remember that the query had a
@@ -201,6 +263,5 @@ ts_plan_process_partialize_agg(PlannerInfo *root, RelOptInfo *output_rel)
            errhint("Any aggregates in a HAVING clause need to be partialized in the output "
                    "target list.")));
 
-   partialize_agg_paths(output_rel);
    return true;
 }
@@ -597,6 +597,25 @@ timescaledb_planner(Query *parse, int cursor_opts, ParamListInfo bound_params)
            if (subplan)
                ts_hypertable_modify_fixup_tlist(subplan);
        }
+
+       if (IsA(stmt->planTree, Agg))
+       {
+           Agg *agg = castNode(Agg, stmt->planTree);
+
+           /* If top-level plan is the finalize step of a partial
+            * aggregation, and it is wrapped in the partialize_agg()
+            * function, we want to do the combine step but skip
+            * finalization (e.g., for avg(), add up individual
+            * sum+counts, but don't compute the final average). */
+           if (agg->aggsplit == AGGSPLIT_FINAL_DESERIAL &&
+               has_partialize_function((Node *) agg->plan.targetlist, TS_FIX_AGGSPLIT_FINAL))
+           {
+               /* Deserialize input -> combine -> skip the final step ->
+                * serialize again */
+               agg->aggsplit = AGGSPLITOP_COMBINE | AGGSPLITOP_DESERIALIZE |
+                               AGGSPLITOP_SERIALIZE | AGGSPLITOP_SKIPFINAL;
+           }
+       }
    }
 
    if (reset_baserel_info)
@@ -90,12 +90,13 @@ typedef enum TsRelType
 
 typedef enum PartializeAggFixAggref
 {
-   TS_DO_NOT_FIX_AGGREF = 0,
-   TS_FIX_AGGREF = 1
+   TS_DO_NOT_FIX_AGGSPLIT = 0,
+   TS_FIX_AGGSPLIT_SIMPLE = 1,
+   TS_FIX_AGGSPLIT_FINAL = 2
 } PartializeAggFixAggref;
 
 Hypertable *ts_planner_get_hypertable(const Oid relid, const unsigned int flags);
-bool has_partialize_function(Query *parse, PartializeAggFixAggref fix_aggref);
+bool has_partialize_function(Node *node, PartializeAggFixAggref fix_aggref);
 bool ts_plan_process_partialize_agg(PlannerInfo *root, RelOptInfo *output_rel);
 
 extern void ts_plan_add_hashagg(PlannerInfo *root, RelOptInfo *input_rel, RelOptInfo *output_rel);
@@ -30,6 +30,15 @@ SELECT 1 FROM add_data_node('data_node_3', host => 'localhost',
 GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
 -- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes
 GRANT CREATE ON SCHEMA public TO :ROLE_1;
+-- make sure parallel query plans are preferred on data nodes
+ALTER DATABASE :DN_DBNAME_1 SET parallel_setup_cost TO 1;
+ALTER DATABASE :DN_DBNAME_2 SET parallel_setup_cost TO 1;
+ALTER DATABASE :DN_DBNAME_3 SET parallel_setup_cost TO 1;
+-- make sure query push-down is enabled
+ALTER DATABASE :DN_DBNAME_1 SET enable_partitionwise_aggregate TO true;
+ALTER DATABASE :DN_DBNAME_2 SET enable_partitionwise_aggregate TO true;
+ALTER DATABASE :DN_DBNAME_3 SET enable_partitionwise_aggregate TO true;
+SET enable_partitionwise_aggregate TO true;
 SET ROLE :ROLE_1;
 create table uk_price_paid(price integer, "date" date, postcode1 text, postcode2 text, type smallint, is_new bool, duration smallint, addr1 text, addr2 text, street text, locality text, town text, district text, country text, category smallint);
 -- Aim to about 100 partitions, the data is from 1995 to 2022.
@@ -74,6 +74,14 @@ NOTICE: [db_dist_partial_agg_3]:
 (1 row)
 
 GRANT CREATE ON SCHEMA public TO :ROLE_1;
+-- make sure parallel query plans are preferred on data nodes
+ALTER DATABASE :DATA_NODE_1 SET parallel_setup_cost TO 1;
+ALTER DATABASE :DATA_NODE_2 SET parallel_setup_cost TO 1;
+ALTER DATABASE :DATA_NODE_3 SET parallel_setup_cost TO 1;
+-- make sure partitionwise aggregation is enabled on data nodes
+ALTER DATABASE :DATA_NODE_1 SET enable_partitionwise_aggregate TO true;
+ALTER DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true;
+ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true;
 SET ROLE :ROLE_1;
 SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day');
  table_name
@@ -410,6 +410,479 @@ SELECT '2022-01-01 00:00:00-03'::timestamptz + interval '1 year' * mix(x), mix(x
 FROM generate_series(1, 100000) x(x);
 SET force_parallel_mode = 'on';
 SET parallel_setup_cost = 0;
+-- Materialize partials from execution of parallel query plan
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT
+    _timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
+    _timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
+    _timescaledb_internal.partialize_agg(min(value)) AS partial_min,
+    _timescaledb_internal.partialize_agg(max(value)) AS partial_max,
+    _timescaledb_internal.partialize_agg(count(*)) AS partial_count
+FROM public.issue4922;
+                                    QUERY PLAN
+------------------------------------------------------------------------------
+ Partial Aggregate
+   Output: _timescaledb_internal.partialize_agg(PARTIAL sum(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL avg(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL min(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL max(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL count(*))
+   ->  Gather
+         Output: (PARTIAL sum(_hyper_2_4_chunk.value)), (PARTIAL avg(_hyper_2_4_chunk.value)), (PARTIAL min(_hyper_2_4_chunk.value)), (PARTIAL max(_hyper_2_4_chunk.value)), (PARTIAL count(*))
+         Workers Planned: 2
+         ->  Partial Aggregate
+               Output: PARTIAL sum(_hyper_2_4_chunk.value), PARTIAL avg(_hyper_2_4_chunk.value), PARTIAL min(_hyper_2_4_chunk.value), PARTIAL max(_hyper_2_4_chunk.value), PARTIAL count(*)
+               ->  Parallel Append
+                     ->  Parallel Seq Scan on _timescaledb_internal._hyper_2_4_chunk
+                           Output: _hyper_2_4_chunk.value
+                     [... analogous "Parallel Seq Scan" / "Output" pairs for _hyper_2_5_chunk through _hyper_2_104_chunk ...]
+(210 rows)
+
+CREATE MATERIALIZED VIEW issue4922_partials_parallel AS
+SELECT
+    _timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
+    _timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
+    _timescaledb_internal.partialize_agg(min(value)) AS partial_min,
+    _timescaledb_internal.partialize_agg(max(value)) AS partial_max,
+    _timescaledb_internal.partialize_agg(count(*)) AS partial_count
+FROM public.issue4922;
+-- Materialize partials from execution of non-parallel query plan
+SET max_parallel_workers_per_gather = 0;
+EXPLAIN (VERBOSE, COSTS OFF)
+SELECT
+    _timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
+    _timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
+    _timescaledb_internal.partialize_agg(min(value)) AS partial_min,
+    _timescaledb_internal.partialize_agg(max(value)) AS partial_max,
+    _timescaledb_internal.partialize_agg(count(*)) AS partial_count
+FROM public.issue4922;
+                                    QUERY PLAN
+------------------------------------------------------------------------------
+ Partial Aggregate
+   Output: _timescaledb_internal.partialize_agg(PARTIAL sum(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL avg(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL min(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL max(_hyper_2_4_chunk.value)), _timescaledb_internal.partialize_agg(PARTIAL count(*))
+   ->  Append
+         ->  Seq Scan on _timescaledb_internal._hyper_2_4_chunk
+               Output: _hyper_2_4_chunk.value
+         [... analogous "Seq Scan" / "Output" pairs for _hyper_2_5_chunk through _hyper_2_104_chunk ...]
+(205 rows)
+
+CREATE MATERIALIZED VIEW issue4922_partials_non_parallel AS
+SELECT
+    _timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
+    _timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
+    _timescaledb_internal.partialize_agg(min(value)) AS partial_min,
+    _timescaledb_internal.partialize_agg(max(value)) AS partial_max,
+    _timescaledb_internal.partialize_agg(count(*)) AS partial_count
+FROM public.issue4922;
+RESET max_parallel_workers_per_gather;
+-- partials should be the same in both parallel and non-parallel execution
+SELECT * FROM issue4922_partials_parallel;
+    partial_sum     | partial_avg | partial_min | partial_max | partial_count
+--------------------+--------------------------------------------------------------------------------------------+-------------+-------------+--------------------
+ \x00000000004c4fa9 | \x00000001000000000000001400000002000000010000000800000000000186a00000000800000000004c4fa9 | \x00000000 | \x00000064 | \x00000000000186a0
+(1 row)
+
+SELECT * FROM issue4922_partials_non_parallel;
+    partial_sum     | partial_avg | partial_min | partial_max | partial_count
+--------------------+--------------------------------------------------------------------------------------------+-------------+-------------+--------------------
+ \x00000000004c4fa9 | \x00000001000000000000001400000002000000010000000800000000000186a00000000800000000004c4fa9 | \x00000000 | \x00000064 | \x00000000000186a0
+(1 row)
+
+-- Compare results from partial and non-partial query execution
 SELECT
     sum(value),
     avg(value),
@ -429,259 +902,9 @@ SELECT
|
|||||||
_timescaledb_internal.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min,
|
_timescaledb_internal.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min,
|
||||||
_timescaledb_internal.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max,
|
_timescaledb_internal.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max,
|
||||||
_timescaledb_internal.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count
|
_timescaledb_internal.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count
|
||||||
FROM (
|
FROM issue4922_partials_parallel;
|
||||||
SELECT
|
|
||||||
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
|
|
||||||
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
|
|
||||||
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
|
|
||||||
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
|
|
||||||
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
|
|
||||||
FROM public.issue4922) AS a;
|
|
||||||
sum | avg | min | max | count
|
sum | avg | min | max | count
|
||||||
---------+---------------------+-----+-----+--------
|
---------+---------------------+-----+-----+--------
|
||||||
5001129 | 50.0112900000000000 | 0 | 100 | 100000
|
5001129 | 50.0112900000000000 | 0 | 100 | 100000
|
||||||
(1 row)
|
(1 row)
|
||||||
|
|
||||||
-- Check for parallel planning
|
|
||||||
EXPLAIN (COSTS OFF)
|
|
||||||
SELECT
|
|
||||||
sum(value),
|
|
||||||
avg(value),
|
|
||||||
min(value),
|
|
||||||
max(value),
|
|
||||||
count(*)
|
|
||||||
FROM issue4922;
|
|
||||||
QUERY PLAN
|
|
||||||
-----------------------------------------------------------------
|
|
||||||
Finalize Aggregate
|
|
||||||
-> Gather
|
|
||||||
Workers Planned: 2
|
|
||||||
-> Partial Aggregate
|
|
||||||
-> Parallel Append
|
|
||||||
-> Parallel Seq Scan on _hyper_2_4_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_5_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_6_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_7_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_8_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_9_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_10_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_11_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_12_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_13_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_14_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_15_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_16_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_17_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_18_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_19_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_20_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_21_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_22_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_23_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_24_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_25_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_26_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_27_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_28_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_29_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_30_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_31_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_32_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_33_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_34_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_35_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_36_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_37_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_38_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_39_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_40_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_41_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_42_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_43_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_44_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_45_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_46_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_47_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_48_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_49_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_50_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_51_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_52_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_53_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_54_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_55_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_56_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_57_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_58_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_59_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_60_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_61_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_62_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_63_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_64_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_65_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_66_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_67_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_68_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_69_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_70_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_71_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_72_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_73_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_74_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_75_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_76_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_77_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_78_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_79_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_80_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_81_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_82_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_83_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_84_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_85_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_86_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_87_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_88_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_89_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_90_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_91_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_92_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_93_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_94_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_95_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_96_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_97_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_98_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_99_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_100_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_101_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_102_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_103_chunk
|
|
||||||
-> Parallel Seq Scan on _hyper_2_104_chunk
|
|
||||||
(106 rows)
|
|
||||||
|
|
||||||
-- Make sure even forcing the parallel mode those functions are not safe for parallel
EXPLAIN (COSTS OFF)
SELECT
_timescaledb_internal.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_sum, NULL::bigint) AS sum,
_timescaledb_internal.finalize_agg('pg_catalog.avg(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_avg, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count
FROM (
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922) AS a;
QUERY PLAN
--------------------------------------------------
Aggregate
-> Partial Aggregate
-> Append
-> Seq Scan on _hyper_2_4_chunk
-> Seq Scan on _hyper_2_5_chunk
-> Seq Scan on _hyper_2_6_chunk
-> Seq Scan on _hyper_2_7_chunk
-> Seq Scan on _hyper_2_8_chunk
-> Seq Scan on _hyper_2_9_chunk
-> Seq Scan on _hyper_2_10_chunk
-> Seq Scan on _hyper_2_11_chunk
-> Seq Scan on _hyper_2_12_chunk
-> Seq Scan on _hyper_2_13_chunk
-> Seq Scan on _hyper_2_14_chunk
-> Seq Scan on _hyper_2_15_chunk
-> Seq Scan on _hyper_2_16_chunk
-> Seq Scan on _hyper_2_17_chunk
-> Seq Scan on _hyper_2_18_chunk
-> Seq Scan on _hyper_2_19_chunk
-> Seq Scan on _hyper_2_20_chunk
-> Seq Scan on _hyper_2_21_chunk
-> Seq Scan on _hyper_2_22_chunk
-> Seq Scan on _hyper_2_23_chunk
-> Seq Scan on _hyper_2_24_chunk
-> Seq Scan on _hyper_2_25_chunk
-> Seq Scan on _hyper_2_26_chunk
-> Seq Scan on _hyper_2_27_chunk
-> Seq Scan on _hyper_2_28_chunk
-> Seq Scan on _hyper_2_29_chunk
-> Seq Scan on _hyper_2_30_chunk
-> Seq Scan on _hyper_2_31_chunk
-> Seq Scan on _hyper_2_32_chunk
-> Seq Scan on _hyper_2_33_chunk
-> Seq Scan on _hyper_2_34_chunk
-> Seq Scan on _hyper_2_35_chunk
-> Seq Scan on _hyper_2_36_chunk
-> Seq Scan on _hyper_2_37_chunk
-> Seq Scan on _hyper_2_38_chunk
-> Seq Scan on _hyper_2_39_chunk
-> Seq Scan on _hyper_2_40_chunk
-> Seq Scan on _hyper_2_41_chunk
-> Seq Scan on _hyper_2_42_chunk
-> Seq Scan on _hyper_2_43_chunk
-> Seq Scan on _hyper_2_44_chunk
-> Seq Scan on _hyper_2_45_chunk
-> Seq Scan on _hyper_2_46_chunk
-> Seq Scan on _hyper_2_47_chunk
-> Seq Scan on _hyper_2_48_chunk
-> Seq Scan on _hyper_2_49_chunk
-> Seq Scan on _hyper_2_50_chunk
-> Seq Scan on _hyper_2_51_chunk
-> Seq Scan on _hyper_2_52_chunk
-> Seq Scan on _hyper_2_53_chunk
-> Seq Scan on _hyper_2_54_chunk
-> Seq Scan on _hyper_2_55_chunk
-> Seq Scan on _hyper_2_56_chunk
-> Seq Scan on _hyper_2_57_chunk
-> Seq Scan on _hyper_2_58_chunk
-> Seq Scan on _hyper_2_59_chunk
-> Seq Scan on _hyper_2_60_chunk
-> Seq Scan on _hyper_2_61_chunk
-> Seq Scan on _hyper_2_62_chunk
-> Seq Scan on _hyper_2_63_chunk
-> Seq Scan on _hyper_2_64_chunk
-> Seq Scan on _hyper_2_65_chunk
-> Seq Scan on _hyper_2_66_chunk
-> Seq Scan on _hyper_2_67_chunk
-> Seq Scan on _hyper_2_68_chunk
-> Seq Scan on _hyper_2_69_chunk
-> Seq Scan on _hyper_2_70_chunk
-> Seq Scan on _hyper_2_71_chunk
-> Seq Scan on _hyper_2_72_chunk
-> Seq Scan on _hyper_2_73_chunk
-> Seq Scan on _hyper_2_74_chunk
-> Seq Scan on _hyper_2_75_chunk
-> Seq Scan on _hyper_2_76_chunk
-> Seq Scan on _hyper_2_77_chunk
-> Seq Scan on _hyper_2_78_chunk
-> Seq Scan on _hyper_2_79_chunk
-> Seq Scan on _hyper_2_80_chunk
-> Seq Scan on _hyper_2_81_chunk
-> Seq Scan on _hyper_2_82_chunk
-> Seq Scan on _hyper_2_83_chunk
-> Seq Scan on _hyper_2_84_chunk
-> Seq Scan on _hyper_2_85_chunk
-> Seq Scan on _hyper_2_86_chunk
-> Seq Scan on _hyper_2_87_chunk
-> Seq Scan on _hyper_2_88_chunk
-> Seq Scan on _hyper_2_89_chunk
-> Seq Scan on _hyper_2_90_chunk
-> Seq Scan on _hyper_2_91_chunk
-> Seq Scan on _hyper_2_92_chunk
-> Seq Scan on _hyper_2_93_chunk
-> Seq Scan on _hyper_2_94_chunk
-> Seq Scan on _hyper_2_95_chunk
-> Seq Scan on _hyper_2_96_chunk
-> Seq Scan on _hyper_2_97_chunk
-> Seq Scan on _hyper_2_98_chunk
-> Seq Scan on _hyper_2_99_chunk
-> Seq Scan on _hyper_2_100_chunk
-> Seq Scan on _hyper_2_101_chunk
-> Seq Scan on _hyper_2_102_chunk
-> Seq Scan on _hyper_2_103_chunk
-> Seq Scan on _hyper_2_104_chunk
(104 rows)

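Unlike the previous plan, this one has no Gather node: the outer finalize_agg() call keeps the query from being parallelized even with force_parallel_mode enabled, which is what the test comment above asserts. As an illustrative check (not part of the diff), the parallel-safety labels the planner consults can be read from the catalog:

-- 's' = parallel safe, 'r' = restricted, 'u' = unsafe
SELECT p.proname, p.proparallel
FROM pg_proc p
JOIN pg_namespace n ON n.oid = p.pronamespace
WHERE n.nspname = '_timescaledb_internal'
  AND p.proname IN ('partialize_agg', 'finalize_agg');
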
@ -19,6 +19,17 @@ SELECT 1 FROM add_data_node('data_node_3', host => 'localhost',
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes
GRANT CREATE ON SCHEMA public TO :ROLE_1;

-- make sure parallel query plans are preferred on data nodes
ALTER DATABASE :DN_DBNAME_1 SET parallel_setup_cost TO 1;
ALTER DATABASE :DN_DBNAME_2 SET parallel_setup_cost TO 1;
ALTER DATABASE :DN_DBNAME_3 SET parallel_setup_cost TO 1;

-- make sure query push-down is enabled
ALTER DATABASE :DN_DBNAME_1 SET enable_partitionwise_aggregate TO true;
ALTER DATABASE :DN_DBNAME_2 SET enable_partitionwise_aggregate TO true;
ALTER DATABASE :DN_DBNAME_3 SET enable_partitionwise_aggregate TO true;
SET enable_partitionwise_aggregate TO true;
SET ROLE :ROLE_1;

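Note (illustrative, not part of the diff): ALTER DATABASE ... SET stores a per-database default that only applies to sessions started afterwards, so the test sets it before the data-node sessions run the queries under test. The stored overrides can be inspected with a catalog query such as:

SELECT d.datname, unnest(s.setconfig) AS per_database_setting
FROM pg_db_role_setting s
JOIN pg_database d ON d.oid = s.setdatabase;
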
@ -28,6 +28,17 @@ $$
CREATE TYPE custom_type AS (high int, low int);
$$);
GRANT CREATE ON SCHEMA public TO :ROLE_1;

-- make sure parallel query plans are preferred on data nodes
ALTER DATABASE :DATA_NODE_1 SET parallel_setup_cost TO 1;
ALTER DATABASE :DATA_NODE_2 SET parallel_setup_cost TO 1;
ALTER DATABASE :DATA_NODE_3 SET parallel_setup_cost TO 1;

-- make sure partitionwise aggregation is enabled on data nodes
ALTER DATABASE :DATA_NODE_1 SET enable_partitionwise_aggregate TO true;
ALTER DATABASE :DATA_NODE_2 SET enable_partitionwise_aggregate TO true;
ALTER DATABASE :DATA_NODE_3 SET enable_partitionwise_aggregate TO true;

SET ROLE :ROLE_1;
SELECT table_name FROM create_distributed_hypertable( 'conditions', 'timec', 'location', 3, chunk_time_interval => INTERVAL '1 day');

@ -316,6 +316,54 @@ FROM generate_series(1, 100000) x(x);
SET force_parallel_mode = 'on';
SET parallel_setup_cost = 0;

-- Materialize partials from execution of parallel query plan
EXPLAIN (VERBOSE, COSTS OFF)
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922;

CREATE MATERIALIZED VIEW issue4922_partials_parallel AS
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922;

-- Materialize partials from execution of non-parallel query plan
SET max_parallel_workers_per_gather = 0;

EXPLAIN (VERBOSE, COSTS OFF)
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922;

CREATE MATERIALIZED VIEW issue4922_partials_non_parallel AS
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922;

RESET max_parallel_workers_per_gather;

-- partials should be the same in both parallel and non-parallel execution
SELECT * FROM issue4922_partials_parallel;
SELECT * FROM issue4922_partials_non_parallel;

-- Compare results from partial and non-partial query execution
SELECT
sum(value),
avg(value),
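A minimal sketch (not part of the diff) of how the "partials should be the same" expectation above could be verified explicitly, assuming the two materialized views defined in this hunk have been populated:

SELECT p.partial_sum   IS NOT DISTINCT FROM n.partial_sum   AS sum_matches,
       p.partial_avg   IS NOT DISTINCT FROM n.partial_avg   AS avg_matches,
       p.partial_min   IS NOT DISTINCT FROM n.partial_min   AS min_matches,
       p.partial_max   IS NOT DISTINCT FROM n.partial_max   AS max_matches,
       p.partial_count IS NOT DISTINCT FROM n.partial_count AS count_matches
FROM issue4922_partials_parallel p
CROSS JOIN issue4922_partials_non_parallel n;
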
@ -331,38 +379,4 @@ SELECT
_timescaledb_internal.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count
FROM (
FROM issue4922_partials_parallel;
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922) AS a;

-- Check for parallel planning
EXPLAIN (COSTS OFF)
SELECT
sum(value),
avg(value),
min(value),
max(value),
count(*)
FROM issue4922;

-- Make sure even forcing the parallel mode those functions are not safe for parallel
EXPLAIN (COSTS OFF)
SELECT
_timescaledb_internal.finalize_agg('pg_catalog.sum(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_sum, NULL::bigint) AS sum,
_timescaledb_internal.finalize_agg('pg_catalog.avg(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_avg, NULL::numeric) AS avg,
_timescaledb_internal.finalize_agg('pg_catalog.min(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_min, NULL::integer) AS min,
_timescaledb_internal.finalize_agg('pg_catalog.max(integer)'::text, NULL::name, NULL::name, '{{pg_catalog,int4}}'::name[], partial_max, NULL::integer) AS max,
_timescaledb_internal.finalize_agg('pg_catalog.count()'::text, NULL::name, NULL::name, '{}'::name[], partial_count, NULL::bigint) AS count
FROM (
SELECT
_timescaledb_internal.partialize_agg(sum(value)) AS partial_sum,
_timescaledb_internal.partialize_agg(avg(value)) AS partial_avg,
_timescaledb_internal.partialize_agg(min(value)) AS partial_min,
_timescaledb_internal.partialize_agg(max(value)) AS partial_max,
_timescaledb_internal.partialize_agg(count(*)) AS partial_count
FROM public.issue4922) AS a;