mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-26 00:00:54 +08:00
Adjust Code to PG14 ModifyTablePath changes
PG14 changes the ModifyTablePath struct to have a single child subpath instead of a list of subpaths. Similarly, the ModifyTableState field mt_nplans gets removed, because a ModifyTable node will only have a single child in PG14. The same patch also removes ri_junkFilter from ResultRelInfo. https://github.com/postgres/postgres/commit/86dc9005
This commit is contained in:
parent
d011418239
commit
325f68a62d
@ -58,9 +58,13 @@ ts_chunk_dispatch_has_returning(const ChunkDispatch *dispatch)
|
||||
List *
|
||||
ts_chunk_dispatch_get_returning_clauses(const ChunkDispatch *dispatch)
|
||||
{
|
||||
ModifyTableState *mtstate = dispatch->dispatch_state->mtstate;
|
||||
|
||||
#if PG14_LT
|
||||
ModifyTableState *mtstate = get_modifytable_state(dispatch);
|
||||
return list_nth(get_modifytable(dispatch)->returningLists, mtstate->mt_whichplan);
|
||||
#else
|
||||
Assert(list_length(get_modifytable(dispatch)->returningLists) == 1);
|
||||
return linitial(get_modifytable(dispatch)->returningLists);
|
||||
#endif
|
||||
}
|
||||
|
||||
List *
|
||||
|
@ -91,7 +91,11 @@ ts_chunk_dispatch_path_create(PlannerInfo *root, ModifyTablePath *mtpath, Index
|
||||
int subpath_index)
|
||||
{
|
||||
ChunkDispatchPath *path = (ChunkDispatchPath *) palloc0(sizeof(ChunkDispatchPath));
|
||||
#if PG14_LT
|
||||
Path *subpath = list_nth(mtpath->subpaths, subpath_index);
|
||||
#else
|
||||
Path *subpath = mtpath->subpath;
|
||||
#endif
|
||||
RangeTblEntry *rte = planner_rt_fetch(hypertable_rti, root);
|
||||
|
||||
memcpy(&path->cpath.path, subpath, sizeof(Path));
|
||||
|
@ -50,12 +50,14 @@ chunk_dispatch_begin(CustomScanState *node, EState *estate, int eflags)
|
||||
static void
|
||||
on_chunk_insert_state_changed(ChunkInsertState *cis, void *data)
|
||||
{
|
||||
#if PG14_LT
|
||||
ChunkDispatchState *state = data;
|
||||
ModifyTableState *mtstate = state->mtstate;
|
||||
|
||||
/* PG12 expects the current target slot to match the result relation. Thus
|
||||
/* PG < 14 expects the current target slot to match the result relation. Thus
|
||||
* we need to make sure it is up-to-date with the current chunk here. */
|
||||
mtstate->mt_scans[mtstate->mt_whichplan] = cis->slot;
|
||||
#endif
|
||||
}
|
||||
|
||||
static TupleTableSlot *
|
||||
@ -219,7 +221,9 @@ ts_chunk_dispatch_state_set_parent(ChunkDispatchState *state, ModifyTableState *
|
||||
ModifyTable *mt_plan = castNode(ModifyTable, mtstate->ps.plan);
|
||||
|
||||
/* Inserts on hypertables should always have one subplan */
|
||||
#if PG14_LT
|
||||
Assert(mtstate->mt_nplans == 1);
|
||||
#endif
|
||||
state->mtstate = mtstate;
|
||||
state->arbiter_indexes = mt_plan->arbiterIndexes;
|
||||
}
|
||||
|
@ -96,7 +96,9 @@ create_chunk_result_relation_info(ChunkDispatch *dispatch, Relation rel)
|
||||
rri_orig = dispatch->hypertable_result_rel_info;
|
||||
rri->ri_WithCheckOptions = rri_orig->ri_WithCheckOptions;
|
||||
rri->ri_WithCheckOptionExprs = rri_orig->ri_WithCheckOptionExprs;
|
||||
#if PG14_LT
|
||||
rri->ri_junkFilter = rri_orig->ri_junkFilter;
|
||||
#endif
|
||||
rri->ri_projectReturning = rri_orig->ri_projectReturning;
|
||||
|
||||
rri->ri_FdwState = NULL;
|
||||
@ -123,7 +125,9 @@ create_compress_chunk_result_relation_info(ChunkDispatch *dispatch, Relation com
|
||||
/* RLS policies are not supported if compression is enabled */
|
||||
Assert(rri_orig->ri_WithCheckOptions == NULL && rri_orig->ri_WithCheckOptionExprs == NULL);
|
||||
Assert(rri_orig->ri_projectReturning == NULL);
|
||||
#if PG14_LT
|
||||
rri->ri_junkFilter = rri_orig->ri_junkFilter;
|
||||
#endif
|
||||
|
||||
/* compressed rel chunk is on data node. Does not need any FDW access on AN */
|
||||
rri->ri_FdwState = NULL;
|
||||
|
@ -18,6 +18,7 @@
|
||||
#include <foreign/foreign.h>
|
||||
#include <catalog/pg_type.h>
|
||||
|
||||
#include "compat/compat.h"
|
||||
#include "hypertable_insert.h"
|
||||
#include "chunk_dispatch_state.h"
|
||||
#include "chunk_dispatch_plan.h"
|
||||
@ -80,7 +81,6 @@ hypertable_insert_begin(CustomScanState *node, EState *estate, int eflags)
|
||||
PlanState *ps;
|
||||
List *chunk_dispatch_states = NIL;
|
||||
ListCell *lc;
|
||||
int i;
|
||||
|
||||
ps = ExecInitNode(&state->mt->plan, estate, eflags);
|
||||
node->custom_ps = list_make1(ps);
|
||||
@ -89,13 +89,17 @@ hypertable_insert_begin(CustomScanState *node, EState *estate, int eflags)
|
||||
/*
|
||||
* Find all ChunkDispatchState subnodes and set their parent
|
||||
* ModifyTableState node
|
||||
* We assert we only have 1 ModifyTable subpath when we create
|
||||
* the HypertableInsert path so this should not have changed here.
|
||||
*/
|
||||
for (i = 0; i < mtstate->mt_nplans; i++)
|
||||
{
|
||||
List *substates = get_chunk_dispatch_states(mtstate->mt_plans[i]);
|
||||
#if PG14_LT
|
||||
Assert(mtstate->mt_nplans == 1);
|
||||
PlanState *subplan = mtstate->mt_plans[0];
|
||||
#else
|
||||
PlanState *subplan = outerPlanState(mtstate);
|
||||
#endif
|
||||
|
||||
chunk_dispatch_states = list_concat(chunk_dispatch_states, substates);
|
||||
}
|
||||
chunk_dispatch_states = get_chunk_dispatch_states(subplan);
|
||||
|
||||
/* Ensure that we found at least one ChunkDispatchState node */
|
||||
Assert(list_length(chunk_dispatch_states) > 0);
|
||||
@ -443,52 +447,49 @@ Path *
|
||||
ts_hypertable_insert_path_create(PlannerInfo *root, ModifyTablePath *mtpath)
|
||||
{
|
||||
Path *path = &mtpath->path;
|
||||
Path *subpath;
|
||||
Cache *hcache = ts_hypertable_cache_pin();
|
||||
ListCell *lc_path, *lc_rel;
|
||||
List *subpaths = NIL;
|
||||
Bitmapset *distributed_insert_plans = NULL;
|
||||
Hypertable *ht = NULL;
|
||||
HypertableInsertPath *hipath;
|
||||
int i = 0;
|
||||
|
||||
#if PG14_LT
|
||||
/* Since it's theoretically possible for ModifyTablePath to have multiple subpaths
|
||||
* in PG < 14 we assert that we only get 1 subpath here. */
|
||||
Assert(list_length(mtpath->subpaths) == list_length(mtpath->resultRelations));
|
||||
Assert(list_length(mtpath->subpaths) == 1);
|
||||
#endif
|
||||
|
||||
forboth (lc_path, mtpath->subpaths, lc_rel, mtpath->resultRelations)
|
||||
Index rti = linitial_int(mtpath->resultRelations);
|
||||
RangeTblEntry *rte = planner_rt_fetch(rti, root);
|
||||
|
||||
ht = ts_hypertable_cache_get_entry(hcache, rte->relid, CACHE_FLAG_MISSING_OK);
|
||||
|
||||
if (!ht)
|
||||
{
|
||||
Path *subpath = lfirst(lc_path);
|
||||
Index rti = lfirst_int(lc_rel);
|
||||
RangeTblEntry *rte = planner_rt_fetch(rti, root);
|
||||
|
||||
ht = ts_hypertable_cache_get_entry(hcache, rte->relid, CACHE_FLAG_MISSING_OK);
|
||||
|
||||
if (ht != NULL)
|
||||
{
|
||||
if (root->parse->onConflict != NULL &&
|
||||
root->parse->onConflict->constraint != InvalidOid)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("hypertables do not support ON CONFLICT statements that reference "
|
||||
"constraints"),
|
||||
errhint("Use column names to infer indexes instead.")));
|
||||
|
||||
if (hypertable_is_distributed(ht) && ts_guc_max_insert_batch_size > 0)
|
||||
{
|
||||
/* Remember that this will become a data node dispatch/copy
|
||||
* plan. We need to know later whether or not to plan this
|
||||
* using the FDW API. */
|
||||
distributed_insert_plans = bms_add_member(distributed_insert_plans, i);
|
||||
subpath = ts_cm_functions->distributed_insert_path_create(root, mtpath, rti, i);
|
||||
}
|
||||
else
|
||||
subpath = ts_chunk_dispatch_path_create(root, mtpath, rti, i);
|
||||
}
|
||||
|
||||
i++;
|
||||
subpaths = lappend(subpaths, subpath);
|
||||
}
|
||||
|
||||
if (NULL == ht)
|
||||
elog(ERROR, "no hypertable found in INSERT plan");
|
||||
}
|
||||
else
|
||||
{
|
||||
if (root->parse->onConflict && OidIsValid(root->parse->onConflict->constraint))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("hypertables do not support ON CONFLICT statements that reference "
|
||||
"constraints"),
|
||||
errhint("Use column names to infer indexes instead.")));
|
||||
|
||||
if (hypertable_is_distributed(ht) && ts_guc_max_insert_batch_size > 0)
|
||||
{
|
||||
/* Remember that this will become a data node dispatch/copy
|
||||
* plan. We need to know later whether or not to plan this
|
||||
* using the FDW API. */
|
||||
distributed_insert_plans = bms_add_member(distributed_insert_plans, i);
|
||||
subpath = ts_cm_functions->distributed_insert_path_create(root, mtpath, rti, i);
|
||||
}
|
||||
else
|
||||
subpath = ts_chunk_dispatch_path_create(root, mtpath, rti, i);
|
||||
}
|
||||
|
||||
hipath = palloc0(sizeof(HypertableInsertPath));
|
||||
|
||||
@ -501,7 +502,11 @@ ts_hypertable_insert_path_create(PlannerInfo *root, ModifyTablePath *mtpath)
|
||||
hipath->distributed_insert_plans = distributed_insert_plans;
|
||||
hipath->serveroids = ts_hypertable_get_available_data_node_server_oids(ht);
|
||||
path = &hipath->cpath.path;
|
||||
mtpath->subpaths = subpaths;
|
||||
#if PG14_LT
|
||||
mtpath->subpaths = list_make1(subpath);
|
||||
#else
|
||||
mtpath->subpath = subpath;
|
||||
#endif
|
||||
|
||||
ts_cache_release(hcache);
|
||||
|
||||
|
@ -474,7 +474,11 @@ tsl_debug_append_path(StringInfo buf, PlannerInfo *root, Path *path, int indent)
|
||||
break;
|
||||
case T_ModifyTablePath:
|
||||
ptype = "ModifyTable";
|
||||
#if PG14_LT
|
||||
subpath_list = castNode(ModifyTablePath, path)->subpaths;
|
||||
#else
|
||||
subpath_list = list_make1(castNode(ModifyTablePath, path)->subpath);
|
||||
#endif
|
||||
break;
|
||||
case T_LimitPath:
|
||||
ptype = "Limit";
|
||||
|
Loading…
x
Reference in New Issue
Block a user