Add clang-tidy warning readability-inconsistent-declaration-parameter-name

Mostly cosmetic changes. Declaration parameter names are matched to the
definitions automatically with --fix-notes.
Alexander Kuzmenkov 2022-10-18 21:12:35 +04:00 committed by Alexander Kuzmenkov
parent 73c3d02ed2
commit f862212c8c
43 changed files with 133 additions and 126 deletions
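For illustration, a minimal sketch of what the check reports, using hypothetical names rather than code taken verbatim from this commit: the warning fires when a parameter name in a declaration differs from the name used in the definition. The commit message notes that the fixes were applied with --fix-notes, which suggests the suggested rename hangs off an associated diagnostic note rather than the warning itself.

#include <stdbool.h>

typedef struct JobError { int error_code; } JobError;

/* Header declaration: the parameter is named "jerr" here... */
extern bool job_errors_insert_tuple(const JobError *jerr);

/* ...but the definition names it "job_err".
 * readability-inconsistent-declaration-parameter-name flags the declaration
 * and offers a fix that renames the declared parameter to "job_err";
 * running clang-tidy with --fix-notes (as this commit did) applies it
 * automatically. */
bool
job_errors_insert_tuple(const JobError *job_err)
{
	return job_err->error_code == 0;
}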

View File

@ -485,7 +485,7 @@ if(LINTER)
message(STATUS "Linter support (clang-tidy) enabled")
if(LINTER_STRICT)
set(CMAKE_C_CLANG_TIDY
"${CLANG_TIDY};--checks=clang-diagnostic-*,clang-analyzer-*,-*,clang-analyzer-core.*,clang-diagnostic-*,readability-redundant-control-flow,bugprone-argument-comment,bugprone-macro-parentheses,readability-suspicious-call-argument,readability-misleading-indentation;--warnings-as-errors=*"
"${CLANG_TIDY};--checks=clang-diagnostic-*,clang-analyzer-*,-*,clang-analyzer-core.*,clang-diagnostic-*,readability-redundant-control-flow,bugprone-argument-comment,bugprone-macro-parentheses,readability-suspicious-call-argument,readability-misleading-indentation,readability-inconsistent-declaration-parameter-name;--warnings-as-errors=*"
)
else()
set(CMAKE_C_CLANG_TIDY "${CLANG_TIDY};--quiet")

View File

@ -58,7 +58,7 @@ extern void ts_bgw_job_set_scheduler_test_hook(scheduler_test_hook_type hook);
extern void ts_bgw_job_set_job_entrypoint_function_name(char *func_name);
extern bool ts_bgw_job_run_and_set_next_start(BgwJob *job, job_main_func func, int64 initial_runs,
Interval *next_interval);
extern TSDLLEXPORT bool ts_job_errors_insert_tuple(const FormData_job_error *jerr);
extern TSDLLEXPORT bool ts_job_errors_insert_tuple(const FormData_job_error *job_err);
extern TSDLLEXPORT void ts_bgw_job_validate_schedule_interval(Interval *schedule_interval);
extern TSDLLEXPORT char *ts_bgw_job_validate_timezone(Datum timezone);
#endif /* BGW_JOB_H */

View File

@ -40,7 +40,7 @@ extern TSDLLEXPORT void ts_bgw_job_stat_upsert_next_start(int32 bgw_job_id, Time
extern bool ts_bgw_job_stat_should_execute(BgwJobStat *jobstat, BgwJob *job);
extern TimestampTz ts_bgw_job_stat_next_start(BgwJobStat *jobstat, BgwJob *job,
int32 consecutive_failed_starts);
int32 consecutive_failed_launches);
extern TSDLLEXPORT void ts_bgw_job_stat_mark_crash_reported(int32 bgw_job_id);
#endif /* BGW_JOB_STAT_H */

View File

@ -15,7 +15,7 @@ typedef struct BgwPolicyChunkStats
FormData_bgw_policy_chunk_stats fd;
} BgwPolicyChunkStats;
extern TSDLLEXPORT void ts_bgw_policy_chunk_stats_insert(BgwPolicyChunkStats *stat);
extern TSDLLEXPORT void ts_bgw_policy_chunk_stats_insert(BgwPolicyChunkStats *chunk_stats);
extern BgwPolicyChunkStats *ts_bgw_policy_chunk_stats_find(int32 job_id, int32 chunk_id);
extern void ts_bgw_policy_chunk_stats_delete_row_only_by_job_id(int32 job_id);
extern void ts_bgw_policy_chunk_stats_delete_by_chunk_id(int32 chunk_id);

View File

@ -54,13 +54,15 @@ ts_chunk_constraints_alloc(int size_hint, MemoryContext mctx)
}
ChunkConstraints *
ts_chunk_constraints_copy(ChunkConstraints *ccs)
ts_chunk_constraints_copy(ChunkConstraints *chunk_constraints)
{
ChunkConstraints *copy = palloc(sizeof(ChunkConstraints));
memcpy(copy, ccs, sizeof(ChunkConstraints));
copy->constraints = palloc0(CHUNK_CONSTRAINTS_SIZE(ccs->capacity));
memcpy(copy->constraints, ccs->constraints, CHUNK_CONSTRAINTS_SIZE(ccs->num_constraints));
memcpy(copy, chunk_constraints, sizeof(ChunkConstraints));
copy->constraints = palloc0(CHUNK_CONSTRAINTS_SIZE(chunk_constraints->capacity));
memcpy(copy->constraints,
chunk_constraints->constraints,
CHUNK_CONSTRAINTS_SIZE(chunk_constraints->num_constraints));
return copy;
}
@ -899,7 +901,7 @@ chunk_constraint_rename_on_chunk_table(int32 chunk_id, const char *old_name, con
}
static void
chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname)
chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *new_name)
{
bool nulls[Natts_chunk_constraint];
Datum values[Natts_chunk_constraint];
@ -916,8 +918,8 @@ chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname
heap_deform_tuple(tuple, tupdesc, values, nulls);
chunk_id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_chunk_constraint_chunk_id)]);
namestrcpy(&new_hypertable_constraint_name, newname);
chunk_constraint_choose_name(&new_chunk_constraint_name, newname, chunk_id);
namestrcpy(&new_hypertable_constraint_name, new_name);
chunk_constraint_choose_name(&new_chunk_constraint_name, new_name, chunk_id);
values[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] =
NameGetDatum(&new_hypertable_constraint_name);
@ -935,7 +937,7 @@ chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname
new_tuple = heap_modify_tuple(tuple, tupdesc, values, nulls, doReplace);
ts_chunk_index_adjust_meta(chunk_id,
newname,
new_name,
NameStr(*old_chunk_constraint_name),
NameStr(new_chunk_constraint_name));
@ -950,14 +952,14 @@ chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname
* Adjust internal metadata after index/constraint rename
*/
int
ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name, const char *oldname,
const char *newname)
ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name,
const char *old_name, const char *new_name)
{
ScanIterator iterator =
ts_scan_iterator_create(CHUNK_CONSTRAINT, RowExclusiveLock, CurrentMemoryContext);
int count = 0;
init_scan_by_chunk_id_constraint_name(&iterator, chunk_id, oldname);
init_scan_by_chunk_id_constraint_name(&iterator, chunk_id, old_name);
ts_scanner_foreach(&iterator)
{
@ -975,7 +977,7 @@ ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name,
CStringGetDatum(ht_constraint_name);
doReplace[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] = true;
values[AttrNumberGetAttrOffset(Anum_chunk_constraint_constraint_name)] =
CStringGetDatum(newname);
CStringGetDatum(new_name);
doReplace[AttrNumberGetAttrOffset(Anum_chunk_constraint_constraint_name)] = true;
new_tuple =
@ -994,8 +996,8 @@ ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name,
}
int
ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *oldname,
const char *newname)
ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *old_name,
const char *new_name)
{
ScanIterator iterator =
ts_scan_iterator_create(CHUNK_CONSTRAINT, RowExclusiveLock, CurrentMemoryContext);
@ -1005,12 +1007,12 @@ ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *old
ts_scanner_foreach(&iterator)
{
if (!hypertable_constraint_matches_tuple(ts_scan_iterator_tuple_info(&iterator), oldname))
if (!hypertable_constraint_matches_tuple(ts_scan_iterator_tuple_info(&iterator), old_name))
continue;
count++;
chunk_constraint_rename_hypertable_from_tuple(ts_scan_iterator_tuple_info(&iterator),
newname);
new_name);
}
return count;
}

View File

@ -36,9 +36,9 @@ typedef struct Hypercube Hypercube;
typedef struct ChunkScanCtx ChunkScanCtx;
extern TSDLLEXPORT ChunkConstraints *ts_chunk_constraints_alloc(int size_hint, MemoryContext mctx);
extern ChunkConstraints *ts_chunk_constraint_scan_by_chunk_id(int32 chunk_id, Size count_hint,
MemoryContext mctx);
extern ChunkConstraints *ts_chunk_constraints_copy(ChunkConstraints *constraints);
extern ChunkConstraints *
ts_chunk_constraint_scan_by_chunk_id(int32 chunk_id, Size num_constraints_hint, MemoryContext mctx);
extern ChunkConstraints *ts_chunk_constraints_copy(ChunkConstraints *chunk_constraints);
extern int ts_chunk_constraint_scan_by_dimension_slice(const DimensionSlice *slice,
ChunkScanCtx *ctx, MemoryContext mctx);
extern int ts_chunk_constraint_scan_by_dimension_slice_to_list(const DimensionSlice *slice,
@ -73,10 +73,10 @@ extern int ts_chunk_constraint_delete_by_constraint_name(int32 chunk_id,
bool delete_metadata,
bool drop_constraint);
extern void ts_chunk_constraint_recreate(const ChunkConstraint *cc, Oid chunk_oid);
extern int ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *oldname,
const char *newname);
extern int ts_chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *old_name,
const char *new_name);
extern int ts_chunk_constraint_adjust_meta(int32 chunk_id, const char *ht_constraint_name,
const char *oldname, const char *newname);
const char *old_name, const char *new_name);
extern char *
ts_chunk_constraint_get_name_from_hypertable_constraint(Oid chunk_relid,

View File

@ -945,14 +945,14 @@ init_scan_by_chunk_id_index_name(ScanIterator *iterator, int32 chunk_id, const c
* Adjust internal metadata after index/constraint rename
*/
int
ts_chunk_index_adjust_meta(int32 chunk_id, const char *ht_index_name, const char *oldname,
const char *newname)
ts_chunk_index_adjust_meta(int32 chunk_id, const char *ht_index_name, const char *old_name,
const char *new_name)
{
ScanIterator iterator =
ts_scan_iterator_create(CHUNK_INDEX, RowExclusiveLock, CurrentMemoryContext);
int count = 0;
init_scan_by_chunk_id_index_name(&iterator, chunk_id, oldname);
init_scan_by_chunk_id_index_name(&iterator, chunk_id, old_name);
ts_scanner_foreach(&iterator)
{
@ -969,7 +969,7 @@ ts_chunk_index_adjust_meta(int32 chunk_id, const char *ht_index_name, const char
values[AttrNumberGetAttrOffset(Anum_chunk_index_hypertable_index_name)] =
CStringGetDatum(ht_index_name);
doReplace[AttrNumberGetAttrOffset(Anum_chunk_index_hypertable_index_name)] = true;
values[AttrNumberGetAttrOffset(Anum_chunk_index_index_name)] = CStringGetDatum(newname);
values[AttrNumberGetAttrOffset(Anum_chunk_index_index_name)] = CStringGetDatum(new_name);
doReplace[AttrNumberGetAttrOffset(Anum_chunk_index_index_name)] = true;
new_tuple =
@ -987,13 +987,13 @@ ts_chunk_index_adjust_meta(int32 chunk_id, const char *ht_index_name, const char
}
int
ts_chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *newname)
ts_chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *new_name)
{
ScanKeyData scankey[2];
const char *indexname = get_rel_name(chunk_indexrelid);
ChunkIndexRenameInfo renameinfo = {
.oldname = indexname,
.newname = newname,
.newname = new_name,
};
ScanKeyInit(&scankey[0],
@ -1016,13 +1016,13 @@ ts_chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *newname)
}
int
ts_chunk_index_rename_parent(Hypertable *ht, Oid hypertable_indexrelid, const char *newname)
ts_chunk_index_rename_parent(Hypertable *ht, Oid hypertable_indexrelid, const char *new_name)
{
ScanKeyData scankey[2];
const char *indexname = get_rel_name(hypertable_indexrelid);
ChunkIndexRenameInfo renameinfo = {
.oldname = indexname,
.newname = newname,
.newname = new_name,
.isparent = true,
};

View File

@ -41,9 +41,9 @@ extern int ts_chunk_index_delete(int32 chunk_id, const char *indexname, bool dro
extern int ts_chunk_index_delete_by_chunk_id(int32 chunk_id, bool drop_index);
extern void ts_chunk_index_delete_by_name(const char *schema, const char *index_name,
bool drop_index);
extern int ts_chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *newname);
extern int ts_chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *new_name);
extern int ts_chunk_index_rename_parent(Hypertable *ht, Oid hypertable_indexrelid,
const char *newname);
const char *new_name);
extern int ts_chunk_index_adjust_meta(int32 chunk_id, const char *ht_index_name,
const char *old_name, const char *new_name);
extern int ts_chunk_index_set_tablespace(Hypertable *ht, Oid hypertable_indexrelid,

View File

@ -11,6 +11,6 @@
#include "hypertable.h"
extern Chunk **ts_chunk_scan_by_chunk_ids(const Hyperspace *hs, const List *chunk_ids,
unsigned int *numchunks);
unsigned int *num_chunks);
#endif /* TIMESCALEDB_CHUNK_SCAN_H */

View File

@ -140,7 +140,7 @@ extern TSDLLEXPORT DimensionInfo *ts_dimension_info_create_closed(Oid table_reli
extern void ts_dimension_info_validate(DimensionInfo *info);
extern int32 ts_dimension_add_from_info(DimensionInfo *info);
extern void ts_dimensions_rename_schema_name(const char *oldname, const char *newname);
extern void ts_dimensions_rename_schema_name(const char *old_name, const char *new_name);
extern TSDLLEXPORT void ts_dimension_update(const Hypertable *ht, const NameData *dimname,
DimensionType dimtype, Datum *interval,
Oid *intervaltype, int16 *num_slices,

View File

@ -45,7 +45,8 @@ typedef struct Hypercube Hypercube;
extern DimensionVec *ts_dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit,
const ScanTupLock *tuplock);
extern void ts_dimension_slice_scan_list(int32 dimension_id, int64 coordinate, List **dest);
extern void ts_dimension_slice_scan_list(int32 dimension_id, int64 coordinate,
List **matching_dimension_slices);
extern DimensionVec *
ts_dimension_slice_scan_range_limit(int32 dimension_id, StrategyNumber start_strategy,

View File

@ -17,7 +17,7 @@ extern TSDLLEXPORT void ts_jsonb_add_bool(JsonbParseState *state, const char *ke
extern TSDLLEXPORT void ts_jsonb_add_str(JsonbParseState *state, const char *key,
const char *value);
extern TSDLLEXPORT void ts_jsonb_add_interval(JsonbParseState *state, const char *key,
Interval *value);
Interval *interval);
extern TSDLLEXPORT void ts_jsonb_add_int32(JsonbParseState *state, const char *key,
const int32 value);
extern TSDLLEXPORT void ts_jsonb_add_int64(JsonbParseState *state, const char *key,

View File

@ -119,7 +119,7 @@ static void choose_next_subplan_non_parallel(ChunkAppendState *state);
static void choose_next_subplan_for_worker(ChunkAppendState *state);
static List *constify_restrictinfos(PlannerInfo *root, List *restrictinfos);
static bool can_exclude_chunk(List *constraints, List *restrictinfos);
static bool can_exclude_chunk(List *constraints, List *baserestrictinfo);
static void do_startup_exclusion(ChunkAppendState *state);
static Node *constify_param_mutator(Node *node, void *context);
static List *constify_restrictinfo_params(PlannerInfo *root, EState *state, List *restrictinfos);

View File

@ -95,9 +95,9 @@ ts_chunk_dispatch_get_cmd_type(const ChunkDispatch *dispatch)
}
void
ts_chunk_dispatch_destroy(ChunkDispatch *cd)
ts_chunk_dispatch_destroy(ChunkDispatch *chunk_dispatch)
{
ts_subspace_store_free(cd->cache);
ts_subspace_store_free(chunk_dispatch->cache);
}
static void

View File

@ -45,7 +45,7 @@ typedef struct Point Point;
typedef void (*on_chunk_changed_func)(ChunkInsertState *state, void *data);
extern ChunkDispatch *ts_chunk_dispatch_create(Hypertable *ht, EState *estate, int eflags);
extern void ts_chunk_dispatch_destroy(ChunkDispatch *dispatch);
extern void ts_chunk_dispatch_destroy(ChunkDispatch *chunk_dispatch);
extern ChunkInsertState *
ts_chunk_dispatch_get_chunk_insert_state(ChunkDispatch *dispatch, Point *p,
const on_chunk_changed_func on_chunk_changed, void *data);

View File

@ -38,7 +38,8 @@ typedef struct ChunkDispatchState
} ChunkDispatchState;
extern TSDLLEXPORT bool ts_is_chunk_dispatch_state(PlanState *state);
extern ChunkDispatchState *ts_chunk_dispatch_state_create(Oid hypertable_oid, Plan *plan);
extern void ts_chunk_dispatch_state_set_parent(ChunkDispatchState *state, ModifyTableState *parent);
extern ChunkDispatchState *ts_chunk_dispatch_state_create(Oid hypertable_relid, Plan *plan);
extern void ts_chunk_dispatch_state_set_parent(ChunkDispatchState *state,
ModifyTableState *mtstate);
#endif /* TIMESCALEDB_CHUNK_DISPATCH_STATE_H */

View File

@ -77,8 +77,8 @@ typedef struct MutatorContext
} MutatorContext;
static bool find_first_last_aggs_walker(Node *node, List **context);
static bool build_first_last_path(PlannerInfo *root, FirstLastAggInfo *flinfo, Oid eqop, Oid sortop,
bool nulls_first);
static bool build_first_last_path(PlannerInfo *root, FirstLastAggInfo *fl_info, Oid eqop,
Oid sortop, bool nulls_first);
static void first_last_qp_callback(PlannerInfo *root, void *extra);
static Node *mutate_aggref_node(Node *node, MutatorContext *context);
static void replace_aggref_in_tlist(MinMaxAggPath *minmaxagg_path);

View File

@ -91,19 +91,19 @@ ts_subspace_store_init(const Hyperspace *space, MemoryContext mcxt, int16 max_it
}
void
ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
ts_subspace_store_add(SubspaceStore *subspace_store, const Hypercube *hypercube, void *object,
void (*object_free)(void *))
{
SubspaceStoreInternalNode *node = store->origin;
SubspaceStoreInternalNode *node = subspace_store->origin;
DimensionSlice *last = NULL;
MemoryContext old = MemoryContextSwitchTo(store->mcxt);
MemoryContext old = MemoryContextSwitchTo(subspace_store->mcxt);
int i;
Assert(hc->num_slices == store->num_dimensions);
Assert(hypercube->num_slices == subspace_store->num_dimensions);
for (i = 0; i < hc->num_slices; i++)
for (i = 0; i < hypercube->num_slices; i++)
{
const DimensionSlice *target = hc->slices[i];
const DimensionSlice *target = hypercube->slices[i];
DimensionSlice *match;
Assert(target->storage == NULL);
@ -116,12 +116,13 @@ ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
* create one now. (There will always be one for time)
*/
Assert(last != NULL);
last->storage = subspace_store_internal_node_create(i == (hc->num_slices - 1));
last->storage = subspace_store_internal_node_create(i == (hypercube->num_slices - 1));
last->storage_free = subspace_store_internal_node_free;
node = last->storage;
}
Assert(store->max_items == 0 || node->descendants <= (size_t) store->max_items);
Assert(subspace_store->max_items == 0 ||
node->descendants <= (size_t) subspace_store->max_items);
/*
* We only call this function on a cache miss, so number of leaves
@ -134,7 +135,7 @@ ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
node->vector->slices[0]->fd.dimension_id == target->fd.dimension_id);
/* Do we have enough space to store the object? */
if (store->max_items > 0 && node->descendants > store->max_items)
if (subspace_store->max_items > 0 && node->descendants > subspace_store->max_items)
{
/*
* Always delete the slice corresponding to the earliest time
@ -153,7 +154,7 @@ ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
*/
Assert(i == 0);
Assert(store->max_items + 1 == node->descendants);
Assert(subspace_store->max_items + 1 == node->descendants);
ts_dimension_vec_remove_slice(&node->vector, i);
@ -181,7 +182,8 @@ ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
match = copy;
}
Assert(store->max_items == 0 || node->descendants <= (size_t) store->max_items);
Assert(subspace_store->max_items == 0 ||
node->descendants <= (size_t) subspace_store->max_items);
last = match;
/* internal slices point to the next SubspaceStoreInternalNode */
@ -195,18 +197,18 @@ ts_subspace_store_add(SubspaceStore *store, const Hypercube *hc, void *object,
}
void *
ts_subspace_store_get(const SubspaceStore *store, const Point *target)
ts_subspace_store_get(const SubspaceStore *subspace_store, const Point *target)
{
int i;
DimensionVec *vec = store->origin->vector;
DimensionVec *vec = subspace_store->origin->vector;
DimensionSlice *match = NULL;
Assert(target->cardinality == store->num_dimensions);
Assert(target->cardinality == subspace_store->num_dimensions);
/* The internal compressed hypertable has no dimensions as
* chunks are created explicitly by compress_chunk and linked
* to the source chunk. */
if (store->num_dimensions == 0)
if (subspace_store->num_dimensions == 0)
return NULL;
for (i = 0; i < target->cardinality; i++)
@ -223,14 +225,14 @@ ts_subspace_store_get(const SubspaceStore *store, const Point *target)
}
void
ts_subspace_store_free(SubspaceStore *store)
ts_subspace_store_free(SubspaceStore *subspace_store)
{
subspace_store_internal_node_free(store->origin);
pfree(store);
subspace_store_internal_node_free(subspace_store->origin);
pfree(subspace_store);
}
MemoryContext
ts_subspace_store_mcxt(const SubspaceStore *store)
ts_subspace_store_mcxt(const SubspaceStore *subspace_store)
{
return store->mcxt;
return subspace_store->mcxt;
}

View File

@ -23,14 +23,14 @@ extern SubspaceStore *ts_subspace_store_init(const Hyperspace *space, MemoryCont
int16 max_items);
/* Store an object associate with the subspace represented by a hypercube */
extern void ts_subspace_store_add(SubspaceStore *cache, const Hypercube *hc, void *object,
void (*object_free)(void *));
extern void ts_subspace_store_add(SubspaceStore *subspace_store, const Hypercube *hypercube,
void *object, void (*object_free)(void *));
/* Get the object stored for the subspace that a point is in.
* Return the object stored or NULL if this subspace is not in the store.
*/
extern void *ts_subspace_store_get(const SubspaceStore *cache, const Point *target);
extern void ts_subspace_store_free(SubspaceStore *cache);
extern MemoryContext ts_subspace_store_mcxt(const SubspaceStore *cache);
extern void *ts_subspace_store_get(const SubspaceStore *subspace_store, const Point *target);
extern void ts_subspace_store_free(SubspaceStore *subspace_store);
extern MemoryContext ts_subspace_store_mcxt(const SubspaceStore *subspace_store);
#endif /* TIMESCALEDB_SUBSPACE_STORE_H */

View File

@ -171,7 +171,7 @@ TSDLLEXPORT void ts_create_arrays_from_caggs_info(const CaggsInfo *all_caggs,
extern TSDLLEXPORT ContinuousAgg *
ts_continuous_agg_find_by_mat_hypertable_id(int32 mat_hypertable_id);
extern TSDLLEXPORT void ts_materialization_invalidation_log_delete_inner(int32 materialization_id);
extern TSDLLEXPORT void ts_materialization_invalidation_log_delete_inner(int32 mat_hypertable_id);
extern TSDLLEXPORT ContinuousAggHypertableStatus
ts_continuous_agg_hypertable_status(int32 hypertable_id);

View File

@ -40,18 +40,19 @@ tablespaces_alloc(int capacity)
}
Tablespace *
ts_tablespaces_add(Tablespaces *tspcs, const FormData_tablespace *form, Oid tspc_oid)
ts_tablespaces_add(Tablespaces *tablespaces, const FormData_tablespace *form, Oid tspc_oid)
{
Tablespace *tspc;
if (tspcs->num_tablespaces >= tspcs->capacity)
if (tablespaces->num_tablespaces >= tablespaces->capacity)
{
tspcs->capacity += TABLESPACE_DEFAULT_CAPACITY;
Assert(tspcs->tablespaces); /* repalloc() does not work with NULL argument */
tspcs->tablespaces = repalloc(tspcs->tablespaces, sizeof(Tablespace) * tspcs->capacity);
tablespaces->capacity += TABLESPACE_DEFAULT_CAPACITY;
Assert(tablespaces->tablespaces); /* repalloc() does not work with NULL argument */
tablespaces->tablespaces =
repalloc(tablespaces->tablespaces, sizeof(Tablespace) * tablespaces->capacity);
}
tspc = &tspcs->tablespaces[tspcs->num_tablespaces++];
tspc = &tablespaces->tablespaces[tablespaces->num_tablespaces++];
memcpy(&tspc->fd, form, sizeof(FormData_tablespace));
tspc->tablespace_oid = tspc_oid;
@ -59,12 +60,12 @@ ts_tablespaces_add(Tablespaces *tspcs, const FormData_tablespace *form, Oid tspc
}
bool
ts_tablespaces_contain(const Tablespaces *tspcs, Oid tspc_oid)
ts_tablespaces_contain(const Tablespaces *tablespaces, Oid tspc_oid)
{
int i;
for (i = 0; i < tspcs->num_tablespaces; i++)
if (tspc_oid == tspcs->tablespaces[i].tablespace_oid)
for (i = 0; i < tablespaces->num_tablespaces; i++)
if (tspc_oid == tablespaces->tablespaces[i].tablespace_oid)
return true;
return false;

View File

@ -26,7 +26,7 @@ typedef struct Tablespaces
extern Tablespace *ts_tablespaces_add(Tablespaces *tablespaces, const FormData_tablespace *form,
Oid tspc_oid);
extern bool ts_tablespaces_contain(const Tablespaces *tspcs, Oid tspc_oid);
extern bool ts_tablespaces_contain(const Tablespaces *tablespaces, Oid tspc_oid);
extern Tablespaces *ts_tablespace_scan(int32 hypertable_id);
extern TSDLLEXPORT void ts_tablespace_attach_internal(Name tspcname, Oid hypertable_oid,
bool if_not_attached);

View File

@ -10,6 +10,6 @@
typedef struct Connection Connection;
extern ssize_t ts_connection_mock_set_recv_buf(Connection *conn, char *buf, size_t buflen);
extern ssize_t ts_connection_mock_set_recv_buf(Connection *conn, char *buf, size_t buf_len);
#endif /* TIMESCALEDB_CONN_MOCK_H */

View File

@ -13,7 +13,7 @@ bool policy_config_check_hypertable_lag_equality(Jsonb *config, const char *json
Oid dim_type, Oid lag_type, Datum lag_datum);
int64 subtract_integer_from_now_internal(int64 interval, Oid time_dim_type, Oid now_func,
bool *overflow);
Datum subtract_interval_from_now(Interval *interval, Oid time_dim_type);
Datum subtract_interval_from_now(Interval *lag, Oid time_dim_type);
const Dimension *get_open_dimension_for_hypertable(const Hypertable *ht);
bool policy_get_verbose_log(const Jsonb *config);
#endif /* TIMESCALEDB_TSL_BGW_POLICY_UTILS_H */

View File

@ -20,7 +20,7 @@ extern void chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable
const char *remote_chunk_name, List *data_nodes);
extern Datum chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS);
extern Datum chunk_api_get_chunk_colstats(PG_FUNCTION_ARGS);
extern void chunk_api_update_distributed_hypertable_stats(Oid relid);
extern void chunk_api_update_distributed_hypertable_stats(Oid table_id);
extern Datum chunk_create_empty_table(PG_FUNCTION_ARGS);
extern void chunk_api_call_create_empty_chunk_table(const Hypertable *ht, const Chunk *chunk,
const char *node_name);

View File

@ -156,7 +156,7 @@ static Tuplesortstate *compress_chunk_sort_relation(Relation in_rel, int n_keys,
static void row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_desc,
Relation compressed_table, int num_compression_infos,
const ColumnCompressionInfo **column_compression_info,
int16 *column_offsets, int16 num_compressed_columns,
int16 *column_offsets, int16 num_columns_in_compressed_table,
bool need_bistate);
static void row_compressor_append_sorted_rows(RowCompressor *row_compressor,
Tuplesortstate *sorted_rel, TupleDesc sorted_desc);

View File

@ -145,7 +145,7 @@ pg_attribute_unused() assert_num_compression_algorithms_sane(void)
extern CompressionStorage compression_get_toast_storage(CompressionAlgorithms algo);
extern CompressionStats compress_chunk(Oid in_table, Oid out_table,
const ColumnCompressionInfo **column_compression_info,
int num_columns);
int num_compression_infos);
extern void decompress_chunk(Oid in_table, Oid out_table);
extern DecompressionIterator *(*tsl_get_decompression_iterator_init(

View File

@ -21,7 +21,7 @@ bool tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
void tsl_process_compress_table_add_column(Hypertable *ht, ColumnDef *orig_def);
void tsl_process_compress_table_drop_column(Hypertable *ht, char *name);
void tsl_process_compress_table_rename_column(Hypertable *ht, const RenameStmt *stmt);
Chunk *create_compress_chunk(Hypertable *compress_ht, Chunk *src_chunk, Oid table_oid);
Chunk *create_compress_chunk(Hypertable *compress_ht, Chunk *src_chunk, Oid table_id);
char *compression_column_segment_min_name(const FormData_hypertable_compression *fd);
char *compression_column_segment_max_name(const FormData_hypertable_compression *fd);

View File

@ -156,17 +156,17 @@ align_and_zero(char *ptr, char type_align, Size *max_size)
/* Inspired by datum_write in rangetypes.c. This reduces the max_size by the data length before
* exiting */
char *
datum_to_bytes_and_advance(DatumSerializer *serializer, char *ptr, Size *max_size, Datum datum)
datum_to_bytes_and_advance(DatumSerializer *serializer, char *start, Size *max_size, Datum datum)
{
Size data_length;
if (serializer->type_by_val)
{
/* pass-by-value */
ptr = align_and_zero(ptr, serializer->type_align, max_size);
start = align_and_zero(start, serializer->type_align, max_size);
data_length = serializer->type_len;
check_allowed_data_len(data_length, *max_size);
store_att_byval(ptr, datum, data_length);
store_att_byval(start, datum, data_length);
}
else if (serializer->type_len == -1)
{
@ -187,7 +187,7 @@ datum_to_bytes_and_advance(DatumSerializer *serializer, char *ptr, Size *max_siz
/* no alignment for short varlenas */
data_length = VARSIZE_SHORT(val);
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, val, data_length);
memcpy(start, val, data_length);
}
else if (TYPE_IS_PACKABLE(serializer->type_len, serializer->type_storage) &&
VARATT_CAN_MAKE_SHORT(val))
@ -195,16 +195,16 @@ datum_to_bytes_and_advance(DatumSerializer *serializer, char *ptr, Size *max_siz
/* convert to short varlena -- no alignment */
data_length = VARATT_CONVERTED_SHORT_SIZE(val);
check_allowed_data_len(data_length, *max_size);
SET_VARSIZE_SHORT(ptr, data_length);
memcpy(ptr + 1, VARDATA(val), data_length - 1);
SET_VARSIZE_SHORT(start, data_length);
memcpy(start + 1, VARDATA(val), data_length - 1);
}
else
{
/* full 4-byte header varlena */
ptr = align_and_zero(ptr, serializer->type_align, max_size);
start = align_and_zero(start, serializer->type_align, max_size);
data_length = VARSIZE(val);
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, val, data_length);
memcpy(start, val, data_length);
}
}
else if (serializer->type_len == -2)
@ -213,22 +213,22 @@ datum_to_bytes_and_advance(DatumSerializer *serializer, char *ptr, Size *max_siz
Assert(serializer->type_align == 'c');
data_length = strlen(DatumGetCString(datum)) + 1;
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, DatumGetPointer(datum), data_length);
memcpy(start, DatumGetPointer(datum), data_length);
}
else
{
/* fixed-length pass-by-reference */
ptr = align_and_zero(ptr, serializer->type_align, max_size);
start = align_and_zero(start, serializer->type_align, max_size);
Assert(serializer->type_len > 0);
data_length = serializer->type_len;
check_allowed_data_len(data_length, *max_size);
memcpy(ptr, DatumGetPointer(datum), data_length);
memcpy(start, DatumGetPointer(datum), data_length);
}
ptr += data_length;
start += data_length;
*max_size = *max_size - data_length;
return ptr;
return start;
}
typedef struct DatumDeserializer

View File

@ -28,23 +28,23 @@ BinaryStringEncoding datum_serializer_binary_string_encoding(DatumSerializer *se
/* serialize to bytes in memory. */
Size datum_get_bytes_size(DatumSerializer *serializer, Size start_offset, Datum val);
char *datum_to_bytes_and_advance(DatumSerializer *serializer, char *start, Size *max_size,
Datum val);
Datum datum);
/* serialize to a binary string (for send functions) */
void type_append_to_binary_string(Oid type_oid, StringInfo data);
void type_append_to_binary_string(Oid type_oid, StringInfo buffer);
void datum_append_to_binary_string(DatumSerializer *serializer, BinaryStringEncoding encoding,
StringInfo data, Datum datum);
StringInfo buffer, Datum datum);
/* DESERIALIZATION */
typedef struct DatumDeserializer DatumDeserializer;
DatumDeserializer *create_datum_deserializer(Oid type);
/* deserialization from bytes in memory */
Datum bytes_to_datum_and_advance(DatumDeserializer *deserializer, const char **bytes);
Datum bytes_to_datum_and_advance(DatumDeserializer *deserializer, const char **ptr);
/* deserialization from binary strings (for recv functions) */
Datum binary_string_to_datum(DatumDeserializer *deserializer, BinaryStringEncoding encoding,
StringInfo data);
Oid binary_string_get_type(StringInfo data);
StringInfo buffer);
Oid binary_string_get_type(StringInfo buffer);
#endif

View File

@ -31,13 +31,13 @@ extern void *dictionary_compressor_finish(DictionaryCompressor *compressor);
extern DecompressionIterator *
tsl_dictionary_decompression_iterator_from_datum_forward(Datum dictionary_compressed,
Oid element_oid);
Oid element_type);
extern DecompressResult
dictionary_decompression_iterator_try_next_forward(DecompressionIterator *iter);
extern DecompressionIterator *
tsl_dictionary_decompression_iterator_from_datum_reverse(Datum dictionary_compressed,
Oid element_oid);
Oid element_type);
extern DecompressResult
dictionary_decompression_iterator_try_next_reverse(DecompressionIterator *iter);

View File

@ -79,7 +79,7 @@ extern void gorilla_compressor_append_value(GorillaCompressor *compressor, uint6
extern void *gorilla_compressor_finish(GorillaCompressor *compressor);
extern DecompressionIterator *
gorilla_decompression_iterator_from_datum_forward(Datum dictionary_compressed, Oid element_type);
gorilla_decompression_iterator_from_datum_forward(Datum gorilla_compressed, Oid element_type);
extern DecompressResult
gorilla_decompression_iterator_try_next_forward(DecompressionIterator *iter);
@ -88,7 +88,7 @@ gorilla_decompression_iterator_from_datum_reverse(Datum gorilla_compressed, Oid
extern DecompressResult
gorilla_decompression_iterator_try_next_reverse(DecompressionIterator *iter);
extern void gorilla_compressed_send(CompressedDataHeader *compressed, StringInfo buffer);
extern void gorilla_compressed_send(CompressedDataHeader *header, StringInfo buffer);
extern Datum gorilla_compressed_recv(StringInfo buf);
extern Datum tsl_gorilla_compressor_append(PG_FUNCTION_ARGS);

View File

@ -200,7 +200,7 @@ static int32 mattablecolumninfo_create_materialization_table(
MatTableColumnInfo *matcolinfo, int32 hypertable_id, RangeVar *mat_rel,
CAggTimebucketInfo *origquery_tblinfo, bool create_addl_index, char *tablespacename,
char *table_access_method, ObjectAddress *mataddress);
static Query *mattablecolumninfo_get_partial_select_query(MatTableColumnInfo *matcolinfo,
static Query *mattablecolumninfo_get_partial_select_query(MatTableColumnInfo *mattblinfo,
Query *userview_query, bool finalized);
static void caggtimebucketinfo_init(CAggTimebucketInfo *src, int32 hypertable_id,

View File

@ -13,7 +13,7 @@
#define CONTINUOUS_AGG_CHUNK_ID_COL_NAME "chunk_id"
DDLResult tsl_process_continuous_agg_viewstmt(Node *stmt, const char *query_string, void *pstmt,
DDLResult tsl_process_continuous_agg_viewstmt(Node *node, const char *query_string, void *pstmt,
WithClauseResult *with_clause_options);
extern void cagg_flip_realtime_view_definition(ContinuousAgg *agg, Hypertable *mat_ht);

View File

@ -48,7 +48,7 @@ const char *deparse_get_tabledef_commands_concat(Oid relid);
DeparsedHypertableCommands *deparse_get_distributed_hypertable_create_command(Hypertable *ht);
const char *deparse_func_call(FunctionCallInfo finfo);
const char *deparse_func_call(FunctionCallInfo fcinfo);
const char *deparse_oid_function_call_coll(Oid funcid, Oid collation, unsigned int num_args, ...);
const char *deparse_grant_revoke_on_database(const GrantStmt *stmt, const char *dbname);
const char *deparse_create_trigger(CreateTrigStmt *stmt);

View File

@ -31,7 +31,7 @@ static Datum dist_util_remote_srf_query(FunctionCallInfo fcinfo, const char *nod
/* UUID associated with remote connection */
static pg_uuid_t *peer_dist_id = NULL;
static bool dist_util_set_id_with_uuid_check(Datum dist_uuid, bool check_uuid);
static bool dist_util_set_id_with_uuid_check(Datum dist_id, bool check_uuid);
/* Requires non-null arguments */
static bool

View File

@ -200,7 +200,7 @@ static void deparseReturningList(StringInfo buf, RangeTblEntry *rte, Index rtind
static void deparseColumnRef(StringInfo buf, int varno, int varattno, RangeTblEntry *rte,
bool qualify_col);
static void deparseRelation(StringInfo buf, Relation rel);
static void deparseExpr(Expr *expr, deparse_expr_cxt *context);
static void deparseExpr(Expr *node, deparse_expr_cxt *context);
static void deparseVar(Var *node, deparse_expr_cxt *context);
static void deparseConst(Const *node, deparse_expr_cxt *context, int showtype);
static void deparseParam(Param *node, deparse_expr_cxt *context);

View File

@ -49,7 +49,7 @@ extern List *build_tlist_to_deparse(RelOptInfo *foreignrel);
extern void deparseSelectStmtForRel(StringInfo buf, PlannerInfo *root, RelOptInfo *rel, List *tlist,
List *remote_where, List *remote_having, List *pathkeys,
bool is_subquery, List **retrieved_attrs, List **params_list,
DataNodeChunkAssignment *swa);
DataNodeChunkAssignment *sca);
extern const char *get_jointype_name(JoinType jointype);
extern void deparseStringLiteral(StringInfo buf, const char *val);

View File

@ -9,7 +9,7 @@
#include <postgres.h>
extern void option_validate(List *options_list, Oid catalog);
extern List *option_extract_extension_list(const char *extensionsString, bool warn_on_missing);
extern List *option_extract_extension_list(const char *extensions_string, bool warn_on_missing);
extern bool option_get_from_options_list_int(List *options, const char *optionname, int *value);
#endif /* TIMESCALEDB_TSL_FDW_OPTION_H */

View File

@ -32,6 +32,6 @@ typedef struct AsyncScanState
void (*fetch_data)(struct AsyncScanState *state);
} AsyncScanState;
extern void async_append_add_paths(PlannerInfo *root, RelOptInfo *hyper_rel);
extern void async_append_add_paths(PlannerInfo *root, RelOptInfo *final_rel);
#endif

View File

@ -92,7 +92,7 @@ extern AsyncRequest *async_request_send_with_stmt_params_elevel_res_format(
extern AsyncRequest *async_request_send_prepare(TSConnection *conn, const char *sql_statement,
int n_params);
extern AsyncRequest *async_request_send_prepared_stmt(PreparedStmt *stmt,
const char *const *paramValues);
const char *const *param_values);
extern AsyncRequest *async_request_send_prepared_stmt_with_params(PreparedStmt *stmt,
StmtParams *params,
int res_format);
@ -107,7 +107,7 @@ extern AsyncResponseResult *async_request_wait_any_result(AsyncRequest *request)
extern AsyncResponse *async_request_cleanup_result(AsyncRequest *req, TimestampTz endtime);
/* Returns on successful commands, throwing errors otherwise */
extern void async_request_wait_ok_command(AsyncRequest *set);
extern void async_request_wait_ok_command(AsyncRequest *req);
extern PreparedStmt *async_request_wait_prepared_statement(AsyncRequest *request);
/* Async Response */

View File

@ -22,7 +22,7 @@ typedef struct DistCmdDescr
extern DistCmdResult *ts_dist_multi_cmds_params_invoke_on_data_nodes(List *cmd_descriptors,
List *data_nodes,
bool transactional);
extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *node_names,
extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes,
bool transactional);
extern DistCmdResult *ts_dist_cmd_params_invoke_on_data_nodes(const char *sql, StmtParams *params,
List *data_nodes, bool transactional);

View File

@ -24,7 +24,7 @@ typedef enum
/* actions */
extern void remote_txn_init(RemoteTxn *entry, TSConnection *conn);
extern RemoteTxn *remote_txn_begin_on_connection(TSConnection *conn);
extern void remote_txn_begin(RemoteTxn *entry, int txnlevel);
extern void remote_txn_begin(RemoteTxn *entry, int curlevel);
extern bool remote_txn_abort(RemoteTxn *entry);
extern void remote_txn_write_persistent_record(RemoteTxn *entry);
extern void remote_txn_deallocate_prepared_stmts_if_needed(RemoteTxn *entry);
@ -49,7 +49,7 @@ extern void remote_txn_report_prepare_transaction_result(RemoteTxn *txn, bool su
/* Persitent record */
extern RemoteTxnId *remote_txn_persistent_record_write(TSConnectionId id);
extern bool remote_txn_persistent_record_exists(const RemoteTxnId *gid);
extern bool remote_txn_persistent_record_exists(const RemoteTxnId *parsed);
extern int remote_txn_persistent_record_delete_for_data_node(Oid foreign_server_oid,
const char *gid);