Prepare for clang-tidy-18 (#7213)

Mostly fixes the complaints about implicit casts of multi-level pointers. Not
enabling it in CI yet because some complicated warnings
remain.

Loader changes are cosmetic.
This commit is contained in:
Alexander Kuzmenkov 2024-11-18 14:06:52 +01:00 committed by GitHub
parent 8f2382eb4c
commit fc827c154a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
61 changed files with 211 additions and 151 deletions

View File

@ -1,6 +0,0 @@
---
Checks: '-*,clang-analyzer-core.*,clang-diagnostic-*'
WarningsAsErrors: 'clang-analyzer-unix.*,clang-analyzer-core.NullDereference'
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
...

View File

@ -34,7 +34,7 @@ bgw_job_stat_next_start_was_set(FormData_bgw_job_stat *fd)
static ScanTupleResult
bgw_job_stat_tuple_found(TupleInfo *ti, void *const data)
{
BgwJobStat **job_stat_pp = data;
BgwJobStat **job_stat_pp = (BgwJobStat **) data;
*job_stat_pp = STRUCT_FROM_SLOT(ti->slot, ti->mctx, BgwJobStat, FormData_bgw_job_stat);
@ -94,7 +94,7 @@ ts_bgw_job_stat_find(int32 bgw_job_id)
bgw_job_stat_scan_job_id(bgw_job_id,
bgw_job_stat_tuple_found,
NULL,
&job_stat,
(void *) &job_stat,
AccessShareLock);
return job_stat;
@ -235,8 +235,8 @@ ts_get_next_scheduled_execution_slot(BgwJob *job, TimestampTz finish_time)
DirectFunctionCall2(timestamptz_part, CStringGetTextDatum("month"), timebucket_fini);
/* convert everything to months */
float8 month_diff = DatumGetFloat8(year_fini) * 12 + DatumGetFloat8(month_fini) -
(DatumGetFloat8(year_init) * 12 + DatumGetFloat8(month_init));
float8 month_diff = (DatumGetFloat8(year_fini) * 12) + DatumGetFloat8(month_fini) -
((DatumGetFloat8(year_init) * 12) + DatumGetFloat8(month_init));
Datum months_to_add = DirectFunctionCall2(interval_mul,
IntervalPGetDatum(&one_month),

View File

@ -57,7 +57,8 @@ get_timeout_millisec(TimestampTz by_time)
if (timeout_sec < 0 || timeout_usec < 0)
return 0;
return (int64) (timeout_sec * MILLISECS_PER_SEC + ((int64) timeout_usec) / USECS_PER_MILLISEC);
return (int64) ((timeout_sec * MILLISECS_PER_SEC) +
(((int64) timeout_usec) / USECS_PER_MILLISEC));
}
static bool
@ -73,7 +74,7 @@ wait_using_wait_latch(TimestampTz until)
timeout = MAX_TIMEOUT;
/* Wait latch requires timeout to be <= INT_MAX */
if ((int64) timeout > (int64) INT_MAX)
if (timeout > (int64) INT_MAX)
timeout = INT_MAX;
wl_rc = WaitLatch(MyLatch,

View File

@ -18,7 +18,7 @@
static ScanTupleResult
bgw_policy_chunk_stats_tuple_found(TupleInfo *ti, void *const data)
{
BgwPolicyChunkStats **chunk_stats = data;
BgwPolicyChunkStats **chunk_stats = (BgwPolicyChunkStats **) data;
*chunk_stats =
STRUCT_FROM_SLOT(ti->slot, ti->mctx, BgwPolicyChunkStats, FormData_bgw_policy_chunk_stats);
@ -148,7 +148,7 @@ ts_bgw_policy_chunk_stats_find(int32 job_id, int32 chunk_id)
bgw_policy_chunk_stats_tuple_found,
AccessShareLock,
BGW_POLICY_CHUNK_STATS_TABLE_NAME,
&stats);
(void *) &stats);
return stats;
}

View File

@ -152,7 +152,7 @@ extern ChunkVec *ts_chunk_vec_create(int32 capacity);
extern ChunkVec *ts_chunk_vec_sort(ChunkVec **chunks);
extern ChunkVec *ts_chunk_vec_add_from_tuple(ChunkVec **chunks, TupleInfo *ti);
#define CHUNK_VEC_SIZE(num_chunks) (sizeof(ChunkVec) + sizeof(Chunk) * num_chunks)
#define CHUNK_VEC_SIZE(num_chunks) (sizeof(ChunkVec) + (sizeof(Chunk) * num_chunks))
#define DEFAULT_CHUNK_VEC_SIZE 10
extern void ts_chunk_formdata_fill(FormData_chunk *fd, const TupleInfo *ti);

View File

@ -525,7 +525,7 @@ chunk_index_mapping_from_tuple(TupleInfo *ti, ChunkIndexMapping *cim)
static ScanTupleResult
chunk_index_collect(TupleInfo *ti, void *data)
{
List **mappings = data;
List **mappings = (List **) data;
ChunkIndexMapping *cim;
MemoryContext oldmctx;
@ -560,7 +560,7 @@ ts_chunk_index_get_mappings(Hypertable *ht, Oid hypertable_indexrelid)
2,
chunk_index_collect,
NULL,
&mappings,
(void *) &mappings,
AccessShareLock);
return mappings;

View File

@ -54,7 +54,8 @@ ts_chunk_scan_by_chunk_ids(const Hyperspace *hs, const List *chunk_ids, unsigned
* Make sure to filter out "dropped" chunks.
*/
ScanIterator chunk_it = ts_chunk_scan_iterator_create(orig_mcxt);
locked_chunks = MemoryContextAlloc(orig_mcxt, sizeof(Chunk *) * list_length(chunk_ids));
locked_chunks =
(Chunk **) MemoryContextAlloc(orig_mcxt, sizeof(Chunk *) * list_length(chunk_ids));
foreach (lc, chunk_ids)
{
int chunk_id = lfirst_int(lc);

View File

@ -184,7 +184,7 @@ TSCopyMultiInsertBufferInit(ChunkInsertState *cis, Point *point)
TSCopyMultiInsertBuffer *buffer;
buffer = (TSCopyMultiInsertBuffer *) palloc(sizeof(TSCopyMultiInsertBuffer));
memset(buffer->slots, 0, sizeof(TupleTableSlot *) * MAX_BUFFERED_TUPLES);
memset((void *) buffer->slots, 0, sizeof(TupleTableSlot *) * MAX_BUFFERED_TUPLES);
buffer->bistate = GetBulkInsertState();
buffer->nused = 0;
@ -447,7 +447,17 @@ TSCmpBuffersByUsage(const ListCell *a, const ListCell *b)
Assert(b1 >= 0);
Assert(b2 >= 0);
return (b1 > b2) ? 1 : (b1 == b2) ? 0 : -1;
if (b1 > b2)
{
return 1;
}
if (b1 == b2)
{
return 0;
}
return -1;
}
/*

View File

@ -1886,7 +1886,7 @@ ts_dimensions_rename_schema_name(const char *old_name, const char *new_name)
.nkeys = 1,
.scankey = scankey,
.tuple_found = dimension_rename_schema_name,
.data = names,
.data = (void *) names,
.lockmode = RowExclusiveLock,
.scandirection = ForwardScanDirection,
};

View File

@ -308,7 +308,7 @@ lock_result_ok_or_abort(TupleInfo *ti)
static ScanTupleResult
dimension_vec_tuple_found_list(TupleInfo *ti, void *data)
{
List **slices = data;
List **slices = (List **) data;
DimensionSlice *slice;
MemoryContext old;
@ -339,7 +339,7 @@ dimension_vec_tuple_found_list(TupleInfo *ti, void *data)
static ScanTupleResult
dimension_vec_tuple_found(TupleInfo *ti, void *data)
{
DimensionVec **slices = data;
DimensionVec **slices = (DimensionVec **) data;
DimensionSlice *slice;
MemoryContext old;
@ -455,7 +455,7 @@ ts_dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit,
scankey,
3,
dimension_vec_tuple_found,
&slices,
(void *) &slices,
limit,
AccessShareLock,
tuplock,
@ -499,7 +499,7 @@ ts_dimension_slice_scan_list(int32 dimension_id, int64 coordinate, List **matchi
scankey,
3,
dimension_vec_tuple_found_list,
matching_dimension_slices,
(void *) matching_dimension_slices,
/* limit = */ 0,
AccessShareLock,
&tuplock,
@ -669,7 +669,7 @@ ts_dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, i
scankey,
3,
dimension_vec_tuple_found,
&slices,
(void *) &slices,
limit,
AccessShareLock,
NULL,
@ -694,7 +694,7 @@ ts_dimension_slice_scan_by_dimension(int32 dimension_id, int limit)
scankey,
1,
dimension_vec_tuple_found,
&slices,
(void *) &slices,
limit,
AccessShareLock,
NULL,
@ -737,7 +737,7 @@ ts_dimension_slice_scan_by_dimension_before_point(int32 dimension_id, int64 poin
scankey,
3,
dimension_vec_tuple_found,
&slices,
(void *) &slices,
limit,
scandir,
AccessShareLock,
@ -813,7 +813,7 @@ ts_dimension_slice_delete_by_dimension_id(int32 dimension_id, bool delete_constr
scankey,
1,
dimension_slice_tuple_delete,
&delete_constraints,
(void *) &delete_constraints,
0,
RowExclusiveLock,
&scantuplock,
@ -841,7 +841,7 @@ dimension_slice_fill(TupleInfo *ti, void *data)
case TM_SelfModified:
case TM_Ok:
{
DimensionSlice **slice = data;
DimensionSlice **slice = (DimensionSlice **) data;
bool should_free;
HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);
@ -898,7 +898,7 @@ ts_dimension_slice_scan_for_existing(const DimensionSlice *slice, const ScanTupL
scankey,
3,
dimension_slice_fill,
(DimensionSlice **) &slice,
(void *) &slice,
1,
AccessShareLock,
tuplock,
@ -922,7 +922,7 @@ ts_dimension_slice_from_tuple(TupleInfo *ti)
static ScanTupleResult
dimension_slice_tuple_found(TupleInfo *ti, void *data)
{
DimensionSlice **slice = data;
DimensionSlice **slice = (DimensionSlice **) data;
*slice = ts_dimension_slice_from_tuple(ti);
return SCAN_DONE;
}
@ -949,7 +949,7 @@ ts_dimension_slice_scan_by_id_and_lock(int32 dimension_slice_id, const ScanTupLo
scankey,
1,
dimension_slice_tuple_found,
&slice,
(void *) &slice,
1,
lockmode,
tuplock,
@ -1178,7 +1178,7 @@ ts_dimension_slice_insert(DimensionSlice *slice)
static ScanTupleResult
dimension_slice_nth_tuple_found(TupleInfo *ti, void *data)
{
DimensionSlice **slice = data;
DimensionSlice **slice = (DimensionSlice **) data;
MemoryContext old = MemoryContextSwitchTo(ti->mctx);
*slice = dimension_slice_from_slot(ti->slot);
@ -1204,7 +1204,7 @@ ts_dimension_slice_nth_latest_slice(int32 dimension_id, int n)
scankey,
1,
dimension_slice_nth_tuple_found,
&ret,
(void *) &ret,
n,
BackwardScanDirection,
AccessShareLock,

View File

@ -56,7 +56,7 @@ ts_dimension_vec_sort(DimensionVec **vecptr)
DimensionVec *vec = *vecptr;
if (vec->num_slices > 1)
qsort(vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
qsort((void *) vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
return vec;
}
@ -102,8 +102,8 @@ ts_dimension_vec_remove_slice(DimensionVec **vecptr, int32 index)
DimensionVec *vec = *vecptr;
ts_dimension_slice_free(vec->slices[index]);
memmove(vec->slices + index,
vec->slices + (index + 1),
memmove((void *) &vec->slices[index],
(void *) &vec->slices[index + 1],
sizeof(DimensionSlice *) * (vec->num_slices - index - 1));
vec->num_slices--;
}
@ -118,7 +118,7 @@ dimension_vec_is_sorted(const DimensionVec *vec)
return true;
for (i = 1; i < vec->num_slices; i++)
if (cmp_slices(&vec->slices[i - 1], &vec->slices[i]) > 0)
if (cmp_slices((void *) &vec->slices[i - 1], (void *) &vec->slices[i]) > 0)
return false;
return true;
@ -135,11 +135,11 @@ ts_dimension_vec_find_slice(const DimensionVec *vec, int64 coordinate)
Assert(dimension_vec_is_sorted(vec));
res = bsearch(&coordinate,
vec->slices,
vec->num_slices,
sizeof(DimensionSlice *),
cmp_coordinate_and_slice);
res = (DimensionSlice **) bsearch(&coordinate,
(void *) vec->slices,
vec->num_slices,
sizeof(DimensionSlice *),
cmp_coordinate_and_slice);
if (res == NULL)
return NULL;

View File

@ -24,7 +24,7 @@ typedef struct DimensionVec
} DimensionVec;
#define DIMENSION_VEC_SIZE(num_slices) \
(sizeof(DimensionVec) + sizeof(DimensionSlice *) * num_slices)
(sizeof(DimensionVec) + (sizeof(DimensionSlice *) * num_slices))
#define DIMENSION_VEC_DEFAULT_SIZE 10

View File

@ -466,7 +466,6 @@ relation_get_fk_constraint(Oid conrelid, Oid confrelid)
Relation conrel;
SysScanDesc conscan;
ScanKeyData skey[3];
HeapTuple htup = NULL;
/* Prepare to scan pg_constraint for entries having confrelid = this rel. */
ScanKeyInit(&skey[0],
@ -490,7 +489,8 @@ relation_get_fk_constraint(Oid conrelid, Oid confrelid)
conrel = table_open(ConstraintRelationId, AccessShareLock);
conscan = systable_beginscan(conrel, InvalidOid, false, NULL, 3, skey);
if (HeapTupleIsValid(htup = systable_getnext(conscan)))
HeapTuple htup = systable_getnext(conscan);
if (HeapTupleIsValid(htup))
{
htup = heap_copytuple(htup);
}

View File

@ -36,7 +36,8 @@ TS_FUNCTION_INFO_V1(ts_hist_serializefunc);
TS_FUNCTION_INFO_V1(ts_hist_deserializefunc);
TS_FUNCTION_INFO_V1(ts_hist_finalfunc);
#define HISTOGRAM_SIZE(state, nbuckets) (sizeof(*(state)) + (nbuckets) * sizeof(*(state)->buckets))
#define HISTOGRAM_SIZE(state, nbuckets) \
(sizeof(*(state)) + ((nbuckets) * sizeof(*(state)->buckets)))
typedef struct Histogram
{

View File

@ -146,7 +146,10 @@ ts_hypercube_add_slice(Hypercube *hc, const DimensionSlice *slice)
void
ts_hypercube_slice_sort(Hypercube *hc)
{
qsort(hc->slices, hc->num_slices, sizeof(DimensionSlice *), cmp_slices_by_dimension_id);
qsort((void *) hc->slices,
hc->num_slices,
sizeof(DimensionSlice *),
cmp_slices_by_dimension_id);
}
const DimensionSlice *
@ -162,8 +165,8 @@ ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc, int32 dimension_id)
Assert(hypercube_is_sorted(hc));
ptr = bsearch(&ptr,
hc->slices,
ptr = bsearch((void *) &ptr,
(void *) hc->slices,
hc->num_slices,
sizeof(DimensionSlice *),
cmp_slices_by_dimension_id);

View File

@ -24,7 +24,7 @@ typedef struct Hypercube
} Hypercube;
#define HYPERCUBE_SIZE(num_dimensions) \
(sizeof(Hypercube) + sizeof(DimensionSlice *) * (num_dimensions))
(sizeof(Hypercube) + (sizeof(DimensionSlice *) * (num_dimensions)))
extern TSDLLEXPORT Hypercube *ts_hypercube_alloc(int16 num_dimensions);
extern void ts_hypercube_free(Hypercube *hc);

View File

@ -927,7 +927,7 @@ hypertable_insert(int32 hypertable_id, Name schema_name, Name table_name,
static ScanTupleResult
hypertable_tuple_found(TupleInfo *ti, void *data)
{
Hypertable **entry = data;
Hypertable **entry = (Hypertable **) data;
*entry = ts_hypertable_from_tupleinfo(ti);
return SCAN_DONE;
@ -938,7 +938,7 @@ ts_hypertable_get_by_name(const char *schema, const char *name)
{
Hypertable *ht = NULL;
hypertable_scan(schema, name, hypertable_tuple_found, &ht, AccessShareLock);
hypertable_scan(schema, name, hypertable_tuple_found, (void *) &ht, AccessShareLock);
return ht;
}
@ -959,7 +959,7 @@ ts_hypertable_get_by_id(int32 hypertable_id)
1,
HYPERTABLE_ID_INDEX,
hypertable_tuple_found,
&ht,
(void *) &ht,
1,
AccessShareLock,
CurrentMemoryContext,

View File

@ -292,8 +292,8 @@ ts_hypertable_restrict_info_create(RelOptInfo *rel, Hypertable *ht)
int num_dimensions =
ht->space->num_dimensions + (range_space ? range_space->num_range_cols : 0);
HypertableRestrictInfo *res =
palloc0(sizeof(HypertableRestrictInfo) + sizeof(DimensionRestrictInfo *) * num_dimensions);
HypertableRestrictInfo *res = palloc0(sizeof(HypertableRestrictInfo) +
(sizeof(DimensionRestrictInfo *) * num_dimensions));
int i;
int range_index = 0;
@ -813,9 +813,9 @@ ts_hypertable_restrict_info_get_chunks_ordered(HypertableRestrictInfo *hri, Hype
Assert(IS_OPEN_DIMENSION(&ht->space->dimensions[0]));
if (reverse)
qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_reverse);
qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_reverse);
else
qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp);
qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp);
for (i = 0; i < *num_chunks; i++)
{

View File

@ -152,7 +152,7 @@ ts_set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *parent_rel, Index pare
AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
/* append_rel_list contains all append rels; ignore others */
if (appinfo->parent_relid != (Index) parent_rt_index)
if (appinfo->parent_relid != parent_rt_index)
continue;
/* Re-locate the child RTE and RelOptInfo */

View File

@ -22,7 +22,8 @@
*/
/* Overhead for the fixed part of a List header, measured in ListCells */
#define LIST_HEADER_OVERHEAD ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1))
#define LIST_HEADER_OVERHEAD \
((int) (((offsetof(List, initial_elements) - 1) / sizeof(ListCell)) + 1))
/*
* Return a freshly allocated List with room for at least min_size cells.
@ -78,7 +79,7 @@ ts_new_list(NodeTag type, int min_size)
max_size = min_size;
#endif
newlist = (List *) palloc(offsetof(List, initial_elements) + max_size * sizeof(ListCell));
newlist = (List *) palloc(offsetof(List, initial_elements) + (max_size * sizeof(ListCell)));
newlist->type = type;
newlist->length = min_size;
newlist->max_length = max_size;

View File

@ -741,7 +741,7 @@ ts_bgw_cluster_launcher_main(PG_FUNCTION_ARGS)
pgstat_report_appname(MyBgworkerEntry->bgw_name);
ereport(LOG, (errmsg("TimescaleDB background worker launcher connected to shared catalogs")));
htab_storage = MemoryContextAllocZero(TopMemoryContext, sizeof(void *));
htab_storage = (HTAB **) MemoryContextAllocZero(TopMemoryContext, sizeof(void *));
/*
* We must setup the cleanup function _before_ initializing any state it

View File

@ -41,7 +41,7 @@ ts_function_telemetry_shmem_startup()
* segfaults. Since the shmem_startup_hook is run on every backend, we use
* a ShmemInitStruct to detect if this function has been called before.
*/
lock = ShmemInitStruct("fn_telemetry_detect_first_run", sizeof(LWLock *), &found);
lock = (LWLock **) ShmemInitStruct("fn_telemetry_detect_first_run", sizeof(LWLock *), &found);
if (!found)
*lock = &(GetNamedLWLockTranche(FN_TELEMETRY_LWLOCK_TRANCHE_NAME))->lock;

View File

@ -389,7 +389,7 @@ perform_plan_init(ChunkAppendState *state, EState *estate, int eflags)
return;
}
state->subplanstates = palloc0(state->num_subplans * sizeof(PlanState *));
state->subplanstates = (PlanState **) palloc0(state->num_subplans * sizeof(PlanState *));
i = 0;
foreach (lc, state->filtered_subplans)

View File

@ -1554,10 +1554,10 @@ ExecInsert(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ChunkDispa
if (resultRelInfo->ri_Slots == NULL)
{
resultRelInfo->ri_Slots =
palloc(sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
resultRelInfo->ri_PlanSlots =
palloc(sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
resultRelInfo->ri_Slots = (TupleTableSlot **) palloc(sizeof(TupleTableSlot *) *
resultRelInfo->ri_BatchSize);
resultRelInfo->ri_PlanSlots = (TupleTableSlot **) palloc(
sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
}
/*

View File

@ -188,7 +188,7 @@ is_first_last_node(Node *node, List **context)
if (func_strategy != NULL)
return true;
}
return expression_tree_walker(node, is_first_last_node, context);
return expression_tree_walker(node, is_first_last_node, (void *) context);
}
static bool

View File

@ -145,10 +145,10 @@ int_get_datum(int64 value, Oid type)
return TimestampGetDatum(value);
case TIMESTAMPTZOID:
return TimestampTzGetDatum(value);
default:
elog(ERROR, "unsupported datatype in int_get_datum: %s", format_type_be(type));
pg_unreachable();
}
elog(ERROR, "unsupported datatype in int_get_datum: %s", format_type_be(type));
pg_unreachable();
}
static int64
@ -170,10 +170,12 @@ const_datum_get_int(Const *cnst)
return DatumGetTimestamp(cnst->constvalue);
case TIMESTAMPTZOID:
return DatumGetTimestampTz(cnst->constvalue);
default:
elog(ERROR,
"unsupported datatype in const_datum_get_int: %s",
format_type_be(cnst->consttype));
pg_unreachable();
}
elog(ERROR, "unsupported datatype in const_datum_get_int: %s", format_type_be(cnst->consttype));
pg_unreachable();
}
/*
@ -884,7 +886,7 @@ find_children_chunks(HypertableRestrictInfo *hri, Hypertable *ht, bool include_o
* by find_inheritance_children. This is mostly needed to avoid test
* reference changes.
*/
qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_chunk_reloid);
qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_chunk_reloid);
return chunks;
}

View File

@ -1319,6 +1319,12 @@ process_truncate(ProcessUtilityArgs *args)
}
break;
}
default:
/*
* Do nothing for other relation types. This is mostly to
* placate the static analyzers.
*/
break;
}
}
@ -3325,13 +3331,14 @@ process_cluster_start(ProcessUtilityArgs *args)
* it only for "verbose" output, but this doesn't seem worth it as the
* cost of sorting is quickly amortized over the actual work to cluster
* the chunks. */
mappings = palloc(sizeof(ChunkIndexMapping *) * list_length(chunk_indexes));
mappings = (ChunkIndexMapping **) palloc(sizeof(ChunkIndexMapping *) *
list_length(chunk_indexes));
i = 0;
foreach (lc, chunk_indexes)
mappings[i++] = lfirst(lc);
qsort(mappings,
qsort((void *) mappings,
list_length(chunk_indexes),
sizeof(ChunkIndexMapping *),
chunk_index_mappings_cmp);

View File

@ -172,6 +172,12 @@ transform_int_op_const(OpExpr *op)
return copyObject(nonconst);
}
break;
default:
/*
* Do nothing for unknown operators. The explicit empty
* branch is to placate the static analyzers.
*/
break;
}
}
}

View File

@ -297,7 +297,7 @@ function_telemetry_increment(Oid func_id, HTAB **local_counts)
static bool
function_gather_checker(Oid func_id, void *context)
{
function_telemetry_increment(func_id, context);
function_telemetry_increment(func_id, (HTAB **) context);
return false;
}
@ -325,7 +325,7 @@ static HTAB *
record_function_counts(Query *query)
{
HTAB *query_function_counts = NULL;
query_tree_walker(query, function_gather_walker, &query_function_counts, 0);
query_tree_walker(query, function_gather_walker, (void *) &query_function_counts, 0);
return query_function_counts;
}

View File

@ -163,10 +163,10 @@ bucket_month(int32 period, DateADT date, DateADT origin)
int32 result;
j2date(date + POSTGRES_EPOCH_JDATE, &year, &month, &day);
int32 timestamp = year * 12 + month - 1;
int32 timestamp = (year * 12) + month - 1;
j2date(origin + POSTGRES_EPOCH_JDATE, &year, &month, &day);
int32 offset = year * 12 + month - 1;
int32 offset = (year * 12) + month - 1;
TIME_BUCKET(period, timestamp, offset, PG_INT32_MIN, PG_INT32_MAX, result);
@ -684,8 +684,8 @@ ts_time_bucket_ng_date(PG_FUNCTION_ARGS)
j2date(date + POSTGRES_EPOCH_JDATE, &year, &month, &day);
int32 result;
int32 offset = origin_year * 12 + origin_month - 1;
int32 timestamp = year * 12 + month - 1;
int32 offset = (origin_year * 12) + origin_month - 1;
int32 timestamp = (year * 12) + month - 1;
TIME_BUCKET(interval->month, timestamp, offset, PG_INT32_MIN, PG_INT32_MAX, result);
year = result / 12;

View File

@ -279,7 +279,15 @@ ts_array_add_element_bool(ArrayType *arr, bool value)
Assert(position);
position++;
d = array_set_element(d, 1, &position, value, false, -1, 1, true, TYPALIGN_CHAR);
d = array_set_element(d,
1,
&position,
BoolGetDatum(value),
false,
-1,
1,
true,
TYPALIGN_CHAR);
return DatumGetArrayTypeP(d);
}

View File

@ -416,9 +416,7 @@ ts_catalog_table_info_init(CatalogTableInfo *tables_info, int max_tables,
for (j = 0; j < number_indexes; j++)
{
id = ts_get_relation_relid((char *) table_ary[i].schema_name,
(char *) index_ary[i].names[j],
true);
id = ts_get_relation_relid(table_ary[i].schema_name, index_ary[i].names[j], true);
if (!OidIsValid(id))
elog(ERROR, "OID lookup failed for table index \"%s\"", index_ary[i].names[j]);

View File

@ -292,9 +292,9 @@ ts_time_value_to_internal_or_infinite(Datum time_val, Oid type_oid)
return ts_time_value_to_internal(time_val, type_oid);
}
default:
return ts_time_value_to_internal(time_val, type_oid);
}
return ts_time_value_to_internal(time_val, type_oid);
}
TS_FUNCTION_INFO_V1(ts_time_to_internal);

View File

@ -114,8 +114,8 @@ ts_test_next_scheduled_execution_slot(PG_FUNCTION_ARGS)
DirectFunctionCall2(timestamptz_part, CStringGetTextDatum("month"), timebucket_fini);
/* convert everything to months */
float8 month_diff = DatumGetFloat8(year_fini) * 12 + DatumGetFloat8(month_fini) -
(DatumGetFloat8(year_init) * 12 + DatumGetFloat8(month_init));
float8 month_diff = (DatumGetFloat8(year_fini) * 12) + DatumGetFloat8(month_fini) -
((DatumGetFloat8(year_init) * 12) + DatumGetFloat8(month_init));
Datum months_to_add = DirectFunctionCall2(interval_mul,
IntervalPGetDatum(&one_month),

View File

@ -31,7 +31,7 @@ void
_PG_init(void)
{
elog(WARNING, "OSM-%s _PG_init", OSM_VERSION_MOD);
void *osm_lock_pointer = (LWLock **) find_rendezvous_variable(RENDEZVOUS_OSM_PARALLEL_LWLOCK);
void **osm_lock_pointer = find_rendezvous_variable(RENDEZVOUS_OSM_PARALLEL_LWLOCK);
if (osm_lock_pointer != NULL)
{
elog(WARNING, "got lwlock osm lock");

View File

@ -153,7 +153,7 @@ ts_test_status(PG_FUNCTION_ARGS)
int port = 80;
int status = PG_GETARG_INT32(0);
PG_RETURN_JSONB_P((void *) test_factory(CONNECTION_PLAIN, status, TEST_ENDPOINT, port));
PG_RETURN_DATUM(test_factory(CONNECTION_PLAIN, status, TEST_ENDPOINT, port));
}
#ifdef TS_DEBUG
@ -166,7 +166,7 @@ ts_test_status_mock(PG_FUNCTION_ARGS)
test_string = text_to_cstring(arg1);
PG_RETURN_JSONB_P((void *) test_factory(CONNECTION_MOCK, 123, TEST_ENDPOINT, port));
PG_RETURN_DATUM(test_factory(CONNECTION_MOCK, 123, TEST_ENDPOINT, port));
}
#endif
@ -294,5 +294,5 @@ ts_test_telemetry(PG_FUNCTION_ARGS)
ts_http_response_state_destroy(rsp);
PG_RETURN_JSONB_P((void *) json_body);
PG_RETURN_DATUM(json_body);
}

View File

@ -315,7 +315,7 @@ job_alter(PG_FUNCTION_ARGS)
Oid check = PG_ARGISNULL(9) ? InvalidOid : PG_GETARG_OID(9);
char *check_name_str = NULL;
/* Added space for period and NULL */
char schema_qualified_check_name[2 * NAMEDATALEN + 2] = { 0 };
char schema_qualified_check_name[(2 * NAMEDATALEN) + 2] = { 0 };
bool unregister_check = (!PG_ARGISNULL(9) && !OidIsValid(check));
TimestampTz initial_start = PG_ARGISNULL(11) ? DT_NOBEGIN : PG_GETARG_TIMESTAMPTZ(11);
text *timezone = PG_ARGISNULL(12) ? NULL : PG_GETARG_TEXT_PP(12);

View File

@ -522,7 +522,7 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat
partially_grouped_rel->reltarget,
NULL,
&total_groups);
add_path(partially_grouped_rel, (Path *) gather_path);
add_path(partially_grouped_rel, gather_path);
}
}

View File

@ -1,8 +0,0 @@
# Disable warnings as errors on compression code since it currently
# doesn't pass those tests
---
Checks: '-*,clang-analyzer-core.*,clang-diagnostic-*'
WarningsAsErrors: 'clang-analyzer-unix.*'
HeaderFilterRegex: ''
AnalyzeTemporaryDtors: false
...

View File

@ -634,7 +634,8 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
Assert(current_notnull_element == -1);
}
ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 3);
ArrowArray *result =
MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + (sizeof(void *) * 3));
const void **buffers = (const void **) &result[1];
buffers[0] = validity_bitmap;
buffers[1] = offsets;

View File

@ -508,7 +508,8 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon
Assert(current_notnull_element == -1);
}
ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2);
ArrowArray *result =
MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + (sizeof(void *) * 2));
const void **buffers = (const void **) &result[1];
buffers[0] = validity_bitmap;
buffers[1] = indices;

View File

@ -1407,7 +1407,7 @@ recompress_chunk_segmentwise_impl(Chunk *uncompressed_chunk)
bool changed_segment = false;
/************ current segment **************/
CompressedSegmentInfo **current_segment =
palloc(sizeof(CompressedSegmentInfo *) * nsegmentby_cols);
(CompressedSegmentInfo **) palloc(sizeof(CompressedSegmentInfo *) * nsegmentby_cols);
for (int i = 0; i < nsegmentby_cols; i++)
{

View File

@ -1233,7 +1233,8 @@ build_decompressor(Relation in_rel, Relation out_rel)
ALLOCSET_DEFAULT_SIZES),
.estate = CreateExecutorState(),
.decompressed_slots = palloc0(sizeof(void *) * TARGET_COMPRESSED_BATCH_SIZE),
.decompressed_slots =
(TupleTableSlot **) palloc0(sizeof(void *) * TARGET_COMPRESSED_BATCH_SIZE),
};
create_per_compressed_column(&decompressor);
@ -1815,6 +1816,9 @@ tsl_compressed_data_info(PG_FUNCTION_ARGS)
case COMPRESSION_ALGORITHM_ARRAY:
has_nulls = array_compressed_has_nulls(header);
break;
default:
elog(ERROR, "unknown compression algorithm %d", header->compression_algorithm);
break;
}
tupdesc = BlessTupleDesc(tupdesc);

View File

@ -944,7 +944,11 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
false, /* is_null */
false /* is_array_op */
));
break;
}
default:
/* Do nothing for unknown operator strategies. */
break;
}
continue;
}
@ -1043,6 +1047,10 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
false /* is_array_op */
));
}
break;
default:
/* Do nothing for unknown operator strategies. */
break;
}
}
}
@ -1088,7 +1096,11 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
false, /* is_null */
true /* is_array_op */
));
break;
}
default:
/* Do nothing on unknown operator strategies. */
break;
}
continue;
}

View File

@ -327,6 +327,10 @@ process_timebucket_parameters(FuncExpr *fe, ContinuousAggsBucketFunction *bf, bo
bf->bucket_time_origin = DatumGetTimestampTz(constval->constvalue);
}
}
break;
default:
/* Nothing to do for integer time column. */
break;
}
if (process_checks && custom_origin && TIMESTAMP_NOT_FINITE(bf->bucket_time_origin))
{

View File

@ -505,7 +505,7 @@ arrow_create_with_buffers(MemoryContext mcxt, int n_buffers)
ArrowArray array;
const void *buffers[FLEXIBLE_ARRAY_MEMBER];
} *array_with_buffers =
MemoryContextAllocZero(mcxt, sizeof(ArrowArray) + sizeof(const void *) * n_buffers);
MemoryContextAllocZero(mcxt, sizeof(ArrowArray) + (sizeof(const void *) * n_buffers));
ArrowArray *array = &array_with_buffers->array;

View File

@ -336,10 +336,10 @@ typedef struct HypercoreParallelScanDescData *HypercoreParallelScanDesc;
typedef enum HypercoreScanState
{
HYPERCORE_SCAN_START,
HYPERCORE_SCAN_START = 0,
HYPERCORE_SCAN_COMPRESSED = HYPERCORE_SCAN_START,
HYPERCORE_SCAN_NON_COMPRESSED,
HYPERCORE_SCAN_DONE,
HYPERCORE_SCAN_NON_COMPRESSED = 1,
HYPERCORE_SCAN_DONE = 2,
} HypercoreScanState;
const char *scan_state_name[] = {
@ -2074,7 +2074,7 @@ hypercore_relation_copy_for_cluster(Relation OldHypercore, Relation NewCompressi
if (prev_cblock != cblock)
{
pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
(cblock + nblocks - startblock) % nblocks + 1);
((cblock + nblocks - startblock) % nblocks) + 1);
prev_cblock = cblock;
}
/* Get the actual tuple from the child slot (either compressed or

View File

@ -199,7 +199,7 @@ typedef struct HSProxyVacuumState
} HSProxyVacuumState;
#define HSPROXY_VACUUM_STATE_SIZE(nindexes) \
(sizeof(HSProxyVacuumState) + (sizeof(IndexBulkDeleteResult)) * (nindexes))
(sizeof(HSProxyVacuumState) + (sizeof(IndexBulkDeleteResult) * (nindexes)))
/*
* Bulkdelete. Called by vacuum on the compressed relation.

View File

@ -52,7 +52,7 @@ batch_array_enlarge(BatchArray *array, int new_number)
array->batch_states = repalloc(array->batch_states, array->n_batch_state_bytes * new_number);
/* Zero out the tail. The batch states are initialized on first use. */
memset(((char *) array->batch_states) + array->n_batch_state_bytes * array->n_batch_states,
memset(((char *) array->batch_states) + (array->n_batch_state_bytes * array->n_batch_states),
0x0,
array->n_batch_state_bytes * (new_number - array->n_batch_states));

View File

@ -159,7 +159,7 @@ binaryheap_add_unordered_autoresize(binaryheap *heap, Datum d)
if (heap->bh_size >= heap->bh_space)
{
heap->bh_space = heap->bh_space * 2;
Size new_size = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * heap->bh_space;
Size new_size = offsetof(binaryheap, bh_nodes) + (sizeof(Datum) * heap->bh_space);
heap = (binaryheap *) repalloc(heap, new_size);
}
@ -207,9 +207,9 @@ batch_queue_heap_pop(BatchQueue *bq, DecompressContext *dcontext)
* We're working with virtual tuple slots so no need for slot_getattr().
*/
Assert(TTS_IS_VIRTUAL(top_tuple));
queue->heap_entries[top_batch_index * queue->nkeys + key].value =
queue->heap_entries[(top_batch_index * queue->nkeys) + key].value =
top_tuple->tts_values[attr];
queue->heap_entries[top_batch_index * queue->nkeys + key].null =
queue->heap_entries[(top_batch_index * queue->nkeys) + key].null =
top_tuple->tts_isnull[attr];
}
@ -308,9 +308,9 @@ batch_queue_heap_push_batch(BatchQueue *_queue, DecompressContext *dcontext,
* We're working with virtual tuple slots so no need for slot_getattr().
*/
Assert(TTS_IS_VIRTUAL(current_tuple));
queue->heap_entries[new_batch_index * queue->nkeys + key].value =
queue->heap_entries[(new_batch_index * queue->nkeys) + key].value =
current_tuple->tts_values[attr];
queue->heap_entries[new_batch_index * queue->nkeys + key].null =
queue->heap_entries[(new_batch_index * queue->nkeys) + key].null =
current_tuple->tts_isnull[attr];
}

View File

@ -432,7 +432,7 @@ translate_bitmap_from_dictionary(const ArrowArray *arrow, const uint64 *dict_res
uint64 word = 0;
for (size_t inner = 0; inner < 64; inner++)
{
const size_t row = outer * 64 + inner;
const size_t row = (outer * 64) + inner;
const size_t bit_index = inner;
#define INNER_LOOP \
const int16 index = indices[row]; \

View File

@ -443,7 +443,16 @@ static double
smoothstep(double x, double start, double end)
{
x = (x - start) / (end - start);
x = x < 0 ? 0 : x > 1 ? 1 : x;
if (x < 0)
{
x = 0;
}
else if (x > 1)
{
x = 1;
}
return x * x * (3.0F - 2.0F * x);
}
@ -522,7 +531,7 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
* we often read a small subset of columns in analytical queries. The
* compressed chunk is never projected so we can't use it for that.
*/
const double work_mem_bytes = work_mem * (double) 1024.0;
const double work_mem_bytes = work_mem * 1024.0;
const double needed_memory_bytes = open_batches_clamped * TARGET_COMPRESSED_BATCH_SIZE *
dcpath->custom_path.path.pathtarget->width;
@ -545,7 +554,7 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
*/
const double sort_path_cost_for_startup =
sort_path.startup_cost +
(sort_path.total_cost - sort_path.startup_cost) * (open_batches_clamped / sort_path.rows);
((sort_path.total_cost - sort_path.startup_cost) * (open_batches_clamped / sort_path.rows));
Assert(sort_path_cost_for_startup >= 0);
dcpath->custom_path.path.startup_cost = sort_path_cost_for_startup + work_mem_penalty;
@ -1506,7 +1515,7 @@ has_compressed_vars_walker(Node *node, CompressionInfo *info)
if (IsA(node, Var))
{
Var *var = castNode(Var, node);
if ((Index) var->varno != (Index) info->compressed_rel->relid)
if ((Index) var->varno != info->compressed_rel->relid)
{
return false;
}

View File

@ -157,7 +157,7 @@ typedef struct VectorQualInfoDecompressChunk
static bool *
build_vector_attrs_array(const UncompressedColumnInfo *colinfo, const CompressionInfo *info)
{
const unsigned short arrlen = info->chunk_rel->max_attr + 1;
const AttrNumber arrlen = info->chunk_rel->max_attr + 1;
bool *vector_attrs = palloc(sizeof(bool) * arrlen);
for (AttrNumber attno = 0; attno < arrlen; attno++)

View File

@ -32,7 +32,7 @@ vector_const_text_comparison(const ArrowArray *arrow, const Datum constdatum, bo
uint64 word = 0;
for (size_t inner = 0; inner < 64; inner++)
{
const size_t row = outer * 64 + inner;
const size_t row = (outer * 64) + inner;
const size_t bit_index = inner;
#define INNER_LOOP \
const uint32 start = offsets[row]; \
@ -122,7 +122,7 @@ vector_const_like_impl(const ArrowArray *arrow, const Datum constdatum, uint64 *
uint64 word = 0;
for (size_t inner = 0; inner < 64; inner++)
{
const size_t row = outer * 64 + inner;
const size_t row = (outer * 64) + inner;
const size_t bit_index = inner;
/*
* The inner loop could have been an inline function, but it would have 5

View File

@ -48,6 +48,12 @@ get_vector_const_predicate(Oid pg_predicate)
case F_TEXTNE:
return vector_const_textne;
default:
/*
* More checks below, this branch is to placate the static analyzers.
*/
break;
}
if (GetDatabaseEncoding() == PG_UTF8)
@ -59,6 +65,11 @@ get_vector_const_predicate(Oid pg_predicate)
return vector_const_textlike_utf8;
case F_TEXTNLIKE:
return vector_const_textnlike_utf8;
default:
/*
* This branch is to placate the static analyzers.
*/
break;
}
}

View File

@ -1206,7 +1206,7 @@ gapfill_state_initialize_columns(GapFillState *state)
int i;
state->ncolumns = tupledesc->natts;
state->columns = palloc(state->ncolumns * sizeof(GapFillColumnState *));
state->columns = (GapFillColumnState **) palloc(state->ncolumns * sizeof(GapFillColumnState *));
for (i = 0; i < state->ncolumns; i++)
{

View File

@ -74,7 +74,7 @@ gapfill_function_walker(Node *node, gapfill_walker_context *context)
context->count++;
}
return expression_tree_walker((Node *) node, gapfill_function_walker, context);
return expression_tree_walker(node, gapfill_function_walker, context);
}
/*
@ -92,7 +92,7 @@ marker_function_walker(Node *node, gapfill_walker_context *context)
context->count++;
}
return expression_tree_walker((Node *) node, marker_function_walker, context);
return expression_tree_walker(node, marker_function_walker, context);
}
/*

View File

@ -27,12 +27,10 @@ gapfill_interpolate_initialize(GapFillInterpolateColumnState *interpolate, GapFi
{
interpolate->prev.isnull = true;
interpolate->next.isnull = true;
if (list_length(((FuncExpr *) function)->args) > 1)
interpolate->lookup_before =
gapfill_adjust_varnos(state, lsecond(((FuncExpr *) function)->args));
if (list_length(((FuncExpr *) function)->args) > 2)
interpolate->lookup_after =
gapfill_adjust_varnos(state, lthird(((FuncExpr *) function)->args));
if (list_length(function->args) > 1)
interpolate->lookup_before = gapfill_adjust_varnos(state, lsecond(function->args));
if (list_length(function->args) > 2)
interpolate->lookup_after = gapfill_adjust_varnos(state, lthird(function->args));
}
/*

View File

@ -16,7 +16,8 @@ static void
/*
 * Emit the final value of a float-sum vectorized aggregate.
 * Converts the accumulated double result to the per-instantiation CTYPE
 * via a named intermediate (avoids a cast inside the macro argument, which
 * clang-tidy flags), and reports NULL when no valid input was accumulated.
 * The stale pre-change store left over from the diff residue is removed.
 */
FUNCTION_NAME(emit)(void *agg_state, Datum *out_result, bool *out_isnull)
{
	FloatSumState *state = (FloatSumState *) agg_state;
	const CTYPE result_casted = state->result;
	*out_result = CTYPE_TO_DATUM(result_casted);
	*out_isnull = !state->isvalid;
}

View File

@ -71,7 +71,7 @@ create_grouping_policy_batch(int num_agg_defs, VectorAggDef *agg_defs, int num_g
policy->agg_extra_mctx =
AllocSetContextCreate(CurrentMemoryContext, "agg extra", ALLOCSET_DEFAULT_SIZES);
policy->agg_states = palloc(sizeof(*policy->agg_states) * policy->num_agg_defs);
policy->agg_states = (void **) palloc(sizeof(*policy->agg_states) * policy->num_agg_defs);
for (int i = 0; i < policy->num_agg_defs; i++)
{
VectorAggDef *agg_def = &policy->agg_defs[i];
@ -190,7 +190,7 @@ gp_batch_add_batch(GroupingPolicy *gp, DecompressBatchState *batch_state)
const size_t num_words = (batch_state->total_batch_rows + 63) / 64;
if (num_words > policy->num_tmp_filter_words)
{
const size_t new_words = num_words * 2 + 1;
const size_t new_words = (num_words * 2) + 1;
if (policy->tmp_filter != NULL)
{
pfree(policy->tmp_filter);

View File

@ -539,11 +539,8 @@ test_delta3(bool have_nulls, bool have_random)
/* Forward decompression. */
DecompressionIterator *iter =
delta_delta_decompression_iterator_from_datum_forward(PointerGetDatum((void *) compressed),
INT8OID);
ArrowArray *bulk_result = delta_delta_decompress_all(PointerGetDatum((void *) compressed),
INT8OID,
CurrentMemoryContext);
delta_delta_decompression_iterator_from_datum_forward(compressed, INT8OID);
ArrowArray *bulk_result = delta_delta_decompress_all(compressed, INT8OID, CurrentMemoryContext);
for (int i = 0; i < TEST_ELEMENTS; i++)
{
DecompressResult r = delta_delta_decompression_iterator_try_next_forward(iter);
@ -565,9 +562,7 @@ test_delta3(bool have_nulls, bool have_random)
TestAssertTrue(r.is_done);
/* Reverse decompression. */
iter =
delta_delta_decompression_iterator_from_datum_reverse(PointerGetDatum((void *) compressed),
INT8OID);
iter = delta_delta_decompression_iterator_from_datum_reverse(compressed, INT8OID);
for (int i = TEST_ELEMENTS - 1; i >= 0; i--)
{
DecompressResult r = delta_delta_decompression_iterator_try_next_reverse(iter);