From fc827c154a14013e806ff650af9a2c5255a3d30d Mon Sep 17 00:00:00 2001
From: Alexander Kuzmenkov <36882414+akuzm@users.noreply.github.com>
Date: Mon, 18 Nov 2024 14:06:52 +0100
Subject: [PATCH] Prepare for clang-tidy-18 (#7213)

Mostly the complaints about implicit casts of multi-level pointers.

Not enabling it yet in the CI because there are some complicated
warnings left.

Loader changes are cosmetic.
---
 .clang-tidy                                   |  6 ----
 src/bgw/job_stat.c                            |  8 +++---
 src/bgw/timer.c                               |  5 ++--
 src/bgw_policy/chunk_stats.c                  |  4 +--
 src/chunk.h                                   |  2 +-
 src/chunk_index.c                             |  4 +--
 src/chunk_scan.c                              |  3 +-
 src/copy.c                                    | 14 ++++++++--
 src/dimension.c                               |  2 +-
 src/dimension_slice.c                         | 28 +++++++++----------
 src/dimension_vector.c                        | 18 ++++++------
 src/dimension_vector.h                        |  2 +-
 src/foreign_key.c                             |  4 +--
 src/histogram.c                               |  3 +-
 src/hypercube.c                               |  9 ++++--
 src/hypercube.h                               |  2 +-
 src/hypertable.c                              |  6 ++--
 src/hypertable_restrict_info.c                |  8 +++---
 src/import/allpaths.c                         |  2 +-
 src/import/list.c                             |  5 ++--
 src/loader/bgw_launcher.c                     |  2 +-
 src/loader/function_telemetry.c               |  2 +-
 src/nodes/chunk_append/exec.c                 |  2 +-
 src/nodes/hypertable_modify.c                 |  8 +++---
 src/planner/agg_bookend.c                     |  2 +-
 src/planner/expand_hypertable.c               | 16 ++++++-----
 src/process_utility.c                         | 11 ++++++--
 src/sort_transform.c                          |  6 ++++
 src/telemetry/functions.c                     |  4 +--
 src/time_bucket.c                             |  8 +++---
 src/ts_catalog/array_utils.c                  | 10 ++++++-
 src/ts_catalog/catalog.c                      |  4 +--
 src/utils.c                                   |  4 +--
 test/src/bgw/scheduler_mock.c                 |  4 +--
 test/src/loader/osm_init.c                    |  2 +-
 test/src/telemetry/test_telemetry.c           |  6 ++--
 tsl/src/bgw_policy/job_api.c                  |  2 +-
 tsl/src/chunkwise_agg.c                       |  2 +-
 tsl/src/compression/.clang-tidy               |  8 ------
 tsl/src/compression/algorithms/array.c        |  3 +-
 tsl/src/compression/algorithms/dictionary.c   |  3 +-
 tsl/src/compression/api.c                     |  2 +-
 tsl/src/compression/compression.c             |  6 +++-
 tsl/src/compression/compression_dml.c         | 12 ++++++++
 tsl/src/continuous_aggs/common.c              |  4 +++
 tsl/src/hypercore/arrow_array.c               |  2 +-
 tsl/src/hypercore/hypercore_handler.c         |  8 +++---
 tsl/src/hypercore/hypercore_proxy.c           |  2 +-
 tsl/src/nodes/decompress_chunk/batch_array.c  |  2 +-
 .../nodes/decompress_chunk/batch_queue_heap.c | 10 +++----
 .../nodes/decompress_chunk/compressed_batch.c |  2 +-
 .../nodes/decompress_chunk/decompress_chunk.c | 17 ++++++---
 tsl/src/nodes/decompress_chunk/planner.c      |  2 +-
 tsl/src/nodes/decompress_chunk/pred_text.c    |  4 +--
 .../decompress_chunk/vector_predicates.c      | 11 ++++++++
 tsl/src/nodes/gapfill/gapfill_exec.c          |  2 +-
 tsl/src/nodes/gapfill/gapfill_plan.c          |  4 +--
 tsl/src/nodes/gapfill/interpolate.c           | 10 +++---
 .../vector_agg/function/sum_float_single.c    |  3 +-
 .../nodes/vector_agg/grouping_policy_batch.c  |  4 +--
 tsl/test/src/compression_unit_test.c          | 11 ++------
 61 files changed, 211 insertions(+), 151 deletions(-)
 delete mode 100644 .clang-tidy
 delete mode 100644 tsl/src/compression/.clang-tidy

diff --git a/.clang-tidy b/.clang-tidy
deleted file mode 100644
index c154026e6..000000000
--- a/.clang-tidy
+++ /dev/null
@@ -1,6 +0,0 @@
----
-Checks: '-*,clang-analyzer-core.*,clang-diagnostic-*'
-WarningsAsErrors: 'clang-analyzer-unix.*,clang-analyzer-core.NullDereference'
-HeaderFilterRegex: ''
-AnalyzeTemporaryDtors: false
-...
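The bulk of the diff below silences what appears to be clang-tidy 18's new
bugprone-multi-level-implicit-pointer-conversion check. A minimal standalone
sketch of the pattern, with illustrative types and names that are not taken
from this patch:

    #include <stdlib.h>

    typedef struct Chunk
    {
        int id;
    } Chunk;

    /* Generic callback signature: the context arrives as void *. */
    static void
    tuple_found(void *data)
    {
        /* Before: `Chunk **chunk_pp = data;` -- an implicit void * -> Chunk **
         * (multi-level) conversion that clang-tidy 18 flags. */
        Chunk **chunk_pp = (Chunk **) data;

        *chunk_pp = malloc(sizeof(Chunk));
        (*chunk_pp)->id = 42;
    }

    int
    main(void)
    {
        Chunk *chunk = NULL;

        /* The other direction is flagged too: &chunk is Chunk **, so passing
         * it where void * is expected now gets an explicit (void *) cast,
         * which is exactly what the scanner call sites below do. */
        tuple_found((void *) &chunk);

        free(chunk);
        return 0;
    }
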
diff --git a/src/bgw/job_stat.c b/src/bgw/job_stat.c
index c52d4169e..1dbc4fe18 100644
--- a/src/bgw/job_stat.c
+++ b/src/bgw/job_stat.c
@@ -34,7 +34,7 @@ bgw_job_stat_next_start_was_set(FormData_bgw_job_stat *fd)
 static ScanTupleResult
 bgw_job_stat_tuple_found(TupleInfo *ti, void *const data)
 {
-	BgwJobStat **job_stat_pp = data;
+	BgwJobStat **job_stat_pp = (BgwJobStat **) data;
 
 	*job_stat_pp = STRUCT_FROM_SLOT(ti->slot, ti->mctx, BgwJobStat, FormData_bgw_job_stat);
 
@@ -94,7 +94,7 @@ ts_bgw_job_stat_find(int32 bgw_job_id)
 	bgw_job_stat_scan_job_id(bgw_job_id,
 							 bgw_job_stat_tuple_found,
 							 NULL,
-							 &job_stat,
+							 (void *) &job_stat,
 							 AccessShareLock);
 
 	return job_stat;
@@ -235,8 +235,8 @@ ts_get_next_scheduled_execution_slot(BgwJob *job, TimestampTz finish_time)
 		DirectFunctionCall2(timestamptz_part, CStringGetTextDatum("month"), timebucket_fini);
 
 	/* convert everything to months */
-	float8 month_diff = DatumGetFloat8(year_fini) * 12 + DatumGetFloat8(month_fini) -
-						(DatumGetFloat8(year_init) * 12 + DatumGetFloat8(month_init));
+	float8 month_diff = (DatumGetFloat8(year_fini) * 12) + DatumGetFloat8(month_fini) -
+						((DatumGetFloat8(year_init) * 12) + DatumGetFloat8(month_init));
 
 	Datum months_to_add = DirectFunctionCall2(interval_mul,
											  IntervalPGetDatum(&one_month),
diff --git a/src/bgw/timer.c b/src/bgw/timer.c
index 0627d4a1d..be2bd1d1c 100644
--- a/src/bgw/timer.c
+++ b/src/bgw/timer.c
@@ -57,7 +57,8 @@ get_timeout_millisec(TimestampTz by_time)
 	if (timeout_sec < 0 || timeout_usec < 0)
 		return 0;
 
-	return (int64) (timeout_sec * MILLISECS_PER_SEC + ((int64) timeout_usec) / USECS_PER_MILLISEC);
+	return (int64) ((timeout_sec * MILLISECS_PER_SEC) +
+					(((int64) timeout_usec) / USECS_PER_MILLISEC));
 }
 
 static bool
@@ -73,7 +74,7 @@ wait_using_wait_latch(TimestampTz until)
 		timeout = MAX_TIMEOUT;
 
 	/* Wait latch requires timeout to be <= INT_MAX */
-	if ((int64) timeout > (int64) INT_MAX)
+	if (timeout > (int64) INT_MAX)
 		timeout = INT_MAX;
 
 	wl_rc = WaitLatch(MyLatch,
diff --git a/src/bgw_policy/chunk_stats.c b/src/bgw_policy/chunk_stats.c
index 3c1a694ef..f41fcec72 100644
--- a/src/bgw_policy/chunk_stats.c
+++ b/src/bgw_policy/chunk_stats.c
@@ -18,7 +18,7 @@
 static ScanTupleResult
 bgw_policy_chunk_stats_tuple_found(TupleInfo *ti, void *const data)
 {
-	BgwPolicyChunkStats **chunk_stats = data;
+	BgwPolicyChunkStats **chunk_stats = (BgwPolicyChunkStats **) data;
 
 	*chunk_stats =
 		STRUCT_FROM_SLOT(ti->slot, ti->mctx, BgwPolicyChunkStats, FormData_bgw_policy_chunk_stats);
@@ -148,7 +148,7 @@ ts_bgw_policy_chunk_stats_find(int32 job_id, int32 chunk_id)
 							bgw_policy_chunk_stats_tuple_found,
 							AccessShareLock,
 							BGW_POLICY_CHUNK_STATS_TABLE_NAME,
-							&stats);
+							(void *) &stats);
 
 	return stats;
 }
diff --git a/src/chunk.h b/src/chunk.h
index ac6ca255e..aaa93026e 100644
--- a/src/chunk.h
+++ b/src/chunk.h
@@ -152,7 +152,7 @@ extern ChunkVec *ts_chunk_vec_create(int32 capacity);
 extern ChunkVec *ts_chunk_vec_sort(ChunkVec **chunks);
 extern ChunkVec *ts_chunk_vec_add_from_tuple(ChunkVec **chunks, TupleInfo *ti);
 
-#define CHUNK_VEC_SIZE(num_chunks) (sizeof(ChunkVec) + sizeof(Chunk) * num_chunks)
+#define CHUNK_VEC_SIZE(num_chunks) (sizeof(ChunkVec) + (sizeof(Chunk) * num_chunks))
 #define DEFAULT_CHUNK_VEC_SIZE 10
 
 extern void ts_chunk_formdata_fill(FormData_chunk *fd, const TupleInfo *ti);
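The macro edits here (CHUNK_VEC_SIZE and friends) only add parentheses so the
mixed * and + precedence is explicit, presumably to satisfy a readability
check. A hedged sketch of why fully parenthesized macro arithmetic is the
safer habit (sizes and names are made up for illustration):

    #include <stdio.h>

    #define HEADER_BYTES 16

    /* Unparenthesized: correct here only because * binds tighter than +,
     * and wrong outright when `n` is passed as an expression like `a + b`. */
    #define VEC_SIZE_RISKY(n) (HEADER_BYTES + n * 8)

    /* Parenthesized: immune to both precedence surprises. */
    #define VEC_SIZE(n) (HEADER_BYTES + ((n) * 8))

    int
    main(void)
    {
        /* With n = 1 + 2: the risky form expands to 16 + 1 + 2 * 8 = 33,
         * the parenthesized form to 16 + (3 * 8) = 40. */
        printf("%d %d\n", VEC_SIZE_RISKY(1 + 2), VEC_SIZE(1 + 2));
        return 0;
    }
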
diff --git a/src/chunk_index.c b/src/chunk_index.c
index 35b9d16d6..da11d4a56 100644
--- a/src/chunk_index.c
+++ b/src/chunk_index.c
@@ -525,7 +525,7 @@ chunk_index_mapping_from_tuple(TupleInfo *ti, ChunkIndexMapping *cim)
 static ScanTupleResult
 chunk_index_collect(TupleInfo *ti, void *data)
 {
-	List **mappings = data;
+	List **mappings = (List **) data;
 	ChunkIndexMapping *cim;
 	MemoryContext oldmctx;
 
@@ -560,7 +560,7 @@ ts_chunk_index_get_mappings(Hypertable *ht, Oid hypertable_indexrelid)
 					   2,
 					   chunk_index_collect,
 					   NULL,
-					   &mappings,
+					   (void *) &mappings,
 					   AccessShareLock);
 
 	return mappings;
diff --git a/src/chunk_scan.c b/src/chunk_scan.c
index 817918f6d..ce9daeb6d 100644
--- a/src/chunk_scan.c
+++ b/src/chunk_scan.c
@@ -54,7 +54,8 @@ ts_chunk_scan_by_chunk_ids(const Hyperspace *hs, const List *chunk_ids, unsigned
 	 * Make sure to filter out "dropped" chunks.
 	 */
 	ScanIterator chunk_it = ts_chunk_scan_iterator_create(orig_mcxt);
-	locked_chunks = MemoryContextAlloc(orig_mcxt, sizeof(Chunk *) * list_length(chunk_ids));
+	locked_chunks =
+		(Chunk **) MemoryContextAlloc(orig_mcxt, sizeof(Chunk *) * list_length(chunk_ids));
 	foreach (lc, chunk_ids)
 	{
 		int chunk_id = lfirst_int(lc);
diff --git a/src/copy.c b/src/copy.c
index a3df8b01f..a1847f70f 100644
--- a/src/copy.c
+++ b/src/copy.c
@@ -184,7 +184,7 @@ TSCopyMultiInsertBufferInit(ChunkInsertState *cis, Point *point)
 	TSCopyMultiInsertBuffer *buffer;
 
 	buffer = (TSCopyMultiInsertBuffer *) palloc(sizeof(TSCopyMultiInsertBuffer));
-	memset(buffer->slots, 0, sizeof(TupleTableSlot *) * MAX_BUFFERED_TUPLES);
+	memset((void *) buffer->slots, 0, sizeof(TupleTableSlot *) * MAX_BUFFERED_TUPLES);
 	buffer->bistate = GetBulkInsertState();
 	buffer->nused = 0;
 
@@ -447,7 +447,17 @@ TSCmpBuffersByUsage(const ListCell *a, const ListCell *b)
 	Assert(b1 >= 0);
 	Assert(b2 >= 0);
 
-	return (b1 > b2) ? 1 : (b1 == b2) ? 0 : -1;
+	if (b1 > b2)
+	{
+		return 1;
+	}
+
+	if (b1 == b2)
+	{
+		return 0;
+	}
+
+	return -1;
 }
 
 /*
diff --git a/src/dimension.c b/src/dimension.c
index 4c80edaa0..e5b829369 100644
--- a/src/dimension.c
+++ b/src/dimension.c
@@ -1886,7 +1886,7 @@ ts_dimensions_rename_schema_name(const char *old_name, const char *new_name)
 		.nkeys = 1,
 		.scankey = scankey,
 		.tuple_found = dimension_rename_schema_name,
-		.data = names,
+		.data = (void *) names,
 		.lockmode = RowExclusiveLock,
 		.scandirection = ForwardScanDirection,
 	};
diff --git a/src/dimension_slice.c b/src/dimension_slice.c
index 9e0796086..bb57972dd 100644
--- a/src/dimension_slice.c
+++ b/src/dimension_slice.c
@@ -308,7 +308,7 @@ lock_result_ok_or_abort(TupleInfo *ti)
 static ScanTupleResult
 dimension_vec_tuple_found_list(TupleInfo *ti, void *data)
 {
-	List **slices = data;
+	List **slices = (List **) data;
 	DimensionSlice *slice;
 	MemoryContext old;
 
@@ -339,7 +339,7 @@ dimension_vec_tuple_found_list(TupleInfo *ti, void *data)
 static ScanTupleResult
 dimension_vec_tuple_found(TupleInfo *ti, void *data)
 {
-	DimensionVec **slices = data;
+	DimensionVec **slices = (DimensionVec **) data;
 	DimensionSlice *slice;
 	MemoryContext old;
 
@@ -455,7 +455,7 @@ ts_dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit,
 								  scankey,
 								  3,
 								  dimension_vec_tuple_found,
-								  &slices,
+								  (void *) &slices,
 								  limit,
 								  AccessShareLock,
 								  tuplock,
@@ -499,7 +499,7 @@ ts_dimension_slice_scan_list(int32 dimension_id, int64 coordinate, List **matchi
 								  scankey,
 								  3,
 								  dimension_vec_tuple_found_list,
-								  matching_dimension_slices,
+								  (void *) matching_dimension_slices,
 								  /* limit = */ 0,
 								  AccessShareLock,
 								  &tuplock,
@@ -669,7 +669,7 @@ ts_dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, i
 								  scankey,
 								  3,
 								  dimension_vec_tuple_found,
-								  &slices,
+								  (void *) &slices,
 								  limit,
 								  AccessShareLock,
 								  NULL,
@@ -694,7 +694,7 @@ ts_dimension_slice_scan_by_dimension(int32 dimension_id, int limit)
 								  scankey,
 								  1,
 								  dimension_vec_tuple_found,
-								  &slices,
+								  (void *) &slices,
 								  limit,
 								  AccessShareLock,
 								  NULL,
@@ -737,7 +737,7 @@ ts_dimension_slice_scan_by_dimension_before_point(int32 dimension_id, int64 poin
 								  scankey,
 								  3,
 								  dimension_vec_tuple_found,
-								  &slices,
+								  (void *) &slices,
 								  limit,
 								  scandir,
 								  AccessShareLock,
@@ -813,7 +813,7 @@ ts_dimension_slice_delete_by_dimension_id(int32 dimension_id, bool delete_constr
 							scankey,
 							1,
 							dimension_slice_tuple_delete,
-							&delete_constraints,
+							(void *) &delete_constraints,
 							0,
 							RowExclusiveLock,
 							&scantuplock,
@@ -841,7 +841,7 @@ dimension_slice_fill(TupleInfo *ti, void *data)
 		case TM_SelfModified:
 		case TM_Ok:
 		{
-			DimensionSlice **slice = data;
+			DimensionSlice **slice = (DimensionSlice **) data;
 			bool should_free;
 			HeapTuple tuple = ts_scanner_fetch_heap_tuple(ti, false, &should_free);
 
@@ -898,7 +898,7 @@ ts_dimension_slice_scan_for_existing(const DimensionSlice *slice, const ScanTupL
 								  scankey,
 								  3,
 								  dimension_slice_fill,
-								  (DimensionSlice **) &slice,
+								  (void *) &slice,
 								  1,
 								  AccessShareLock,
 								  tuplock,
@@ -922,7 +922,7 @@ ts_dimension_slice_from_tuple(TupleInfo *ti)
 static ScanTupleResult
 dimension_slice_tuple_found(TupleInfo *ti, void *data)
 {
-	DimensionSlice **slice = data;
+	DimensionSlice **slice = (DimensionSlice **) data;
 	*slice = ts_dimension_slice_from_tuple(ti);
 	return SCAN_DONE;
 }
@@ -949,7 +949,7 @@ ts_dimension_slice_scan_by_id_and_lock(int32 dimension_slice_id, const ScanTupLo
 								  scankey,
 								  1,
 								  dimension_slice_tuple_found,
-								  &slice,
+								  (void *) &slice,
 								  1,
 								  lockmode,
 								  tuplock,
@@ -1178,7 +1178,7 @@ ts_dimension_slice_insert(DimensionSlice *slice)
 static ScanTupleResult
 dimension_slice_nth_tuple_found(TupleInfo *ti, void *data)
 {
-	DimensionSlice **slice = data;
+	DimensionSlice **slice = (DimensionSlice **) data;
 	MemoryContext old = MemoryContextSwitchTo(ti->mctx);
 
 	*slice = dimension_slice_from_slot(ti->slot);
@@ -1204,7 +1204,7 @@ ts_dimension_slice_nth_latest_slice(int32 dimension_id, int n)
 								  scankey,
 								  1,
 								  dimension_slice_nth_tuple_found,
-								  &ret,
+								  (void *) &ret,
 								  n,
 								  BackwardScanDirection,
 								  AccessShareLock,
diff --git a/src/dimension_vector.c b/src/dimension_vector.c
index 10c2f146d..128d5edb4 100644
--- a/src/dimension_vector.c
+++ b/src/dimension_vector.c
@@ -56,7 +56,7 @@ ts_dimension_vec_sort(DimensionVec **vecptr)
 	DimensionVec *vec = *vecptr;
 
 	if (vec->num_slices > 1)
-		qsort(vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
+		qsort((void *) vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
 
 	return vec;
 }
@@ -102,8 +102,8 @@ ts_dimension_vec_remove_slice(DimensionVec **vecptr, int32 index)
 	DimensionVec *vec = *vecptr;
 
 	ts_dimension_slice_free(vec->slices[index]);
-	memmove(vec->slices + index,
-			vec->slices + (index + 1),
+	memmove((void *) &vec->slices[index],
+			(void *) &vec->slices[index + 1],
 			sizeof(DimensionSlice *) * (vec->num_slices - index - 1));
 	vec->num_slices--;
 }
@@ -118,7 +118,7 @@ dimension_vec_is_sorted(const DimensionVec *vec)
 		return true;
 
 	for (i = 1; i < vec->num_slices; i++)
-		if (cmp_slices(&vec->slices[i - 1], &vec->slices[i]) > 0)
+		if (cmp_slices((void *) &vec->slices[i - 1], (void *) &vec->slices[i]) > 0)
 			return false;
 
 	return true;
@@ -135,11 +135,11 @@ ts_dimension_vec_find_slice(const DimensionVec *vec, int64 coordinate)
 
 	Assert(dimension_vec_is_sorted(vec));
 
-	res = bsearch(&coordinate,
-				  vec->slices,
-				  vec->num_slices,
-				  sizeof(DimensionSlice *),
-				  cmp_coordinate_and_slice);
+	res = (DimensionSlice **) bsearch(&coordinate,
									  (void *) vec->slices,
									  vec->num_slices,
									  sizeof(DimensionSlice *),
									  cmp_coordinate_and_slice);
 
 	if (res == NULL)
 		return NULL;
diff --git a/src/dimension_vector.h b/src/dimension_vector.h
index 7c59a61c2..e5f46b87b 100644
--- a/src/dimension_vector.h
+++ b/src/dimension_vector.h
@@ -24,7 +24,7 @@ typedef struct DimensionVec
 } DimensionVec;
 
 #define DIMENSION_VEC_SIZE(num_slices) \
-	(sizeof(DimensionVec) + sizeof(DimensionSlice *) * num_slices)
+	(sizeof(DimensionVec) + (sizeof(DimensionSlice *) * num_slices))
 
 #define DIMENSION_VEC_DEFAULT_SIZE 10
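The qsort() and bsearch() call sites above operate on arrays of pointers
(DimensionSlice **), so even passing the array itself to a void * parameter
is a multi-level conversion, which is why the patch spells the (void *)
casts out. A self-contained sketch of the same shape, with stand-in types:

    #include <stdio.h>
    #include <stdlib.h>

    typedef struct Slice
    {
        long range_start;
    } Slice;

    static int
    cmp_slices(const void *a, const void *b)
    {
        /* qsort hands the comparator pointers to the elements, i.e. Slice **. */
        const Slice *sa = *(const Slice *const *) a;
        const Slice *sb = *(const Slice *const *) b;
        return (sa->range_start > sb->range_start) - (sa->range_start < sb->range_start);
    }

    int
    main(void)
    {
        Slice s1 = { 30 }, s2 = { 10 }, s3 = { 20 };
        Slice *slices[] = { &s1, &s2, &s3 };

        /* slices decays to Slice **; the explicit (void *) documents the
         * two-level conversion instead of leaving it implicit. */
        qsort((void *) slices, 3, sizeof(Slice *), cmp_slices);

        printf("%ld %ld %ld\n",
               slices[0]->range_start,
               slices[1]->range_start,
               slices[2]->range_start); /* 10 20 30 */
        return 0;
    }
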
diff --git a/src/foreign_key.c b/src/foreign_key.c
index 50f587e8b..d1d3007d2 100644
--- a/src/foreign_key.c
+++ b/src/foreign_key.c
@@ -466,7 +466,6 @@ relation_get_fk_constraint(Oid conrelid, Oid confrelid)
 	Relation conrel;
 	SysScanDesc conscan;
 	ScanKeyData skey[3];
-	HeapTuple htup = NULL;
 
 	/* Prepare to scan pg_constraint for entries having confrelid = this rel. */
 	ScanKeyInit(&skey[0],
@@ -490,7 +489,8 @@ relation_get_fk_constraint(Oid conrelid, Oid confrelid)
 	conrel = table_open(ConstraintRelationId, AccessShareLock);
 	conscan = systable_beginscan(conrel, InvalidOid, false, NULL, 3, skey);
 
-	if (HeapTupleIsValid(htup = systable_getnext(conscan)))
+	HeapTuple htup = systable_getnext(conscan);
+	if (HeapTupleIsValid(htup))
 	{
 		htup = heap_copytuple(htup);
 	}
diff --git a/src/histogram.c b/src/histogram.c
index a7363f518..e35c62e5e 100644
--- a/src/histogram.c
+++ b/src/histogram.c
@@ -36,7 +36,8 @@ TS_FUNCTION_INFO_V1(ts_hist_serializefunc);
 TS_FUNCTION_INFO_V1(ts_hist_deserializefunc);
 TS_FUNCTION_INFO_V1(ts_hist_finalfunc);
 
-#define HISTOGRAM_SIZE(state, nbuckets) (sizeof(*(state)) + (nbuckets) * sizeof(*(state)->buckets))
+#define HISTOGRAM_SIZE(state, nbuckets) \
+	(sizeof(*(state)) + ((nbuckets) * sizeof(*(state)->buckets)))
 
 typedef struct Histogram
 {
diff --git a/src/hypercube.c b/src/hypercube.c
index 655cddba9..7b8c6c5fc 100644
--- a/src/hypercube.c
+++ b/src/hypercube.c
@@ -146,7 +146,10 @@ ts_hypercube_add_slice(Hypercube *hc, const DimensionSlice *slice)
 void
 ts_hypercube_slice_sort(Hypercube *hc)
 {
-	qsort(hc->slices, hc->num_slices, sizeof(DimensionSlice *), cmp_slices_by_dimension_id);
+	qsort((void *) hc->slices,
+		  hc->num_slices,
+		  sizeof(DimensionSlice *),
+		  cmp_slices_by_dimension_id);
 }
 
 const DimensionSlice *
@@ -162,8 +165,8 @@ ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc, int32 dimension_id)
 
 	Assert(hypercube_is_sorted(hc));
 
-	ptr = bsearch(&ptr,
-				  hc->slices,
+	ptr = bsearch((void *) &ptr,
+				  (void *) hc->slices,
 				  hc->num_slices,
 				  sizeof(DimensionSlice *),
 				  cmp_slices_by_dimension_id);
diff --git a/src/hypercube.h b/src/hypercube.h
index 7de9db5e8..1190c9af0 100644
--- a/src/hypercube.h
+++ b/src/hypercube.h
@@ -24,7 +24,7 @@ typedef struct Hypercube
 } Hypercube;
 
 #define HYPERCUBE_SIZE(num_dimensions) \
-	(sizeof(Hypercube) + sizeof(DimensionSlice *) * (num_dimensions))
+	(sizeof(Hypercube) + (sizeof(DimensionSlice *) * (num_dimensions)))
 
 extern TSDLLEXPORT Hypercube *ts_hypercube_alloc(int16 num_dimensions);
 extern void ts_hypercube_free(Hypercube *hc);
diff --git a/src/hypertable.c b/src/hypertable.c
index 7bb5e7dee..449c675b0 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -927,7 +927,7 @@ hypertable_insert(int32 hypertable_id, Name schema_name, Name table_name,
 static ScanTupleResult
 hypertable_tuple_found(TupleInfo *ti, void *data)
 {
-	Hypertable **entry = data;
+	Hypertable **entry = (Hypertable **) data;
 
 	*entry = ts_hypertable_from_tupleinfo(ti);
 	return SCAN_DONE;
@@ -938,7 +938,7 @@ ts_hypertable_get_by_name(const char *schema, const char *name)
 {
 	Hypertable *ht = NULL;
 
-	hypertable_scan(schema, name, hypertable_tuple_found, &ht, AccessShareLock);
+	hypertable_scan(schema, name, hypertable_tuple_found, (void *) &ht, AccessShareLock);
 
 	return ht;
 }
@@ -959,7 +959,7 @@ ts_hypertable_get_by_id(int32 hypertable_id)
 							  1,
 							  HYPERTABLE_ID_INDEX,
 							  hypertable_tuple_found,
-							  &ht,
+							  (void *) &ht,
 							  1,
 							  AccessShareLock,
 							  CurrentMemoryContext,
diff --git a/src/hypertable_restrict_info.c b/src/hypertable_restrict_info.c
index fafe15834..88684c6f8 100644
--- a/src/hypertable_restrict_info.c
+++ b/src/hypertable_restrict_info.c
@@ -292,8 +292,8 @@ ts_hypertable_restrict_info_create(RelOptInfo *rel, Hypertable *ht)
 	int num_dimensions =
 		ht->space->num_dimensions + (range_space ? range_space->num_range_cols : 0);
 
-	HypertableRestrictInfo *res =
-		palloc0(sizeof(HypertableRestrictInfo) + sizeof(DimensionRestrictInfo *) * num_dimensions);
+	HypertableRestrictInfo *res = palloc0(sizeof(HypertableRestrictInfo) +
										  (sizeof(DimensionRestrictInfo *) * num_dimensions));
 
 	int i;
 	int range_index = 0;
@@ -813,9 +813,9 @@ ts_hypertable_restrict_info_get_chunks_ordered(HypertableRestrictInfo *hri, Hype
 	Assert(IS_OPEN_DIMENSION(&ht->space->dimensions[0]));
 
 	if (reverse)
-		qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_reverse);
+		qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_reverse);
 	else
-		qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp);
+		qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp);
 
 	for (i = 0; i < *num_chunks; i++)
 	{
diff --git a/src/import/allpaths.c b/src/import/allpaths.c
index 33ff82efe..31e350a96 100644
--- a/src/import/allpaths.c
+++ b/src/import/allpaths.c
@@ -152,7 +152,7 @@ ts_set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *parent_rel, Index pare
 		AppendRelInfo *appinfo = (AppendRelInfo *) lfirst(l);
 
 		/* append_rel_list contains all append rels; ignore others */
-		if (appinfo->parent_relid != (Index) parent_rt_index)
+		if (appinfo->parent_relid != parent_rt_index)
 			continue;
 
 		/* Re-locate the child RTE and RelOptInfo */
diff --git a/src/import/list.c b/src/import/list.c
index fb420d89a..34529f219 100644
--- a/src/import/list.c
+++ b/src/import/list.c
@@ -22,7 +22,8 @@
  */
 
 /* Overhead for the fixed part of a List header, measured in ListCells */
-#define LIST_HEADER_OVERHEAD ((int) ((offsetof(List, initial_elements) - 1) / sizeof(ListCell) + 1))
+#define LIST_HEADER_OVERHEAD \
+	((int) (((offsetof(List, initial_elements) - 1) / sizeof(ListCell)) + 1))
 
 /*
 * Return a freshly allocated List with room for at least min_size cells.
@@ -78,7 +79,7 @@ ts_new_list(NodeTag type, int min_size)
 		max_size = min_size;
 #endif
 
-	newlist = (List *) palloc(offsetof(List, initial_elements) + max_size * sizeof(ListCell));
+	newlist = (List *) palloc(offsetof(List, initial_elements) + (max_size * sizeof(ListCell)));
 	newlist->type = type;
 	newlist->length = min_size;
 	newlist->max_length = max_size;
diff --git a/src/loader/bgw_launcher.c b/src/loader/bgw_launcher.c
index c44a360bc..af2a2cc3e 100644
--- a/src/loader/bgw_launcher.c
+++ b/src/loader/bgw_launcher.c
@@ -741,7 +741,7 @@ ts_bgw_cluster_launcher_main(PG_FUNCTION_ARGS)
 	pgstat_report_appname(MyBgworkerEntry->bgw_name);
 	ereport(LOG, (errmsg("TimescaleDB background worker launcher connected to shared catalogs")));
 
-	htab_storage = MemoryContextAllocZero(TopMemoryContext, sizeof(void *));
+	htab_storage = (HTAB **) MemoryContextAllocZero(TopMemoryContext, sizeof(void *));
 
 	/*
 	 * We must setup the cleanup function _before_ initializing any state it
diff --git a/src/loader/function_telemetry.c b/src/loader/function_telemetry.c
index 2d0ce2895..654f5a341 100644
--- a/src/loader/function_telemetry.c
+++ b/src/loader/function_telemetry.c
@@ -41,7 +41,7 @@ ts_function_telemetry_shmem_startup()
 	 * segfaults. Since the shmem_startup_hook is run on every backend, we use
 	 * a ShmemInitStruct to detect if this function has been called before.
 	 */
-	lock = ShmemInitStruct("fn_telemetry_detect_first_run", sizeof(LWLock *), &found);
+	lock = (LWLock **) ShmemInitStruct("fn_telemetry_detect_first_run", sizeof(LWLock *), &found);
 
 	if (!found)
 		*lock = &(GetNamedLWLockTranche(FN_TELEMETRY_LWLOCK_TRANCHE_NAME))->lock;
diff --git a/src/nodes/chunk_append/exec.c b/src/nodes/chunk_append/exec.c
index 5e8ef1653..528b73aea 100644
--- a/src/nodes/chunk_append/exec.c
+++ b/src/nodes/chunk_append/exec.c
@@ -389,7 +389,7 @@ perform_plan_init(ChunkAppendState *state, EState *estate, int eflags)
 		return;
 	}
 
-	state->subplanstates = palloc0(state->num_subplans * sizeof(PlanState *));
+	state->subplanstates = (PlanState **) palloc0(state->num_subplans * sizeof(PlanState *));
 
 	i = 0;
 	foreach (lc, state->filtered_subplans)
diff --git a/src/nodes/hypertable_modify.c b/src/nodes/hypertable_modify.c
index 34fecff06..255bd7d12 100644
--- a/src/nodes/hypertable_modify.c
+++ b/src/nodes/hypertable_modify.c
@@ -1554,10 +1554,10 @@ ExecInsert(ModifyTableContext *context, ResultRelInfo *resultRelInfo, ChunkDispa
 
 		if (resultRelInfo->ri_Slots == NULL)
 		{
-			resultRelInfo->ri_Slots =
-				palloc(sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
-			resultRelInfo->ri_PlanSlots =
-				palloc(sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
+			resultRelInfo->ri_Slots = (TupleTableSlot **) palloc(sizeof(TupleTableSlot *) *
																 resultRelInfo->ri_BatchSize);
+			resultRelInfo->ri_PlanSlots = (TupleTableSlot **) palloc(
+				sizeof(TupleTableSlot *) * resultRelInfo->ri_BatchSize);
 		}
 
 		/*
diff --git a/src/planner/agg_bookend.c b/src/planner/agg_bookend.c
index a3bcafed6..8a4c7786d 100644
--- a/src/planner/agg_bookend.c
+++ b/src/planner/agg_bookend.c
@@ -188,7 +188,7 @@ is_first_last_node(Node *node, List **context)
 		if (func_strategy != NULL)
 			return true;
 	}
-	return expression_tree_walker(node, is_first_last_node, context);
+	return expression_tree_walker(node, is_first_last_node, (void *) context);
 }
 
 static bool
diff --git a/src/planner/expand_hypertable.c b/src/planner/expand_hypertable.c
index 41195e410..2a2e3d54d 100644
--- a/src/planner/expand_hypertable.c
+++ b/src/planner/expand_hypertable.c
@@ -145,10 +145,10 @@ int_get_datum(int64 value, Oid type)
 			return TimestampGetDatum(value);
 		case TIMESTAMPTZOID:
 			return TimestampTzGetDatum(value);
+		default:
+			elog(ERROR, "unsupported datatype in int_get_datum: %s", format_type_be(type));
+			pg_unreachable();
 	}
-
-	elog(ERROR, "unsupported datatype in int_get_datum: %s", format_type_be(type));
-	pg_unreachable();
 }
 
 static int64
@@ -170,10 +170,12 @@ const_datum_get_int(Const *cnst)
 			return DatumGetTimestamp(cnst->constvalue);
 		case TIMESTAMPTZOID:
 			return DatumGetTimestampTz(cnst->constvalue);
+		default:
+			elog(ERROR,
+				 "unsupported datatype in const_datum_get_int: %s",
+				 format_type_be(cnst->consttype));
+			pg_unreachable();
 	}
-
-	elog(ERROR, "unsupported datatype in const_datum_get_int: %s", format_type_be(cnst->consttype));
-	pg_unreachable();
 }
 
 /*
@@ -884,7 +886,7 @@ find_children_chunks(HypertableRestrictInfo *hri, Hypertable *ht, bool include_o
 	 * by find_inheritance_children. This is mostly needed to avoid test
 	 * reference changes.
 	 */
-	qsort(chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_chunk_reloid);
+	qsort((void *) chunks, *num_chunks, sizeof(Chunk *), chunk_cmp_chunk_reloid);
 
 	return chunks;
 }
diff --git a/src/process_utility.c b/src/process_utility.c
index 46272a3b2..8543da0c5 100644
--- a/src/process_utility.c
+++ b/src/process_utility.c
@@ -1319,6 +1319,12 @@ process_truncate(ProcessUtilityArgs *args)
 				}
 				break;
 			}
+			default:
+				/*
+				 * Do nothing for other relation types. This is mostly to
+				 * placate the static analyzers.
+				 */
+				break;
 		}
 	}
 
@@ -3325,13 +3331,14 @@ process_cluster_start(ProcessUtilityArgs *args)
 	 * it only for "verbose" output, but this doesn't seem worth it as the
 	 * cost of sorting is quickly amortized over the actual work to cluster
	 * the chunks.
	 */
-	mappings = palloc(sizeof(ChunkIndexMapping *) * list_length(chunk_indexes));
+	mappings = (ChunkIndexMapping **) palloc(sizeof(ChunkIndexMapping *) *
+											 list_length(chunk_indexes));
 
 	i = 0;
 	foreach (lc, chunk_indexes)
 		mappings[i++] = lfirst(lc);
-	qsort(mappings,
+	qsort((void *) mappings,
 		  list_length(chunk_indexes),
 		  sizeof(ChunkIndexMapping *),
 		  chunk_index_mappings_cmp);
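process_truncate() and the switches above gain an explicit default: label;
the semantics are unchanged (do nothing, or report an error for a value that
should be impossible), but the "all other values" path becomes visible to
the analyzer. A minimal sketch of the shape, with made-up enum and names:

    #include <stdio.h>

    typedef enum TimeType
    {
        TT_DATE,
        TT_TIMESTAMP
    } TimeType;

    static const char *
    time_type_name(TimeType t)
    {
        switch (t)
        {
            case TT_DATE:
                return "date";
            case TT_TIMESTAMP:
                return "timestamp";
            default:
                /* The explicit default placates analyzers that cannot prove
                 * the value is always one of the listed enumerators. */
                fprintf(stderr, "unknown time type %d\n", (int) t);
                return "unknown";
        }
    }

    int
    main(void)
    {
        printf("%s\n", time_type_name(TT_DATE));
        return 0;
    }
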
diff --git a/src/sort_transform.c b/src/sort_transform.c
index 7d067388d..60db458fe 100644
--- a/src/sort_transform.c
+++ b/src/sort_transform.c
@@ -172,6 +172,12 @@ transform_int_op_const(OpExpr *op)
 				return copyObject(nonconst);
 			}
 			break;
+			default:
+				/*
+				 * Do nothing for unknown operators. The explicit empty
+				 * branch is to placate the static analyzers.
+				 */
+				break;
 		}
 	}
 }
diff --git a/src/telemetry/functions.c b/src/telemetry/functions.c
index 8d3bb28f6..9390aad97 100644
--- a/src/telemetry/functions.c
+++ b/src/telemetry/functions.c
@@ -297,7 +297,7 @@ function_telemetry_increment(Oid func_id, HTAB **local_counts)
 static bool
 function_gather_checker(Oid func_id, void *context)
 {
-	function_telemetry_increment(func_id, context);
+	function_telemetry_increment(func_id, (HTAB **) context);
 	return false;
 }
 
@@ -325,7 +325,7 @@ static HTAB *
 record_function_counts(Query *query)
 {
 	HTAB *query_function_counts = NULL;
-	query_tree_walker(query, function_gather_walker, &query_function_counts, 0);
+	query_tree_walker(query, function_gather_walker, (void *) &query_function_counts, 0);
 	return query_function_counts;
 }
diff --git a/src/time_bucket.c b/src/time_bucket.c
index c7e065e17..a79fe84e2 100644
--- a/src/time_bucket.c
+++ b/src/time_bucket.c
@@ -163,10 +163,10 @@ bucket_month(int32 period, DateADT date, DateADT origin)
 	int32 result;
 
 	j2date(date + POSTGRES_EPOCH_JDATE, &year, &month, &day);
-	int32 timestamp = year * 12 + month - 1;
+	int32 timestamp = (year * 12) + month - 1;
 
 	j2date(origin + POSTGRES_EPOCH_JDATE, &year, &month, &day);
-	int32 offset = year * 12 + month - 1;
+	int32 offset = (year * 12) + month - 1;
 
 	TIME_BUCKET(period, timestamp, offset, PG_INT32_MIN, PG_INT32_MAX, result);
 
@@ -684,8 +684,8 @@ ts_time_bucket_ng_date(PG_FUNCTION_ARGS)
 	j2date(date + POSTGRES_EPOCH_JDATE, &year, &month, &day);
 
 	int32 result;
-	int32 offset = origin_year * 12 + origin_month - 1;
-	int32 timestamp = year * 12 + month - 1;
+	int32 offset = (origin_year * 12) + origin_month - 1;
+	int32 timestamp = (year * 12) + month - 1;
 
 	TIME_BUCKET(interval->month, timestamp, offset, PG_INT32_MIN, PG_INT32_MAX, result);
 	year = result / 12;
diff --git a/src/ts_catalog/array_utils.c b/src/ts_catalog/array_utils.c
index a0c827991..599bffd2c 100644
--- a/src/ts_catalog/array_utils.c
+++ b/src/ts_catalog/array_utils.c
@@ -279,7 +279,15 @@ ts_array_add_element_bool(ArrayType *arr, bool value)
 	Assert(position);
 	position++;
 
-	d = array_set_element(d, 1, &position, value, false, -1, 1, true, TYPALIGN_CHAR);
+	d = array_set_element(d,
						  1,
						  &position,
						  BoolGetDatum(value),
						  false,
						  -1,
						  1,
						  true,
						  TYPALIGN_CHAR);
 
 	return DatumGetArrayTypeP(d);
 }
diff --git a/src/ts_catalog/catalog.c b/src/ts_catalog/catalog.c
index ad4822775..cb519e2f9 100644
--- a/src/ts_catalog/catalog.c
+++ b/src/ts_catalog/catalog.c
@@ -416,9 +416,7 @@ ts_catalog_table_info_init(CatalogTableInfo *tables_info, int max_tables,
 
 		for (j = 0; j < number_indexes; j++)
 		{
-			id = ts_get_relation_relid((char *) table_ary[i].schema_name,
-									   (char *) index_ary[i].names[j],
-									   true);
+			id = ts_get_relation_relid(table_ary[i].schema_name, index_ary[i].names[j], true);
 
 			if (!OidIsValid(id))
 				elog(ERROR, "OID lookup failed for table index \"%s\"", index_ary[i].names[j]);
diff --git a/src/utils.c b/src/utils.c
index c1643d5fd..61e695b9b 100644
--- a/src/utils.c
+++ b/src/utils.c
@@ -292,9 +292,9 @@ ts_time_value_to_internal_or_infinite(Datum time_val, Oid type_oid)
 			return ts_time_value_to_internal(time_val, type_oid);
 		}
+		default:
+			return ts_time_value_to_internal(time_val, type_oid);
 	}
-
-	return ts_time_value_to_internal(time_val, type_oid);
 }
 
 TS_FUNCTION_INFO_V1(ts_time_to_internal);
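The array_utils.c hunk wraps a bare bool in BoolGetDatum() where
array_set_element() expects a Datum. A sketch of the idea using a local
stand-in for the server's Datum machinery (the underscored names here are
hypothetical, not the PostgreSQL macros):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t Datum; /* stand-in: an opaque pointer-sized integer */

    static inline Datum
    BoolGetDatum_(bool b)
    {
        return (Datum) (b ? 1 : 0);
    }

    static void
    set_element(Datum d)
    {
        printf("datum = %lu\n", (unsigned long) d);
    }

    int
    main(void)
    {
        bool value = true;

        /* Passing `value` directly would compile via integer promotion, but
         * the explicit conversion states the intent and keeps the implicit
         * conversion warnings quiet. */
        set_element(BoolGetDatum_(value));
        return 0;
    }
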
diff --git a/test/src/bgw/scheduler_mock.c b/test/src/bgw/scheduler_mock.c
index cbfb58a7a..016ca6a3d 100644
--- a/test/src/bgw/scheduler_mock.c
+++ b/test/src/bgw/scheduler_mock.c
@@ -114,8 +114,8 @@ ts_test_next_scheduled_execution_slot(PG_FUNCTION_ARGS)
 		DirectFunctionCall2(timestamptz_part, CStringGetTextDatum("month"), timebucket_fini);
 
 	/* convert everything to months */
-	float8 month_diff = DatumGetFloat8(year_fini) * 12 + DatumGetFloat8(month_fini) -
-						(DatumGetFloat8(year_init) * 12 + DatumGetFloat8(month_init));
+	float8 month_diff = (DatumGetFloat8(year_fini) * 12) + DatumGetFloat8(month_fini) -
+						((DatumGetFloat8(year_init) * 12) + DatumGetFloat8(month_init));
 
 	Datum months_to_add = DirectFunctionCall2(interval_mul,
											  IntervalPGetDatum(&one_month),
diff --git a/test/src/loader/osm_init.c b/test/src/loader/osm_init.c
index 6a310eff4..ad0c451bf 100644
--- a/test/src/loader/osm_init.c
+++ b/test/src/loader/osm_init.c
@@ -31,7 +31,7 @@ void
 _PG_init(void)
 {
 	elog(WARNING, "OSM-%s _PG_init", OSM_VERSION_MOD);
-	void *osm_lock_pointer = (LWLock **) find_rendezvous_variable(RENDEZVOUS_OSM_PARALLEL_LWLOCK);
+	void **osm_lock_pointer = find_rendezvous_variable(RENDEZVOUS_OSM_PARALLEL_LWLOCK);
 	if (osm_lock_pointer != NULL)
 	{
 		elog(WARNING, "got lwlock osm lock");
diff --git a/test/src/telemetry/test_telemetry.c b/test/src/telemetry/test_telemetry.c
index 904f3fee1..ad0a6ef53 100644
--- a/test/src/telemetry/test_telemetry.c
+++ b/test/src/telemetry/test_telemetry.c
@@ -153,7 +153,7 @@ ts_test_status(PG_FUNCTION_ARGS)
 	int port = 80;
 	int status = PG_GETARG_INT32(0);
 
-	PG_RETURN_JSONB_P((void *) test_factory(CONNECTION_PLAIN, status, TEST_ENDPOINT, port));
+	PG_RETURN_DATUM(test_factory(CONNECTION_PLAIN, status, TEST_ENDPOINT, port));
 }
 
 #ifdef TS_DEBUG
@@ -166,7 +166,7 @@ ts_test_status_mock(PG_FUNCTION_ARGS)
 
 	test_string = text_to_cstring(arg1);
 
-	PG_RETURN_JSONB_P((void *) test_factory(CONNECTION_MOCK, 123, TEST_ENDPOINT, port));
+	PG_RETURN_DATUM(test_factory(CONNECTION_MOCK, 123, TEST_ENDPOINT, port));
 }
 #endif
 
@@ -294,5 +294,5 @@ ts_test_telemetry(PG_FUNCTION_ARGS)
 
 	ts_http_response_state_destroy(rsp);
 
-	PG_RETURN_JSONB_P((void *) json_body);
+	PG_RETURN_DATUM(json_body);
 }
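In test_telemetry.c, test_factory() evidently already returns a Datum, so
PG_RETURN_DATUM() hands it back unchanged and the old (void *) round-trip
through the JSONB pointer macro disappears. A sketch with stand-in macros
(the underscored names are local stand-ins, not the server API):

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t Datum; /* stand-in for the PostgreSQL Datum */

    #define PointerGetDatum_(p) ((Datum) (p))
    #define PG_RETURN_DATUM_(d) return (d)

    static Datum
    test_factory_(void)
    {
        static int payload = 42; /* stands in for a JSONB result */
        return PointerGetDatum_(&payload);
    }

    static Datum
    ts_test_status_(void)
    {
        /* The factory already hands back a Datum, so return it as-is
         * instead of casting through (void *) and re-wrapping it. */
        PG_RETURN_DATUM_(test_factory_());
    }

    int
    main(void)
    {
        printf("%d\n", *(int *) ts_test_status_());
        return 0;
    }
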
diff --git a/tsl/src/bgw_policy/job_api.c b/tsl/src/bgw_policy/job_api.c
index aa9d34b78..47db920e6 100644
--- a/tsl/src/bgw_policy/job_api.c
+++ b/tsl/src/bgw_policy/job_api.c
@@ -315,7 +315,7 @@ job_alter(PG_FUNCTION_ARGS)
 	Oid check = PG_ARGISNULL(9) ? InvalidOid : PG_GETARG_OID(9);
 	char *check_name_str = NULL;
 	/* Added space for period and NULL */
-	char schema_qualified_check_name[2 * NAMEDATALEN + 2] = { 0 };
+	char schema_qualified_check_name[(2 * NAMEDATALEN) + 2] = { 0 };
 	bool unregister_check = (!PG_ARGISNULL(9) && !OidIsValid(check));
 	TimestampTz initial_start = PG_ARGISNULL(11) ? DT_NOBEGIN : PG_GETARG_TIMESTAMPTZ(11);
 	text *timezone = PG_ARGISNULL(12) ? NULL : PG_GETARG_TEXT_PP(12);
diff --git a/tsl/src/chunkwise_agg.c b/tsl/src/chunkwise_agg.c
index 3e908f858..0bc302ea7 100644
--- a/tsl/src/chunkwise_agg.c
+++ b/tsl/src/chunkwise_agg.c
@@ -522,7 +522,7 @@ generate_partial_agg_pushdown_path(PlannerInfo *root, Path *cheapest_partial_pat
 											  partially_grouped_rel->reltarget,
 											  NULL,
 											  &total_groups);
 
-		add_path(partially_grouped_rel, (Path *) gather_path);
+		add_path(partially_grouped_rel, gather_path);
 	}
 }
diff --git a/tsl/src/compression/.clang-tidy b/tsl/src/compression/.clang-tidy
deleted file mode 100644
index 77b1143ff..000000000
--- a/tsl/src/compression/.clang-tidy
+++ /dev/null
@@ -1,8 +0,0 @@
-# Disable warnings as errors on compression code since it currently
-# doesn't pass those tests
----
-Checks: '-*,clang-analyzer-core.*,clang-diagnostic-*'
-WarningsAsErrors: 'clang-analyzer-unix.*'
-HeaderFilterRegex: ''
-AnalyzeTemporaryDtors: false
-...
diff --git a/tsl/src/compression/algorithms/array.c b/tsl/src/compression/algorithms/array.c
index 674131e23..f1c01a22b 100644
--- a/tsl/src/compression/algorithms/array.c
+++ b/tsl/src/compression/algorithms/array.c
@@ -634,7 +634,8 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
 		Assert(current_notnull_element == -1);
 	}
 
-	ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 3);
+	ArrowArray *result =
+		MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + (sizeof(void *) * 3));
 	const void **buffers = (const void **) &result[1];
 	buffers[0] = validity_bitmap;
 	buffers[1] = offsets;
diff --git a/tsl/src/compression/algorithms/dictionary.c b/tsl/src/compression/algorithms/dictionary.c
index a090447c9..36df8bb52 100644
--- a/tsl/src/compression/algorithms/dictionary.c
+++ b/tsl/src/compression/algorithms/dictionary.c
@@ -508,7 +508,8 @@ tsl_text_dictionary_decompress_all(Datum compressed, Oid element_type, MemoryCon
 		Assert(current_notnull_element == -1);
 	}
 
-	ArrowArray *result = MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + sizeof(void *) * 2);
+	ArrowArray *result =
+		MemoryContextAllocZero(dest_mctx, sizeof(ArrowArray) + (sizeof(void *) * 2));
 	const void **buffers = (const void **) &result[1];
 	buffers[0] = validity_bitmap;
 	buffers[1] = indices;
diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c
index bb615ded5..375e9b527 100644
--- a/tsl/src/compression/api.c
+++ b/tsl/src/compression/api.c
@@ -1407,7 +1407,7 @@ recompress_chunk_segmentwise_impl(Chunk *uncompressed_chunk)
 	bool changed_segment = false;
 	/************ current segment **************/
 	CompressedSegmentInfo **current_segment =
-		palloc(sizeof(CompressedSegmentInfo *) * nsegmentby_cols);
+		(CompressedSegmentInfo **) palloc(sizeof(CompressedSegmentInfo *) * nsegmentby_cols);
 
 	for (int i = 0; i < nsegmentby_cols; i++)
 	{
diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c
index 95b25e472..bede1ae06 100644
--- a/tsl/src/compression/compression.c
+++ b/tsl/src/compression/compression.c
@@ -1233,7 +1233,8 @@ build_decompressor(Relation in_rel, Relation out_rel)
 										  ALLOCSET_DEFAULT_SIZES),
 		.estate = CreateExecutorState(),
 
-		.decompressed_slots = palloc0(sizeof(void *) * TARGET_COMPRESSED_BATCH_SIZE),
+		.decompressed_slots =
+			(TupleTableSlot **) palloc0(sizeof(void *) * TARGET_COMPRESSED_BATCH_SIZE),
 	};
 
 	create_per_compressed_column(&decompressor);
@@ -1815,6 +1816,9 @@ tsl_compressed_data_info(PG_FUNCTION_ARGS)
 		case COMPRESSION_ALGORITHM_ARRAY:
 			has_nulls = array_compressed_has_nulls(header);
 			break;
+		default:
+			elog(ERROR, "unknown compression algorithm %d", header->compression_algorithm);
+			break;
 	}
 
 	tupdesc = BlessTupleDesc(tupdesc);
diff --git a/tsl/src/compression/compression_dml.c b/tsl/src/compression/compression_dml.c
index 2083d7d40..ab72d316f 100644
--- a/tsl/src/compression/compression_dml.c
+++ b/tsl/src/compression/compression_dml.c
@@ -944,7 +944,11 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
													 false, /* is_null */
													 false /* is_array_op */
													 ));
+				break;
 			}
+			default:
+				/* Do nothing for unknown operator strategies. */
+				break;
 		}
 		continue;
 	}
@@ -1043,6 +1047,10 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
											 false /* is_array_op */
											 ));
 			}
+			break;
+		default:
+			/* Do nothing for unknown operator strategies. */
+			break;
 		}
 	}
 
@@ -1088,7 +1096,11 @@ process_predicates(Chunk *ch, CompressionSettings *settings, List *predicates,
												 false, /* is_null */
												 true /* is_array_op */
												 ));
+				break;
 			}
+			default:
+				/* Do nothing on unknown operator strategies. */
+				break;
 		}
 		continue;
 	}
diff --git a/tsl/src/continuous_aggs/common.c b/tsl/src/continuous_aggs/common.c
index 2213ea829..50b1b8491 100644
--- a/tsl/src/continuous_aggs/common.c
+++ b/tsl/src/continuous_aggs/common.c
@@ -327,6 +327,10 @@ process_timebucket_parameters(FuncExpr *fe, ContinuousAggsBucketFunction *bf, bo
					bf->bucket_time_origin = DatumGetTimestampTz(constval->constvalue);
 				}
 			}
+			break;
+		default:
+			/* Nothing to do for integer time column. */
+			break;
 	}
 	if (process_checks && custom_origin && TIMESTAMP_NOT_FINITE(bf->bucket_time_origin))
 	{
diff --git a/tsl/src/hypercore/arrow_array.c b/tsl/src/hypercore/arrow_array.c
index 74adca387..b52b71d67 100644
--- a/tsl/src/hypercore/arrow_array.c
+++ b/tsl/src/hypercore/arrow_array.c
@@ -505,7 +505,7 @@ arrow_create_with_buffers(MemoryContext mcxt, int n_buffers)
 		ArrowArray array;
 		const void *buffers[FLEXIBLE_ARRAY_MEMBER];
 	} *array_with_buffers =
-		MemoryContextAllocZero(mcxt, sizeof(ArrowArray) + sizeof(const void *) * n_buffers);
+		MemoryContextAllocZero(mcxt, sizeof(ArrowArray) + (sizeof(const void *) * n_buffers));
 
 	ArrowArray *array = &array_with_buffers->array;
diff --git a/tsl/src/hypercore/hypercore_handler.c b/tsl/src/hypercore/hypercore_handler.c
index 4bfc50495..50cac0ea4 100644
--- a/tsl/src/hypercore/hypercore_handler.c
+++ b/tsl/src/hypercore/hypercore_handler.c
@@ -336,10 +336,10 @@ typedef struct HypercoreParallelScanDescData *HypercoreParallelScanDesc;
 
 typedef enum HypercoreScanState
 {
-	HYPERCORE_SCAN_START,
+	HYPERCORE_SCAN_START = 0,
 	HYPERCORE_SCAN_COMPRESSED = HYPERCORE_SCAN_START,
-	HYPERCORE_SCAN_NON_COMPRESSED,
-	HYPERCORE_SCAN_DONE,
+	HYPERCORE_SCAN_NON_COMPRESSED = 1,
+	HYPERCORE_SCAN_DONE = 2,
 } HypercoreScanState;
 
 const char *scan_state_name[] = {
@@ -2074,7 +2074,7 @@ hypercore_relation_copy_for_cluster(Relation OldHypercore, Relation NewCompressi
 		if (prev_cblock != cblock)
 		{
 			pgstat_progress_update_param(PROGRESS_CLUSTER_HEAP_BLKS_SCANNED,
-										 (cblock + nblocks - startblock) % nblocks + 1);
+										 ((cblock + nblocks - startblock) % nblocks) + 1);
 			prev_cblock = cblock;
 		}
 		/* Get the actual tuple from the child slot (either compressed or
diff --git a/tsl/src/hypercore/hypercore_proxy.c b/tsl/src/hypercore/hypercore_proxy.c
index ffde5c3fc..d100a8f0e 100644
--- a/tsl/src/hypercore/hypercore_proxy.c
+++ b/tsl/src/hypercore/hypercore_proxy.c
@@ -199,7 +199,7 @@ typedef struct HSProxyVacuumState
 } HSProxyVacuumState;
 
 #define HSPROXY_VACUUM_STATE_SIZE(nindexes) \
-	(sizeof(HSProxyVacuumState) + (sizeof(IndexBulkDeleteResult)) * (nindexes))
+	(sizeof(HSProxyVacuumState) + (sizeof(IndexBulkDeleteResult) * (nindexes)))
 
 /*
 * Bulkdelete. Called by vacuum on the compressed relation.
diff --git a/tsl/src/nodes/decompress_chunk/batch_array.c b/tsl/src/nodes/decompress_chunk/batch_array.c
index f5f2af6b9..c5c9fa465 100644
--- a/tsl/src/nodes/decompress_chunk/batch_array.c
+++ b/tsl/src/nodes/decompress_chunk/batch_array.c
@@ -52,7 +52,7 @@ batch_array_enlarge(BatchArray *array, int new_number)
 	array->batch_states = repalloc(array->batch_states, array->n_batch_state_bytes * new_number);
 
 	/* Zero out the tail. The batch states are initialized on first use. */
-	memset(((char *) array->batch_states) + array->n_batch_state_bytes * array->n_batch_states,
+	memset(((char *) array->batch_states) + (array->n_batch_state_bytes * array->n_batch_states),
		   0x0,
		   array->n_batch_state_bytes * (new_number - array->n_batch_states));
diff --git a/tsl/src/nodes/decompress_chunk/batch_queue_heap.c b/tsl/src/nodes/decompress_chunk/batch_queue_heap.c
index b925be437..2c33b12f3 100644
--- a/tsl/src/nodes/decompress_chunk/batch_queue_heap.c
+++ b/tsl/src/nodes/decompress_chunk/batch_queue_heap.c
@@ -159,7 +159,7 @@ binaryheap_add_unordered_autoresize(binaryheap *heap, Datum d)
 	if (heap->bh_size >= heap->bh_space)
 	{
 		heap->bh_space = heap->bh_space * 2;
-		Size new_size = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * heap->bh_space;
+		Size new_size = offsetof(binaryheap, bh_nodes) + (sizeof(Datum) * heap->bh_space);
 		heap = (binaryheap *) repalloc(heap, new_size);
 	}
 
@@ -207,9 +207,9 @@ batch_queue_heap_pop(BatchQueue *bq, DecompressContext *dcontext)
 		 * We're working with virtual tuple slots so no need for slot_getattr().
 		 */
 		Assert(TTS_IS_VIRTUAL(top_tuple));
-		queue->heap_entries[top_batch_index * queue->nkeys + key].value =
+		queue->heap_entries[(top_batch_index * queue->nkeys) + key].value =
 			top_tuple->tts_values[attr];
-		queue->heap_entries[top_batch_index * queue->nkeys + key].null =
+		queue->heap_entries[(top_batch_index * queue->nkeys) + key].null =
 			top_tuple->tts_isnull[attr];
 	}
 
@@ -308,9 +308,9 @@ batch_queue_heap_push_batch(BatchQueue *_queue, DecompressContext *dcontext,
 		 * We're working with virtual tuple slots so no need for slot_getattr().
 		 */
 		Assert(TTS_IS_VIRTUAL(current_tuple));
-		queue->heap_entries[new_batch_index * queue->nkeys + key].value =
+		queue->heap_entries[(new_batch_index * queue->nkeys) + key].value =
 			current_tuple->tts_values[attr];
-		queue->heap_entries[new_batch_index * queue->nkeys + key].null =
+		queue->heap_entries[(new_batch_index * queue->nkeys) + key].null =
 			current_tuple->tts_isnull[attr];
 	}
 
diff --git a/tsl/src/nodes/decompress_chunk/compressed_batch.c b/tsl/src/nodes/decompress_chunk/compressed_batch.c
index b1591ecf7..81ed77fa8 100644
--- a/tsl/src/nodes/decompress_chunk/compressed_batch.c
+++ b/tsl/src/nodes/decompress_chunk/compressed_batch.c
@@ -432,7 +432,7 @@ translate_bitmap_from_dictionary(const ArrowArray *arrow, const uint64 *dict_res
 		uint64 word = 0;
 		for (size_t inner = 0; inner < 64; inner++)
 		{
-			const size_t row = outer * 64 + inner;
+			const size_t row = (outer * 64) + inner;
 			const size_t bit_index = inner;
 #define INNER_LOOP \
			const int16 index = indices[row]; \
diff --git a/tsl/src/nodes/decompress_chunk/decompress_chunk.c b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
index 393fb8117..ac2c78734 100644
--- a/tsl/src/nodes/decompress_chunk/decompress_chunk.c
+++ b/tsl/src/nodes/decompress_chunk/decompress_chunk.c
@@ -443,7 +443,16 @@ static double
 smoothstep(double x, double start, double end)
 {
 	x = (x - start) / (end - start);
-	x = x < 0 ? 0 : x > 1 ? 1 : x;
+
+	if (x < 0)
+	{
+		x = 0;
+	}
+	else if (x > 1)
+	{
+		x = 1;
+	}
+
 	return x * x * (3.0F - 2.0F * x);
 }
 
@@ -522,7 +531,7 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
 	 * we often read a small subset of columns in analytical queries. The
 	 * compressed chunk is never projected so we can't use it for that.
	 */
-	const double work_mem_bytes = work_mem * (double) 1024.0;
+	const double work_mem_bytes = work_mem * 1024.0;
 	const double needed_memory_bytes = open_batches_clamped * TARGET_COMPRESSED_BATCH_SIZE *
									   dcpath->custom_path.path.pathtarget->width;
@@ -545,7 +554,7 @@ cost_batch_sorted_merge(PlannerInfo *root, CompressionInfo *compression_info,
	 */
 	const double sort_path_cost_for_startup =
 		sort_path.startup_cost +
-		(sort_path.total_cost - sort_path.startup_cost) * (open_batches_clamped / sort_path.rows);
+		((sort_path.total_cost - sort_path.startup_cost) * (open_batches_clamped / sort_path.rows));
 	Assert(sort_path_cost_for_startup >= 0);
 
 	dcpath->custom_path.path.startup_cost = sort_path_cost_for_startup + work_mem_penalty;
@@ -1506,7 +1515,7 @@ has_compressed_vars_walker(Node *node, CompressionInfo *info)
 	if (IsA(node, Var))
 	{
 		Var *var = castNode(Var, node);
-		if ((Index) var->varno != (Index) info->compressed_rel->relid)
+		if ((Index) var->varno != info->compressed_rel->relid)
 		{
 			return false;
 		}
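The smoothstep() change replaces a nested conditional expression with
explicit clamp branches; the behavior is unchanged. The same function,
runnable in isolation:

    #include <stdio.h>

    /* Clamp x into [0, 1] relative to [start, end], then apply the usual
     * 3x^2 - 2x^3 smoothing polynomial. */
    static double
    smoothstep(double x, double start, double end)
    {
        x = (x - start) / (end - start);

        if (x < 0)
            x = 0;
        else if (x > 1)
            x = 1;

        return x * x * (3.0 - 2.0 * x);
    }

    int
    main(void)
    {
        printf("%g %g %g\n",
               smoothstep(-1.0, 0.0, 1.0),  /* 0   (clamped low)  */
               smoothstep(0.5, 0.0, 1.0),   /* 0.5 (midpoint)     */
               smoothstep(2.0, 0.0, 1.0));  /* 1   (clamped high) */
        return 0;
    }
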
diff --git a/tsl/src/nodes/decompress_chunk/planner.c b/tsl/src/nodes/decompress_chunk/planner.c
index 416ff1f66..8048fc2ce 100644
--- a/tsl/src/nodes/decompress_chunk/planner.c
+++ b/tsl/src/nodes/decompress_chunk/planner.c
@@ -157,7 +157,7 @@ typedef struct VectorQualInfoDecompressChunk
 static bool *
 build_vector_attrs_array(const UncompressedColumnInfo *colinfo, const CompressionInfo *info)
 {
-	const unsigned short arrlen = info->chunk_rel->max_attr + 1;
+	const AttrNumber arrlen = info->chunk_rel->max_attr + 1;
 	bool *vector_attrs = palloc(sizeof(bool) * arrlen);
 
 	for (AttrNumber attno = 0; attno < arrlen; attno++)
diff --git a/tsl/src/nodes/decompress_chunk/pred_text.c b/tsl/src/nodes/decompress_chunk/pred_text.c
index 75a4acdb8..f46f3efbb 100644
--- a/tsl/src/nodes/decompress_chunk/pred_text.c
+++ b/tsl/src/nodes/decompress_chunk/pred_text.c
@@ -32,7 +32,7 @@ vector_const_text_comparison(const ArrowArray *arrow, const Datum constdatum, bo
 		uint64 word = 0;
 		for (size_t inner = 0; inner < 64; inner++)
 		{
-			const size_t row = outer * 64 + inner;
+			const size_t row = (outer * 64) + inner;
 			const size_t bit_index = inner;
 #define INNER_LOOP \
			const uint32 start = offsets[row]; \
@@ -122,7 +122,7 @@ vector_const_like_impl(const ArrowArray *arrow, const Datum constdatum, uint64 *
 		uint64 word = 0;
 		for (size_t inner = 0; inner < 64; inner++)
 		{
-			const size_t row = outer * 64 + inner;
+			const size_t row = (outer * 64) + inner;
 			const size_t bit_index = inner;
 			/*
 			 * The inner loop could have been an inline function, but it would have 5
diff --git a/tsl/src/nodes/decompress_chunk/vector_predicates.c b/tsl/src/nodes/decompress_chunk/vector_predicates.c
index f1be155c3..0134b22b8 100644
--- a/tsl/src/nodes/decompress_chunk/vector_predicates.c
+++ b/tsl/src/nodes/decompress_chunk/vector_predicates.c
@@ -48,6 +48,12 @@ get_vector_const_predicate(Oid pg_predicate)
 
 		case F_TEXTNE:
 			return vector_const_textne;
+
+		default:
+			/*
+			 * More checks below, this branch is to placate the static analyzers.
+			 */
+			break;
 	}
 
 	if (GetDatabaseEncoding() == PG_UTF8)
@@ -59,6 +65,11 @@ get_vector_const_predicate(Oid pg_predicate)
 				return vector_const_textlike_utf8;
 			case F_TEXTNLIKE:
 				return vector_const_textnlike_utf8;
+			default:
+				/*
+				 * This branch is to placate the static analyzers.
+				 */
+				break;
 		}
 	}
 
diff --git a/tsl/src/nodes/gapfill/gapfill_exec.c b/tsl/src/nodes/gapfill/gapfill_exec.c
index c262f2cd3..24f86665c 100644
--- a/tsl/src/nodes/gapfill/gapfill_exec.c
+++ b/tsl/src/nodes/gapfill/gapfill_exec.c
@@ -1206,7 +1206,7 @@ gapfill_state_initialize_columns(GapFillState *state)
 	int i;
 
 	state->ncolumns = tupledesc->natts;
-	state->columns = palloc(state->ncolumns * sizeof(GapFillColumnState *));
+	state->columns = (GapFillColumnState **) palloc(state->ncolumns * sizeof(GapFillColumnState *));
 
 	for (i = 0; i < state->ncolumns; i++)
 	{
diff --git a/tsl/src/nodes/gapfill/gapfill_plan.c b/tsl/src/nodes/gapfill/gapfill_plan.c
index 484e787aa..63e00b095 100644
--- a/tsl/src/nodes/gapfill/gapfill_plan.c
+++ b/tsl/src/nodes/gapfill/gapfill_plan.c
@@ -74,7 +74,7 @@ gapfill_function_walker(Node *node, gapfill_walker_context *context)
 		context->count++;
 	}
 
-	return expression_tree_walker((Node *) node, gapfill_function_walker, context);
+	return expression_tree_walker(node, gapfill_function_walker, context);
 }
 
 /*
@@ -92,7 +92,7 @@ marker_function_walker(Node *node, gapfill_walker_context *context)
 		context->count++;
 	}
 
-	return expression_tree_walker((Node *) node, marker_function_walker, context);
+	return expression_tree_walker(node, marker_function_walker, context);
 }
 
 /*
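The gapfill and agg_bookend walkers follow the usual PostgreSQL
expression_tree_walker() convention: the traversal carries an opaque void *
context that the walker casts back to its real type. A self-contained sketch
of that shape (the tree type and walker are stand-ins for the planner's Node
machinery):

    #include <stdbool.h>
    #include <stdio.h>

    typedef struct Node_
    {
        int tag;
        struct Node_ *left, *right;
    } Node_;

    typedef struct WalkerContext
    {
        int count;
    } WalkerContext;

    /* Stand-in for expression_tree_walker(): visit the children. */
    static bool
    tree_walker(Node_ *node, bool (*walker)(Node_ *, void *), void *context)
    {
        if (node == NULL)
            return false;
        return walker(node->left, context) || walker(node->right, context);
    }

    static bool
    count_tag_walker(Node_ *node, void *context)
    {
        if (node == NULL)
            return false;
        if (node->tag == 42)
            ((WalkerContext *) context)->count++; /* explicit cast back */
        return tree_walker(node, count_tag_walker, context);
    }

    int
    main(void)
    {
        Node_ leaf = { 42, NULL, NULL };
        Node_ root = { 0, &leaf, NULL };
        WalkerContext ctx = { 0 };

        count_tag_walker(&root, &ctx);
        printf("found %d\n", ctx.count); /* 1 */
        return 0;
    }
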
diff --git a/tsl/src/nodes/gapfill/interpolate.c b/tsl/src/nodes/gapfill/interpolate.c
index 29fbdc61e..202fd84ea 100644
--- a/tsl/src/nodes/gapfill/interpolate.c
+++ b/tsl/src/nodes/gapfill/interpolate.c
@@ -27,12 +27,10 @@ gapfill_interpolate_initialize(GapFillInterpolateColumnState *interpolate, GapFi
 {
 	interpolate->prev.isnull = true;
 	interpolate->next.isnull = true;
-	if (list_length(((FuncExpr *) function)->args) > 1)
-		interpolate->lookup_before =
-			gapfill_adjust_varnos(state, lsecond(((FuncExpr *) function)->args));
-	if (list_length(((FuncExpr *) function)->args) > 2)
-		interpolate->lookup_after =
-			gapfill_adjust_varnos(state, lthird(((FuncExpr *) function)->args));
+	if (list_length(function->args) > 1)
+		interpolate->lookup_before = gapfill_adjust_varnos(state, lsecond(function->args));
+	if (list_length(function->args) > 2)
+		interpolate->lookup_after = gapfill_adjust_varnos(state, lthird(function->args));
 }
 
 /*
diff --git a/tsl/src/nodes/vector_agg/function/sum_float_single.c b/tsl/src/nodes/vector_agg/function/sum_float_single.c
index c46a05ca6..66ed5c7e1 100644
--- a/tsl/src/nodes/vector_agg/function/sum_float_single.c
+++ b/tsl/src/nodes/vector_agg/function/sum_float_single.c
@@ -16,7 +16,8 @@ static void
 FUNCTION_NAME(emit)(void *agg_state, Datum *out_result, bool *out_isnull)
 {
 	FloatSumState *state = (FloatSumState *) agg_state;
-	*out_result = CTYPE_TO_DATUM((CTYPE) state->result);
+	const CTYPE result_casted = state->result;
+	*out_result = CTYPE_TO_DATUM(result_casted);
 	*out_isnull = !state->isvalid;
 }
 
diff --git a/tsl/src/nodes/vector_agg/grouping_policy_batch.c b/tsl/src/nodes/vector_agg/grouping_policy_batch.c
index 651f59046..c9fa8f667 100644
--- a/tsl/src/nodes/vector_agg/grouping_policy_batch.c
+++ b/tsl/src/nodes/vector_agg/grouping_policy_batch.c
@@ -71,7 +71,7 @@ create_grouping_policy_batch(int num_agg_defs, VectorAggDef *agg_defs, int num_g
 	policy->agg_extra_mctx =
 		AllocSetContextCreate(CurrentMemoryContext, "agg extra", ALLOCSET_DEFAULT_SIZES);
 
-	policy->agg_states = palloc(sizeof(*policy->agg_states) * policy->num_agg_defs);
+	policy->agg_states = (void **) palloc(sizeof(*policy->agg_states) * policy->num_agg_defs);
 	for (int i = 0; i < policy->num_agg_defs; i++)
 	{
 		VectorAggDef *agg_def = &policy->agg_defs[i];
@@ -190,7 +190,7 @@ gp_batch_add_batch(GroupingPolicy *gp, DecompressBatchState *batch_state)
 	const size_t num_words = (batch_state->total_batch_rows + 63) / 64;
 	if (num_words > policy->num_tmp_filter_words)
 	{
-		const size_t new_words = num_words * 2 + 1;
+		const size_t new_words = (num_words * 2) + 1;
 		if (policy->tmp_filter != NULL)
 		{
 			pfree(policy->tmp_filter);
diff --git a/tsl/test/src/compression_unit_test.c b/tsl/test/src/compression_unit_test.c
index fa78055fd..0276af438 100644
--- a/tsl/test/src/compression_unit_test.c
+++ b/tsl/test/src/compression_unit_test.c
@@ -539,11 +539,8 @@ test_delta3(bool have_nulls, bool have_random)
 
 	/* Forward decompression. */
 	DecompressionIterator *iter =
-		delta_delta_decompression_iterator_from_datum_forward(PointerGetDatum((void *) compressed),
-															  INT8OID);
-	ArrowArray *bulk_result = delta_delta_decompress_all(PointerGetDatum((void *) compressed),
-														 INT8OID,
-														 CurrentMemoryContext);
+		delta_delta_decompression_iterator_from_datum_forward(compressed, INT8OID);
+	ArrowArray *bulk_result = delta_delta_decompress_all(compressed, INT8OID, CurrentMemoryContext);
 	for (int i = 0; i < TEST_ELEMENTS; i++)
 	{
 		DecompressResult r = delta_delta_decompression_iterator_try_next_forward(iter);
@@ -565,9 +562,7 @@ test_delta3(bool have_nulls, bool have_random)
 	TestAssertTrue(r.is_done);
 
 	/* Reverse decompression. */
-	iter =
-		delta_delta_decompression_iterator_from_datum_reverse(PointerGetDatum((void *) compressed),
-															  INT8OID);
+	iter = delta_delta_decompression_iterator_from_datum_reverse(compressed, INT8OID);
 	for (int i = TEST_ELEMENTS - 1; i >= 0; i--)
 	{
 		DecompressResult r = delta_delta_decompression_iterator_try_next_reverse(iter);