Enable -Wextra

Our code mostly has warnings about comparisons between integers of
different signedness.
Alexander Kuzmenkov 2022-10-27 15:08:09 +04:00 committed by Alexander Kuzmenkov
parent 864da20cee
commit 313845a882
52 changed files with 141 additions and 142 deletions
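
Most of the changes below are mechanical fallout from -Wsign-compare, which
-Wextra enables for C: loop counters declared as plain int are compared
against unsigned element counts. A minimal sketch of the pattern and the fix
applied throughout this commit (hypothetical function, mirroring the
bit_array_send change below):

#include <stdint.h>

uint64_t
sum_buckets(const uint64_t *data, uint32_t num_elements)
{
	uint64_t total = 0;

	/* Declaring the counter as "int i" draws: comparison of integer
	 * expressions of different signedness: 'int' and 'uint32_t'
	 * [-Wsign-compare]. Matching the counter type to the count fixes it. */
	for (uint32_t i = 0; i < num_elements; i++)
		total += data[i];

	return total;
}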

View File

@ -200,6 +200,10 @@ if(CMAKE_C_COMPILER_ID MATCHES "GNU|AppleClang|Clang")
-Wempty-body
-Wvla
-Wall
-Wextra
# The SQL function arguments macro PG_FUNCTION_ARGS often introduces unused
# arguments.
-Wno-unused-parameter
-Wundef
-Wmissing-prototypes
-Wpointer-arith
@ -242,6 +246,13 @@ if(CMAKE_C_COMPILER_ID MATCHES "GNU|AppleClang|Clang")
add_compile_options(-Wno-strict-overflow)
endif()
if(CMAKE_COMPILER_IS_GNUCC)
add_compile_options(
-Wno-clobbered
# Seems to be broken in GCC 11 with designated initializers.
-Wno-missing-field-initializers)
endif()
# On UNIX, the compiler needs to support -fvisibility=hidden to hide symbols
# by default
check_c_compiler_flag(-fvisibility=hidden CC_SUPPORTS_VISIBILITY_HIDDEN)
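
The -Wno-unused-parameter exception is needed because every SQL-callable
function is declared through PostgreSQL's PG_FUNCTION_ARGS macro, which
expands to a FunctionCallInfo parameter that many function bodies never
read. A minimal sketch (hypothetical function name):

#include <postgres.h>
#include <fmgr.h>

PG_MODULE_MAGIC;

PG_FUNCTION_INFO_V1(ts_answer);

Datum
ts_answer(PG_FUNCTION_ARGS)
{
	/* PG_FUNCTION_ARGS expands to "FunctionCallInfo fcinfo"; this body
	 * never touches fcinfo, so -Wunused-parameter would flag the wrapper. */
	PG_RETURN_INT32(42);
}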

View File

@ -128,10 +128,9 @@ bit_array_recv(const StringInfo buffer)
static inline void
bit_array_send(StringInfo buffer, const BitArray *data)
{
int i;
pq_sendint32(buffer, data->buckets.num_elements);
pq_sendbyte(buffer, data->bits_used_in_last_bucket);
for (i = 0; i < data->buckets.num_elements; i++)
for (uint32 i = 0; i < data->buckets.num_elements; i++)
pq_sendint64(buffer, data->buckets.data[i]);
}

View File

@ -118,7 +118,7 @@ typedef struct ChunkStubScanCtx
} ChunkStubScanCtx;
static bool
chunk_stub_is_valid(const ChunkStub *stub, unsigned int expected_slices)
chunk_stub_is_valid(const ChunkStub *stub, int16 expected_slices)
{
return stub && stub->id > 0 && stub->constraints && expected_slices == stub->cube->num_slices &&
stub->cube->num_slices == stub->constraints->num_dimension_constraints;
@ -3793,10 +3793,8 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
List **affected_data_nodes)
{
uint64 i = 0;
uint64 num_chunks = 0;
Chunk *chunks;
List *dropped_chunk_names = NIL;
const char *schema_name, *table_name;
const int32 hypertable_id = ht->fd.id;
bool has_continuous_aggs;
@ -3866,8 +3864,6 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
if (has_continuous_aggs)
{
int i;
/* Exclusively lock all chunks, and invalidate the continuous
* aggregates in the regions covered by the chunks. We do this in two
* steps: first lock all the chunks and then invalidate the
@ -3878,7 +3874,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
* this transaction, which allows moving the invalidation threshold
* without having to worry about new invalidations while
* refreshing. */
for (i = 0; i < num_chunks; i++)
for (uint64 i = 0; i < num_chunks; i++)
{
LockRelationOid(chunks[i].table_id, ExclusiveLock);
@ -3893,7 +3889,7 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
* The invalidation will allow the refresh command on a continuous
* aggregate to see that this region was dropped and will
* therefore be able to refresh accordingly. */
for (i = 0; i < num_chunks; i++)
for (uint64 i = 0; i < num_chunks; i++)
{
int64 start = ts_chunk_primary_dimension_start(&chunks[i]);
int64 end = ts_chunk_primary_dimension_end(&chunks[i]);
@ -3902,7 +3898,8 @@ ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than, int3
}
}
for (i = 0; i < num_chunks; i++)
List *dropped_chunk_names = NIL;
for (uint64 i = 0; i < num_chunks; i++)
{
char *chunk_name;
ListCell *lc;

View File

@ -46,8 +46,8 @@ ts_chunk_scan_by_chunk_ids(const Hyperspace *hs, const List *chunk_ids, unsigned
MemoryContext orig_mcxt;
Chunk **locked_chunks = NULL;
Chunk **unlocked_chunks = NULL;
unsigned int locked_chunk_count = 0;
unsigned int unlocked_chunk_count = 0;
int locked_chunk_count = 0;
int unlocked_chunk_count = 0;
ListCell *lc;
int remote_chunk_count = 0;

View File

@ -76,7 +76,6 @@ parse_segment_collist(char *inpstr, Hypertable *hypertable)
List *parsed;
ListCell *lc;
SelectStmt *select;
short index = 0;
List *collist = NIL;
RawStmt *raw;
@ -119,6 +118,7 @@ parse_segment_collist(char *inpstr, Hypertable *hypertable)
if (select->sortClause != NIL)
throw_segment_by_error(inpstr);
short index = 0;
foreach (lc, select->groupClause)
{
ColumnRef *cf;
@ -161,7 +161,6 @@ parse_order_collist(char *inpstr, Hypertable *hypertable)
List *parsed;
ListCell *lc;
SelectStmt *select;
short index = 0;
List *collist = NIL;
RawStmt *raw;
@ -203,6 +202,7 @@ parse_order_collist(char *inpstr, Hypertable *hypertable)
if (select->groupClause != NIL)
throw_order_by_error(inpstr);
short index = 0;
foreach (lc, select->sortClause)
{
SortBy *sort_by;

View File

@ -107,7 +107,6 @@ get_show_upper_mask(const char *paths, size_t paths_len)
static bool
set_debug_flag(const char *flag_string, size_t length, DebugOptimizerFlags *flags)
{
int i;
char *end;
size_t flag_length;
@ -121,7 +120,7 @@ set_debug_flag(const char *flag_string, size_t length, DebugOptimizerFlags *flag
flag_length = length;
}
for (i = 0; i < sizeof(g_flag_names) / sizeof(*g_flag_names); ++i)
for (size_t i = 0; i < sizeof(g_flag_names) / sizeof(*g_flag_names); ++i)
if (strncmp(g_flag_names[i].name, flag_string, flag_length) == 0)
switch (g_flag_names[i].flag)
{

View File

@ -1493,7 +1493,7 @@ ts_dimension_add(PG_FUNCTION_ARGS)
.colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_NAME(1),
.num_slices = PG_ARGISNULL(2) ? DatumGetInt32(-1) : PG_GETARG_INT32(2),
.num_slices_is_set = !PG_ARGISNULL(2),
.interval_datum = PG_ARGISNULL(3) ? DatumGetInt32(-1) : PG_GETARG_DATUM(3),
.interval_datum = PG_ARGISNULL(3) ? Int32GetDatum(-1) : PG_GETARG_DATUM(3),
.interval_type = PG_ARGISNULL(3) ? InvalidOid : get_fn_expr_argtype(fcinfo->flinfo, 3),
.partitioning_func = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4),
.if_not_exists = PG_ARGISNULL(5) ? false : PG_GETARG_BOOL(5),

View File

@ -122,12 +122,14 @@ get_proxy_table_relid()
return get_relname_relid(EXTENSION_PROXY_TABLE, nsid);
}
static bool inline extension_exists()
inline static bool
extension_exists()
{
return OidIsValid(get_extension_oid(EXTENSION_NAME, true));
}
static bool inline extension_is_transitioning()
inline static bool
extension_is_transitioning()
{
/*
* Determine whether the extension is being created or upgraded (as a

View File

@ -501,7 +501,6 @@ initialize_func_info()
Oid pg_nsp = get_namespace_oid("pg_catalog", false);
HeapTuple tuple;
Relation rel;
int i;
func_hash = hash_create("func_cache",
_MAX_CACHE_FUNCTIONS,
@ -510,7 +509,7 @@ initialize_func_info()
rel = table_open(ProcedureRelationId, AccessShareLock);
for (i = 0; i < _MAX_CACHE_FUNCTIONS; i++)
for (size_t i = 0; i < _MAX_CACHE_FUNCTIONS; i++)
{
FuncInfo *finfo = &funcinfo[i];
Oid namespaceoid = pg_nsp;

View File

@ -16,13 +16,6 @@
#endif
#ifdef USE_TELEMETRY
typedef enum TelemetryLevel
{
TELEMETRY_OFF,
TELEMETRY_NO_FUNCTIONS,
TELEMETRY_BASIC,
} TelemetryLevel;
/* Define which level means 'on'. We use this object to have at least one object
* of type TelemetryLevel in the code, otherwise pgindent won't work for the
* type */
@ -87,7 +80,7 @@ TSDLLEXPORT bool ts_guc_enable_skip_scan = true;
int ts_guc_max_open_chunks_per_insert = 10;
int ts_guc_max_cached_chunks_per_hypertable = 10;
#ifdef USE_TELEMETRY
int ts_guc_telemetry_level = TELEMETRY_DEFAULT;
TelemetryLevel ts_guc_telemetry_level = TELEMETRY_DEFAULT;
char *ts_telemetry_cloud = NULL;
#endif
@ -453,7 +446,7 @@ _guc_init(void)
DefineCustomEnumVariable("timescaledb.telemetry_level",
"Telemetry settings level",
"Level used to determine which telemetry to send",
&ts_guc_telemetry_level,
(int *) &ts_guc_telemetry_level,
TELEMETRY_DEFAULT,
telemetry_level_options,
PGC_USERSET,

View File

@ -32,10 +32,19 @@ extern TSDLLEXPORT bool ts_guc_enable_skip_scan;
extern bool ts_guc_restoring;
extern int ts_guc_max_open_chunks_per_insert;
extern int ts_guc_max_cached_chunks_per_hypertable;
#ifdef USE_TELEMETRY
extern int ts_guc_telemetry_level;
typedef enum TelemetryLevel
{
TELEMETRY_OFF,
TELEMETRY_NO_FUNCTIONS,
TELEMETRY_BASIC,
} TelemetryLevel;
extern TelemetryLevel ts_guc_telemetry_level;
extern char *ts_telemetry_cloud;
#endif
extern TSDLLEXPORT char *ts_guc_license;
extern char *ts_last_tune_time;
extern char *ts_last_tune_version;

View File

@ -144,8 +144,6 @@ ts_hist_combinefunc(PG_FUNCTION_ARGS)
}
else
{
Size i;
/* Since number of buckets is part of the aggregation call the initialization
* might be different in the partials so we error out if they are not identical. */
if (state1->nbuckets != state2->nbuckets)
@ -154,7 +152,7 @@ ts_hist_combinefunc(PG_FUNCTION_ARGS)
result = copy_state(aggcontext, state1);
/* Combine values from state1 and state2 when both states are non-null */
for (i = 0; i < state1->nbuckets; i++)
for (int32 i = 0; i < state1->nbuckets; i++)
{
/* Perform addition using int64 to check for overflow */
int64 val = (int64) DatumGetInt32(result->buckets[i]);
@ -174,7 +172,6 @@ Datum
ts_hist_serializefunc(PG_FUNCTION_ARGS)
{
Histogram *state;
Size i;
StringInfoData buf;
Assert(!PG_ARGISNULL(0));
@ -183,7 +180,7 @@ ts_hist_serializefunc(PG_FUNCTION_ARGS)
pq_begintypsend(&buf);
pq_sendint32(&buf, state->nbuckets);
for (i = 0; i < state->nbuckets; i++)
for (int32 i = 0; i < state->nbuckets; i++)
pq_sendint32(&buf, DatumGetInt32(state->buckets[i]));
PG_RETURN_BYTEA_P(pq_endtypsend(&buf));

View File

@ -853,7 +853,7 @@ ts_hypertable_set_num_dimensions(Hypertable *ht, int16 num_dimensions)
#define DEFAULT_ASSOCIATED_TABLE_PREFIX_FORMAT "_hyper_%d"
#define DEFAULT_ASSOCIATED_DISTRIBUTED_TABLE_PREFIX_FORMAT "_dist_hyper_%d"
static const int MAXIMUM_PREFIX_LENGTH = NAMEDATALEN - 16;
static const size_t MAXIMUM_PREFIX_LENGTH = NAMEDATALEN - 16;
static void
hypertable_insert_relation(Relation rel, FormData_hypertable *fd)
@ -1328,7 +1328,11 @@ table_has_replica_identity(const Relation rel)
return rel->rd_rel->relreplident != REPLICA_IDENTITY_DEFAULT;
}
static bool inline table_has_rules(Relation rel) { return rel->rd_rules != NULL; }
inline static bool
table_has_rules(Relation rel)
{
return rel->rd_rules != NULL;
}
bool
ts_hypertable_has_chunks(Oid table_relid, LOCKMODE lockmode)
@ -2386,7 +2390,7 @@ typedef struct AccumHypertable
} AccumHypertable;
bool
ts_is_partitioning_column(const Hypertable *ht, Index column_attno)
ts_is_partitioning_column(const Hypertable *ht, AttrNumber column_attno)
{
uint16 i;
@ -2551,7 +2555,7 @@ ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
ChunkSizingInfo *chunk_sizing_info;
Relation rel;
rel = table_open(table_relid, AccessExclusiveLock);
int32 row_size = MAXALIGN(SizeofHeapTupleHeader);
Size row_size = MAXALIGN(SizeofHeapTupleHeader);
/* estimate tuple width of compressed hypertable */
for (int i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
{
@ -2568,7 +2572,7 @@ ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
{
ereport(WARNING,
(errmsg("compressed row size might exceed maximum row size"),
errdetail("Estimated row size of compressed hypertable is %u. This exceeds the "
errdetail("Estimated row size of compressed hypertable is %zu. This exceeds the "
"maximum size of %zu and can cause compression of chunks to fail.",
row_size,
MaxHeapTupleSize)));

View File

@ -145,7 +145,7 @@ extern Tablespace *ts_hypertable_get_tablespace_at_offset_from(int32 hypertable_
Oid tablespace_oid, int16 offset);
extern bool ts_hypertable_has_chunks(Oid table_relid, LOCKMODE lockmode);
extern void ts_hypertables_rename_schema_name(const char *old_name, const char *new_name);
extern bool ts_is_partitioning_column(const Hypertable *ht, Index column_attno);
extern bool ts_is_partitioning_column(const Hypertable *ht, AttrNumber column_attno);
extern TSDLLEXPORT bool ts_hypertable_set_compressed(Hypertable *ht,
int32 compressed_hypertable_id);
extern TSDLLEXPORT bool ts_hypertable_unset_compressed(Hypertable *ht);

View File

@ -153,7 +153,7 @@ ts_set_append_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeT
RelOptInfo *childrel;
/* append_rel_list contains all append rels; ignore others */
if (appinfo->parent_relid != parentRTindex)
if (appinfo->parent_relid != (Index) parentRTindex)
continue;
/* Re-locate the child RTE and RelOptInfo */
@ -565,7 +565,7 @@ ts_set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEn
ListCell *childvars;
/* append_rel_list contains all append rels; ignore others */
if (appinfo->parent_relid != parentRTindex)
if (appinfo->parent_relid != (Index) parentRTindex)
continue;
childRTindex = appinfo->child_relid;

View File

@ -122,7 +122,7 @@ static ProcessUtility_hook_type prev_ProcessUtility_hook;
/* This is timescaleDB's versioned-extension's post_parse_analyze_hook */
static post_parse_analyze_hook_type extension_post_parse_analyze_hook = NULL;
static void inline extension_check(void);
inline static void extension_check(void);
#if PG14_LT
static void call_extension_post_parse_analyze_hook(ParseState *pstate, Query *query);
#else
@ -708,7 +708,8 @@ _PG_init(void)
ProcessUtility_hook = loader_process_utility_hook;
}
static void inline do_load()
inline static void
do_load()
{
char *version = extension_version();
char soname[MAX_SO_NAME_LEN];
@ -782,7 +783,8 @@ static void inline do_load()
post_parse_analyze_hook = old_hook;
}
static void inline extension_check()
inline static void
extension_check()
{
enum ExtensionState state = extension_current_state();
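
The "static void inline" to "inline static void" reorderings in this file
(and in extension.c and hypertable.c above) quiet GCC's
-Wold-style-declaration, which -Wextra enables for C: 'inline' must come
before the return type. A minimal sketch (hypothetical functions):

#include <stdbool.h>

/* warning: 'inline' is not at beginning of declaration
 * [-Wold-style-declaration] */
static bool inline has_rules_old(void) { return true; }

/* 'inline' moved in front: same semantics, no warning */
inline static bool has_rules_new(void) { return true; }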

View File

@ -73,7 +73,7 @@ ts_http_send_and_recv(Connection *conn, HttpRequest *req, HttpResponseState *sta
{
ret = ts_connection_write(conn, built_request + write_off, request_len);
if (ret < 0 || ret > request_len)
if (ret < 0 || (size_t) ret > request_len)
return HTTP_ERROR_WRITE;
if (ret == 0)

View File

@ -233,7 +233,7 @@ ts_http_request_build(HttpRequest *req, size_t *buf_size)
if (content_length != -1)
{
/* make sure it's equal to body_len */
if (content_length != req->body_len)
if ((size_t) content_length != req->body_len)
{
return NULL;
}

View File

@ -74,7 +74,7 @@ is_valid_now_expr(OpExpr *op, List *rtable)
Var *var = linitial_node(Var, op->args);
if (var->varlevelsup != 0)
return false;
Assert(var->varno <= list_length(rtable));
Assert(var->varno <= (Index) list_length(rtable));
RangeTblEntry *rte = list_nth(rtable, var->varno - 1);
/*

View File

@ -932,7 +932,7 @@ find_children_chunks(HypertableRestrictInfo *hri, Hypertable *ht, unsigned int *
*num_chunks = list_length(chunk_oids);
Chunk **chunks = (Chunk **) palloc(sizeof(Chunk *) * *num_chunks);
for (int i = 0; i < *num_chunks; i++)
for (unsigned int i = 0; i < *num_chunks; i++)
{
chunks[i] = ts_chunk_get_by_relid(list_nth_oid(chunk_oids, i),
/* fail_if_not_found = */ true);
@ -999,7 +999,7 @@ get_explicit_chunks(CollectQualCtx *ctx, PlannerInfo *root, RelOptInfo *rel, Hyp
int order_attno;
Chunk **unlocked_chunks = NULL;
Chunk **chunks = NULL;
unsigned int unlocked_chunk_count = 0;
int unlocked_chunk_count = 0;
Oid prev_chunk_oid = InvalidOid;
bool chunk_sort_needed = false;
int i;
@ -1360,10 +1360,9 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
.join_level = 0,
};
Index first_chunk_index = 0;
Index i;
/* double check our permissions are valid */
Assert(rti != parse->resultRelation);
Assert(rti != (Index) parse->resultRelation);
oldrc = get_plan_rowmark(root->rowMarks, rti);
@ -1525,8 +1524,7 @@ ts_plan_expand_hypertable_chunks(Hypertable *ht, PlannerInfo *root, RelOptInfo *
* build_simple_rel will look things up in the append_rel_array, so we can
* only use it after that array has been set up.
*/
i = 0;
for (i = 0; i < list_length(inh_oids); i++)
for (int i = 0; i < list_length(inh_oids); i++)
{
Index child_rtindex = first_chunk_index + i;
/* build_simple_rel will add the child to the relarray */

View File

@ -112,7 +112,7 @@ static planner_hook_type prev_planner_hook;
static set_rel_pathlist_hook_type prev_set_rel_pathlist_hook;
static get_relation_info_hook_type prev_get_relation_info_hook;
static create_upper_paths_hook_type prev_create_upper_paths_hook;
static void cagg_reorder_groupby_clause(RangeTblEntry *subq_rte, int rtno, List *outer_sortcl,
static void cagg_reorder_groupby_clause(RangeTblEntry *subq_rte, Index rtno, List *outer_sortcl,
List *outer_tlist);
/*
@ -918,12 +918,11 @@ rte_should_expand(const RangeTblEntry *rte)
static void
reenable_inheritance(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte)
{
Index i;
bool set_pathlist_for_current_rel = false;
double total_pages;
bool reenabled_inheritance = false;
for (i = 1; i < root->simple_rel_array_size; i++)
for (int i = 1; i < root->simple_rel_array_size; i++)
{
RangeTblEntry *in_rte = root->simple_rte_array[i];
@ -961,7 +960,7 @@ reenable_inheritance(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntr
*/
if (in_rte == rte)
{
Assert(rti == i);
Assert(rti == (Index) i);
set_pathlist_for_current_rel = true;
}
}
@ -971,14 +970,14 @@ reenable_inheritance(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntr
return;
total_pages = 0;
for (i = 1; i < root->simple_rel_array_size; i++)
for (int i = 1; i < root->simple_rel_array_size; i++)
{
RelOptInfo *brel = root->simple_rel_array[i];
if (brel == NULL)
continue;
Assert(brel->relid == i); /* sanity check on array */
Assert(brel->relid == (Index) i); /* sanity check on array */
if (IS_DUMMY_REL(brel))
continue;
@ -1286,7 +1285,7 @@ timescaledb_get_relation_info_hook(PlannerInfo *root, Oid relation_objectid, boo
rel->tuples = (double) uncompressed_chunk->rd_rel->reltuples;
if (rel->pages == 0)
rel->allvisfrac = 0.0;
else if (uncompressed_chunk->rd_rel->relallvisible >= rel->pages)
else if (uncompressed_chunk->rd_rel->relallvisible >= (int32) rel->pages)
rel->allvisfrac = 1.0;
else
rel->allvisfrac =
@ -1573,7 +1572,7 @@ check_cagg_view_rte(RangeTblEntry *rte)
* outer_tlist - outer query's target list
*/
static void
cagg_reorder_groupby_clause(RangeTblEntry *subq_rte, int rtno, List *outer_sortcl,
cagg_reorder_groupby_clause(RangeTblEntry *subq_rte, Index rtno, List *outer_sortcl,
List *outer_tlist)
{
bool not_found = true;

View File

@ -26,7 +26,7 @@
* if the column is not a space dimension.
*/
static Dimension *
get_space_dimension(Oid relid, Index varattno)
get_space_dimension(Oid relid, AttrNumber varattno)
{
Hypertable *ht = ts_planner_get_hypertable(relid, CACHE_FLAG_CHECK);
if (!ht)
@ -102,7 +102,7 @@ is_valid_space_constraint(OpExpr *op, List *rtable)
/*
* Check that the constraint is actually on a partitioning column.
*/
Assert(var->varno <= list_length(rtable));
Assert(var->varno <= (Index) list_length(rtable));
RangeTblEntry *rte = list_nth(rtable, var->varno - 1);
Dimension *dim = get_space_dimension(rte->relid, var->varattno);
@ -136,7 +136,7 @@ is_valid_scalar_space_constraint(ScalarArrayOpExpr *op, List *rtable)
/*
* Check that the constraint is actually on a partitioning column.
*/
Assert(var->varno <= list_length(rtable));
Assert(var->varno <= (Index) list_length(rtable));
RangeTblEntry *rte = list_nth(rtable, var->varno - 1);
Dimension *dim = get_space_dimension(rte->relid, var->varattno);

View File

@ -347,7 +347,7 @@ sort_transform_ec(PlannerInfo *root, EquivalenceClass *orig)
foreach (lc, root->append_rel_list)
{
AppendRelInfo *appinfo = lfirst_node(AppendRelInfo, lc);
if (appinfo->parent_relid == parent)
if (appinfo->parent_relid == (Index) parent)
{
RelOptInfo *parent_rel = root->simple_rel_array[appinfo->parent_relid];
RelOptInfo *child_rel = root->simple_rel_array[appinfo->child_relid];

View File

@ -24,16 +24,16 @@
typedef struct SubspaceStoreInternalNode
{
DimensionVec *vector;
size_t descendants;
uint16 descendants;
bool last_internal_node;
} SubspaceStoreInternalNode;
typedef struct SubspaceStore
{
MemoryContext mcxt;
int16 num_dimensions;
uint16 num_dimensions;
/* limit growth of store by limiting number of slices in first dimension, 0 for no limit */
int16 max_items;
uint16 max_items;
SubspaceStoreInternalNode *origin; /* origin of the tree */
} SubspaceStore;

View File

@ -171,7 +171,6 @@ char_in_valid_version_digits(const char c)
bool
ts_validate_server_version(const char *json, VersionResult *result)
{
int i;
Datum version = DirectFunctionCall2(json_object_field_text,
CStringGetTextDatum(json),
PointerGetDatum(cstring_to_text(TS_VERSION_JSON_FIELD)));
@ -192,7 +191,7 @@ ts_validate_server_version(const char *json, VersionResult *result)
return false;
}
for (i = 0; i < strlen(result->versionstr); i++)
for (size_t i = 0; i < strlen(result->versionstr); i++)
{
if (!isalpha(result->versionstr[i]) && !isdigit(result->versionstr[i]) &&
!char_in_valid_version_digits(result->versionstr[i]))
@ -274,11 +273,9 @@ get_database_size()
static void
add_related_extensions(JsonbParseState *state)
{
int i;
pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL);
for (i = 0; i < sizeof(related_extensions) / sizeof(char *); i++)
for (size_t i = 0; i < sizeof(related_extensions) / sizeof(char *); i++)
{
const char *ext = related_extensions[i];
@ -462,7 +459,7 @@ add_function_call_telemetry(JsonbParseState *state)
}
visible_extensions[0] = "timescaledb";
for (int i = 1; i < sizeof(visible_extensions) / sizeof(char *); i++)
for (size_t i = 1; i < sizeof(visible_extensions) / sizeof(char *); i++)
visible_extensions[i] = related_extensions[i - 1];
functions =

View File

@ -312,7 +312,7 @@ typedef struct InternalFunctionDef
int args;
} InternalFunctionDef;
const static InternalFunctionDef internal_function_definitions[_MAX_INTERNAL_FUNCTIONS] = {
static const InternalFunctionDef internal_function_definitions[_MAX_INTERNAL_FUNCTIONS] = {
[DDL_ADD_CHUNK_CONSTRAINT] = {
.name = "chunk_constraint_add_table_constraint",
.args = 1,

View File

@ -378,7 +378,7 @@ continuous_agg_init(ContinuousAgg *cagg, const Form_continuous_agg fd)
}
}
TSDLLEXPORT const CaggsInfo
TSDLLEXPORT CaggsInfo
ts_continuous_agg_get_all_caggs_info(int32 raw_hypertable_id)
{
CaggsInfo all_caggs_info;
@ -470,7 +470,6 @@ bucket_function_serialize(const ContinuousAggsBucketFunction *bf)
static const ContinuousAggsBucketFunction *
bucket_function_deserialize(const char *str)
{
int i;
char *begin, *end, *strings[4];
ContinuousAggsBucketFunction *bf;
@ -479,7 +478,7 @@ bucket_function_deserialize(const char *str)
return NULL;
begin = pstrdup(str);
for (i = 0; i < lengthof(strings); i++)
for (size_t i = 0; i < lengthof(strings); i++)
{
end = strstr(begin, ";");
if (end == NULL)

View File

@ -158,7 +158,7 @@ typedef struct CaggPolicyOffset
extern TSDLLEXPORT Oid ts_cagg_permissions_check(Oid cagg_oid, Oid userid);
extern TSDLLEXPORT const CaggsInfo ts_continuous_agg_get_all_caggs_info(int32 raw_hypertable_id);
extern TSDLLEXPORT CaggsInfo ts_continuous_agg_get_all_caggs_info(int32 raw_hypertable_id);
extern TSDLLEXPORT void ts_populate_caggs_info_from_arrays(ArrayType *mat_hypertable_ids,
ArrayType *bucket_widths,
ArrayType *bucket_functions,

View File

@ -20,8 +20,8 @@ typedef struct MockConnection
{
Connection conn;
char recv_buf[MOCK_MAX_BUF_SIZE];
int recv_buf_offset;
int recv_buf_len;
size_t recv_buf_offset;
size_t recv_buf_len;
} MockConnection;
static int

View File

@ -60,7 +60,7 @@ static const char *const BAD_RESPONSES[] = { "HTTP/1.1 200 OK\r\n"
"{\"status\":404}",
NULL };
static int TEST_LENGTHS[] = { 14, 14, 14, 14 };
static size_t TEST_LENGTHS[] = { 14, 14, 14, 14 };
static const char *MESSAGE_BODY[] = {
"{\"status\":200}", "{\"status\":200}", "{\"status\":200}", "{\"status\":201}"
};
@ -72,7 +72,7 @@ TS_FUNCTION_INFO_V1(ts_test_http_request_build);
static int
num_test_strings()
{
return sizeof(TEST_LENGTHS) / sizeof(int);
return sizeof(TEST_LENGTHS) / sizeof(TEST_LENGTHS[0]);
}
/* Check we can successfully parse partial but well-formed HTTP responses */
@ -80,7 +80,8 @@ Datum
ts_test_http_parsing(PG_FUNCTION_ARGS)
{
int num_iterations = PG_GETARG_INT32(0);
int bytes, i, j;
int i, j;
size_t bytes;
srand(time(0));
@ -97,7 +98,7 @@ ts_test_http_parsing(PG_FUNCTION_ARGS)
buf = ts_http_response_state_next_buffer(state, &bufsize);
TestAssertTrue(bufsize >= bytes);
TestAssertTrue(bufsize >= (ssize_t) bytes);
/* Copy part of the message into the parsing state */
memcpy(buf, TEST_RESPONSES[i], bytes);
@ -125,7 +126,8 @@ ts_test_http_parsing(PG_FUNCTION_ARGS)
Datum
ts_test_http_parsing_full(PG_FUNCTION_ARGS)
{
int bytes, i;
int i;
size_t bytes;
srand(time(0));
@ -140,7 +142,7 @@ ts_test_http_parsing_full(PG_FUNCTION_ARGS)
bytes = strlen(TEST_RESPONSES[i]);
TestAssertTrue(bufsize >= bytes);
TestAssertTrue(bufsize >= (ssize_t) bytes);
/* Copy all of the message into the parsing state */
memcpy(buf, TEST_RESPONSES[i], bytes);
@ -172,7 +174,7 @@ ts_test_http_parsing_full(PG_FUNCTION_ARGS)
bytes = strlen(BAD_RESPONSES[i]);
TestAssertTrue(bufsize >= bytes);
TestAssertTrue(bufsize >= (ssize_t) bytes);
memcpy(buf, BAD_RESPONSES[i], bytes);

View File

@ -15,7 +15,7 @@ TS_TEST_FN(ts_test_scanner)
ScanIterator it;
Relation chunkrel;
int32 chunk_id[2] = { -1, -1 };
int i = 0;
size_t i = 0;
/* Test pre-open relation */
it = ts_chunk_scan_iterator_create(CurrentMemoryContext);

View File

@ -240,7 +240,7 @@ TS_TEST_FN(ts_test_with_clause_parse)
funcctx = SRF_PERCALL_SETUP();
result = funcctx->user_fctx;
if (result == NULL || result->i >= TS_ARRAY_LEN(test_args))
if (result == NULL || (size_t) result->i >= TS_ARRAY_LEN(test_args))
SRF_RETURN_DONE(funcctx);
values = palloc0(sizeof(*values) * funcctx->tuple_desc->natts);

View File

@ -396,7 +396,6 @@ policy_invoke_recompress_chunk(Chunk *chunk)
Oid restype;
Oid func_oid;
List *args = NIL;
int i;
bool isnull;
Const *argarr[RECOMPRESS_CHUNK_NARGS] = {
makeConst(REGCLASSOID,
@ -421,7 +420,7 @@ policy_invoke_recompress_chunk(Chunk *chunk)
/* Prepare the function expr with argument list */
get_func_result_type(func_oid, &restype, NULL);
for (i = 0; i < lengthof(argarr); i++)
for (size_t i = 0; i < lengthof(argarr); i++)
args = lappend(args, argarr[i]);
fexpr = makeFuncExpr(func_oid, restype, args, InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL);

View File

@ -207,7 +207,7 @@ chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type)
ExprContext *econtext;
FuncExpr *fexpr;
List *args = NIL;
int i, num_results = 0;
int num_results = 0;
SetExprState *state;
Oid restype;
Oid func_oid;
@ -242,7 +242,7 @@ chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type)
/* Prepare the function expr with argument list */
get_func_result_type(func_oid, &restype, NULL);
for (i = 0; i < lengthof(argarr); i++)
for (size_t i = 0; i < lengthof(argarr); i++)
args = lappend(args, argarr[i]);
fexpr = makeFuncExpr(func_oid, restype, args, InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL);

View File

@ -40,7 +40,7 @@ typedef struct ArrayCompressed
static void
pg_attribute_unused() assertions(void)
{
ArrayCompressed test_val = { { 0 } };
ArrayCompressed test_val = { .vl_len_ = { 0 } };
Simple8bRleSerialized test_simple8b = { 0 };
/* make sure no padding bytes make it to disk */
StaticAssertStmt(sizeof(ArrayCompressed) ==

View File

@ -1466,7 +1466,7 @@ Datum
tsl_compressed_data_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);
CompressedDataHeader header = { { 0 } };
CompressedDataHeader header = { .vl_len_ = { 0 } };
header.compression_algorithm = pq_getmsgbyte(buf);

View File

@ -39,7 +39,7 @@ typedef struct DeltaDeltaCompressed
static void
pg_attribute_unused() assertions(void)
{
DeltaDeltaCompressed test_val = { { 0 } };
DeltaDeltaCompressed test_val = { .vl_len_ = { 0 } };
/* make sure no padding bytes make it to disk */
StaticAssertStmt(sizeof(DeltaDeltaCompressed) ==
sizeof(test_val.vl_len_) + sizeof(test_val.compression_algorithm) +

View File

@ -49,7 +49,7 @@ typedef struct DictionaryCompressed
static void
pg_attribute_unused() assertions(void)
{
DictionaryCompressed test_val = { { 0 } };
DictionaryCompressed test_val = { .vl_len_ = { 0 } };
/* make sure no padding bytes make it to disk */
StaticAssertStmt(sizeof(DictionaryCompressed) ==
sizeof(test_val.vl_len_) + sizeof(test_val.compression_algorithm) +
@ -240,7 +240,7 @@ compressor_get_serialization_info(DictionaryCompressor *compressor)
sizes.value_array[dict_item->index] = dict_item->key;
sizes.num_distinct += 1;
}
for (int i = 0; i < sizes.num_distinct; i++)
for (uint32 i = 0; i < sizes.num_distinct; i++)
{
array_compressor_append(array_comp, sizes.value_array[i]);
}
@ -281,7 +281,7 @@ dictionary_compressed_from_serialization_info(DictionaryCompressorSerializationI
sizes.dictionary_size,
sizes.dictionary_serialization_info);
Assert(data - (char *) bitmap == sizes.total_size);
Assert((Size) (data - (char *) bitmap) == sizes.total_size);
return bitmap;
}
@ -390,7 +390,7 @@ dictionary_decompression_iterator_init(DictionaryDecompressionIterator *iter, co
bitmap->element_type,
/* has_nulls */ false);
for (int i = 0; i < bitmap->num_distinct; i++)
for (uint32 i = 0; i < bitmap->num_distinct; i++)
{
DecompressResult res = array_decompression_iterator_try_next_forward(dictionary_iterator);
Assert(!res.is_null);

View File

@ -62,7 +62,7 @@ typedef struct CompressedGorillaData
static void
pg_attribute_unused() assertions(void)
{
GorillaCompressed test_val = { { 0 } };
GorillaCompressed test_val = { .vl_len_ = { 0 } };
/* make sure no padding bytes make it to disk */
StaticAssertStmt(sizeof(GorillaCompressed) ==
sizeof(test_val.vl_len_) + sizeof(test_val.compression_algorithm) +
@ -804,7 +804,7 @@ gorilla_compressed_send(CompressedDataHeader *header, StringInfo buf)
Datum
gorilla_compressed_recv(StringInfo buf)
{
GorillaCompressed header = { { 0 } };
GorillaCompressed header = { .vl_len_ = { 0 } };
CompressedGorillaData data = {
.header = &header,
};
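
The { { 0 } } to { .vl_len_ = { 0 } } rewrites in this and the other
compression files address -Wmissing-field-initializers, which -Wextra
enables: a positional initializer that covers only the first member flags
every omitted field, while a designated initializer leaves the remaining
members zero-initialized without a warning (GCC 11 warns even then, which
is why the CMake change above adds -Wno-missing-field-initializers for
GCC). A minimal sketch with a hypothetical two-member header:

typedef struct ExampleHeader
{
	char vl_len_[4];                     /* varlena length word, set later */
	unsigned char compression_algorithm; /* assigned after initialization */
} ExampleHeader;

void
init_headers(void)
{
	/* Positional form: warning: missing initializer for field
	 * 'compression_algorithm' [-Wmissing-field-initializers] */
	ExampleHeader old_style = { { 0 } };

	/* Designated form: unnamed members are still zeroed, no warning */
	ExampleHeader new_style = { .vl_len_ = { 0 } };

	(void) old_style;
	(void) new_style;
}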

View File

@ -349,7 +349,7 @@ simple8brle_compressor_pop_block(Simple8bRleCompressor *compressor)
return compressor->last_block;
}
static inline int32
static inline uint32
simple8brle_compressor_num_selectors(Simple8bRleCompressor *compressor)
{
Assert(bit_array_num_bits(&compressor->selectors) % SIMPLE8B_BITS_PER_SELECTOR == 0);
@ -628,7 +628,7 @@ simple8brle_decompression_iterator_try_next_forward(Simple8bRleDecompressionIter
.is_done = true,
};
if (iter->current_in_compressed_pos >= iter->current_block.num_elements_compressed)
if (iter->current_in_compressed_pos >= (int32) iter->current_block.num_elements_compressed)
{
iter->current_block =
simple8brle_block_create(bit_array_iter_next(&iter->selectors,

View File

@ -400,7 +400,7 @@ static bool
data_node_validate_database(TSConnection *conn, const DbInfo *database)
{
PGresult *res;
uint32 actual_encoding;
int32 actual_encoding;
const char *actual_chartype;
const char *actual_collation;
@ -580,9 +580,8 @@ connect_for_bootstrapping(const char *node_name, const char *const host, int32 p
{
TSConnection *conn = NULL;
char *err = NULL;
int i;
for (i = 0; i < lengthof(bootstrap_databases); i++)
for (size_t i = 0; i < lengthof(bootstrap_databases); i++)
{
List *node_options =
create_data_node_options(host, port, bootstrap_databases[i], username, password);
@ -1451,7 +1450,6 @@ drop_data_node_database(const ForeignServer *server)
char *nodename = pstrdup(server->servername);
char *dbname = NULL;
char *err = NULL;
int i;
/* Figure out the name of the database that should be dropped */
foreach (lc, server->options)
@ -1483,7 +1481,7 @@ drop_data_node_database(const ForeignServer *server)
/* Cannot connect to the database that is being dropped, so try to connect
* to a "standard" bootstrap database that we expect to exist on the data
* node */
for (i = 0; i < lengthof(bootstrap_databases); i++)
for (size_t i = 0; i < lengthof(bootstrap_databases); i++)
{
List *conn_options;
DefElem dbname_elem = {

View File

@ -174,7 +174,7 @@ create_distributed_restore_point(PG_FUNCTION_ARGS)
{
int result_index = funcctx->call_cntr - 1;
if (result_index < ts_dist_cmd_response_count(result_cmd))
if (result_index < (int) ts_dist_cmd_response_count(result_cmd))
{
const char *node_name;
PGresult *result =

View File

@ -402,7 +402,7 @@ dist_util_remote_srf_query(FunctionCallInfo fcinfo, const char *node_name, const
funcctx = SRF_PERCALL_SETUP();
result = ts_dist_cmd_get_result_by_node_name(funcctx->user_fctx, node_name);
if (funcctx->call_cntr < PQntuples(result))
if (funcctx->call_cntr < (uint64) PQntuples(result))
{
HeapTuple tuple;
char **fields = palloc(sizeof(char *) * PQnfields(result));

View File

@ -1002,7 +1002,7 @@ deparseDistinctClause(StringInfo buf, deparse_expr_cxt *context, List *pathkeys)
char *sep = "";
RelOptInfo *scanrel = context->scanrel;
Assert(varno > 0 && varno < root->simple_rel_array_size);
Assert(varno > 0 && varno < (Index) root->simple_rel_array_size);
context->scanrel = root->simple_rel_array[varno];
appendStringInfoString(buf, "DISTINCT ON (");
@ -1560,11 +1560,10 @@ static int
append_values_params(DeparsedInsertStmt *stmt, StringInfo buf, int pindex)
{
bool first = true;
int i;
appendStringInfoChar(buf, '(');
for (i = 0; i < stmt->num_target_attrs; i++)
for (unsigned int i = 0; i < stmt->num_target_attrs; i++)
{
if (!first)
appendStringInfoString(buf, ", ");

View File

@ -61,9 +61,8 @@ apply_fdw_and_server_options(TsFdwRelInfo *fpinfo)
ListCell *lc;
ForeignDataWrapper *fdw = GetForeignDataWrapper(fpinfo->server->fdwid);
List *options[] = { fdw->options, fpinfo->server->options };
int i;
for (i = 0; i < lengthof(options); i++)
for (size_t i = 0; i < lengthof(options); i++)
{
foreach (lc, options[i])
{

View File

@ -88,7 +88,7 @@ make_pathkey_from_compressed(PlannerInfo *root, Index compressed_relid, Expr *ex
/* Because SortGroupClause doesn't carry collation, consult the expr */
collation = exprCollation((Node *) expr);
Assert(compressed_relid < root->simple_rel_array_size);
Assert(compressed_relid < (Index) root->simple_rel_array_size);
return ts_make_pathkey_from_sortinfo(root,
expr,
NULL,

View File

@ -230,7 +230,7 @@ async_request_send_prepare(TSConnection *conn, const char *sql, int n_params)
written =
snprintf(stmt_name, stmt_name_len, "ts_prep_%u", remote_connection_get_prep_stmt_number());
if (written < 0 || written >= stmt_name_len)
if (written < 0 || (size_t) written >= stmt_name_len)
elog(ERROR, "cannot create prepared statement name");
req = async_request_create(conn, sql, stmt_name, n_params, NULL, FORMAT_TEXT);
@ -934,7 +934,7 @@ prepared_stmt_close(PreparedStmt *stmt)
ret = snprintf(sql, sizeof(sql), "DEALLOCATE %s", stmt->stmt_name);
if (ret < 0 || ret >= sizeof(sql))
if (ret < 0 || (size_t) ret >= sizeof(sql))
elog(ERROR, "could not create deallocate statement");
async_request_wait_ok_command(async_request_send(stmt->conn, sql));

View File

@ -29,8 +29,8 @@ typedef struct ConnectionCacheEntry
{
TSConnectionId id;
TSConnection *conn;
int32 foreign_server_hashvalue; /* Hash of server OID for cache invalidation */
int32 role_hashvalue; /* Hash of role OID for cache invalidation */
uint32 foreign_server_hashvalue; /* Hash of server OID for cache invalidation */
uint32 role_hashvalue; /* Hash of role OID for cache invalidation */
bool invalidated;
} ConnectionCacheEntry;

View File

@ -138,7 +138,7 @@ ts_dist_multi_cmds_params_invoke_on_data_nodes(List *cmd_descriptors, List *data
results = ts_dist_cmd_collect_responses(requests);
list_free(requests);
Assert(ts_dist_cmd_response_count(results) == list_length(data_nodes));
Assert(ts_dist_cmd_response_count(results) == (Size) list_length(data_nodes));
return results;
}
@ -301,9 +301,7 @@ ts_dist_cmd_func_call_on_data_nodes(FunctionCallInfo fcinfo, List *data_nodes)
PGresult *
ts_dist_cmd_get_result_by_node_name(DistCmdResult *response, const char *node_name)
{
int i;
for (i = 0; i < response->num_responses; ++i)
for (size_t i = 0; i < response->num_responses; ++i)
{
DistCmdResponse *resp = &response->responses[i];
@ -350,10 +348,9 @@ ts_dist_cmd_response_count(DistCmdResult *result)
long
ts_dist_cmd_total_row_count(DistCmdResult *result)
{
int i;
long num_rows = 0;
for (i = 0; i < result->num_responses; ++i)
for (size_t i = 0; i < result->num_responses; ++i)
{
DistCmdResponse *resp = &result->responses[i];

View File

@ -265,7 +265,7 @@ stmt_params_free(StmtParams *params)
MemoryContextDelete(params->mctx);
}
const int *
int *
stmt_params_formats(StmtParams *stmt_params)
{
if (stmt_params)
@ -273,7 +273,7 @@ stmt_params_formats(StmtParams *stmt_params)
return NULL;
}
const int *
int *
stmt_params_lengths(StmtParams *stmt_params)
{
if (stmt_params)
@ -289,7 +289,7 @@ stmt_params_values(StmtParams *stmt_params)
return NULL;
}
const int
int
stmt_params_num_params(StmtParams *stmt_params)
{
if (stmt_params)
@ -297,7 +297,7 @@ stmt_params_num_params(StmtParams *stmt_params)
return 0;
}
const int
int
stmt_params_total_values(StmtParams *stmt_params)
{
if (stmt_params)
@ -306,7 +306,7 @@ stmt_params_total_values(StmtParams *stmt_params)
return 0;
}
const int
int
stmt_params_converted_tuples(StmtParams *stmt_params)
{
return stmt_params->converted_tuples;

View File

@ -21,13 +21,13 @@ extern StmtParams *stmt_params_create(List *target_attr_nums, bool ctid, TupleDe
extern StmtParams *stmt_params_create_from_values(const char **param_values, int n_params);
extern void stmt_params_convert_values(StmtParams *params, TupleTableSlot *slot,
ItemPointer tupleid);
extern const int *stmt_params_formats(StmtParams *stmt_params);
extern const int *stmt_params_lengths(StmtParams *stmt_params);
extern int *stmt_params_formats(StmtParams *stmt_params);
extern int *stmt_params_lengths(StmtParams *stmt_params);
extern const char *const *stmt_params_values(StmtParams *stmt_params);
extern const int stmt_params_num_params(StmtParams *stmt_params);
extern int stmt_params_num_params(StmtParams *stmt_params);
extern void stmt_params_reset(StmtParams *params);
extern void stmt_params_free(StmtParams *params);
extern const int stmt_params_total_values(StmtParams *stmt_params);
extern const int stmt_params_converted_tuples(StmtParams *stmt_params);
extern int stmt_params_total_values(StmtParams *stmt_params);
extern int stmt_params_converted_tuples(StmtParams *stmt_params);
#endif
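
Dropping const from the scalar return types above addresses
-Wignored-qualifiers, also enabled by -Wextra: a top-level qualifier on a
by-value return type constrains nothing, because the caller only ever
receives a copy. A minimal sketch (hypothetical declarations):

/* warning: type qualifiers ignored on function return type
 * [-Wignored-qualifiers] */
const int stmt_params_count_old(void);

/* Same ABI and semantics, no warning. */
int stmt_params_count_new(void);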

View File

@ -44,7 +44,7 @@ print_result(int elevel, const char *server_name, const PGresult *pg_result)
FILE *result_stream;
File tmpfile;
char *result_text = NULL;
size_t result_text_size = 0;
int result_text_size = 0;
PQprintOpt print_opt = {
.header = 1,
.align = 1,