Silence various compiler warnings

This change fixes various compiler warnings that show up on different
compilers and platforms. In particular, MSVC warns about functions that
do not return a value after raising an error, since it does not realize
that the code path following the error is unreachable.
Erik Nordström 2020-04-27 10:01:10 +02:00 committed by Erik Nordström
parent 7d78540a22
commit 0e9461251b
14 changed files with 49 additions and 17 deletions
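
For context, the main pattern the diff below applies: elog(ERROR, ...) and
ereport(ERROR, ...) never return control to the caller, but MSVC cannot see
that, so it warns that not all control paths of the enclosing function return
a value. Adding pg_unreachable() (and, in a few spots, a break; or
return false;) right after the error report silences the warning without
changing behavior. The sketch below is a minimal, hypothetical example of the
idiom; ExampleJobType and example_job_owner() are invented for illustration,
while elog(), pg_unreachable(), GetUserId() and the headers are the real
PostgreSQL facilities.

#include <postgres.h>	/* elog(), pg_unreachable() */
#include <miscadmin.h>	/* GetUserId() */

/* Hypothetical enum and helper; the shape loosely mirrors ts_bgw_job_owner(). */
typedef enum ExampleJobType
{
	EXAMPLE_JOB_SYSTEM,
	EXAMPLE_JOB_USER
} ExampleJobType;

static Oid
example_job_owner(ExampleJobType type)
{
	switch (type)
	{
		case EXAMPLE_JOB_SYSTEM:
			return InvalidOid;
		case EXAMPLE_JOB_USER:
			return GetUserId();
	}

	/* elog(ERROR, ...) longjmps to the error handler and never returns... */
	elog(ERROR, "unknown job type %d", type);

	/*
	 * ...but MSVC does not know that, so without the marker below it warns
	 * that not all control paths return a value. In non-assert builds
	 * pg_unreachable() expands to __assume(0) on MSVC and to
	 * __builtin_unreachable() on GCC/Clang; with assertions enabled it
	 * falls back to abort(), so a path that turns out to be reachable
	 * still fails loudly instead of returning garbage.
	 */
	pg_unreachable();
}
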


@ -150,6 +150,7 @@ ts_bgw_job_owner(BgwJob *job)
break;
}
elog(ERROR, "unknown job type \"%s\" in finding owner", NameStr(job->fd.job_type));
+ pg_unreachable();
}
BackgroundWorkerHandle *


@ -369,7 +369,7 @@ hypertable_scan_limit_internal(ScanKeyData *scankey, int num_scankeys, int index
.limit = limit,
.tuple_found = on_tuple_found,
.lockmode = lock,
- .filter = filter,
+ .filter = filter,
.scandirection = ForwardScanDirection,
.result_mctx = mctx,
.tuplock = {
@ -823,20 +823,26 @@ ts_hypertable_lock_tuple_simple(Oid table_relid)
errmsg("hypertable \"%s\" has already been updated by another transaction",
get_rel_name(table_relid)),
errhint("Retry the operation again")));
+ pg_unreachable();
return false;
case TM_BeingModified:
ereport(ERROR,
(errcode(ERRCODE_LOCK_NOT_AVAILABLE),
errmsg("hypertable \"%s\" is being updated by another transaction",
get_rel_name(table_relid)),
errhint("Retry the operation again")));
+ pg_unreachable();
return false;
case TM_WouldBlock:
/* Locking would block. Let caller decide what to do */
return false;
case TM_Invisible:
elog(ERROR, "attempted to lock invisible tuple");
+ pg_unreachable();
return false;
default:
elog(ERROR, "unexpected tuple lock status");
+ pg_unreachable();
return false;
}
}


@ -12,10 +12,11 @@
#include "cache.h"
#include "hypertable.h"
- extern TSDLLEXPORT Hypertable *ts_hypertable_cache_get_entry(Cache *cache, const Oid relid,
+ extern TSDLLEXPORT Hypertable *ts_hypertable_cache_get_entry(Cache *const cache, const Oid relid,
const unsigned int flags);
- extern TSDLLEXPORT Hypertable *
- ts_hypertable_cache_get_cache_and_entry(const Oid relid, const unsigned int flags, Cache **cache);
+ extern TSDLLEXPORT Hypertable *ts_hypertable_cache_get_cache_and_entry(const Oid relid,
+ const unsigned int flags,
+ Cache **const cache);
extern Hypertable *ts_hypertable_cache_get_entry_rv(Cache *cache, const RangeVar *rv);
extern Hypertable *ts_hypertable_cache_get_entry_with_table(Cache *cache, const Oid relid,
const char *schema, const char *table,


@ -18,7 +18,11 @@
#define TIMESCALEDB_PLANNER_IMPORT_H
#include <postgres.h>
#include <nodes/execnodes.h>
#include <utils/selfuncs.h>
#include <utils/rel.h>
#include "export.h"
extern void ts_make_inh_translation_list(Relation oldrelation, Relation newrelation, Index newvarno,
List **translated_vars);


@ -18,6 +18,7 @@
#include "compat-msvc-exit.h"
#include "plan_add_hashagg.h"
#include "import/planner.h"
#include "utils.h"
#include "guc.h"
#include "estimate.h"


@ -2697,6 +2697,7 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("hypertables do not support logical replication")));
+ break;
case AT_EnableRule:
case AT_EnableAlwaysRule:
case AT_EnableReplicaRule:
@ -2975,13 +2976,11 @@ process_altertable_reset_options(AlterTableCmd *cmd, Hypertable *ht)
inpdef = (List *) cmd->def;
ts_with_clause_filter(inpdef, &compress_options, &pg_options);
if (compress_options)
- {
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("compression options cannot be reset")));
- }
- else
- return false;
+ return false;
}
static bool


@ -6,14 +6,16 @@ set(_install_checks)
# Testing support
find_program(PG_REGRESS pg_regress
HINTS
"${PG_BINDIR}"
"${PG_PKGLIBDIR}/pgxs/src/test/regress/")
find_program(PG_ISOLATION_REGRESS
NAMES pg_isolation_regress
HINTS
- ${BINDIR}
${PG_BINDIR}
${PG_PKGLIBDIR}/pgxs/src/test/isolation
- ${PG_SOURCE_DIR}/src/test/isolation)
+ ${PG_SOURCE_DIR}/src/test/isolation
+ ${BINDIR})
include(test-defs.cmake)


@ -571,6 +571,7 @@ create_compress_chunk_table(Hypertable *compress_ht, Chunk *src_chunk)
Catalog *catalog = ts_catalog_get();
CatalogSecurityContext sec_ctx;
Chunk *compress_chunk;
+ int namelen;
/* Create a new chunk based on the hypercube */
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
@ -583,13 +584,20 @@ create_compress_chunk_table(Hypertable *compress_ht, Chunk *src_chunk)
compress_chunk->hypertable_relid = compress_ht->main_table_relid;
compress_chunk->constraints = ts_chunk_constraints_alloc(1, CurrentMemoryContext);
namestrcpy(&compress_chunk->fd.schema_name, INTERNAL_SCHEMA_NAME);
- snprintf(compress_chunk->fd.table_name.data,
- NAMEDATALEN,
- "compress%s_%d_chunk",
- NameStr(compress_ht->fd.associated_table_prefix),
- compress_chunk->fd.id);
- ;
+ /* Fail if we overflow the name limit */
+ namelen = snprintf(NameStr(compress_chunk->fd.table_name),
+ NAMEDATALEN,
+ "compress%s_%d_chunk",
+ NameStr(compress_ht->fd.associated_table_prefix),
+ compress_chunk->fd.id);
+ if (namelen >= NAMEDATALEN)
+ ereport(ERROR,
+ (errcode(ERRCODE_INTERNAL_ERROR),
+ errmsg("invalid name \"%s\" for compressed chunk",
+ NameStr(compress_chunk->fd.table_name)),
+ errdetail("The associated table prefix is too long.")));
/* Insert chunk */
ts_chunk_insert_lock(compress_chunk, RowExclusiveLock);


@ -239,6 +239,8 @@ delta_delta_compressor_for_type(Oid element_type)
default:
elog(ERROR, "invalid type for delta-delta compressor %d", element_type);
}
+ pg_unreachable();
}
Datum
@ -509,6 +511,8 @@ convert_from_internal(DecompressResultInternal res_internal, Oid element_type)
default:
elog(ERROR, "invalid type requested from deltadelta decompression %d", element_type);
}
+ pg_unreachable();
}
static DecompressResultInternal


@ -327,6 +327,7 @@ gorilla_compressor_for_type(Oid element_type)
default:
elog(ERROR, "invalid type for Gorilla compression %d", element_type);
}
+ pg_unreachable();
}
GorillaCompressor *
@ -652,6 +653,7 @@ convert_from_internal(DecompressResultInternal res_internal, Oid element_type)
default:
elog(ERROR, "invalid type requested from gorilla decompression");
}
+ pg_unreachable();
}
static DecompressResultInternal


@ -786,6 +786,8 @@ simple8brle_block_get_element(Simple8bRleBlock block, uint32 position_in_value)
compressed_value &= simple8brle_selector_get_bitmask(block.selector);
return compressed_value;
}
+ pg_unreachable();
}
/***************************


@ -29,6 +29,7 @@
#endif
#include "hypertable_compression.h"
#include "import/planner.h"
#include "compression/create.h"
#include "nodes/decompress_chunk/decompress_chunk.h"
#include "nodes/decompress_chunk/planner.h"


@ -487,6 +487,7 @@ infer_gapfill_boundary(GapFillState *state, GapFillBoundary boundary)
"clause",
boundary == GAPFILL_START ? "start" : "finish"),
errhint("You can either pass start and finish as arguments or in the WHERE clause")));
+ pg_unreachable();
}
static Const *


@ -499,7 +499,7 @@ copy_heap_data(Oid OIDNewHeap, Oid OIDOldHeap, Oid OIDOldIndex, bool verbose,
Relation relRelation;
HeapTuple reltup;
Form_pg_class relform;
- TupleDesc oldTupDesc;
+ TupleDesc PG_USED_FOR_ASSERTS_ONLY oldTupDesc;
TupleDesc newTupDesc;
int natts;
Datum *values;