mirror of https://github.com/apple/foundationdb.git synced 2025-05-31 10:14:52 +08:00

Fix more -Wreorder-ctor warnings across many files

sfc-gh-tclinkenbeard 2021-07-24 11:20:51 -07:00
parent e006e4fed4
commit 3442ebd3b7
26 changed files with 142 additions and 141 deletions
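
Background: Clang's -Wreorder-ctor warns when a constructor's member initializer list is written in a different order than the class declares its members, because C++ always initializes members in declaration order no matter how the list is spelled; a mismatched list can silently read a not-yet-initialized member. A minimal sketch of the warning and the style of fix applied throughout this commit (hypothetical example, not code from this diff):

    struct Example {
        int a;
        int b;

        // warning: field 'b' will be initialized after field 'a' [-Wreorder-ctor]
        // Example() : b(0), a(b + 1) {} // 'a' actually reads 'b' while 'b' is still uninitialized

        Example() : a(1), b(0) {} // fixed: initializer order matches declaration order
    };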

@@ -31,7 +31,8 @@ namespace {
class BackupFile : public IBackupFile, ReferenceCounted<BackupFile> {
public:
BackupFile(const std::string& fileName, Reference<IAsyncFile> file, const std::string& finalFullPath)
-: IBackupFile(fileName), m_file(file), m_finalFullPath(finalFullPath), m_writeOffset(0), m_blockSize(CLIENT_KNOBS->BACKUP_LOCAL_FILE_WRITE_BLOCK) {
+: IBackupFile(fileName), m_file(file), m_writeOffset(0), m_finalFullPath(finalFullPath),
+m_blockSize(CLIENT_KNOBS->BACKUP_LOCAL_FILE_WRITE_BLOCK) {
if (BUGGIFY) {
m_blockSize = deterministicRandom()->randomInt(100, 20000);
}

@@ -44,28 +44,29 @@ const Key DatabaseBackupAgent::keyDatabasesInSync = LiteralStringRef("databases_
const int DatabaseBackupAgent::LATEST_DR_VERSION = 1;
DatabaseBackupAgent::DatabaseBackupAgent()
-: subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
-states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
-errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
+: subspace(Subspace(databaseBackupPrefixRange.begin)), states(subspace.get(BackupAgentBase::keyStates)),
+config(subspace.get(BackupAgentBase::keyConfig)), errors(subspace.get(BackupAgentBase::keyErrors)),
+ranges(subspace.get(BackupAgentBase::keyRanges)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
+sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
+sourceTagNames(subspace.get(BackupAgentBase::keyTagName)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
AccessSystemKeys::True,
PriorityBatch::False,
LockAware::True)),
-futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
-sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
-sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {}
+futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)) {
+}
DatabaseBackupAgent::DatabaseBackupAgent(Database src)
-: subspace(Subspace(databaseBackupPrefixRange.begin)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
-states(subspace.get(BackupAgentBase::keyStates)), config(subspace.get(BackupAgentBase::keyConfig)),
-errors(subspace.get(BackupAgentBase::keyErrors)), ranges(subspace.get(BackupAgentBase::keyRanges)),
+: subspace(Subspace(databaseBackupPrefixRange.begin)), states(subspace.get(BackupAgentBase::keyStates)),
+config(subspace.get(BackupAgentBase::keyConfig)), errors(subspace.get(BackupAgentBase::keyErrors)),
+ranges(subspace.get(BackupAgentBase::keyRanges)), tagNames(subspace.get(BackupAgentBase::keyTagName)),
+sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
+sourceTagNames(subspace.get(BackupAgentBase::keyTagName)),
taskBucket(new TaskBucket(subspace.get(BackupAgentBase::keyTasks),
AccessSystemKeys::True,
PriorityBatch::False,
LockAware::True)),
-futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)),
-sourceStates(subspace.get(BackupAgentBase::keySourceStates)),
-sourceTagNames(subspace.get(BackupAgentBase::keyTagName)) {
+futureBucket(new FutureBucket(subspace.get(BackupAgentBase::keyFutures), AccessSystemKeys::True, LockAware::True)) {
taskBucket->src = src;
}

@@ -396,7 +396,7 @@ void loadClientFunction(T* fp, void* lib, std::string libPath, const char* funct
}
DLApi::DLApi(std::string fdbCPath, bool unlinkOnLoad)
-: api(new FdbCApi()), fdbCPath(fdbCPath), unlinkOnLoad(unlinkOnLoad), networkSetup(false) {}
+: fdbCPath(fdbCPath), api(new FdbCApi()), unlinkOnLoad(unlinkOnLoad), networkSetup(false) {}
// Loads client API functions (definitions are in FdbCApi struct)
void DLApi::init() {
@@ -993,8 +993,8 @@ ThreadFuture<ProtocolVersion> MultiVersionDatabase::getServerProtocol(Optional<P
}
MultiVersionDatabase::DatabaseState::DatabaseState(std::string clusterFilePath, Reference<IDatabase> versionMonitorDb)
-: clusterFilePath(clusterFilePath), versionMonitorDb(versionMonitorDb),
-dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))) {}
+: dbVar(new ThreadSafeAsyncVar<Reference<IDatabase>>(Reference<IDatabase>(nullptr))),
+clusterFilePath(clusterFilePath), versionMonitorDb(versionMonitorDb) {}
// Adds a client (local or externally loaded) that can be used to connect to the cluster
void MultiVersionDatabase::DatabaseState::addClient(Reference<ClientInfo> client) {
@@ -1855,8 +1855,8 @@ void MultiVersionApi::loadEnvironmentVariableNetworkOptions() {
}
MultiVersionApi::MultiVersionApi()
-: bypassMultiClientApi(false), networkStartSetup(false), networkSetup(false), callbackOnMainThread(true),
-externalClient(false), localClientDisabled(false), apiVersion(0), envOptionsLoaded(false), threadCount(0) {}
+: callbackOnMainThread(true), localClientDisabled(false), networkStartSetup(false), networkSetup(false),
+bypassMultiClientApi(false), externalClient(false), apiVersion(0), threadCount(0), envOptionsLoaded(false) {}
MultiVersionApi* MultiVersionApi::api = new MultiVersionApi();

@@ -248,8 +248,9 @@ ACTOR Future<Void> normalizeKeySelectorActor(SpecialKeySpace* sks,
}
SpecialKeySpace::SpecialKeySpace(KeyRef spaceStartKey, KeyRef spaceEndKey, bool testOnly)
-: range(KeyRangeRef(spaceStartKey, spaceEndKey)), readImpls(nullptr, spaceEndKey), writeImpls(nullptr, spaceEndKey),
-modules(testOnly ? SpecialKeySpace::MODULE::TESTONLY : SpecialKeySpace::MODULE::UNKNOWN, spaceEndKey) {
+: readImpls(nullptr, spaceEndKey),
+modules(testOnly ? SpecialKeySpace::MODULE::TESTONLY : SpecialKeySpace::MODULE::UNKNOWN, spaceEndKey),
+writeImpls(nullptr, spaceEndKey), range(KeyRangeRef(spaceStartKey, spaceEndKey)) {
// Default begin of KeyRangeMap is Key(), insert the range to update start key
readImpls.insert(range, nullptr);
writeImpls.insert(range, nullptr);

@@ -873,13 +873,14 @@ TaskBucket::TaskBucket(const Subspace& subspace,
AccessSystemKeys sysAccess,
PriorityBatch priorityBatch,
LockAware lockAware)
-: prefix(subspace), active(prefix.get(LiteralStringRef("ac"))), available(prefix.get(LiteralStringRef("av"))),
+: cc("TaskBucket"), dbgid(deterministicRandom()->randomUniqueID()),
+dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc), dispatchErrors("DispatchErrors", cc),
+dispatchDoTasks("DispatchDoTasks", cc), dispatchEmptyTasks("DispatchEmptyTasks", cc),
+dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc), prefix(subspace),
+active(prefix.get(LiteralStringRef("ac"))), available(prefix.get(LiteralStringRef("av"))),
available_prioritized(prefix.get(LiteralStringRef("avp"))), timeouts(prefix.get(LiteralStringRef("to"))),
-pauseKey(prefix.pack(LiteralStringRef("pause"))), timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS),
-system_access(sysAccess), priority_batch(priorityBatch), lockAware(lockAware), cc("TaskBucket"),
-dbgid(deterministicRandom()->randomUniqueID()), dispatchSlotChecksStarted("DispatchSlotChecksStarted", cc),
-dispatchErrors("DispatchErrors", cc), dispatchDoTasks("DispatchDoTasks", cc),
-dispatchEmptyTasks("DispatchEmptyTasks", cc), dispatchSlotChecksComplete("DispatchSlotChecksComplete", cc) {}
+timeout(CLIENT_KNOBS->TASKBUCKET_TIMEOUT_VERSIONS), pauseKey(prefix.pack(LiteralStringRef("pause"))),
+system_access(sysAccess), priority_batch(priorityBatch), lockAware(lockAware) {}
TaskBucket::~TaskBucket() {}

@@ -340,9 +340,8 @@ ACTOR Future<Void> pingLatencyLogger(TransportData* self) {
}
TransportData::TransportData(uint64_t transportId)
-: endpoints(WLTOKEN_COUNTS), endpointNotFoundReceiver(endpoints), pingReceiver(endpoints),
-warnAlwaysForLargePacket(true), lastIncompatibleMessage(0), transportId(transportId),
-numIncompatibleConnections(0) {
+: warnAlwaysForLargePacket(true), endpoints(WLTOKEN_COUNTS), endpointNotFoundReceiver(endpoints),
+pingReceiver(endpoints), numIncompatibleConnections(0), lastIncompatibleMessage(0), transportId(transportId) {
degraded = makeReference<AsyncVar<bool>>(false);
pingLogger = pingLatencyLogger(this);
}
@@ -795,13 +794,14 @@ ACTOR Future<Void> connectionKeeper(Reference<Peer> self,
}
Peer::Peer(TransportData* transport, NetworkAddress const& destination)
-: transport(transport), destination(destination), outgoingConnectionIdle(true), lastConnectTime(0.0),
-reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), compatible(true), outstandingReplies(0),
-incompatibleProtocolVersionNewer(false), peerReferences(-1), bytesReceived(0), lastDataPacketSentTime(now()),
-pingLatencies(destination.isPublic() ? FLOW_KNOBS->PING_SAMPLE_AMOUNT : 1), lastLoggedBytesReceived(0),
-bytesSent(0), lastLoggedBytesSent(0), timeoutCount(0), lastLoggedTime(0.0), connectOutgoingCount(0), connectIncomingCount(0),
-connectFailedCount(0), connectLatencies(destination.isPublic() ? FLOW_KNOBS->NETWORK_CONNECT_SAMPLE_AMOUNT : 1),
-protocolVersion(Reference<AsyncVar<Optional<ProtocolVersion>>>(new AsyncVar<Optional<ProtocolVersion>>())) {
+: transport(transport), destination(destination), compatible(true), outgoingConnectionIdle(true),
+lastConnectTime(0.0), reconnectionDelay(FLOW_KNOBS->INITIAL_RECONNECTION_TIME), peerReferences(-1),
+outstandingReplies(0), pingLatencies(destination.isPublic() ? FLOW_KNOBS->PING_SAMPLE_AMOUNT : 1),
+lastLoggedTime(0.0), lastLoggedBytesReceived(0), lastLoggedBytesSent(0), timeoutCount(0),
+incompatibleProtocolVersionNewer(false), bytesReceived(0), bytesSent(0), lastDataPacketSentTime(now()),
+protocolVersion(Reference<AsyncVar<Optional<ProtocolVersion>>>(new AsyncVar<Optional<ProtocolVersion>>())),
+connectOutgoingCount(0), connectIncomingCount(0), connectFailedCount(0),
+connectLatencies(destination.isPublic() ? FLOW_KNOBS->NETWORK_CONNECT_SAMPLE_AMOUNT : 1) {
IFailureMonitor::failureMonitor().setStatus(destination, FailureStatus(false));
}

@@ -241,8 +241,8 @@ struct BackupData {
: myId(id), tag(req.routerTag), totalTags(req.totalTags), startVersion(req.startVersion),
endVersion(req.endVersion), recruitedEpoch(req.recruitedEpoch), backupEpoch(req.backupEpoch),
minKnownCommittedVersion(invalidVersion), savedVersion(req.startVersion - 1), popVersion(req.startVersion - 1),
-cc("BackupWorker", myId.toString()), pulledVersion(0), paused(false),
-lock(new FlowLock(SERVER_KNOBS->BACKUP_LOCK_BYTES)) {
+pulledVersion(0), paused(false), lock(new FlowLock(SERVER_KNOBS->BACKUP_LOCK_BYTES)),
+cc("BackupWorker", myId.toString()) {
cx = openDBOnServer(db, TaskPriority::DefaultEndpoint, LockAware::True);
specialCounter(cc, "SavedVersion", [this]() { return this->savedVersion; });

@@ -128,15 +128,15 @@ public:
std::map<NetworkAddress, std::pair<double, OpenDatabaseRequest>> clientStatus;
DBInfo()
-: masterRegistrationCount(0), recoveryStalled(false), forceRecovery(false), unfinishedRecoveries(0),
-logGenerations(0), cachePopulated(false), clientInfo(new AsyncVar<ClientDBInfo>()), dbInfoCount(0),
-serverInfo(new AsyncVar<ServerDBInfo>()), db(DatabaseContext::create(clientInfo,
-Future<Void>(),
-LocalityData(),
-EnableLocalityLoadBalance::True,
-TaskPriority::DefaultEndpoint,
-LockAware::True)) // SOMEDAY: Locality!
-{}
+: clientInfo(new AsyncVar<ClientDBInfo>()), serverInfo(new AsyncVar<ServerDBInfo>()),
+masterRegistrationCount(0), dbInfoCount(0), recoveryStalled(false), forceRecovery(false),
+db(DatabaseContext::create(clientInfo,
+Future<Void>(),
+LocalityData(),
+EnableLocalityLoadBalance::True,
+TaskPriority::DefaultEndpoint,
+LockAware::True)), // SOMEDAY: Locality!
+unfinishedRecoveries(0), logGenerations(0), cachePopulated(false) {}
void setDistributor(const DataDistributorInterface& interf) {
auto newInfo = serverInfo->get();
@@ -1431,12 +1431,12 @@ public:
bool degraded = false;
RoleFitness(int bestFit, int worstFit, int count, ProcessClass::ClusterRole role)
-: bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), count(count),
-role(role) {}
+: bestFit((ProcessClass::Fitness)bestFit), worstFit((ProcessClass::Fitness)worstFit), role(role),
+count(count) {}
RoleFitness(int fitness, int count, ProcessClass::ClusterRole role)
-: bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), count(count),
-role(role) {}
+: bestFit((ProcessClass::Fitness)fitness), worstFit((ProcessClass::Fitness)fitness), role(role),
+count(count) {}
RoleFitness()
: bestFit(ProcessClass::NeverAssign), worstFit(ProcessClass::NeverAssign), role(ProcessClass::NoRole),
@@ -3059,9 +3059,9 @@ public:
ClusterControllerData(ClusterControllerFullInterface const& ccInterface,
LocalityData const& locality,
ServerCoordinators const& coordinators)
-: clusterControllerProcessId(locality.processId()), clusterControllerDcId(locality.dcId()), id(ccInterface.id()),
-ac(false), outstandingRequestChecker(Void()), outstandingRemoteRequestChecker(Void()), gotProcessClasses(false),
-gotFullyRecoveredConfig(false), startTime(now()), goodRecruitmentTime(Never()),
+: gotProcessClasses(false), gotFullyRecoveredConfig(false), clusterControllerProcessId(locality.processId()),
+clusterControllerDcId(locality.dcId()), id(ccInterface.id()), ac(false), outstandingRequestChecker(Void()),
+outstandingRemoteRequestChecker(Void()), startTime(now()), goodRecruitmentTime(Never()),
goodRemoteRecruitmentTime(Never()), datacenterVersionDifference(0), versionDifferenceUpdated(false),
recruitingDistributor(false), recruitRatekeeper(false),
clusterControllerMetrics("ClusterController", id.toString()),

@@ -203,7 +203,7 @@ class ConfigBroadcasterImpl {
}
ConfigBroadcasterImpl()
-: id(deterministicRandom()->randomUniqueID()), lastCompactedVersion(0), mostRecentVersion(0),
+: mostRecentVersion(0), lastCompactedVersion(0), id(deterministicRandom()->randomUniqueID()),
cc("ConfigBroadcaster"), compactRequest("CompactRequest", cc),
successfulChangeRequest("SuccessfulChangeRequest", cc), failedChangeRequest("FailedChangeRequest", cc),
snapshotRequest("SnapshotRequest", cc) {

@@ -241,8 +241,8 @@ class BroadcasterToLocalConfigEnvironment {
public:
BroadcasterToLocalConfigEnvironment(std::string const& dataDir, std::string const& configPath)
-: broadcaster(ConfigFollowerInterface{}), cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()),
-readFrom(dataDir, configPath, {}) {}
+: readFrom(dataDir, configPath, {}), cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()),
+broadcaster(ConfigFollowerInterface{}) {}
Future<Void> setup() { return setup(this); }
@@ -371,8 +371,9 @@ class TransactionToLocalConfigEnvironment {
public:
TransactionToLocalConfigEnvironment(std::string const& dataDir, std::string const& configPath)
-: writeTo(dataDir), readFrom(dataDir, configPath, {}), broadcaster(writeTo.getFollowerInterface()),
-cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()) {}
+: writeTo(dataDir), readFrom(dataDir, configPath, {}),
+cbfi(makeReference<AsyncVar<ConfigBroadcastFollowerInterface>>()), broadcaster(writeTo.getFollowerInterface()) {
+}
Future<Void> setup() { return setup(this); }

@@ -50,7 +50,7 @@ struct RelocateData {
TraceInterval interval;
RelocateData()
-: startTime(-1), priority(-1), boundaryPriority(-1), healthPriority(-1), workFactor(0), wantsNewServers(false),
+: priority(-1), boundaryPriority(-1), healthPriority(-1), startTime(-1), workFactor(0), wantsNewServers(false),
interval("QueuedRelocation") {}
explicit RelocateData(RelocateShard const& rs)
: keys(rs.keys), priority(rs.priority), boundaryPriority(isBoundaryPriority(rs.priority) ? rs.priority : -1),
@@ -448,14 +448,14 @@ struct DDQueueData {
FutureStream<RelocateShard> input,
PromiseStream<GetMetricsRequest> getShardMetrics,
double* lastLimited)
-: activeRelocations(0), queuedRelocations(0), bytesWritten(0), teamCollections(teamCollections),
-shardsAffectedByTeamFailure(sABTF), getAverageShardBytes(getAverageShardBytes), distributorId(mid), lock(lock),
-cx(cx), teamSize(teamSize), singleRegionTeamSize(singleRegionTeamSize), output(output), input(input),
-getShardMetrics(getShardMetrics), startMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
+: distributorId(mid), lock(lock), cx(cx), teamCollections(teamCollections), shardsAffectedByTeamFailure(sABTF),
+getAverageShardBytes(getAverageShardBytes),
+startMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
finishMoveKeysParallelismLock(SERVER_KNOBS->DD_MOVE_KEYS_PARALLELISM),
-fetchSourceLock(new FlowLock(SERVER_KNOBS->DD_FETCH_SOURCE_PARALLELISM)), lastLimited(lastLimited),
-suppressIntervals(0), lastInterval(0), unhealthyRelocations(0),
-rawProcessingUnhealthy(new AsyncVar<bool>(false)) {}
+fetchSourceLock(new FlowLock(SERVER_KNOBS->DD_FETCH_SOURCE_PARALLELISM)), activeRelocations(0),
+queuedRelocations(0), bytesWritten(0), teamSize(teamSize), singleRegionTeamSize(singleRegionTeamSize),
+output(output), input(input), getShardMetrics(getShardMetrics), lastLimited(lastLimited), lastInterval(0),
+suppressIntervals(0), rawProcessingUnhealthy(new AsyncVar<bool>(false)), unhealthyRelocations(0) {}
void validate() {
if (EXPENSIVE_VALIDATION) {

@@ -123,10 +123,10 @@ struct DataDistributionTracker {
Reference<AsyncVar<bool>> anyZeroHealthyTeams,
KeyRangeMap<ShardTrackedData>& shards,
bool& trackerCancelled)
-: cx(cx), distributorId(distributorId), dbSizeEstimate(new AsyncVar<int64_t>()), systemSizeEstimate(0),
-maxShardSize(new AsyncVar<Optional<int64_t>>()), sizeChanges(false), readyToStart(readyToStart), output(output),
-shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), anyZeroHealthyTeams(anyZeroHealthyTeams),
-shards(shards), trackerCancelled(trackerCancelled) {}
+: cx(cx), distributorId(distributorId), shards(shards), sizeChanges(false), systemSizeEstimate(0),
+dbSizeEstimate(new AsyncVar<int64_t>()), maxShardSize(new AsyncVar<Optional<int64_t>>()), output(output),
+shardsAffectedByTeamFailure(shardsAffectedByTeamFailure), readyToStart(readyToStart),
+anyZeroHealthyTeams(anyZeroHealthyTeams), trackerCancelled(trackerCancelled) {}
~DataDistributionTracker() {
trackerCancelled = true;

@@ -168,11 +168,11 @@ private:
class RawDiskQueue_TwoFiles : public Tracked<RawDiskQueue_TwoFiles> {
public:
RawDiskQueue_TwoFiles(std::string basename, std::string fileExtension, UID dbgid, int64_t fileSizeWarningLimit)
-: basename(basename), fileExtension(fileExtension), onError(delayed(error.getFuture())),
-onStopped(stopped.getFuture()), readingFile(-1), readingPage(-1), writingPos(-1), dbgid(dbgid),
-dbg_file0BeginSeq(0), fileExtensionBytes(SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES),
-fileShrinkBytes(SERVER_KNOBS->DISK_QUEUE_FILE_SHRINK_BYTES), readingBuffer(dbgid), readyToPush(Void()),
-fileSizeWarningLimit(fileSizeWarningLimit), lastCommit(Void()), isFirstCommit(true) {
+: basename(basename), fileExtension(fileExtension), dbgid(dbgid), dbg_file0BeginSeq(0),
+fileSizeWarningLimit(fileSizeWarningLimit), onError(delayed(error.getFuture())), onStopped(stopped.getFuture()),
+readyToPush(Void()), lastCommit(Void()), isFirstCommit(true), readingBuffer(dbgid), readingFile(-1),
+readingPage(-1), writingPos(-1), fileExtensionBytes(SERVER_KNOBS->DISK_QUEUE_FILE_EXTENSION_BYTES),
+fileShrinkBytes(SERVER_KNOBS->DISK_QUEUE_FILE_SHRINK_BYTES) {
if (BUGGIFY)
fileExtensionBytes = _PAGE_SIZE * deterministicRandom()->randomSkewedUInt32(1, 10 << 10);
if (BUGGIFY)
@@ -878,9 +878,9 @@ public:
DiskQueueVersion diskQueueVersion,
int64_t fileSizeWarningLimit)
: rawQueue(new RawDiskQueue_TwoFiles(basename, fileExtension, dbgid, fileSizeWarningLimit)), dbgid(dbgid),
-diskQueueVersion(diskQueueVersion), anyPopped(false), nextPageSeq(0), poppedSeq(0), lastPoppedSeq(0),
-nextReadLocation(-1), readBufPage(nullptr), readBufPos(0), pushed_page_buffer(nullptr), recovered(false),
-initialized(false), lastCommittedSeq(-1), warnAlwaysForMemory(true) {}
+diskQueueVersion(diskQueueVersion), anyPopped(false), warnAlwaysForMemory(true), nextPageSeq(0), poppedSeq(0),
+lastPoppedSeq(0), lastCommittedSeq(-1), pushed_page_buffer(nullptr), recovered(false), initialized(false),
+nextReadLocation(-1), readBufPage(nullptr), readBufPos(0) {}
location push(StringRef contents) override {
ASSERT(recovered);

@@ -681,7 +681,7 @@ struct SQLiteTransaction {
struct IntKeyCursor {
SQLiteDB& db;
BtCursor* cursor;
-IntKeyCursor(SQLiteDB& db, int table, bool write) : cursor(0), db(db) {
+IntKeyCursor(SQLiteDB& db, int table, bool write) : db(db), cursor(nullptr) {
cursor = (BtCursor*)new char[sqlite3BtreeCursorSize()];
sqlite3BtreeCursorZero(cursor);
db.checkError("BtreeCursor", sqlite3BtreeCursor(db.btree, table, write, nullptr, cursor));
@@ -705,7 +705,7 @@ struct RawCursor {
operator bool() const { return valid; }
-RawCursor(SQLiteDB& db, int table, bool write) : cursor(0), db(db), valid(false) {
+RawCursor(SQLiteDB& db, int table, bool write) : db(db), cursor(nullptr), valid(false) {
keyInfo.db = db.db;
keyInfo.enc = db.db->aDb[0].pSchema->enc;
keyInfo.aColl[0] = db.db->pDfltColl;
@@ -1732,9 +1732,9 @@ private:
volatile int64_t& freeListPages,
UID dbgid,
vector<Reference<ReadCursor>>* pReadThreads)
-: kvs(kvs), conn(kvs->filename, isBtreeV2, isBtreeV2), commits(), setsThisCommit(), freeTableEmpty(false),
-writesComplete(writesComplete), springCleaningStats(springCleaningStats), diskBytesUsed(diskBytesUsed),
-freeListPages(freeListPages), cursor(nullptr), dbgid(dbgid), readThreads(*pReadThreads),
+: kvs(kvs), conn(kvs->filename, isBtreeV2, isBtreeV2), cursor(nullptr), commits(), setsThisCommit(),
+freeTableEmpty(false), writesComplete(writesComplete), springCleaningStats(springCleaningStats),
+diskBytesUsed(diskBytesUsed), freeListPages(freeListPages), dbgid(dbgid), readThreads(*pReadThreads),
checkAllChecksumsOnOpen(checkAllChecksumsOnOpen), checkIntegrityOnOpen(checkIntegrityOnOpen) {}
~Writer() override {
TraceEvent("KVWriterDestroying", dbgid);
@@ -2109,7 +2109,7 @@ KeyValueStoreSQLite::KeyValueStoreSQLite(std::string const& filename,
KeyValueStoreType storeType,
bool checkChecksums,
bool checkIntegrity)
-: type(storeType), filename(filename), logID(id), readThreads(CoroThreadPool::createThreadPool()),
+: type(storeType), logID(id), filename(filename), readThreads(CoroThreadPool::createThreadPool()),
writeThread(CoroThreadPool::createThreadPool()), readsRequested(0), writesRequested(0), writesComplete(0),
diskBytesUsed(0), freeListPages(0) {
TraceEvent(SevDebug, "KeyValueStoreSQLiteCreate").detail("Filename", filename);

@@ -326,9 +326,9 @@ public:
std::string const& configPath,
std::map<std::string, std::string> const& manualKnobOverrides,
IsTest isTest)
-: id(deterministicRandom()->randomUniqueID()), kvStore(dataFolder, id, "localconf-"), cc("LocalConfiguration"),
-broadcasterChanges("BroadcasterChanges", cc), snapshots("Snapshots", cc),
-changeRequestsFetched("ChangeRequestsFetched", cc), mutations("Mutations", cc), configKnobOverrides(configPath),
+: id(deterministicRandom()->randomUniqueID()), kvStore(dataFolder, id, "localconf-"),
+configKnobOverrides(configPath), cc("LocalConfiguration"), broadcasterChanges("BroadcasterChanges", cc),
+snapshots("Snapshots", cc), changeRequestsFetched("ChangeRequestsFetched", cc), mutations("Mutations", cc),
manualKnobOverrides(manualKnobOverrides) {
if (isTest) {
testKnobCollection =

@@ -29,7 +29,7 @@
struct MetricsRule {
MetricsRule(bool enabled = false, int minLevel = 0, StringRef const& name = StringRef())
-: enabled(enabled), minLevel(minLevel), namePattern(name) {}
+: namePattern(name), enabled(enabled), minLevel(minLevel) {}
Standalone<StringRef> typePattern;
Standalone<StringRef> namePattern;

@@ -515,8 +515,7 @@ struct RatekeeperLimits {
int64_t logSpringBytes,
double maxVersionDifference,
int64_t durabilityLagTargetVersions)
-: priority(priority), tpsLimit(std::numeric_limits<double>::infinity()),
-tpsLimitMetric(StringRef("Ratekeeper.TPSLimit" + context)),
+: tpsLimit(std::numeric_limits<double>::infinity()), tpsLimitMetric(StringRef("Ratekeeper.TPSLimit" + context)),
reasonMetric(StringRef("Ratekeeper.Reason" + context)), storageTargetBytes(storageTargetBytes),
storageSpringBytes(storageSpringBytes), logTargetBytes(logTargetBytes), logSpringBytes(logSpringBytes),
maxVersionDifference(maxVersionDifference),
@@ -524,7 +523,8 @@ struct RatekeeperLimits {
durabilityLagTargetVersions +
SERVER_KNOBS->MAX_READ_TRANSACTION_LIFE_VERSIONS), // The read transaction life versions are expected to not
// be durable on the storage servers
-durabilityLagLimit(std::numeric_limits<double>::infinity()), lastDurabilityLag(0), context(context) {}
+lastDurabilityLag(0), durabilityLagLimit(std::numeric_limits<double>::infinity()), priority(priority),
+context(context) {}
};
struct GrvProxyInfo {
@@ -536,7 +536,7 @@ struct GrvProxyInfo {
double lastTagPushTime;
GrvProxyInfo()
-: totalTransactions(0), batchTransactions(0), lastUpdateTime(0), lastThrottledTagChangeId(0), lastTagPushTime(0) {
+: totalTransactions(0), batchTransactions(0), lastThrottledTagChangeId(0), lastUpdateTime(0), lastTagPushTime(0) {
}
};
@@ -577,7 +577,7 @@ struct RatekeeperData {
smoothBatchReleasedTransactions(SERVER_KNOBS->SMOOTHING_AMOUNT),
smoothTotalDurableBytes(SERVER_KNOBS->SLOW_SMOOTHING_AMOUNT),
actualTpsMetric(LiteralStringRef("Ratekeeper.ActualTPS")), lastWarning(0), lastSSListFetchedTimestamp(now()),
-throttledTagChangeId(0), lastBusiestCommitTagPick(0),
+lastBusiestCommitTagPick(0), throttledTagChangeId(0),
normalLimits(TransactionPriority::DEFAULT,
"",
SERVER_KNOBS->TARGET_BYTES_PER_STORAGE_SERVER,

@@ -92,7 +92,7 @@ struct KeyInfo {
KeyInfo() = default;
KeyInfo(StringRef key, bool begin, bool write, int transaction, int* pIndex)
-: key(key), begin(begin), write(write), transaction(transaction), pIndex(pIndex) {}
+: key(key), pIndex(pIndex), begin(begin), write(write), transaction(transaction) {}
};
force_inline int extra_ordering(const KeyInfo& ki) {
@@ -343,7 +343,7 @@ public:
StringRef value;
Finger() = default;
-Finger(Node* header, const StringRef& ptr) : value(ptr), x(header) {}
+Finger(Node* header, const StringRef& ptr) : x(header), value(ptr) {}
void init(const StringRef& value, Node* header) {
this->value = value;

@@ -224,14 +224,14 @@ public:
// LatencyBands readLatencyBands;
Counters(StorageCacheData* self)
-: cc("StorageCacheServer", self->thisServerID.toString()), getKeyQueries("GetKeyQueries", cc),
-getValueQueries("GetValueQueries", cc), getRangeQueries("GetRangeQueries", cc),
-allQueries("QueryQueue", cc), finishedQueries("FinishedQueries", cc), rowsQueried("RowsQueried", cc),
-bytesQueried("BytesQueried", cc), bytesInput("BytesInput", cc), bytesFetched("BytesFetched", cc),
-mutationBytes("MutationBytes", cc), mutations("Mutations", cc), setMutations("SetMutations", cc),
-clearRangeMutations("ClearRangeMutations", cc), atomicMutations("AtomicMutations", cc),
-updateBatches("UpdateBatches", cc), updateVersions("UpdateVersions", cc), loops("Loops", cc),
-readsRejected("ReadsRejected", cc) {
+: cc("StorageCacheServer", self->thisServerID.toString()), allQueries("QueryQueue", cc),
+getKeyQueries("GetKeyQueries", cc), getValueQueries("GetValueQueries", cc),
+getRangeQueries("GetRangeQueries", cc), finishedQueries("FinishedQueries", cc),
+rowsQueried("RowsQueried", cc), bytesQueried("BytesQueried", cc), bytesInput("BytesInput", cc),
+bytesFetched("BytesFetched", cc), mutationBytes("MutationBytes", cc), mutations("Mutations", cc),
+setMutations("SetMutations", cc), clearRangeMutations("ClearRangeMutations", cc),
+atomicMutations("AtomicMutations", cc), updateBatches("UpdateBatches", cc),
+updateVersions("UpdateVersions", cc), loops("Loops", cc), readsRejected("ReadsRejected", cc) {
specialCounter(cc, "LastTLogVersion", [self]() { return self->lastTLogVersion; });
specialCounter(cc, "Version", [self]() { return self->version.get(); });
specialCounter(cc, "VersionLag", [self]() { return self->versionLag; });
@@ -1542,7 +1542,7 @@ ACTOR Future<Void> fetchKeys(StorageCacheData* data, AddingCacheRange* cacheRang
};
AddingCacheRange::AddingCacheRange(StorageCacheData* server, KeyRangeRef const& keys)
-: server(server), keys(keys), transferredVersion(invalidVersion), phase(WaitPrevious) {
+: keys(keys), server(server), transferredVersion(invalidVersion), phase(WaitPrevious) {
fetchClient = fetchKeys(server, this);
}
@@ -1704,9 +1704,9 @@ void cacheWarmup(StorageCacheData* data, const KeyRangeRef& keys, bool nowAssign
class StorageCacheUpdater {
public:
StorageCacheUpdater()
-: fromVersion(invalidVersion), currentVersion(invalidVersion), processedCacheStartKey(false) {}
+: currentVersion(invalidVersion), fromVersion(invalidVersion), processedCacheStartKey(false) {}
StorageCacheUpdater(Version currentVersion)
-: fromVersion(currentVersion), currentVersion(currentVersion), processedCacheStartKey(false) {}
+: currentVersion(invalidVersion), fromVersion(currentVersion), processedCacheStartKey(false) {}
void applyMutation(StorageCacheData* data, MutationRef const& m, Version ver) {
//TraceEvent("SCNewVersion", data->thisServerID).detail("VerWas", data->mutableData().latestVersion).detail("ChVer", ver);

@@ -95,8 +95,8 @@ public:
ReusableCoordinatedState(ServerCoordinators const& coordinators,
PromiseStream<Future<Void>> const& addActor,
UID const& dbgid)
-: coordinators(coordinators), cstate(coordinators), addActor(addActor), dbgid(dbgid), finalWriteStarted(false),
-previousWrite(Void()) {}
+: finalWriteStarted(false), previousWrite(Void()), cstate(coordinators), coordinators(coordinators),
+addActor(addActor), dbgid(dbgid) {}
Future<Void> read() { return _read(this); }
@@ -265,12 +265,12 @@ struct MasterData : NonCopyable, ReferenceCounted<MasterData> {
: dbgid(myInterface.id()), lastEpochEnd(invalidVersion), recoveryTransactionVersion(invalidVersion),
lastCommitTime(0), liveCommittedVersion(invalidVersion), databaseLocked(false),
-minKnownCommittedVersion(invalidVersion), myInterface(myInterface), dbInfo(dbInfo),
-cstate(coordinators, addActor, dbgid), coordinators(coordinators), clusterController(clusterController),
-dbId(dbId), forceRecovery(forceRecovery), safeLocality(tagLocalityInvalid), primaryLocality(tagLocalityInvalid),
-neverCreated(false), hasConfiguration(false), version(invalidVersion), lastVersionTime(0),
-txnStateStore(nullptr), memoryLimit(2e9), registrationCount(0), addActor(addActor),
-recruitmentStalled(makeReference<AsyncVar<bool>>(false)), cc("Master", dbgid.toString()),
+minKnownCommittedVersion(invalidVersion), hasConfiguration(false), coordinators(coordinators),
+version(invalidVersion), lastVersionTime(0), txnStateStore(nullptr), memoryLimit(2e9), dbId(dbId),
+myInterface(myInterface), clusterController(clusterController), cstate(coordinators, addActor, dbgid),
+dbInfo(dbInfo), registrationCount(0), addActor(addActor),
+recruitmentStalled(makeReference<AsyncVar<bool>>(false)), forceRecovery(forceRecovery), neverCreated(false),
+safeLocality(tagLocalityInvalid), primaryLocality(tagLocalityInvalid), cc("Master", dbgid.toString()),
changeCoordinatorsRequests("ChangeCoordinatorsRequests", cc),
getCommitVersionRequests("GetCommitVersionRequests", cc),
backupWorkerDoneRequests("BackupWorkerDoneRequests", cc),

@@ -73,7 +73,7 @@ struct AsyncFileCorrectnessWorkload : public AsyncFileWorkload {
PerfIntCounter numOperations;
AsyncFileCorrectnessWorkload(WorkloadContext const& wcx)
-: AsyncFileWorkload(wcx), success(true), numOperations("Num Operations"), memoryFile(nullptr) {
+: AsyncFileWorkload(wcx), memoryFile(nullptr), success(true), numOperations("Num Operations") {
maxOperationSize = getOption(options, LiteralStringRef("maxOperationSize"), 4096);
numSimultaneousOperations = getOption(options, LiteralStringRef("numSimultaneousOperations"), 10);
targetFileSize = getOption(options, LiteralStringRef("targetFileSize"), (uint64_t)163840);

@@ -46,7 +46,7 @@ struct AsyncFileWriteWorkload : public AsyncFileWorkload {
PerfIntCounter bytesWritten;
AsyncFileWriteWorkload(WorkloadContext const& wcx)
-: AsyncFileWorkload(wcx), bytesWritten("Bytes Written"), writeBuffer(nullptr) {
+: AsyncFileWorkload(wcx), writeBuffer(nullptr), bytesWritten("Bytes Written") {
numParallelWrites = getOption(options, LiteralStringRef("numParallelWrites"), 0);
writeSize = getOption(options, LiteralStringRef("writeSize"), _PAGE_SIZE);
fileSize = getOption(options, LiteralStringRef("fileSize"), 10002432);

@@ -243,7 +243,7 @@ public:
struct DelayedTask : OrderedTask {
double at;
DelayedTask(double at, int64_t priority, TaskPriority taskID, Task* task)
-: at(at), OrderedTask(priority, taskID, task) {}
+: OrderedTask(priority, taskID, task), at(at) {}
bool operator<(DelayedTask const& rhs) const { return at > rhs.at; } // Ordering is reversed for priority_queue
};
std::priority_queue<DelayedTask, std::vector<DelayedTask>> timers;
@@ -1169,19 +1169,16 @@ struct PromiseTask : public Task, public FastAllocated<PromiseTask> {
// 5MB for loading files into memory
Net2::Net2(const TLSConfig& tlsConfig, bool useThreadPool, bool useMetrics)
-: useThreadPool(useThreadPool), network(this), reactor(this), stopped(false), tasksIssued(0),
-ready(FLOW_KNOBS->READY_QUEUE_RESERVED_SIZE),
-// Until run() is called, yield() will always yield
-tscBegin(0), tscEnd(0), taskBegin(0), currentTaskID(TaskPriority::DefaultYield), numYields(0),
-lastPriorityStats(nullptr), tlsInitializedState(ETLSInitState::NONE), tlsConfig(tlsConfig), started(false)
+: useThreadPool(useThreadPool), reactor(this),
#ifndef TLS_DISABLED
-,
sslContextVar({ ReferencedObject<boost::asio::ssl::context>::from(
boost::asio::ssl::context(boost::asio::ssl::context::tls)) }),
-sslPoolHandshakesInProgress(0), sslHandshakerThreadsStarted(0)
+sslHandshakerThreadsStarted(0), sslPoolHandshakesInProgress(0),
#endif
-{
+tlsConfig(tlsConfig), network(this), tscBegin(0), tscEnd(0), taskBegin(0),
+currentTaskID(TaskPriority::DefaultYield), tasksIssued(0), stopped(false), started(false), numYields(0),
+lastPriorityStats(nullptr), ready(FLOW_KNOBS->READY_QUEUE_RESERVED_SIZE), tlsInitializedState(ETLSInitState::NONE) {
+// Until run() is called, yield() will always yield
TraceEvent("Net2Starting");
// Set the global members
@@ -1908,7 +1905,7 @@ void Net2::getDiskBytes(std::string const& directory, int64_t& free, int64_t& to
#include <sched.h>
#endif
-ASIOReactor::ASIOReactor(Net2* net) : network(net), firstTimer(ios), do_not_stop(ios) {
+ASIOReactor::ASIOReactor(Net2* net) : do_not_stop(ios), network(net), firstTimer(ios) {
#ifdef __linux__
// Reactor flags are used only for experimentation, and are platform-specific
if (FLOW_KNOBS->REACTOR_FLAGS & 1) {

@@ -128,7 +128,7 @@ struct Profiler {
bool timerInitialized;
Profiler(int period, std::string const& outfn, INetwork* network)
-: environmentInfoWriter(Unversioned()), signalClosure(signal_handler_for_closure, this), network(network),
+: signalClosure(signal_handler_for_closure, this), environmentInfoWriter(Unversioned()), network(network),
timerInitialized(false) {
actor = profile(this, period, outfn);
}

@@ -205,7 +205,7 @@ public:
WriterThread(Reference<BarrierList> barriers,
Reference<ITraceLogWriter> logWriter,
Reference<ITraceLogFormatter> formatter)
-: barriers(barriers), logWriter(logWriter), formatter(formatter) {}
+: logWriter(logWriter), formatter(formatter), barriers(barriers) {}
void init() override {}
@@ -277,8 +277,8 @@ public:
};
TraceLog()
-: bufferLength(0), loggedLength(0), opened(false), preopenOverflowCount(0), barriers(new BarrierList),
-logTraceEventMetrics(false), formatter(new XmlTraceLogFormatter()), issues(new IssuesList) {}
+: formatter(new XmlTraceLogFormatter()), loggedLength(0), bufferLength(0), opened(false), preopenOverflowCount(0),
+logTraceEventMetrics(false), issues(new IssuesList), barriers(new BarrierList) {}
bool isOpen() const { return opened; }
@@ -835,28 +835,27 @@ Future<Void> pingTraceLogWriterThread() {
}
TraceEvent::TraceEvent(const char* type, UID id)
-: id(id), type(type), severity(SevInfo), initialized(false), enabled(true), logged(false) {
+: initialized(false), enabled(true), logged(false), severity(SevInfo), type(type), id(id) {
setMaxFieldLength(0);
setMaxEventLength(0);
}
TraceEvent::TraceEvent(Severity severity, const char* type, UID id)
-: id(id), type(type), severity(severity), initialized(false), logged(false),
-enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {
+: initialized(false), enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity), logged(false),
+severity(severity), type(type), id(id) {
setMaxFieldLength(0);
setMaxEventLength(0);
}
TraceEvent::TraceEvent(TraceInterval& interval, UID id)
-: id(id), type(interval.type), severity(interval.severity), initialized(false), logged(false),
-enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= interval.severity) {
+: initialized(false), enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= interval.severity),
+logged(false), type(interval.type), id(id), severity(interval.severity) {
setMaxFieldLength(0);
setMaxEventLength(0);
init(interval);
}
TraceEvent::TraceEvent(Severity severity, TraceInterval& interval, UID id)
-: id(id), type(interval.type), severity(severity), initialized(false), logged(false),
-enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity) {
+: initialized(false), logged(false), enabled(g_network == nullptr || FLOW_KNOBS->MIN_TRACE_SEVERITY <= severity),
+type(interval.type), id(id), severity(severity) {
setMaxFieldLength(0);
setMaxEventLength(0);

@@ -276,7 +276,7 @@ ACTOR Future<Void> fastTraceLogger(int* unreadyMessages, int* failedMessages, in
struct FastUDPTracer : public UDPTracer {
FastUDPTracer()
-: socket_fd_(-1), unready_socket_messages_(0), failed_messages_(0), total_messages_(0), send_error_(false) {
+: unready_socket_messages_(0), failed_messages_(0), total_messages_(0), socket_fd_(-1), send_error_(false) {
request_ = TraceRequest{ .buffer = std::make_unique<uint8_t[]>(kTraceBufferSize),
.data_size = 0,
.buffer_size = kTraceBufferSize };