Fix Wunused-but-set-variable warnings

Jingyu Zhou 2024-07-17 11:28:00 -07:00
parent db036aeec3
commit 3a3ee247ab
15 changed files with 6 additions and 31 deletions
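
For readers skimming the diff: a minimal, hypothetical sketch (plain C++, not FoundationDB code) of what GCC/Clang's -Wunused-but-set-variable reports, and the most common fix in this commit, which is simply deleting the dead variable.

    // Compile with: g++ -Wunused-but-set-variable -c demo.cpp
    // Before: 'successCount' is written twice but its value is never read, so the
    // compiler warns "variable 'successCount' set but not used".
    int sumBefore(int n) {
        int successCount = 0;
        int total = 0;
        for (int i = 0; i < n; ++i) {
            successCount++; // modified again, still never read
            total += i;
        }
        return total;
    }

    // After: the unused counter is removed, mirroring most of the hunks below.
    int sumAfter(int n) {
        int total = 0;
        for (int i = 0; i < n; ++i) {
            total += i;
        }
        return total;
    }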

View File

@@ -254,7 +254,6 @@ ACTOR Future<Void> validateGranuleSummaries(Database cx,
state KeyRangeMap<Optional<BlobGranuleSummaryRef>> lastSummary;
state Version lastSummaryVersion = invalidVersion;
state Transaction tr(cx, tenant);
state int successCount = 0;
try {
loop {
// get grv and get latest summaries
@@ -324,8 +323,6 @@
}
}
successCount++;
lastSummaryArena = nextSummary.arena();
lastSummaryVersion = nextSummaryVersion;
lastSummary.insert(range, {});

View File

@@ -1785,7 +1785,6 @@ ACTOR Future<WriteMutationRefVar> writeMutationEncryptedMutation(CommitBatchCont
Optional<MutationRef>* encryptedMutationOpt,
Arena* arena) {
state MutationRef encryptedMutation = encryptedMutationOpt->get();
state const BlobCipherEncryptHeader* header;
state BlobCipherEncryptHeaderRef headerRef;
state MutationRef decryptedMutation;

View File

@@ -403,7 +403,7 @@ ACTOR Future<Void> getRate(UID myID,
state Future<GetRateInfoReply> reply = Never();
state double lastDetailedReply = 0.0; // request detailed metrics immediately
state bool expectingDetailedReply = false;
state int64_t lastTC = 0;
// state int64_t lastTC = 0;
if (db->get().ratekeeper.present())
nextRequestTimer = Void();
@@ -441,7 +441,7 @@ ACTOR Future<Void> getRate(UID myID,
stats->batchTransactionRateAllowed = rep.batchTransactionRate;
++stats->updatesFromRatekeeper;
//TraceEvent("GrvProxyRate", myID).detail("Rate", rep.transactionRate).detail("BatchRate", rep.batchTransactionRate).detail("Lease", rep.leaseDuration).detail("ReleasedTransactions", *inTransactionCount - lastTC);
lastTC = *inTransactionCount;
// lastTC = *inTransactionCount;
leaseTimeout = delay(rep.leaseDuration);
nextRequestTimer = delayJittered(rep.leaseDuration / 2);
healthMetricsReply->update(rep.healthMetrics, expectingDetailedReply, true);
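
A note on the hunk above: lastTC is only read by a TraceEvent that is itself commented out, so the declaration and the assignment are commented out along with it rather than deleted, which silences the warning while keeping the diagnostic easy to re-enable. A hypothetical, stripped-down sketch of the same idea (not FoundationDB code; the names are invented):

    #include <cstdio>

    // The only consumer of 'lastReleased' is the disabled diagnostic, so the whole
    // bookkeeping is commented out together: deleting it would make the trace harder
    // to restore, while leaving it live would trip -Wunused-but-set-variable.
    void onRateReply(long releasedTransactions, double rate) {
        // static long lastReleased = 0;
        // std::printf("rate=%f released=%ld\n", rate, releasedTransactions - lastReleased);
        // lastReleased = releasedTransactions;
        std::printf("rate=%f\n", rate);
        (void)releasedTransactions; // parameter kept for when the trace is re-enabled
    }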

View File

@@ -453,7 +453,7 @@ ACTOR Future<bool> validateRangeAssignment(Database occ,
try {
// If corruption detected, enter security mode which
// stops using data moves and only allow auditStorage
int _ = wait(setDDMode(occ, 2));
wait(success(setDDMode(occ, 2)));
TraceEvent(SevInfo, "ValidateRangeAssignmentCorruptionDetectedAndDDStopped")
.detail("DataMoveID", dataMoveId)
.detail("Range", range)

View File

@@ -384,7 +384,6 @@ public:
ACTOR static Future<Void> monitorBlobWorkers(Ratekeeper* self, Reference<AsyncVar<ServerDBInfo> const> dbInfo) {
state std::vector<BlobWorkerInterface> blobWorkers;
state int workerFetchCount = 0;
state double lastStartTime = 0;
state double startTime = 0;
state bool blobWorkerDead = false;
state double lastLoggedTime = 0;
@@ -409,7 +408,6 @@ public:
grv = self->maxVersion;
}
lastStartTime = startTime;
startTime = now();
if (blobWorkers.size() > 0) {

View File

@@ -250,7 +250,6 @@ ACTOR Future<Void> distributeRestoreSysInfo(Reference<RestoreControllerData> con
// 4) After process all restore requests, finish restore by cleaning up the restore related system key
// and ask all restore roles to quit.
ACTOR Future<Void> startProcessRestoreRequests(Reference<RestoreControllerData> self, Database cx) {
state UID randomUID = deterministicRandom()->randomUniqueID();
state std::vector<RestoreRequest> restoreRequests = wait(collectRestoreRequests(cx));
state int restoreIndex = 0;

View File

@@ -229,7 +229,7 @@ ACTOR Future<Void> startRestoreWorkerLeader(Reference<RestoreWorkerData> self,
}
ACTOR Future<Void> startRestoreWorker(Reference<RestoreWorkerData> self, RestoreWorkerInterface interf, Database cx) {
state double lastLoopTopTime;
state double lastLoopTopTime = now();
state ActorCollection actors(false); // Collect the main actor for each role
state Future<Void> exitRole = Never();

View File

@@ -255,8 +255,7 @@ ACTOR Future<Void> fetchCheckpointBytesSampleFile(Database cx,
ASSERT(!metaData->src.empty());
state UID ssId = metaData->src.front();
int64_t fileSize =
wait(doFetchCheckpointFile(cx, metaData->bytesSampleFile.get(), localFile, ssId, metaData->checkpointID));
wait(success(doFetchCheckpointFile(cx, metaData->bytesSampleFile.get(), localFile, ssId, metaData->checkpointID)));
metaData->bytesSampleFile = localFile;
if (cFun) {
wait(cFun(*metaData));
@@ -925,7 +924,7 @@ ACTOR Future<Void> fetchCheckpointFile(Database cx,
ASSERT_EQ(metaData->src.size(), 1);
const UID ssId = metaData->src.front();
int64_t fileSize = wait(doFetchCheckpointFile(cx, remoteFile, localFile, ssId, metaData->checkpointID));
wait(success(doFetchCheckpointFile(cx, remoteFile, localFile, ssId, metaData->checkpointID)));
rocksCF.sstFiles[idx].db_path = dir;
rocksCF.sstFiles[idx].fetched = true;
metaData->serializedCheckpoint = ObjectWriter::toValue(rocksCF, IncludeVersion());

View File

@@ -2950,7 +2950,6 @@ ACTOR Future<Void> pullAsyncData(TLogData* self,
state Version ver = 0;
state std::vector<TagsAndMessage> messages;
state bool pullingRecoveryData = endVersion.present() && endVersion.get() == logData->recoveredAt;
loop {
state bool foundMessage = r->hasMessage();
if (!foundMessage || r->version().version != ver) {

View File

@@ -88,8 +88,6 @@ Future<uint64_t> setupRange(Database cx,
uint64_t end,
std::vector<Reference<Tenant>> tenants) {
state uint64_t bytesInserted = 0;
state double startT = now();
state double prevStart;
loop {
Optional<Reference<Tenant>> tenant;
if (tenants.size() > 0) {
@@ -98,7 +96,6 @@ Future<uint64_t> setupRange(Database cx,
state Transaction tr(cx, tenant);
setAuthToken(*workload, tr);
try {
prevStart = now();
if (deterministicRandom()->random01() < 0.001)
tr.debugTransaction(deterministicRandom()->randomUniqueID());

View File

@@ -399,7 +399,6 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
ACTOR static Future<Void> _start(Database cx, BackupAndParallelRestoreCorrectnessWorkload* self) {
state FileBackupAgent backupAgent;
state Future<Void> extraBackup;
state bool extraTasks = false;
state UID randomID = nondeterministicRandom()->randomUniqueID();
state int restoreIndex = 0;
state ReadYourWritesTransaction tr2(cx);
@@ -623,7 +622,6 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {
// Q: What is the extra backup and why do we need to care about it?
if (extraBackup.isValid()) { // SOMEDAY: Handle this case
TraceEvent("BARW_WaitExtraBackup", randomID).detail("BackupTag", printable(self->backupTag));
extraTasks = true;
try {
wait(extraBackup);
} catch (Error& e) {

View File

@@ -555,7 +555,6 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
ACTOR static Future<Void> _start(Database cx, BackupAndRestoreCorrectnessWorkload* self) {
state FileBackupAgent backupAgent;
state Future<Void> extraBackup;
state bool extraTasks = false;
state DatabaseConfiguration config = wait(getDatabaseConfiguration(cx));
TraceEvent("BARW_Arguments")
.detail("BackupTag", printable(self->backupTag))
@@ -850,7 +849,6 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {
if (extraBackup.isValid()) {
TraceEvent("BARW_WaitExtraBackup", randomID).detail("BackupTag", printable(self->backupTag));
extraTasks = true;
try {
wait(extraBackup);
} catch (Error& e) {

View File

@@ -581,7 +581,6 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
state DatabaseBackupAgent backupAgent(cx);
state DatabaseBackupAgent restoreTool(self->extraDB);
state Future<Void> extraBackup;
state bool extraTasks = false;
state DatabaseConfiguration config = wait(getDatabaseConfiguration(cx));
TraceEvent("BARW_Arguments")
.detail("BackupTag", printable(self->backupTag))
@@ -753,7 +752,6 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {
if (extraBackup.isValid()) {
TraceEvent("BARW_WaitExtraBackup", randomID).detail("BackupTag", printable(self->backupTag));
extraTasks = true;
try {
wait(extraBackup);
} catch (Error& e) {

View File

@@ -339,18 +339,14 @@ struct BlobGranuleCorrectnessWorkload : TestWorkload {
state bool gotEOS = false;
state int64_t totalRows = 0;
state uint32_t lastKey = -1;
state uint32_t lastId = -1;
state std::map<uint32_t, KeyData>::iterator lastKeyData = threadData->keyData.end();
fmt::print("Loading previous directory data for {0}\n", threadData->directoryID);
loop {
state Version readVersion = invalidVersion;
state int64_t bufferedBytes = 0;
try {
state Version ver = wait(tr.getReadVersion());
fmt::print("Dir {0}: RV={1}\n", threadData->directoryID, ver);
readVersion = ver;
state PromiseStream<Standalone<RangeResultRef>> results;
state Future<Void> stream = tr.getRangeStream(results, keyRange, GetRangeLimits());
@@ -381,8 +377,6 @@ struct BlobGranuleCorrectnessWorkload : TestWorkload {
// insert new WriteData for key
lastKeyData->second.writes.emplace_back(ver, MAX_VERSION, val, it.value.size());
lastId = id;
}
if (!res.empty()) {

View File

@@ -266,7 +266,6 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
state std::map<double, OldRead> timeTravelChecks;
state int64_t timeTravelChecksMemory = 0;
state Version prevPurgeVersion = -1;
state UID dbgId = debugRandom()->randomUniqueID();
state Version newPurgeVersion = 0;
// usually we want randomness to verify maximum data, but sometimes hotspotting a subset is good too
state bool pickGranuleUniform = deterministicRandom()->random01() < 0.1;