Mirror of https://github.com/apple/foundationdb.git
commit bdd3dbfa7d
parent 16affd3575

    remove duplicates
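Context for the diff below: FoundationDB's TEST() macro marks a code path that simulation testing is expected to exercise, and coverage tooling identifies each probe by its trailing // comment. Two probes with the same comment (or none) are therefore indistinguishable in coverage reports, which is what this commit fixes by giving every probe a unique description. A minimal sketch of the idea, using an illustrative stand-in macro rather than FDB's actual definition (the real macro logs a trace event rather than printing):

#include <cstdio>

// Illustrative coverage probe: records the first time a condition holds
// at this call site. The trailing // comment on each use is what external
// coverage tooling reports, so it must be unique per site.
#define TEST(condition)                                              \
    do {                                                             \
        static bool hit = false;                                     \
        if ((condition) && !hit) {                                   \
            hit = true;                                              \
            std::printf("CodeCoverage %s:%d\n", __FILE__, __LINE__); \
        }                                                            \
    } while (0)

int main() {
    // Before this commit: several probes shared identical (or empty) text,
    // so reports keyed on the comment could not tell them apart.
    // After: each probe carries a distinct description.
    TEST(true); // inverted range
    TEST(true); // query touches begin or end
    return 0;
}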
@@ -243,12 +243,12 @@ ACTOR Future<Standalone<RangeResultRef>> SpecialKeySpace::getRangeAggregationAct
 	// Handle all corner cases like what RYW does
 	// return if range inverted
 	if (actualBeginOffset >= actualEndOffset && begin.getKey() >= end.getKey()) {
-		TEST(true);
+		TEST(true); // inverted range
 		return RangeResultRef(false, false);
 	}
 	// If touches begin or end, return with readToBegin and readThroughEnd flags
 	if (begin.getKey() == moduleBoundary.end || end.getKey() == moduleBoundary.begin) {
-		TEST(true);
+		TEST(true); // query touches begin or end
 		return result;
 	}
 	state RangeMap<Key, SpecialKeyRangeReadImpl*, KeyRangeRef>::Ranges ranges =
@@ -63,7 +63,7 @@ Future<REPLY_TYPE(Req)> retryBrokenPromise( RequestStream<Req> to, Req request,
 				throw;
 			resetReply( request );
 			wait( delayJittered(FLOW_KNOBS->PREVENT_FAST_SPIN_DELAY, taskID) );
-			TEST(true); // retryBrokenPromise
+			TEST(true); // retryBrokenPromise with taskID
 		}
 	}
 }
@@ -556,8 +556,8 @@ private:

 		debugFileCheck("SimpleFileRead", self->filename, data, offset, length);

-		INJECT_FAULT(io_timeout, "SimpleFile::read");
-		INJECT_FAULT(io_error, "SimpleFile::read");
+		INJECT_FAULT(io_timeout, "SimpleFile::read"); // SimpleFile::read io_timeout injected
+		INJECT_FAULT(io_error, "SimpleFile::read"); // SimpleFile::read io_error injected

 		return read_bytes;
 	}
@@ -594,8 +594,8 @@ private:

 		debugFileCheck("SimpleFileWrite", self->filename, (void*)data.begin(), offset, data.size());

-		INJECT_FAULT(io_timeout, "SimpleFile::write");
-		INJECT_FAULT(io_error, "SimpleFile::write");
+		INJECT_FAULT(io_timeout, "SimpleFile::write"); // SimpleFile::write inject io_timeout
+		INJECT_FAULT(io_error, "SimpleFile::write"); // SimpleFile::write inject io_error

 		return Void();
 	}
@@ -621,8 +621,8 @@ private:
 		if (randLog)
 			fprintf( randLog, "SFT2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());

-		INJECT_FAULT( io_timeout, "SimpleFile::truncate" );
-		INJECT_FAULT( io_error, "SimpleFile::truncate" );
+		INJECT_FAULT( io_timeout, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_timeout
+		INJECT_FAULT( io_error, "SimpleFile::truncate" ); // SimpleFile::truncate inject io_error

 		return Void();
 	}
@@ -654,8 +654,8 @@ private:
 		if (randLog)
 			fprintf( randLog, "SFC2 %s %s %s\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str());

-		INJECT_FAULT( io_timeout, "SimpleFile::sync" );
-		INJECT_FAULT( io_error, "SimpleFile::sync" );
+		INJECT_FAULT( io_timeout, "SimpleFile::sync" ); // SimpleFile::sync inject io_timeout
+		INJECT_FAULT( io_error, "SimpleFile::sync" ); // SimpleFile::sync inject io_error

 		return Void();
 	}
@@ -675,7 +675,7 @@ private:

 		if (randLog)
 			fprintf(randLog, "SFS2 %s %s %s %" PRId64 "\n", self->dbgId.shortString().c_str(), self->filename.c_str(), opId.shortString().c_str(), pos);
-		INJECT_FAULT( io_error, "SimpleFile::size" );
+		INJECT_FAULT( io_error, "SimpleFile::size" ); // SimpleFile::size inject io_error

 		return pos;
 	}
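The SimpleFile hunks above disambiguate INJECT_FAULT() sites the same way: in FDB's simulator, INJECT_FAULT can throw the named error at that call site, and the added comments give each injection point a unique description. A toy sketch of such a fault-injection point, under my own assumed shape rather than FDB's real definition:

#include <cstdio>
#include <stdexcept>
#include <string>

static bool faultInjectionEnabled = false; // a real simulator would decide per site

// Toy fault-injection point: may throw the named error from a specific,
// identifiable call site when fault injection is enabled.
#define INJECT_FAULT(error_type, context)                         \
    do {                                                          \
        if (faultInjectionEnabled)                                \
            throw std::runtime_error(std::string(context) + ": "  \
                                     #error_type);                \
    } while (0)

static int simpleFileRead() {
    INJECT_FAULT(io_timeout, "SimpleFile::read"); // may throw in simulation
    return 0; // normal path when no fault is injected
}

int main() {
    try {
        simpleFileRead();
    } catch (const std::exception& e) {
        std::printf("injected: %s\n", e.what());
    }
    return 0;
}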
@@ -1436,7 +1436,7 @@ public:

 		// Check if any processes on machine are rebooting
 		if ( processesOnMachine != processesPerMachine ) {
-			TEST(true); //Attempted reboot, but the target did not have all of its processes running
+			TEST(true); //Attempted reboot and kill, but the target did not have all of its processes running
 			TraceEvent(SevWarn, "AbortedKill").detail("KillType", kt).detail("MachineId", machineId).detail("Reason", "Machine processes does not match number of processes per machine").detail("Processes", processesOnMachine).detail("ProcessesPerMachine", processesPerMachine).backtrace();
 			if (ktFinal) *ktFinal = None;
 			return false;
@@ -1547,12 +1547,12 @@ public:
 			.detail("KilledDC", kt==ktMin);

 		TEST(kt != ktMin); // DataCenter kill was rejected by killMachine
-		TEST((kt==ktMin) && (kt == RebootAndDelete)); // Resulted in a reboot and delete
-		TEST((kt==ktMin) && (kt == Reboot)); // Resulted in a reboot
-		TEST((kt==ktMin) && (kt == KillInstantly)); // Resulted in an instant kill
-		TEST((kt==ktMin) && (kt == InjectFaults)); // Resulted in a kill by injecting faults
-		TEST((kt==ktMin) && (kt != ktOrig)); // Kill request was downgraded
-		TEST((kt==ktMin) && (kt == ktOrig)); // Requested kill was done
+		TEST((kt==ktMin) && (kt == RebootAndDelete)); // Datacenter kill Resulted in a reboot and delete
+		TEST((kt==ktMin) && (kt == Reboot)); // Datacenter kill Resulted in a reboot
+		TEST((kt==ktMin) && (kt == KillInstantly)); // Datacenter kill Resulted in an instant kill
+		TEST((kt==ktMin) && (kt == InjectFaults)); // Datacenter kill Resulted in a kill by injecting faults
+		TEST((kt==ktMin) && (kt != ktOrig)); // Datacenter Kill request was downgraded
+		TEST((kt==ktMin) && (kt == ktOrig)); // Datacenter kill - Requested kill was done

 		if (ktFinal) *ktFinal = ktMin;

@@ -1937,7 +1937,7 @@ ACTOR Future<Void> clusterRecruitFromConfiguration( ClusterControllerData* self,

 ACTOR Future<Void> clusterRecruitRemoteFromConfiguration( ClusterControllerData* self, RecruitRemoteFromConfigurationRequest req ) {
 	// At the moment this doesn't really need to be an actor (it always completes immediately)
-	TEST(true); //ClusterController RecruitTLogsRequest
+	TEST(true); //ClusterController RecruitTLogsRequest Remote
 	loop {
 		try {
 			RecruitRemoteFromConfigurationReply rep = self->findRemoteWorkersForConfiguration( req );
@@ -2410,7 +2410,7 @@ struct DDTeamCollection : ReferenceCounted<DDTeamCollection> {
 		Reference<TCMachineInfo> machineInfo;
 		if (machine_info.find(machine_id) == machine_info.end()) {
 			// uid is the first storage server process on the machine
-			TEST(true);
+			TEST(true); // First storage server in process on the machine
 			// For each machine, store the first server's localityEntry into machineInfo for later use.
 			LocalityEntry localityEntry = machineLocalityMap.add(locality, &server->id);
 			machineInfo = makeReference<TCMachineInfo>(server, localityEntry);
@@ -3054,7 +3054,7 @@ ACTOR Future<Void> machineTeamRemover(DDTeamCollection* self) {
 			// in the serverTeams vector in the machine team.
 			--teamIndex;
 			self->addTeam(team->getServers(), true, true);
-			TEST(true);
+			TEST(true); // Removed machine team
 		}

 		self->doBuildTeams = true;
@@ -728,7 +728,7 @@ void ILogSystem::SetPeekCursor::updateMessage(int logIdx, bool usePolicy) {
 			c->advanceTo(messageVersion);
 			if( start <= messageVersion && messageVersion < c->version() ) {
 				advancedPast = true;
-				TEST(true); //Merge peek cursor advanced past desired sequence
+				TEST(true); //Merge peek cursor with logIdx advanced past desired sequence
 			}
 		}
 	}
@@ -461,8 +461,8 @@ namespace oldTLog_4_6 {
 		state Version stopVersion = logData->version.get();

 		TEST(true); // TLog stopped by recovering master
-		TEST( logData->stopped );
-		TEST( !logData->stopped );
+		TEST( logData->stopped ); // LogData already stopped
+		TEST( !logData->stopped ); // LogData not yet stopped

 		TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@@ -1005,7 +1005,7 @@ namespace oldTLog_4_6 {
 			auto& sequenceData = trackerData.sequence_version[sequence+1];
 			if(sequenceData.isSet()) {
 				if(sequenceData.getFuture().get() != reply.end) {
-					TEST(true); //tlog peek second attempt ended at a different version
+					TEST(true); //tlog peek second attempt ended at a different version (2)
 					req.reply.sendError(operation_obsolete());
 					return Void();
 				}
@@ -589,8 +589,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();

 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped

 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@@ -1295,7 +1295,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -680,8 +680,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();

 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped

 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@@ -1689,7 +1689,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -756,7 +756,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 			break;
 		}
 		case 3: {
-			TEST(true); // Simulated cluster using radix-tree storage engine
+			TEST(true); // Simulated cluster using redwood storage engine
 			set_config("ssd-redwood-experimental");
 			break;
 		}
@@ -857,7 +857,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 		int satellite_replication_type = deterministicRandom()->randomInt(0,3);
 		switch (satellite_replication_type) {
 		case 0: {
-			TEST( true ); // Simulated cluster using no satellite redundancy mode
+			TEST( true ); // Simulated cluster using no satellite redundancy mode (>4 datacenters)
 			break;
 		}
 		case 1: {
@@ -884,7 +884,7 @@ void SimulationConfig::generateNormalConfig(int minimumReplication, int minimumR
 			break;
 		}
 		case 1: {
-			TEST( true ); // Simulated cluster using no satellite redundancy mode
+			TEST( true ); // Simulated cluster using no satellite redundancy mode (<4 datacenters)
 			break;
 		}
 		case 2: {
@@ -1138,8 +1138,8 @@ void setupSimulatedSystem(vector<Future<Void>>* systemActors, std::string baseFo

 	// Use IPv6 25% of the time
 	bool useIPv6 = deterministicRandom()->random01() < 0.25;
-	TEST( useIPv6 );
-	TEST( !useIPv6 );
+	TEST( useIPv6 ); // Use IPv6
+	TEST( !useIPv6 ); // Use IPv4

 	vector<NetworkAddress> coordinatorAddresses;
 	if(minimumRegions > 1) {
@@ -212,9 +212,9 @@ struct StorageServerMetrics {
 	void notify( KeyRef key, StorageMetrics& metrics ) {
 		ASSERT (metrics.bytes == 0); // ShardNotifyMetrics
 		if (g_network->isSimulated()) {
-			TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics
-			TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics
-			TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics
+			TEST(metrics.bytesPerKSecond != 0); // ShardNotifyMetrics bytes
+			TEST(metrics.iosPerKSecond != 0); // ShardNotifyMetrics ios
+			TEST(metrics.bytesReadPerKSecond != 0); // ShardNotifyMetrics bytesRead
 		}

 		double expire = now() + SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL;
@@ -704,8 +704,8 @@ ACTOR Future<Void> tLogLock( TLogData* self, ReplyPromise< TLogLockResult > repl
 	state Version stopVersion = logData->version.get();

 	TEST(true); // TLog stopped by recovering master
-	TEST( logData->stopped );
-	TEST( !logData->stopped );
+	TEST( logData->stopped ); // logData already stopped
+	TEST( !logData->stopped ); // logData not yet stopped

 	TraceEvent("TLogStop", logData->logId).detail("Ver", stopVersion).detail("IsStopped", logData->stopped).detail("QueueCommitted", logData->queueCommittedVersion.get());

@@ -1728,7 +1728,7 @@ ACTOR Future<Void> tLogPeekMessages( TLogData* self, TLogPeekRequest req, Refere
 		if(sequenceData.isSet()) {
 			trackerData.duplicatePeeks++;
 			if(sequenceData.getFuture().get().first != reply.end) {
-				TEST(true); //tlog peek second attempt ended at a different version
+				TEST(true); //tlog peek second attempt ended at a different version (2)
 				req.reply.sendError(operation_obsolete());
 				return Void();
 			}
@@ -2276,7 +2276,7 @@ ACTOR Future<Void> fetchKeys( StorageServer *data, AddingShard* shard ) {
 			}

 			TEST( true );
-			TEST( shard->updates.size() );
+			TEST( shard->updates.size() ); // Shard has updates
 			ASSERT( otherShard->updates.empty() );
 		}
 	}
@@ -224,7 +224,7 @@ struct BackupAndParallelRestoreCorrectnessWorkload : TestWorkload {

 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); // Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID)
 			    .detail("Tag", printable(tag))
@@ -233,7 +233,7 @@ struct BackupAndRestoreCorrectnessWorkload : TestWorkload {

 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay);

@@ -254,7 +254,7 @@ struct BackupToDBCorrectnessWorkload : TestWorkload {

 		// Stop the differential backup, if enabled
 		if (stopDifferentialDelay) {
-			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time
+			TEST(!stopDifferentialFuture.isReady()); //Restore starts at specified time - stopDifferential not ready
 			wait(stopDifferentialFuture);
 			TraceEvent("BARW_DoBackupWaitToDiscontinue", randomID).detail("Tag", printable(tag)).detail("DifferentialAfter", stopDifferentialDelay);

@@ -131,7 +131,7 @@ std::string generateRegions() {
 			break;
 		}
 		case 1: {
-			TEST( true ); // Simulated cluster using no satellite redundancy mode
+			TEST( true ); // Simulated cluster using no satellite redundancy mode (<5 datacenters)
 			break;
 		}
 		case 2: {
@@ -109,7 +109,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				return;
 			}
 			f = success(ryw.get(LiteralStringRef("\xff\xff/status/json")));
-			TEST(!f.isReady());
+			TEST(!f.isReady()); // status json not ready
 		}
 		ASSERT(f.isError());
 		ASSERT(f.getError().code() == error_code_transaction_cancelled);
@@ -317,7 +317,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				wait(success(tx->getRange(
 				    KeyRangeRef(LiteralStringRef("\xff\xff/transaction/"), LiteralStringRef("\xff\xff/transaction0")),
 				    CLIENT_KNOBS->TOO_MANY)));
-				TEST(true);
+				TEST(true); // read transaction special keyrange
 				tx->reset();
 			} catch (Error& e) {
 				throw;
@@ -341,7 +341,7 @@ struct SpecialKeySpaceCorrectnessWorkload : TestWorkload {
 				KeySelector begin = KeySelectorRef(readConflictRangeKeysRange.begin, false, 1);
 				KeySelector end = KeySelectorRef(LiteralStringRef("\xff\xff/transaction0"), false, 0);
 				wait(success(tx->getRange(begin, end, GetRangeLimits(CLIENT_KNOBS->TOO_MANY))));
-				TEST(true);
+				TEST(true); // end key selector inside module range
 				tx->reset();
 			} catch (Error& e) {
 				throw;
@@ -253,7 +253,7 @@ struct VersionStampWorkload : TestWorkload {
 		if (self->failIfDataLost) {
 			ASSERT(result.size() == self->versionStampKey_commit.size());
 		} else {
-			TEST(result.size() > 0); // Not all data should always be lost.
+			TEST(result.size() > 0); // Not all data should always be lost (2)
 		}

 		//TraceEvent("VST_Check1").detail("Size", result.size()).detail("VsKeyCommitSize", self->versionStampKey_commit.size());