Mirror of https://github.com/apple/foundationdb.git (synced 2025-05-15 02:18:39 +08:00)

commit 912ef76f1c (parent f0de1525c1)

    cleanup before merge
@@ -27,35 +27,6 @@
 #include "fdbclient/CommitTransaction.h"
 #include "fdbclient/FDBTypes.h"
 
-// TODO better place for this? It's used in change feeds too
-struct MutationsAndVersionRef {
-    VectorRef<MutationRef> mutations;
-    Version version;
-    Version knownCommittedVersion;
-
-    MutationsAndVersionRef() {}
-    explicit MutationsAndVersionRef(Version version, Version knownCommittedVersion)
-      : version(version), knownCommittedVersion(knownCommittedVersion) {}
-    MutationsAndVersionRef(VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
-      : mutations(mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
-    MutationsAndVersionRef(Arena& to, VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
-      : mutations(to, mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
-    MutationsAndVersionRef(Arena& to, const MutationsAndVersionRef& from)
-      : mutations(to, from.mutations), version(from.version), knownCommittedVersion(from.knownCommittedVersion) {}
-    int expectedSize() const { return mutations.expectedSize(); }
-
-    struct OrderByVersion {
-        bool operator()(MutationsAndVersionRef const& a, MutationsAndVersionRef const& b) const {
-            return a.version < b.version;
-        }
-    };
-
-    template <class Ar>
-    void serialize(Ar& ar) {
-        serializer(ar, mutations, version, knownCommittedVersion);
-    }
-};
-
 // TODO should GranuleSnapshot and GranuleDeltas just be typedefs instead of subclasses?
 // file format of actual blob files
 struct GranuleSnapshot : VectorRef<KeyValueRef> {

@@ -78,14 +49,14 @@ struct GranuleDeltas : VectorRef<MutationsAndVersionRef> {
 };
 
 // TODO better name?
-struct BlobFilenameRef {
+struct BlobFilePointerRef {
     constexpr static FileIdentifier file_identifier = 5253554;
     StringRef filename;
     int64_t offset;
     int64_t length;
 
-    BlobFilenameRef() {}
-    BlobFilenameRef(Arena& to, std::string filename, int64_t offset, int64_t length)
+    BlobFilePointerRef() {}
+    BlobFilePointerRef(Arena& to, const std::string& filename, int64_t offset, int64_t length)
       : filename(to, filename), offset(offset), length(length) {}
 
     template <class Ar>

@@ -108,8 +79,8 @@ struct BlobGranuleChunkRef {
     constexpr static FileIdentifier file_identifier = 865198;
     KeyRangeRef keyRange;
     Version includedVersion;
-    Optional<BlobFilenameRef> snapshotFile; // not set if it's an incremental read
-    VectorRef<BlobFilenameRef> deltaFiles;
+    Optional<BlobFilePointerRef> snapshotFile; // not set if it's an incremental read
+    VectorRef<BlobFilePointerRef> deltaFiles;
     GranuleDeltas newDeltas;
 
     template <class Ar>

@@ -41,7 +41,7 @@
 #define BG_READ_DEBUG false
 
 ACTOR Future<Arena> readSnapshotFile(Reference<BackupContainerFileSystem> bstore,
-                                     BlobFilenameRef f,
+                                     BlobFilePointerRef f,
                                      KeyRangeRef keyRange,
                                      std::map<KeyRef, ValueRef>* dataMap) {
     try {

@@ -112,7 +112,7 @@ ACTOR Future<Arena> readSnapshotFile(Reference<BackupContainerFileSystem> bstore
 }
 
 ACTOR Future<Standalone<GranuleDeltas>> readDeltaFile(Reference<BackupContainerFileSystem> bstore,
-                                                      BlobFilenameRef f,
+                                                      BlobFilePointerRef f,
                                                       KeyRangeRef keyRange,
                                                       Version readVersion) {
     try {

@@ -302,7 +302,7 @@ ACTOR Future<RangeResult> readBlobGranule(BlobGranuleChunkRef chunk,
 
     state std::vector<Future<Standalone<GranuleDeltas>>> readDeltaFutures;
     readDeltaFutures.reserve(chunk.deltaFiles.size());
-    for (BlobFilenameRef deltaFile : chunk.deltaFiles) {
+    for (BlobFilePointerRef deltaFile : chunk.deltaFiles) {
         readDeltaFutures.push_back(readDeltaFile(bstore, deltaFile, keyRange, readVersion));
         if (stats.present()) {
             ++stats.get()->s3GetReqs;

@@ -73,8 +73,7 @@ void ClientKnobs::initialize(Randomize randomize) {
     init( KEY_SIZE_LIMIT, 1e4 );
     init( SYSTEM_KEY_SIZE_LIMIT, 3e4 );
     init( VALUE_SIZE_LIMIT, 1e5 );
-    // TODO find better solution?
-    init( SPLIT_KEY_SIZE_LIMIT, KEY_SIZE_LIMIT/2 );// if( randomize && BUGGIFY ) SPLIT_KEY_SIZE_LIMIT = KEY_SIZE_LIMIT - 31;//serverKeysPrefixFor(UID()).size() - 1;
+    init( SPLIT_KEY_SIZE_LIMIT, KEY_SIZE_LIMIT/2 ); if( randomize && BUGGIFY ) SPLIT_KEY_SIZE_LIMIT = KEY_SIZE_LIMIT - 31;//serverKeysPrefixFor(UID()).size() - 1;
     init( METADATA_VERSION_CACHE_SIZE, 1000 );
 
     init( MAX_BATCH_SIZE, 1000 ); if( randomize && BUGGIFY ) MAX_BATCH_SIZE = 1;

@@ -253,7 +252,7 @@ void ClientKnobs::initialize(Randomize randomize) {
     init( BUSYNESS_SPIKE_SATURATED_THRESHOLD, 0.500 );
 
     // blob granules
-    init( ENABLE_BLOB_GRANULES, true );
+    init( ENABLE_BLOB_GRANULES, false );
 
     // clang-format on
 }

@@ -217,4 +217,32 @@ struct CommitTransactionRef {
     }
 };
 
+struct MutationsAndVersionRef {
+    VectorRef<MutationRef> mutations;
+    Version version;
+    Version knownCommittedVersion;
+
+    MutationsAndVersionRef() {}
+    explicit MutationsAndVersionRef(Version version, Version knownCommittedVersion)
+      : version(version), knownCommittedVersion(knownCommittedVersion) {}
+    MutationsAndVersionRef(VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
+      : mutations(mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
+    MutationsAndVersionRef(Arena& to, VectorRef<MutationRef> mutations, Version version, Version knownCommittedVersion)
+      : mutations(to, mutations), version(version), knownCommittedVersion(knownCommittedVersion) {}
+    MutationsAndVersionRef(Arena& to, const MutationsAndVersionRef& from)
+      : mutations(to, from.mutations), version(from.version), knownCommittedVersion(from.knownCommittedVersion) {}
+    int expectedSize() const { return mutations.expectedSize(); }
+
+    struct OrderByVersion {
+        bool operator()(MutationsAndVersionRef const& a, MutationsAndVersionRef const& b) const {
+            return a.version < b.version;
+        }
+    };
+
+    template <class Ar>
+    void serialize(Ar& ar) {
+        serializer(ar, mutations, version, knownCommittedVersion);
+    }
+};
+
 #endif
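MutationsAndVersionRef moves verbatim from BlobGranuleCommon.h (removed in the first hunk above) into CommitTransaction.h, since change feeds use it too. Its nested OrderByVersion functor is the usual pattern for keeping mutation batches sorted by commit version; a minimal standalone sketch of that pattern, with simplified stand-in types rather than the FDB definitions:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Simplified stand-ins for FDB's Version and mutation-batch types.
    using Version = int64_t;

    struct Batch {
        Version version;

        // Same shape as MutationsAndVersionRef::OrderByVersion: a strict weak
        // ordering that compares only the version field.
        struct OrderByVersion {
            bool operator()(Batch const& a, Batch const& b) const { return a.version < b.version; }
        };
    };

    int main() {
        std::vector<Batch> batches = { { 30 }, { 10 }, { 20 } };
        // Sort out-of-order batches into commit-version order.
        std::sort(batches.begin(), batches.end(), Batch::OrderByVersion());
        return batches.front().version == 10 ? 0 : 1;
    }

Keeping the comparator nested inside the struct places the ordering policy next to the data it orders without making operator< the type's default comparison.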
@@ -268,11 +268,10 @@ public:
     Future<Void> popChangeFeedMutations(Key rangeID, Version version);
 
     Future<Void> getBlobGranuleRangesStream(const PromiseStream<KeyRange>& results, KeyRange range);
-    // TODO add optional for end version so it can do a GRV in the transaction it already has to do
     Future<Void> readBlobGranulesStream(const PromiseStream<Standalone<BlobGranuleChunkRef>>& results,
                                         KeyRange range,
                                         Version begin,
-                                        Version end);
+                                        Optional<Version> end);
 
     // private:
     explicit DatabaseContext(Reference<AsyncVar<Reference<ClusterConnectionFile>>> connectionFile,

@@ -6744,16 +6744,8 @@ ACTOR Future<Void> singleChangeFeedStream(StorageServerInterface interf,
             state ChangeFeedStreamReply rep = waitNext(replyStream.getFuture());
             begin = rep.mutations.back().version + 1;
 
-            /*if (rangeID.printable() == "8696f5d434d2247e73307a158f9f2a6b" && begin >= 234494075) {
-                printf(" NA CF %s [%s - %s) got %d at %lld\n",
-                       rangeID.printable().c_str(),
-                       range.begin.printable().c_str(),
-                       range.end.printable().c_str(),
-                       rep.mutations.size(),
-                       begin);
-            }*/
             state int resultLoc = 0;
-            // FIXME: fix better
+            // FIXME: handle empty versions properly
             while (resultLoc < rep.mutations.size()) {
                 if (rep.mutations[resultLoc].mutations.size() || rep.mutations[resultLoc].version + 1 == end ||
                     (rep.mutations[resultLoc].mutations.empty() &&

@@ -7146,7 +7138,6 @@ Future<Void> DatabaseContext::popChangeFeedMutations(Key rangeID, Version versio
 
 #define BG_REQUEST_DEBUG false
 
-// FIXME: code for discovering blob granules is similar enough that it could be refactored? It's pretty simple though
 ACTOR Future<Void> getBlobGranuleRangesStreamActor(Reference<DatabaseContext> db,
                                                    PromiseStream<KeyRange> results,
                                                    KeyRange keyRange) {

@@ -7211,7 +7202,6 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
     state UID workerId;
     loop {
        try {
-            // FIXME NOW: handle errors, handle mapping changes
            // FIXME: Use streaming parallelism?
            // Read mapping and worker interfaces from DB
            loopCounter++;

@@ -7237,7 +7227,6 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
                 tr, blobGranuleMappingKeys.begin, keyRange, 1000, GetRangeLimits::BYTE_LIMIT_UNLIMITED));
             blobGranuleMapping = _bgMapping;
             if (blobGranuleMapping.more) {
-                // TODO REMOVE
                 if (BG_REQUEST_DEBUG) {
                     printf("BG Mapping for [%s - %s) too large!\n");
                 }

@@ -7365,8 +7354,6 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
             results.sendError(end_of_stream());
             return Void();
         } catch (Error& e) {
-            // only print this error with exponential backoff
-
             if (e.code() == error_code_actor_cancelled) {
                 throw;
             }

@@ -7381,7 +7368,6 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
                 results.sendError(e);
                 return Void();
             }
-            // TODO add a wait here!
         }
     }
 }

@@ -7389,7 +7375,7 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
 Future<Void> DatabaseContext::readBlobGranulesStream(const PromiseStream<Standalone<BlobGranuleChunkRef>>& results,
                                                      KeyRange range,
                                                      Version begin,
-                                                     Version end) {
+                                                     Optional<Version> end) {
     if (!CLIENT_KNOBS->ENABLE_BLOB_GRANULES) {
         throw client_invalid_operation();
     }
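readBlobGranulesStream's end parameter becomes Optional<Version>, which answers the removed TODO: when the caller supplies no end version, the actor can presumably resolve one from the GRV it already performs in its own transaction rather than forcing every caller to fetch a read version first. A sketch of that resolve-if-absent idea, using std::optional as a stand-in (currentReadVersion() is a hypothetical placeholder, not an FDB API):

    #include <cstdint>
    #include <optional>

    using Version = int64_t;

    // Hypothetical stand-in for obtaining a read version inside an
    // already-open transaction.
    Version currentReadVersion() { return 100; }

    // If the caller supplied no end version, fall back to the transaction's
    // read version, so "read up to now" costs no extra round trip.
    Version resolveEndVersion(std::optional<Version> end) {
        return end.has_value() ? *end : currentReadVersion();
    }

    int main() {
        return resolveEndVersion(std::nullopt) == 100 ? 0 : 1;
    }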
@@ -716,9 +716,9 @@ public:
     double LATENCY_METRICS_LOGGING_INTERVAL;
 
     // blob granule stuff
-    // TODO better place to put this or wire up blob config?
+    // FIXME: configure url with database configuration instead of knob eventually
     std::string BG_URL;
 
     int BG_SNAPSHOT_FILE_TARGET_BYTES;
     int BG_DELTA_FILE_TARGET_BYTES;
     int BG_DELTA_BYTES_BEFORE_COMPACT;

@@ -30,7 +30,6 @@
 #include "fdbrpc/Stats.h"
 #include "fdbrpc/TimedRequest.h"
 #include "fdbrpc/TSSComparison.h"
-#include "fdbclient/BlobGranuleCommon.h"
 #include "fdbclient/CommitTransaction.h"
 #include "fdbclient/TagThrottle.actor.h"
 #include "flow/UnitTest.h"

@@ -1122,66 +1122,34 @@ const KeyRangeRef blobGranuleHistoryKeys(LiteralStringRef("\xff\x02/bgh/"), Lite
 const uint8_t BG_FILE_TYPE_DELTA = 'D';
 const uint8_t BG_FILE_TYPE_SNAPSHOT = 'S';
 
-// uids in blob granule file/split keys are serialized big endian so that incrementUID can create a prefix range for
-// just that UID.
-
-// TODO: could move this to UID or sometwhere else?
-UID incrementUID(UID uid) {
-    uint64_t first = uid.first();
-    uint64_t second = uid.second() + 1;
-    // handle overflow increment of second
-    if (second == 0) {
-        first++;
-        // FIXME: assume we never generate max uid, for now
-        ASSERT(first != 0);
-    }
-
-    return UID(first, second);
-}
-
-void serializeUIDBigEndian(BinaryWriter& wr, UID uid) {
-    wr << bigEndian64(uid.first());
-    wr << bigEndian64(uid.second());
-}
-
-UID deserializeUIDBigEndian(BinaryReader& reader) {
-    uint64_t first;
-    uint64_t second;
-    reader >> first;
-    reader >> second;
-    return UID(bigEndian64(first), bigEndian64(second));
-}
-
 const Key blobGranuleFileKeyFor(UID granuleID, uint8_t fileType, Version fileVersion) {
     ASSERT(fileType == 'D' || fileType == 'S');
     BinaryWriter wr(AssumeVersion(ProtocolVersion::withBlobGranule()));
     wr.serializeBytes(blobGranuleFileKeys.begin);
-    serializeUIDBigEndian(wr, granuleID);
+    wr << granuleID;
     wr << fileType;
     wr << bigEndian64(fileVersion);
     return wr.toValue();
 }
 
 std::tuple<UID, uint8_t, Version> decodeBlobGranuleFileKey(KeyRef const& key) {
+    UID granuleID;
     uint8_t fileType;
     Version fileVersion;
     BinaryReader reader(key.removePrefix(blobGranuleFileKeys.begin), AssumeVersion(ProtocolVersion::withBlobGranule()));
-    UID granuleID = deserializeUIDBigEndian(reader);
+    reader >> granuleID;
     reader >> fileType;
     reader >> fileVersion;
     ASSERT(fileType == 'D' || fileType == 'S');
     return std::tuple(granuleID, fileType, bigEndian64(fileVersion));
 }
 
-Key bgFilePrefixKey(UID gid) {
+const KeyRange blobGranuleFileKeyRangeFor(UID granuleID) {
     BinaryWriter wr(AssumeVersion(ProtocolVersion::withBlobGranule()));
     wr.serializeBytes(blobGranuleFileKeys.begin);
-    serializeUIDBigEndian(wr, gid);
-    return wr.toValue();
-}
-
-const KeyRange blobGranuleFileKeyRangeFor(UID granuleID) {
-    return KeyRangeRef(bgFilePrefixKey(granuleID), bgFilePrefixKey(incrementUID(granuleID)));
+    wr << granuleID;
+    Key startKey = wr.toValue();
+    return KeyRangeRef(startKey, strinc(startKey));
 }
 
 const Value blobGranuleFileValueFor(StringRef const& filename, int64_t offset, int64_t length) {
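The deleted incrementUID/serializeUIDBigEndian helpers existed only so a UID prefix could be turned into a key range by incrementing its big-endian encoding. Ending the range at strinc(startKey) instead works for any byte order, because strinc returns the shortest key strictly greater than every key extending the prefix; that is why the UID can now be written with its ordinary serializer. A standalone sketch of that rule (a simplified byte-string version, not FDB's strinc):

    #include <cassert>
    #include <string>

    // Sketch of the strinc() idea: drop trailing 0xff bytes, then increment
    // the last remaining byte. Every key beginning with `prefix` sorts in
    // [prefix, strincSketch(prefix)).
    std::string strincSketch(std::string prefix) {
        while (!prefix.empty() && static_cast<unsigned char>(prefix.back()) == 0xff)
            prefix.pop_back();
        // FDB's strinc likewise requires a non-empty result; an all-0xff key
        // has no prefix-range end.
        assert(!prefix.empty());
        prefix.back() = static_cast<char>(prefix.back() + 1);
        return prefix;
    }

    int main() {
        std::string start = "bgf/granule42/";          // illustrative prefix only
        std::string end = strincSketch(start);         // "bgf/granule420" ('/' + 1 == '0')
        return (start < end && start + "D" < end) ? 0 : 1;
    }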
@@ -1244,30 +1212,29 @@ std::tuple<int64_t, int64_t, UID> decodeBlobGranuleLockValue(const ValueRef& val
 const Key blobGranuleSplitKeyFor(UID const& parentGranuleID, UID const& granuleID) {
     BinaryWriter wr(AssumeVersion(ProtocolVersion::withBlobGranule()));
     wr.serializeBytes(blobGranuleSplitKeys.begin);
-    serializeUIDBigEndian(wr, parentGranuleID);
-    serializeUIDBigEndian(wr, granuleID);
+    wr << parentGranuleID;
+    wr << granuleID;
     return wr.toValue();
 }
 
 std::pair<UID, UID> decodeBlobGranuleSplitKey(KeyRef const& key) {
+    UID parentGranuleID;
+    UID granuleID;
     BinaryReader reader(key.removePrefix(blobGranuleSplitKeys.begin),
                         AssumeVersion(ProtocolVersion::withBlobGranule()));
 
-    UID parentGranuleID = deserializeUIDBigEndian(reader);
-    UID currentGranuleID = deserializeUIDBigEndian(reader);
-    return std::pair(parentGranuleID, currentGranuleID);
-}
-
-Key bgSplitPrefixKeyFor(UID gid) {
-    BinaryWriter wr(AssumeVersion(ProtocolVersion::withBlobGranule()));
-    wr.serializeBytes(blobGranuleSplitKeys.begin);
-    serializeUIDBigEndian(wr, gid);
-    return wr.toValue();
+    reader >> parentGranuleID;
+    reader >> granuleID;
+    return std::pair(parentGranuleID, granuleID);
 }
 
 const KeyRange blobGranuleSplitKeyRangeFor(UID const& parentGranuleID) {
-    return KeyRangeRef(bgSplitPrefixKeyFor(parentGranuleID), bgSplitPrefixKeyFor(incrementUID(parentGranuleID)));
+    BinaryWriter wr(AssumeVersion(ProtocolVersion::withBlobGranule()));
+    wr.serializeBytes(blobGranuleSplitKeys.begin);
+    wr << parentGranuleID;
+
+    Key startKey = wr.toValue();
+    return KeyRangeRef(startKey, strinc(startKey));
 }
 
 const Value blobGranuleSplitValueFor(BlobGranuleSplitState st) {

@@ -533,9 +533,6 @@ extern const uint8_t BG_FILE_TYPE_SNAPSHOT;
 // \xff\x02/bgf/(granuleID, {snapshot|delta}, version) = [[filename]]
 extern const KeyRangeRef blobGranuleFileKeys;
 
-// TODO could shrink the size of the mapping keyspace by using something similar to tags instead of UIDs. We'd probably
-// want to do that in V1 or it'd be a big migration.
-
 // \xff\x02/bgm/[[begin]] = [[BlobWorkerUID]]
 extern const KeyRangeRef blobGranuleMappingKeys;
 

@@ -548,7 +545,6 @@ extern const KeyRangeRef blobGranuleSplitKeys;
 // \xff\x02/bgh/(start,end,version) = { granuleID, [parentGranuleHistoryKeys] }
 extern const KeyRangeRef blobGranuleHistoryKeys;
 
-// FIXME: better way to represent file type than a string ref?
 const Key blobGranuleFileKeyFor(UID granuleID, uint8_t fileType, Version fileVersion);
 std::tuple<UID, uint8_t, Version> decodeBlobGranuleFileKey(ValueRef const& value);
 const KeyRange blobGranuleFileKeyRangeFor(UID granuleID);

@@ -305,19 +305,6 @@ public:
         }
     }
 
-    std::set<std::string> getRolesSet(NetworkAddress const& address, bool skipWorkers = true) const {
-        std::set<std::string> roles;
-        auto addressIt = roleAddresses.find(address);
-        if (addressIt != roleAddresses.end()) {
-            for (auto& roleIt : addressIt->second) {
-                if ((!skipWorkers) || (roleIt.first != "Worker")) {
-                    roles.insert(roleIt.first);
-                }
-            }
-        }
-        return roles;
-    }
-
     std::string getRoles(NetworkAddress const& address, bool skipWorkers = true) const {
         auto addressIt = roleAddresses.find(address);
         std::string roleText;

@@ -27,7 +27,6 @@
 #include "fdbclient/ReadYourWrites.h"
 #include "fdbclient/SystemData.h"
 #include "fdbserver/BlobManagerInterface.h"
-#include "fdbserver/BlobWorker.actor.h"
 #include "fdbserver/Knobs.h"
 #include "fdbserver/WaitFailure.h"
 #include "fdbserver/WorkerInterface.actor.h"

@@ -32,10 +32,10 @@
 #include "fdbclient/DatabaseContext.h"
 #include "fdbclient/NativeAPI.actor.h"
 #include "fdbclient/Notified.h"
-#include "fdbserver/BlobWorker.actor.h"
 #include "fdbserver/Knobs.h"
 #include "fdbserver/MutationTracking.h"
 #include "fdbserver/WaitFailure.h"
+#include "fdbserver/ServerDBInfo.h"
 #include "flow/Arena.h"
 #include "flow/Error.h"
 #include "flow/IRandom.h"

@@ -793,7 +793,7 @@ ACTOR Future<BlobFileIndex> compactFromBlob(Reference<BlobWorkerData> bwData,
     state int64_t compactBytesRead = 0;
     state Version snapshotVersion = files.snapshotFiles.back().version;
     BlobFileIndex snapshotF = files.snapshotFiles.back();
-    chunk.snapshotFile = BlobFilenameRef(filenameArena, snapshotF.filename, snapshotF.offset, snapshotF.length);
+    chunk.snapshotFile = BlobFilePointerRef(filenameArena, snapshotF.filename, snapshotF.offset, snapshotF.length);
     compactBytesRead += snapshotF.length;
     int deltaIdx = files.deltaFiles.size() - 1;
     while (deltaIdx >= 0 && files.deltaFiles[deltaIdx].version > snapshotVersion) {

@@ -1920,7 +1920,7 @@ ACTOR Future<Void> handleBlobGranuleFileRequest(Reference<BlobWorkerData> bwData
                 ASSERT(i >= 0);
 
                 BlobFileIndex snapshotF = chunkFiles.snapshotFiles[i];
-                chunk.snapshotFile = BlobFilenameRef(rep.arena, snapshotF.filename, snapshotF.offset, snapshotF.length);
+                chunk.snapshotFile = BlobFilePointerRef(rep.arena, snapshotF.filename, snapshotF.offset, snapshotF.length);
                 Version snapshotVersion = chunkFiles.snapshotFiles[i].version;
 
                 // handle delta files

@@ -1,36 +0,0 @@
-/*
- * BlobWorker.actor.h
- *
- * This source file is part of the FoundationDB open source project
- *
- * Copyright 2013-2019 Apple Inc. and the FoundationDB project authors
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#if defined(NO_INTELLISENSE) && !defined(FDBSERVER_BLOBWORKER_ACTOR_G_H)
-#define FDBSERVER_BLOBWORKER_ACTOR_G_H
-#include "fdbserver/BlobWorker.actor.g.h"
-#elif !defined(FDBSERVER_BLOBWORKER_ACTOR_H)
-#define FDBSERVER_BLOBWORKER_ACTOR_H
-
-#include "fdbclient/BlobWorkerInterface.h"
-#include "fdbserver/ServerDBInfo.h"
-#include "flow/actorcompiler.h" // This must be the last #include.
-
-// TODO this whole file should go away once blob worker isn't embedded in other roles
-
-ACTOR Future<Void> blobWorker(BlobWorkerInterface bwInterf, Reference<AsyncVar<ServerDBInfo> const> dbInfo);
-
-#include "flow/unactorcompiler.h"
-#endif

@@ -8,7 +8,6 @@ set(FDBSERVER_SRCS
     BlobManager.actor.cpp
     BlobManagerInterface.h
     BlobWorker.actor.cpp
-    BlobWorker.actor.h
     ClusterController.actor.cpp
     ConfigBroadcaster.actor.cpp
     ConfigBroadcaster.h

@@ -3706,8 +3706,6 @@ void checkBetterSingletons(ClusterControllerData* self) {
 
     // if we reach here, we know that the singletons are healthy so let's
     // check if we can colocate the singletons in a more optimal way
-
-    // TODO: verify that we don't need to get the pid from the worker like we were doing before
     Optional<Standalone<StringRef>> currRKProcessId = rkSingleton.interface.get().locality.processId();
     Optional<Standalone<StringRef>> currDDProcessId = ddSingleton.interface.get().locality.processId();
     Optional<Standalone<StringRef>> newRKProcessId = newRKWorker.interf.locality.processId();

@@ -32,7 +32,6 @@
 #include "fdbserver/ServerDBInfo.h"
 #include "fdbserver/WaitFailure.h"
 #include "flow/actorcompiler.h" // This must be the last #include.
-#include <limits>
 
 enum limitReason_t {
     unlimited, // TODO: rename to workload?

@@ -68,10 +68,6 @@ namespace {
 
 const int MACHINE_REBOOT_TIME = 10;
 
-// The max number of extra blob worker machines we might (i.e. randomly) add to the simulated cluster.
-// Note that this is in addition to the two we always have.
-const int NUM_EXTRA_BW_MACHINES = 5;
-
 bool destructed = false;
 
 // Configuration details specified in workload test files that change the simulation

@@ -288,7 +284,7 @@ public:
     // Refer to FDBTypes.h::TLogVersion. Defaults to the maximum supported version.
    int maxTLogVersion = TLogVersion::MAX_SUPPORTED;
     // Set true to simplify simulation configs for easier debugging
-    bool simpleConfig = true;
+    bool simpleConfig = false;
     int extraMachineCountDC = 0;
     Optional<bool> generateFearless, buggify;
     Optional<int> datacenters, desiredTLogCount, commitProxyCount, grvProxyCount, resolverCount, storageEngineType,

@@ -1635,14 +1631,7 @@ void SimulationConfig::setRegions(const TestConfig& testConfig) {
 void SimulationConfig::setMachineCount(const TestConfig& testConfig) {
     if (testConfig.machineCount.present()) {
         machine_count = testConfig.machineCount.get();
-    }
-    /// TODO REMOVE!
-    else if (testConfig.simpleConfig) {
-        printf("Setting machine count to 1\n");
-        machine_count = 1;
-    }
-    //
-    else if (generateFearless && testConfig.minimumReplication > 1) {
+    } else if (generateFearless && testConfig.minimumReplication > 1) {
         // low latency tests in fearless configurations need 4 machines per datacenter (3 for triple replication, 1 that
         // is down during failures).
         machine_count = 16;

@@ -1689,7 +1678,7 @@ void SimulationConfig::setCoordinators(const TestConfig& testConfig) {
 void SimulationConfig::setProcessesPerMachine(const TestConfig& testConfig) {
     if (testConfig.processesPerMachine.present()) {
         processes_per_machine = testConfig.processesPerMachine.get();
-    } else if (generateFearless || testConfig.simpleConfig) { // TODO CHANGE BACK
+    } else if (generateFearless) {
         processes_per_machine = 1;
     } else {
         processes_per_machine = deterministicRandom()->randomInt(1, (extraDB ? 14 : 28) / machine_count + 2);

@@ -2020,19 +2009,16 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
                           coordinatorCount);
         ASSERT_LE(dcCoordinators, machines);
 
+        // FIXME: temporarily code to test storage cache
         // TODO: caching disabled for this merge
-        // FIXME: we hardcode some machines to specifically test storage cache and blob workers
-        int storageCacheMachines = dc == 0 ? 1 : 0;
-        int blobWorkerMachines = 0;
-        if (CLIENT_KNOBS->ENABLE_BLOB_GRANULES) {
-            blobWorkerMachines = 2 + deterministicRandom()->randomInt(0, NUM_EXTRA_BW_MACHINES + 1);
-        }
+        if (dc == 0) {
+            machines++;
+        }
 
-        int totalMachines = machines + storageCacheMachines + blobWorkerMachines;
-        int useSeedForMachine = deterministicRandom()->randomInt(0, totalMachines);
+        int useSeedForMachine = deterministicRandom()->randomInt(0, machines);
         Standalone<StringRef> zoneId;
         Standalone<StringRef> newZoneId;
-        for (int machine = 0; machine < totalMachines; machine++) {
+        for (int machine = 0; machine < machines; machine++) {
             Standalone<StringRef> machineId(deterministicRandom()->randomUniqueID().toString());
             if (machine == 0 || machineCount - dataCenters <= 4 || assignedMachines != 4 ||
                 simconfig.db.regions.size() || deterministicRandom()->random01() < 0.5) {

@@ -2062,20 +2048,11 @@ void setupSimulatedSystem(std::vector<Future<Void>>* systemActors,
                 }
             }
 
-            // FIXME: hack to add machines specifically to test storage cache and blob workers
+            // FIXME: temporarily code to test storage cache
             // TODO: caching disabled for this merge
-            // `machines` here is the normal (non-temporary) machines that totalMachines comprises of
-            if (machine >= machines) {
-                if (storageCacheMachines > 0 && dc == 0) {
+            if (machine == machines - 1 && dc == 0) {
                 processClass = ProcessClass(ProcessClass::StorageCacheClass, ProcessClass::CommandLineSource);
                 nonVersatileMachines++;
-                    storageCacheMachines--;
-                } else if (blobWorkerMachines > 0) { // add blob workers to every DC
-                    processClass = ProcessClass(ProcessClass::BlobWorkerClass, ProcessClass::CommandLineSource);
-                    nonVersatileMachines++;
-                    blobWorkerMachines--;
-                }
             }
 
             std::vector<IPAddress> ips;

@@ -1657,8 +1657,6 @@ ACTOR Future<ChangeFeedReply> getChangeFeedMutations(StorageServer* data, Change
 
     auto feed = data->uidChangeFeed.find(req.rangeID);
     if (feed == data->uidChangeFeed.end()) {
-        // TODO should unknown_change_feed be replyable?
-        printf("Unknown change feed %s\n", req.rangeID.printable().c_str());
         throw unknown_change_feed();
     }
     state Version dequeVersion = data->version.get();

@@ -1756,6 +1754,7 @@ ACTOR Future<ChangeFeedReply> getChangeFeedMutations(StorageServer* data, Change
     }
 
     // TODO REMOVE or only do if mutation tracking is enabled
+    if (MUTATION_TRACKING_ENABLED) {
         for (auto& mutations : reply.mutations) {
             for (auto& m : mutations.mutations) {
                 DEBUG_MUTATION("ChangeFeedRead", mutations.version, m, data->thisServerID)

@@ -1765,6 +1764,7 @@ ACTOR Future<ChangeFeedReply> getChangeFeedMutations(StorageServer* data, Change
                     .detail("ReqRange", req.range);
             }
         }
+    }
 
     return reply;
 }

@@ -1780,7 +1780,6 @@ ACTOR Future<Void> localChangeFeedStream(StorageServer* data,
             feedRequest.rangeID = rangeID;
             feedRequest.begin = begin;
             feedRequest.end = end;
-            // FIXME: this should set request range, otherwise filterMutations won't work?..
             state ChangeFeedReply feedReply = wait(getChangeFeedMutations(data, feedRequest));
             begin = feedReply.mutations.back().version + 1;
             state int resultLoc = 0;

@@ -1828,8 +1827,7 @@ ACTOR Future<Void> changeFeedStreamQ(StorageServer* data, ChangeFeedStreamReques
             state ChangeFeedRequest feedRequest;
             feedRequest.rangeID = req.rangeID;
             feedRequest.begin = begin;
-            // Set to min of request end and buffered version to skip any potentially partially buffered mutations
-            feedRequest.end = std::min(req.end, data->version.get() + 1);
+            feedRequest.end = req.end;
             feedRequest.range = req.range;
             ChangeFeedReply feedReply = wait(getChangeFeedMutations(data, feedRequest));
 

@@ -1851,8 +1849,6 @@ ACTOR Future<Void> changeFeedStreamQ(StorageServer* data, ChangeFeedStreamReques
                 }
             }
         } catch (Error& e) {
-            // TODO REMOVE
-            printf("CF Stream %s got error %s\n", req.rangeID.printable().c_str(), e.name());
             if (e.code() != error_code_operation_obsolete) {
                 if (!canReplyWith(e))
                     throw;

@@ -5969,7 +5965,6 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
 
         throw internal_error();
     } catch (Error& e) {
-        printf("SS %s crashed with error %s\n", self.thisServerID.toString().c_str(), e.name());
         // If we die with an error before replying to the recruitment request, send the error to the recruiter
         // (ClusterController, and from there to the DataDistributionTeamCollection)
         if (!recruitReply.isSet())

@@ -6181,7 +6176,6 @@ ACTOR Future<Void> storageServer(IKeyValueStore* persistentData,
 
         throw internal_error();
     } catch (Error& e) {
-        printf("SS %s crashed with error %s\n", self.thisServerID.toString().c_str(), e.name());
         if (recovered.canBeSet())
             recovered.send(Void());
         if (storageServerTerminated(self, persistentData, e))

@@ -43,8 +43,6 @@
  * To catch availability issues with the blob worker, it does a request to each granule at the end of the test.
  */
 struct BlobGranuleVerifierWorkload : TestWorkload {
-    // TODO add delay on start so it can start with data
-
     bool doSetup;
     double minDelay;
     double maxDelay;

@@ -237,7 +235,6 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
                     .detail("RequestType", initialRequest ? "RealTime" : "TimeTravel")
                     .detail("FDBSize", fdb.size())
                     .detail("BlobSize", blob.first.size());
-                // TODO debugging details!
 
                 if (BGV_DEBUG) {
                     printf("\nMismatch for [%s - %s) @ %lld (%s). F(%d) B(%d):\n",

@@ -313,7 +310,6 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 
         OldRead() {}
         OldRead(KeyRange range, Version v, RangeResult oldResult) : range(range), v(v), oldResult(oldResult) {}
-        // OldRead(const OldRead& other) : range(other.range), v(other.v), oldResult(other.oldResult) {}
     };
 
     ACTOR Future<Void> verifyGranules(Database cx, BlobGranuleVerifierWorkload* self) {

@@ -138,9 +138,9 @@ public: // introduced features
     PROTOCOL_VERSION_FEATURE(0x0FDB00B070010000LL, StableInterfaces);
     PROTOCOL_VERSION_FEATURE(0x0FDB00B070010001LL, TagThrottleValueReason);
     PROTOCOL_VERSION_FEATURE(0x0FDB00B070010001LL, SpanContext);
-    PROTOCOL_VERSION_FEATURE(0x0FDB00B070010001LL, ChangeFeed);
     PROTOCOL_VERSION_FEATURE(0x0FDB00B070010001LL, TSS);
-    PROTOCOL_VERSION_FEATURE(0x0FDB00B070010001LL, BlobGranule); // TODO make this 7.1 or 7.2?
+    PROTOCOL_VERSION_FEATURE(0x0FDB00B071010001LL, ChangeFeed);
+    PROTOCOL_VERSION_FEATURE(0x0FDB00B071010001LL, BlobGranule);
 };
 
 template <>

@@ -157,7 +157,7 @@ struct Traceable<ProtocolVersion> : std::true_type {
 //
 //                 xyzdev
 //                 vvvv
-constexpr ProtocolVersion currentProtocolVersion(0x0FDB00B070010001LL);
+constexpr ProtocolVersion currentProtocolVersion(0x0FDB00B071010001LL);
 // This assert is intended to help prevent incrementing the leftmost digits accidentally. It will probably need to
 // change when we reach version 10.
 static_assert(currentProtocolVersion.version() < 0x0FDB00B100000000LL, "Unexpected protocol version");
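The ChangeFeed and BlobGranule feature constants and currentProtocolVersion all move from 0x0FDB00B070010001 to 0x0FDB00B071010001, answering the removed "TODO make this 7.1 or 7.2?": both features are now introduced at protocol 7.1. A small sketch reading the release digits out of the constant at the "xyzdev" positions marked in the comment above currentProtocolVersion (digit layout assumed from that marker):

    #include <cstdint>
    #include <cstdio>

    int main() {
        const uint64_t v = 0x0FDB00B071010001ULL;
        // The "xyzdev / vvvv" marker points at the hex digits after 0x0FDB00B0;
        // here x=7 and y=1, i.e. release 7.1.
        const int x = static_cast<int>((v >> 28) & 0xf); // major release digit
        const int y = static_cast<int>((v >> 24) & 0xf); // minor release digit
        std::printf("protocol release %d.%d\n", x, y);
        return 0;
    }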