Fix typo in SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS name

sfc-gh-tclinkenbeard 2022-06-27 09:56:02 -07:00
parent 542adb4b9f
commit 0c2bd73e28
5 changed files with 7 additions and 7 deletions

@@ -181,7 +181,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
 	/*
 	The bytesRead/byteSize radio. Will be declared as read hot when larger than this. 8.0 was chosen to avoid reporting table scan as read hot.
 	*/
-	init ( SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS, 1666667 * 1000);
+	init ( SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS, 1666667 * 1000);
 	/*
 	The read bandwidth of a given shard needs to be larger than this value in order to be evaluated if it's read hot. The roughly 1.67MB per second is calculated as following:
 	- Heuristic data suggests that each storage process can do max 500K read operations per second
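
Note: the knob value shown in this hunk is consistent with the ~1.67MB per second figure in the comment: 1666667 * 1000 bytes per ksecond = 1,666,667,000 bytes per 1,000 seconds ≈ 1.67 MB/s.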

@@ -177,7 +177,7 @@ public:
 	    SHARD_MIN_BYTES_PER_KSEC, // Shards with more than this bandwidth will not be merged
 	    SHARD_SPLIT_BYTES_PER_KSEC; // When splitting a shard, it is split into pieces with less than this bandwidth
 	double SHARD_MAX_READ_DENSITY_RATIO;
-	int64_t SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS;
+	int64_t SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS;
 	double SHARD_MAX_BYTES_READ_PER_KSEC_JITTER;
 	double STORAGE_METRIC_TIMEOUT;
 	double METRIC_DELAY;

@@ -43,7 +43,7 @@ BandwidthStatus getBandwidthStatus(StorageMetrics const& metrics) {
 }
 
 ReadBandwidthStatus getReadBandwidthStatus(StorageMetrics const& metrics) {
-	if (metrics.bytesReadPerKSecond <= SERVER_KNOBS->SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS ||
+	if (metrics.bytesReadPerKSecond <= SERVER_KNOBS->SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS ||
 	    metrics.bytesReadPerKSecond <= SERVER_KNOBS->SHARD_MAX_READ_DENSITY_RATIO * metrics.bytes *
 	                                       SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS) {
 		return ReadBandwidthStatusNormal;
@@ -238,7 +238,7 @@ ACTOR Future<Void> trackShardMetrics(DataDistributionTracker::SafeAccessor self,
 			    std::max((int64_t)(SERVER_KNOBS->SHARD_MAX_READ_DENSITY_RATIO * bytes *
 			                       SERVER_KNOBS->STORAGE_METRICS_AVERAGE_INTERVAL_PER_KSECONDS *
 			                       (1.0 + SERVER_KNOBS->SHARD_MAX_BYTES_READ_PER_KSEC_JITTER)),
-			             SERVER_KNOBS->SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS);
+			             SERVER_KNOBS->SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS);
 			bounds.min.bytesReadPerKSecond = 0;
 			bounds.permittedError.bytesReadPerKSecond = bounds.min.bytesReadPerKSecond / 4;
 		} else if (readBandwidthStatus == ReadBandwidthStatusHigh) {
@@ -291,7 +291,7 @@ ACTOR Future<Void> trackShardMetrics(DataDistributionTracker::SafeAccessor self,
 			    .detail("Keys", keys)
 			    .detail("UpdatedSize", metrics.metrics.bytes)
 			    .detail("Bandwidth", metrics.metrics.bytesPerKSecond)
-			    .detail("BandwithStatus", getBandwidthStatus(metrics))
+			    .detail("BandwidthStatus", getBandwidthStatus(metrics))
 			    .detail("BytesLower", bounds.min.bytes)
 			    .detail("BytesUpper", bounds.max.bytes)
 			    .detail("BandwidthLower", bounds.min.bytesPerKSecond)

@@ -532,7 +532,7 @@ struct StorageServerMetrics {
 		auto _ranges = getReadHotRanges(req.keys,
 		                                SERVER_KNOBS->SHARD_MAX_READ_DENSITY_RATIO,
 		                                SERVER_KNOBS->READ_HOT_SUB_RANGE_CHUNK_SIZE,
-		                                SERVER_KNOBS->SHARD_READ_HOT_BANDWITH_MIN_PER_KSECONDS);
+		                                SERVER_KNOBS->SHARD_READ_HOT_BANDWIDTH_MIN_PER_KSECONDS);
 		reply.readHotRanges = VectorRef(_ranges.data(), _ranges.size());
 		req.reply.send(reply);
 	}

@@ -101,7 +101,7 @@ struct ReadHotDetectionWorkload : TestWorkload {
 		StorageMetrics sm = wait(cx->getStorageMetrics(self->wholeRange, 100));
 		// TraceEvent("RHDCheckPhaseLog")
 		//     .detail("KeyRangeSize", sm.bytes)
-		//     .detail("KeyRangeReadBandwith", sm.bytesReadPerKSecond);
+		//     .detail("KeyRangeReadBandwidth", sm.bytesReadPerKSecond);
 		Standalone<VectorRef<ReadHotRangeWithMetrics>> keyRanges = wait(cx->getReadHotRanges(self->wholeRange));
 		// TraceEvent("RHDCheckPhaseLog")
 		//     .detail("KeyRangesSize", keyRanges.size())