mirror of https://github.com/apple/foundationdb.git (synced 2025-05-14 18:02:31 +08:00)

Enable clang -Wformat warning

parent 45a66f9f75
commit 13bb7838aa
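This commit removes -Wno-format from the clang warning flags (see the CMakeLists hunk below) and fixes the printf/fprintf/snprintf/sscanf calls that -Wformat then reports, mostly by matching the length modifier to the actual argument type. A minimal, self-contained C++ sketch of the two most common mismatches in this diff and their fixes; the identifiers here are illustrative, not taken from the FoundationDB sources:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
    int64_t version = 12345; // e.g. a commit version
    size_t count = 7;        // e.g. a container's size()

    // printf("v=%lld n=%d\n", version, count);       // -Wformat warns: int64_t is 'long' on LP64, and size_t is not 'int'
    printf("v=%ld n=%lu\n", version, count);          // the style this commit adopts (correct on LP64 Linux)
    printf("v=%" PRId64 " n=%zu\n", version, count);  // the platform-independent spelling
    return 0;
}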
@@ -943,7 +943,7 @@ int run_workload(FDBTransaction* transaction,
 if (tracetimer == dotrace) {
 fdb_error_t err;
 tracetimer = 0;
-snprintf(traceid, 32, "makotrace%019lld", total_xacts);
+snprintf(traceid, 32, "makotrace%019ld", total_xacts);
 fprintf(debugme, "DEBUG: txn tracing %s\n", traceid);
 err = fdb_transaction_set_option(transaction,
 FDB_TR_OPTION_DEBUG_TRANSACTION_IDENTIFIER,
@@ -1101,7 +1101,7 @@ void* worker_thread(void* thread_args) {
 }

 fprintf(debugme,
-"DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%d (tid:%lld)\n",
+"DEBUG: worker_id:%d (%d) thread_id:%d (%d) database_index:%lu (tid:%lu)\n",
 worker_id,
 args->num_processes,
 thread_id,
@@ -1301,7 +1301,7 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
 if (err) {
 fprintf(stderr,
 "ERROR: fdb_network_set_option (FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION) (%d): %s\n",
-(uint8_t*)&args->client_threads_per_version,
+args->client_threads_per_version,
 fdb_get_error(err));
 // let's exit here since we do not want to confuse users
 // that mako is running with multi-threaded client enabled
@@ -2038,9 +2038,9 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
 for (op = 0; op < MAX_OP; op++) {
 if (args->txnspec.ops[op][OP_COUNT] > 0) {
 uint64_t ops_total_diff = ops_total[op] - ops_total_prev[op];
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total_diff);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total_diff);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), ops_total_diff);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total_diff);
 }
 errors_diff[op] = errors_total[op] - errors_total_prev[op];
 print_err = (errors_diff[op] > 0);
@@ -2068,7 +2068,7 @@ void print_stats(mako_args_t* args, mako_stats_t* stats, struct timespec* now, s
 printf("%" STR(STATS_TITLE_WIDTH) "s ", "Errors");
 for (op = 0; op < MAX_OP; op++) {
 if (args->txnspec.ops[op][OP_COUNT] > 0) {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_diff[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_diff[op]);
 if (fp) {
 fprintf(fp, "\"errors\": %.2f", conflicts_diff);
 }
@@ -2213,10 +2213,10 @@ void print_report(mako_args_t* args,
 break;
 }
 }
-printf("Total Xacts: %8lld\n", totalxacts);
-printf("Total Conflicts: %8lld\n", conflicts);
-printf("Total Errors: %8lld\n", totalerrors);
-printf("Overall TPS: %8lld\n\n", totalxacts * 1000000000 / duration_nsec);
+printf("Total Xacts: %8lu\n", totalxacts);
+printf("Total Conflicts: %8lu\n", conflicts);
+printf("Total Errors: %8lu\n", totalerrors);
+printf("Overall TPS: %8lu\n\n", totalxacts * 1000000000 / duration_nsec);

 if (fp) {
 fprintf(fp, "\"results\": {");
@@ -2224,10 +2224,10 @@ void print_report(mako_args_t* args,
 fprintf(fp, "\"totalProcesses\": %d,", args->num_processes);
 fprintf(fp, "\"totalThreads\": %d,", args->num_threads);
 fprintf(fp, "\"targetTPS\": %d,", args->tpsmax);
-fprintf(fp, "\"totalXacts\": %lld,", totalxacts);
-fprintf(fp, "\"totalConflicts\": %lld,", conflicts);
-fprintf(fp, "\"totalErrors\": %lld,", totalerrors);
-fprintf(fp, "\"overallTPS\": %lld,", totalxacts * 1000000000 / duration_nsec);
+fprintf(fp, "\"totalXacts\": %lu,", totalxacts);
+fprintf(fp, "\"totalConflicts\": %lu,", conflicts);
+fprintf(fp, "\"totalErrors\": %lu,", totalerrors);
+fprintf(fp, "\"overallTPS\": %lu,", totalxacts * 1000000000 / duration_nsec);
 }

 /* per-op stats */
@@ -2240,9 +2240,9 @@ void print_report(mako_args_t* args,
 }
 for (op = 0; op < MAX_OP; op++) {
 if ((args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) || op == OP_COMMIT) {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", ops_total[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", ops_total[op]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), ops_total[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), ops_total[op]);
 }
 }
 }
@@ -2263,9 +2263,9 @@ void print_report(mako_args_t* args,
 printf("%-" STR(STATS_TITLE_WIDTH) "s ", "Errors");
 for (op = 0; op < MAX_OP; op++) {
 if (args->txnspec.ops[op][OP_COUNT] > 0 && op != OP_TRANSACTION) {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", errors_total[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", errors_total[op]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), errors_total[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), errors_total[op]);
 }
 }
 }
@@ -2282,12 +2282,12 @@ void print_report(mako_args_t* args,
 for (op = 0; op < MAX_OP; op++) {
 if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
 if (lat_total[op]) {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_samples[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_samples[op]);
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
 }
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_samples[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_samples[op]);
 }
 }
 }
@@ -2303,9 +2303,9 @@ void print_report(mako_args_t* args,
 if (lat_min[op] == -1) {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
 } else {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_min[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_min[op]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_min[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_min[op]);
 }
 }
 }
@@ -2320,9 +2320,9 @@ void print_report(mako_args_t* args,
 for (op = 0; op < MAX_OP; op++) {
 if (args->txnspec.ops[op][OP_COUNT] > 0 || op == OP_TRANSACTION || op == OP_COMMIT) {
 if (lat_total[op]) {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_total[op] / lat_samples[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_total[op] / lat_samples[op]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_total[op] / lat_samples[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_total[op] / lat_samples[op]);
 }
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
@@ -2341,9 +2341,9 @@ void print_report(mako_args_t* args,
 if (lat_max[op] == 0) {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
 } else {
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", lat_max[op]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", lat_max[op]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), lat_max[op]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), lat_max[op]);
 }
 }
 }
@@ -2393,9 +2393,9 @@ void print_report(mako_args_t* args,
 } else {
 median = (dataPoints[op][num_points[op] / 2] + dataPoints[op][num_points[op] / 2 - 1]) >> 1;
 }
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", median);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", median);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), median);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), median);
 }
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
@@ -2417,9 +2417,9 @@ void print_report(mako_args_t* args,
 }
 if (lat_total[op]) {
 point_95pct = ((float)(num_points[op]) * 0.95) - 1;
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_95pct]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_95pct]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_95pct]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_95pct]);
 }
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
@@ -2441,9 +2441,9 @@ void print_report(mako_args_t* args,
 }
 if (lat_total[op]) {
 point_99pct = ((float)(num_points[op]) * 0.99) - 1;
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99pct]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99pct]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_99pct]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99pct]);
 }
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
@@ -2465,9 +2465,9 @@ void print_report(mako_args_t* args,
 }
 if (lat_total[op]) {
 point_99_9pct = ((float)(num_points[op]) * 0.999) - 1;
-printf("%" STR(STATS_FIELD_WIDTH) "lld ", dataPoints[op][point_99_9pct]);
+printf("%" STR(STATS_FIELD_WIDTH) "lu ", dataPoints[op][point_99_9pct]);
 if (fp) {
-fprintf(fp, "\"%s\": %lld,", get_ops_name(op), dataPoints[op][point_99_9pct]);
+fprintf(fp, "\"%s\": %lu,", get_ops_name(op), dataPoints[op][point_99_9pct]);
 }
 } else {
 printf("%" STR(STATS_FIELD_WIDTH) "s ", "N/A");
@@ -2529,7 +2529,7 @@ int stats_process_main(mako_args_t* args,
 fprintf(fp, "\"value_length\": %d,", args->value_length);
 fprintf(fp, "\"commit_get\": %d,", args->commit_get);
 fprintf(fp, "\"verbose\": %d,", args->verbose);
-fprintf(fp, "\"cluster_files\": \"%s\",", args->cluster_files);
+fprintf(fp, "\"cluster_files\": \"%s\",", args->cluster_files[0]);
 fprintf(fp, "\"log_group\": \"%s\",", args->log_group);
 fprintf(fp, "\"prefixpadding\": %d,", args->prefixpadding);
 fprintf(fp, "\"trace\": %d,", args->trace);
@@ -67,25 +67,25 @@ void runTests(struct ResultSet* rs) {
 fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
 e = getSize(rs, tr, sizes + i);
 checkError(e, "transaction get size", rs);
-printf("size %d: %u\n", i, sizes[i]);
+printf("size %d: %ld\n", i, sizes[i]);
 i++;

 fdb_transaction_set(tr, keys[i], KEY_SIZE, valueStr, VALUE_SIZE);
 e = getSize(rs, tr, sizes + i);
 checkError(e, "transaction get size", rs);
-printf("size %d: %u\n", i, sizes[i]);
+printf("size %d: %ld\n", i, sizes[i]);
 i++;

 fdb_transaction_clear(tr, keys[i], KEY_SIZE);
 e = getSize(rs, tr, sizes + i);
 checkError(e, "transaction get size", rs);
-printf("size %d: %u\n", i, sizes[i]);
+printf("size %d: %ld\n", i, sizes[i]);
 i++;

 fdb_transaction_clear_range(tr, keys[i], KEY_SIZE, keys[i + 1], KEY_SIZE);
 e = getSize(rs, tr, sizes + i);
 checkError(e, "transaction get size", rs);
-printf("size %d: %u\n", i, sizes[i]);
+printf("size %d: %ld\n", i, sizes[i]);
 i++;

 for (j = 0; j + 1 < i; j++) {
@@ -284,7 +284,6 @@ else()
 # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
 -Wno-comment
 -Wno-delete-non-virtual-dtor
--Wno-format
 -Wno-mismatched-tags
 -Wno-missing-field-initializers
 -Wno-sign-compare
@@ -127,7 +127,7 @@ ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRe
 when(Standalone<VectorRef<MutationsAndVersionRef>> res = waitNext(feedResults.getFuture())) {
 for (auto& it : res) {
 for (auto& it2 : it.mutations) {
-printf("%lld %s\n", it.version, it2.toString().c_str());
+printf("%ld %s\n", it.version, it2.toString().c_str());
 }
 }
 }
@@ -48,7 +48,7 @@ ACTOR Future<Void> printProcessClass(Reference<IDatabase> db) {
 ASSERT(processSourceList.size() == processTypeList.size());
 if (!processTypeList.size())
 printf("No processes are registered in the database.\n");
-printf("There are currently %zu processes in the database:\n", processTypeList.size());
+printf("There are currently %d processes in the database:\n", processTypeList.size());
 for (int index = 0; index < processTypeList.size(); index++) {
 std::string address =
 processTypeList[index].key.removePrefix(fdb_cli::processClassTypeSpecialKeyRange.begin).toString();
@@ -97,7 +97,7 @@ ACTOR Future<Arena> readSnapshotFile(Reference<BackupContainerFileSystem> bstore
 }
 }*/
 if (BG_READ_DEBUG) {
-printf("Started with %d rows from snapshot file %s after pruning to [%s - %s)\n",
+printf("Started with %lu rows from snapshot file %s after pruning to [%s - %s)\n",
 dataMap->size(),
 f.toString().c_str(),
 keyRange.begin.printable().c_str(),
@@ -143,7 +143,7 @@ ACTOR Future<Standalone<GranuleDeltas>> readDeltaFile(Reference<BackupContainerF
 // TODO REMOVE sanity check
 for (int i = 0; i < result.size() - 1; i++) {
 if (result[i].version > result[i + 1].version) {
-printf("BG VERSION ORDER VIOLATION IN DELTA FILE: '%lld', '%lld'\n",
+printf("BG VERSION ORDER VIOLATION IN DELTA FILE: '%ld', '%ld'\n",
 result[i].version,
 result[i + 1].version);
 }
@@ -313,7 +313,7 @@ ACTOR Future<RangeResult> readBlobGranule(BlobGranuleChunkRef chunk,
 arena.dependsOn(snapshotArena);

 if (BG_READ_DEBUG) {
-printf("Applying %d delta files\n", readDeltaFutures.size());
+printf("Applying %lu delta files\n", readDeltaFutures.size());
 }
 for (Future<Standalone<GranuleDeltas>> deltaFuture : readDeltaFutures) {
 Standalone<GranuleDeltas> result = wait(deltaFuture);
@@ -7201,7 +7201,7 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
 blobGranuleMapping = _bgMapping;
 if (blobGranuleMapping.more) {
 if (BG_REQUEST_DEBUG) {
-printf("BG Mapping for [%s - %s) too large!\n");
+// printf("BG Mapping for [%s - %s) too large!\n");
 }
 throw unsupported_operation();
 }
@@ -7215,7 +7215,7 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
 }

 if (BG_REQUEST_DEBUG) {
-printf("Doing blob granule request @ %lld\n", endVersion);
+printf("Doing blob granule request @ %ld\n", endVersion);
 printf("blob worker assignments:\n");
 }

@@ -7290,7 +7290,7 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
 nullptr));

 if (BG_REQUEST_DEBUG) {
-printf("Blob granule request for [%s - %s) @ %lld - %lld got reply from %s:\n",
+printf("Blob granule request for [%s - %s) @ %ld - %ld got reply from %s:\n",
 granuleStartKey.printable().c_str(),
 granuleEndKey.printable().c_str(),
 begin,
@@ -7311,11 +7311,11 @@ ACTOR Future<Void> readBlobGranulesStreamActor(Reference<DatabaseContext> db,
 }
 printf(" Deltas: (%d)", chunk.newDeltas.size());
 if (chunk.newDeltas.size() > 0) {
-printf(" with version [%lld - %lld]",
+printf(" with version [%ld - %ld]",
 chunk.newDeltas[0].version,
 chunk.newDeltas[chunk.newDeltas.size() - 1].version);
 }
-printf(" IncludedVersion: %lld\n", chunk.includedVersion);
+printf(" IncludedVersion: %ld\n", chunk.includedVersion);
 printf("\n\n");
 }
 Arena a;
@@ -235,7 +235,7 @@ ACTOR Future<Standalone<VectorRef<KeyRef>>> splitRange(Reference<ReadYourWritesT
 StorageMetrics estimated = wait(tr->getTransaction().getStorageMetrics(range, CLIENT_KNOBS->TOO_MANY));

 if (BM_DEBUG) {
-printf("Estimated bytes for [%s - %s): %lld\n",
+printf("Estimated bytes for [%s - %s): %ld\n",
 range.begin.printable().c_str(),
 range.end.printable().c_str(),
 estimated.bytes);
@@ -300,7 +300,7 @@ static UID pickWorkerForAssign(BlobManagerData* bmData) {
 ACTOR Future<Void> doRangeAssignment(BlobManagerData* bmData, RangeAssignment assignment, UID workerID, int64_t seqNo) {

 if (BM_DEBUG) {
-printf("BM %s %s range [%s - %s) @ (%lld, %lld)\n",
+printf("BM %s %s range [%s - %s) @ (%ld, %ld)\n",
 bmData->id.toString().c_str(),
 assignment.isAssign ? "assigning" : "revoking",
 assignment.keyRange.begin.printable().c_str(),
@@ -379,7 +379,7 @@ ACTOR Future<Void> doRangeAssignment(BlobManagerData* bmData, RangeAssignment as
 // FIXME: improvement would be to add history of failed workers to assignment so it can try other ones first
 } else {
 if (BM_DEBUG) {
-printf("BM got error revoking range [%s - %s) from worker %s",
+printf("BM got error revoking range [%s - %s) from worker",
 assignment.keyRange.begin.printable().c_str(),
 assignment.keyRange.end.printable().c_str());
 }
@@ -472,7 +472,7 @@ ACTOR Future<Void> checkManagerLock(Reference<ReadYourWritesTransaction> tr, Blo
 ASSERT(currentEpoch > bmData->epoch);

 if (BM_DEBUG) {
-printf("BM %s found new epoch %d > %d in lock check\n",
+printf("BM %s found new epoch %ld > %ld in lock check\n",
 bmData->id.toString().c_str(),
 currentEpoch,
 bmData->epoch);
@@ -625,7 +625,7 @@ ACTOR Future<Void> maybeSplitRange(BlobManagerData* bmData,
 std::tuple<int64_t, int64_t, UID> prevGranuleLock = decodeBlobGranuleLockValue(lockValue.get());
 if (std::get<0>(prevGranuleLock) > bmData->epoch) {
 if (BM_DEBUG) {
-printf("BM %s found a higher epoch %d than %d for granule lock of [%s - %s)\n",
+printf("BM %s found a higher epoch %ld than %ld for granule lock of [%s - %s)\n",
 bmData->id.toString().c_str(),
 std::get<0>(prevGranuleLock),
 bmData->epoch,
@@ -770,7 +770,7 @@ ACTOR Future<Void> monitorBlobWorkerStatus(BlobManagerData* bmData, BlobWorkerIn
 GranuleStatusReply rep = waitNext(statusStream.getFuture());

 if (BM_DEBUG) {
-printf("BM %lld got status of [%s - %s) @ (%lld, %lld) from BW %s: %s\n",
+printf("BM %ld got status of [%s - %s) @ (%ld, %ld) from BW %s: %s\n",
 bmData->epoch,
 rep.granuleRange.begin.printable().c_str(),
 rep.granuleRange.end.printable().c_str(),
@@ -806,14 +806,14 @@ ACTOR Future<Void> monitorBlobWorkerStatus(BlobManagerData* bmData, BlobWorkerIn
 rep.granuleRange.end == lastReqForGranule.end() && rep.epoch == lastReqForGranule.value().first &&
 rep.seqno == lastReqForGranule.value().second) {
 if (BM_DEBUG) {
-printf("Manager %lld received repeat status for the same granule [%s - %s) @ %lld, ignoring.",
+printf("Manager %ld received repeat status for the same granule [%s - %s), ignoring.",
 bmData->epoch,
 rep.granuleRange.begin.printable().c_str(),
 rep.granuleRange.end.printable().c_str());
 }
 } else {
 if (BM_DEBUG) {
-printf("Manager %lld evaluating [%s - %s) for split\n",
+printf("Manager %ld evaluating [%s - %s) for split\n",
 bmData->epoch,
 rep.granuleRange.begin.printable().c_str(),
 rep.granuleRange.end.printable().c_str());
@@ -858,7 +858,7 @@ ACTOR Future<Void> monitorBlobWorker(BlobManagerData* bmData, BlobWorkerInterfac
 choose {
 when(wait(waitFailure)) {
 if (BM_DEBUG) {
-printf("BM %lld detected BW %s is dead\n", bmData->epoch, bwInterf.id().toString().c_str());
+printf("BM %ld detected BW %s is dead\n", bmData->epoch, bwInterf.id().toString().c_str());
 }
 TraceEvent("BlobWorkerFailed", bmData->id).detail("BlobWorkerID", bwInterf.id());
 }
@@ -1115,7 +1115,7 @@ ACTOR Future<Void> blobManager(BlobManagerInterface bmInterf,
 }

 if (BM_DEBUG) {
-printf("Blob manager acquired lock at epoch %lld\n", epoch);
+printf("Blob manager acquired lock at epoch %ld\n", epoch);
 }

 // needed to pick up changes to dbinfo in case new CC comes along
@@ -193,7 +193,7 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted<BlobWorkerData> {
 bool managerEpochOk(int64_t epoch) {
 if (epoch < currentManagerEpoch) {
 if (BW_DEBUG) {
-printf("BW %s got request from old epoch %lld, notifying manager it is out of date\n",
+printf("BW %s got request from old epoch %ld, notifying manager it is out of date\n",
 id.toString().c_str(),
 epoch);
 }
@@ -202,7 +202,7 @@ struct BlobWorkerData : NonCopyable, ReferenceCounted<BlobWorkerData> {
 if (epoch > currentManagerEpoch) {
 currentManagerEpoch = epoch;
 if (BW_DEBUG) {
-printf("BW %s found new manager epoch %lld\n", id.toString().c_str(), currentManagerEpoch);
+printf("BW %s found new manager epoch %ld\n", id.toString().c_str(), currentManagerEpoch);
 }
 }

@@ -216,7 +216,7 @@ static void acquireGranuleLock(int64_t epoch, int64_t seqno, int64_t prevOwnerEp
 // returns true if our lock (E, S) >= (Eprev, Sprev)
 if (epoch < prevOwnerEpoch || (epoch == prevOwnerEpoch && seqno < prevOwnerSeqno)) {
 if (BW_DEBUG) {
-printf("Lock acquire check failed. Proposed (%lld, %lld) < previous (%lld, %lld)\n",
+printf("Lock acquire check failed. Proposed (%ld, %ld) < previous (%ld, %ld)\n",
 epoch,
 seqno,
 prevOwnerEpoch,
@@ -239,7 +239,7 @@ static void checkGranuleLock(int64_t epoch, int64_t seqno, int64_t ownerEpoch, i
 // returns true if we still own the lock, false if someone else does
 if (epoch != ownerEpoch || seqno != ownerSeqno) {
 if (BW_DEBUG) {
-printf("Lock assignment check failed. Expected (%lld, %lld), got (%lld, %lld)\n",
+printf("Lock assignment check failed. Expected (%ld, %ld), got (%ld, %ld)\n",
 epoch,
 seqno,
 ownerEpoch,
@@ -303,7 +303,7 @@ ACTOR Future<Void> readGranuleFiles(Transaction* tr, Key* startKey, Key endKey,
 }
 }
 if (BW_DEBUG) {
-printf("Loaded %d snapshot and %d delta files for %s\n",
+printf("Loaded %lu snapshot and %lu delta files for %s\n",
 files->snapshotFiles.size(),
 files->deltaFiles.size(),
 granuleID.toString().c_str());
@@ -546,7 +546,7 @@ ACTOR Future<BlobFileIndex> writeDeltaFile(Reference<BlobWorkerData> bwData,

 wait(tr->commit());
 if (BW_DEBUG) {
-printf("Granule %s [%s - %s) updated fdb with delta file %s of size %d at version %lld, cv=%lld\n",
+printf("Granule %s [%s - %s) updated fdb with delta file %s of size %d at version %ld, cv=%ld\n",
 granuleID.toString().c_str(),
 keyRange.begin.printable().c_str(),
 keyRange.end.printable().c_str(),
@@ -812,7 +812,7 @@ ACTOR Future<BlobFileIndex> compactFromBlob(Reference<BlobWorkerData> bwData,
 chunk.includedVersion = version;

 if (BW_DEBUG) {
-printf("Re-snapshotting [%s - %s) @ %lld from blob\n",
+printf("Re-snapshotting [%s - %s) @ %ld from blob\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 version);
@@ -911,7 +911,7 @@ ACTOR Future<Void> handleCompletedDeltaFile(Reference<BlobWorkerData> bwData,

 if (completedDeltaFile.version > cfStartVersion) {
 if (BW_DEBUG) {
-printf("Popping change feed %s at %lld\n", cfKey.printable().c_str(), completedDeltaFile.version);
+printf("Popping change feed %s at %ld\n", cfKey.printable().c_str(), completedDeltaFile.version);
 }
 // FIXME: for a write-hot shard, we could potentially batch these and only pop the largest one after several
 // have completed
@@ -968,7 +968,7 @@ static Version doGranuleRollback(Reference<GranuleMetadata> metadata,
 metadata->bytesInNewDeltaFiles -= df.bytes;
 toPop++;
 if (BW_DEBUG) {
-printf("[%s - %s) rollback cancelling delta file @ %lld\n",
+printf("[%s - %s) rollback cancelling delta file @ %ld\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 df.version);
@@ -1013,7 +1013,7 @@ static Version doGranuleRollback(Reference<GranuleMetadata> metadata,
 }
 mIdx++;
 if (BW_DEBUG) {
-printf("[%s - %s) rollback discarding %d in-memory mutations, %d mutations and %lld bytes left\n",
+printf("[%s - %s) rollback discarding %d in-memory mutations, %d mutations and %ld bytes left\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 metadata->currentDeltas.size() - mIdx,
@@ -1030,7 +1030,7 @@ static Version doGranuleRollback(Reference<GranuleMetadata> metadata,
 }

 if (BW_DEBUG) {
-printf("[%s - %s) finishing rollback to %lld\n",
+printf("[%s - %s) finishing rollback to %ld\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 cfRollbackVersion);
@@ -1093,8 +1093,8 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str());
 printf(" CFID: %s\n", startState.granuleID.toString().c_str());
-printf(" CF Start Version: %lld\n", startState.changeFeedStartVersion);
-printf(" Previous Durable Version: %lld\n", startState.previousDurableVersion);
+printf(" CF Start Version: %ld\n", startState.changeFeedStartVersion);
+printf(" Previous Durable Version: %ld\n", startState.previousDurableVersion);
 printf(" doSnapshot=%s\n", startState.doSnapshot ? "T" : "F");
 printf(" Prev CFID: %s\n",
 startState.parentGranule.present() ? startState.parentGranule.get().second.toString().c_str() : "");
@@ -1259,7 +1259,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 if (metadata->bufferedDeltaBytes >= SERVER_KNOBS->BG_DELTA_FILE_TARGET_BYTES &&
 deltas.version > lastVersion) {
 if (BW_DEBUG) {
-printf("Granule [%s - %s) flushing delta file after %d bytes @ %lld %lld%s\n",
+printf("Granule [%s - %s) flushing delta file after %lu bytes @ %ld %ld%s\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 metadata->bufferedDeltaBytes,
@@ -1321,7 +1321,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 if (snapshotEligible && metadata->bytesInNewDeltaFiles >= SERVER_KNOBS->BG_DELTA_BYTES_BEFORE_COMPACT &&
 !readOldChangeFeed) {
 if (BW_DEBUG && (inFlightBlobSnapshot.isValid() || !inFlightDeltaFiles.empty())) {
-printf("Granule [%s - %s) ready to re-snapshot, waiting for outstanding %d snapshot and %d "
+printf("Granule [%s - %s) ready to re-snapshot, waiting for outstanding %d snapshot and %lu "
 "deltas to "
 "finish\n",
 metadata->keyRange.begin.printable().c_str(),
@@ -1350,7 +1350,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 inFlightDeltaFiles.clear();

 if (BW_DEBUG) {
-printf("Granule [%s - %s) checking with BM for re-snapshot after %d bytes\n",
+printf("Granule [%s - %s) checking with BM for re-snapshot after %lu bytes\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 metadata->bytesInNewDeltaFiles);
@@ -1400,7 +1400,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 }

 if (BW_DEBUG) {
-printf("Granule [%s - %s) re-snapshotting after %d bytes\n",
+printf("Granule [%s - %s) re-snapshotting after %lu bytes\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 metadata->bytesInNewDeltaFiles);
@@ -1467,7 +1467,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 if (!rollbacksInProgress.empty()) {
 ASSERT(rollbacksInProgress.front().first == rollbackVersion);
 ASSERT(rollbacksInProgress.front().second == deltas.version);
-printf("Passed rollback %lld -> %lld\n", deltas.version, rollbackVersion);
+printf("Passed rollback %ld -> %ld\n", deltas.version, rollbackVersion);
 rollbacksCompleted.push_back(rollbacksInProgress.front());
 rollbacksInProgress.pop_front();
 } else {
@@ -1479,13 +1479,13 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 metadata->currentDeltas.back().version <= rollbackVersion)) {

 if (BW_DEBUG) {
-printf("BW skipping rollback %lld -> %lld completely\n",
+printf("BW skipping rollback %ld -> %ld completely\n",
 deltas.version,
 rollbackVersion);
 }
 } else {
 if (BW_DEBUG) {
-printf("BW [%s - %s) ROLLBACK @ %lld -> %lld\n",
+printf("BW [%s - %s) ROLLBACK @ %ld -> %ld\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 deltas.version,
@@ -1527,7 +1527,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 } else if (!rollbacksInProgress.empty() && rollbacksInProgress.front().first < deltas.version &&
 rollbacksInProgress.front().second > deltas.version) {
 if (BW_DEBUG) {
-printf("Skipping mutations @ %lld b/c prior rollback\n", deltas.version);
+printf("Skipping mutations @ %ld b/c prior rollback\n", deltas.version);
 }
 } else {
 for (auto& delta : deltas.mutations) {
@@ -1555,7 +1555,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
 ASSERT(startState.parentGranule.present());
 oldChangeFeedDataComplete = startState.parentGranule.get();
 if (BW_DEBUG) {
-printf("Granule [%s - %s) switching to new change feed %s @ %lld\n",
+printf("Granule [%s - %s) switching to new change feed %s @ %ld\n",
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
 startState.granuleID.toString().c_str(),
@@ -1676,7 +1676,7 @@ ACTOR Future<Void> blobGranuleLoadHistory(Reference<BlobWorkerData> bwData,
 }

 if (BW_DEBUG) {
-printf("Loaded %d history entries for granule [%s - %s) (%d skipped)\n",
+printf("Loaded %lu history entries for granule [%s - %s) (%d skipped)\n",
 historyEntryStack.size(),
 metadata->keyRange.begin.printable().c_str(),
 metadata->keyRange.end.printable().c_str(),
@@ -1855,7 +1855,7 @@ ACTOR Future<Void> handleBlobGranuleFileRequest(Reference<BlobWorkerData> bwData
 }

 if (BW_REQUEST_DEBUG) {
-printf("[%s - %s) @ %lld time traveled back to %s [%s - %s) @ [%lld - %lld)\n",
+printf("[%s - %s) @ %ld time traveled back to %s [%s - %s) @ [%ld - %ld)\n",
 req.keyRange.begin.printable().c_str(),
 req.keyRange.end.printable().c_str(),
 req.readVersion,
@@ -1894,7 +1894,7 @@ ACTOR Future<Void> handleBlobGranuleFileRequest(Reference<BlobWorkerData> bwData
 if (rollbackCount == metadata->rollbackCount.get()) {
 break;
 } else if (BW_REQUEST_DEBUG) {
-printf("[%s - %s) @ %lld hit rollback, restarting waitForVersion\n",
+printf("[%s - %s) @ %ld hit rollback, restarting waitForVersion\n",
 req.keyRange.begin.printable().c_str(),
 req.keyRange.end.printable().c_str(),
 req.readVersion);
@@ -2222,7 +2222,7 @@ ACTOR Future<bool> changeBlobRange(Reference<BlobWorkerData> bwData,
 bool disposeOnCleanup,
 bool selfReassign) {
 if (BW_DEBUG) {
-printf("%s range for [%s - %s): %s @ (%lld, %lld)\n",
+printf("%s range for [%s - %s): %s @ (%ld, %ld)\n",
 selfReassign ? "Re-assigning" : "Changing",
 keyRange.begin.printable().c_str(),
 keyRange.end.printable().c_str(),
@@ -2273,7 +2273,7 @@ ACTOR Future<bool> changeBlobRange(Reference<BlobWorkerData> bwData,
 if (r.value().activeMetadata.isValid() && thisAssignmentNewer) {
 // cancel actors for old range and clear reference
 if (BW_DEBUG) {
-printf(" [%s - %s): @ (%lld, %lld) (cancelling)\n",
+printf(" [%s - %s): @ (%ld, %ld) (cancelling)\n",
 r.begin().printable().c_str(),
 r.end().printable().c_str(),
 r.value().lastEpoch,
@@ -2298,7 +2298,7 @@ ACTOR Future<bool> changeBlobRange(Reference<BlobWorkerData> bwData,

 bwData->granuleMetadata.insert(keyRange, newMetadata);
 if (BW_DEBUG) {
-printf("Inserting new range [%s - %s): %s @ (%lld, %lld)\n",
+printf("Inserting new range [%s - %s): %s @ (%ld, %ld)\n",
 keyRange.begin.printable().c_str(),
 keyRange.end.printable().c_str(),
 newMetadata.activeMetadata.isValid() ? "T" : "F",
@@ -2308,7 +2308,7 @@ ACTOR Future<bool> changeBlobRange(Reference<BlobWorkerData> bwData,

 for (auto& it : newerRanges) {
 if (BW_DEBUG) {
-printf("Re-inserting newer range [%s - %s): %s @ (%lld, %lld)\n",
+printf("Re-inserting newer range [%s - %s): %s @ (%ld, %ld)\n",
 it.first.begin.printable().c_str(),
 it.first.end.printable().c_str(),
 it.second.activeMetadata.isValid() ? "T" : "F",
@@ -2332,8 +2332,8 @@ static bool resumeBlobRange(Reference<BlobWorkerData> bwData, KeyRange keyRange,
 !existingRange.value().activeMetadata.isValid()) {

 if (BW_DEBUG) {
-printf("BW %s got out of date resume range for [%s - %s) @ (%lld, %lld). Currently [%s - %s) @ (%lld, "
-"%lld): %s\n",
+printf("BW %s got out of date resume range for [%s - %s) @ (%ld, %ld). Currently [%s - %s) @ (%ld, "
+"%ld): %s\n",
 bwData->id.toString().c_str(),
 existingRange.begin().printable().c_str(),
 existingRange.end().printable().c_str(),
@@ -2555,7 +2555,7 @@ ACTOR Future<Void> blobWorker(BlobWorkerInterface bwInterf,
 --self->stats.numRangesAssigned;
 state AssignBlobRangeRequest assignReq = _req;
 if (BW_DEBUG) {
-printf("Worker %s assigned range [%s - %s) @ (%lld, %lld):\n continue=%s\n",
+printf("Worker %s assigned range [%s - %s) @ (%ld, %ld):\n continue=%s\n",
 self->id.toString().c_str(),
 assignReq.keyRange.begin.printable().c_str(),
 assignReq.keyRange.end.printable().c_str(),
@@ -2574,7 +2574,7 @@ ACTOR Future<Void> blobWorker(BlobWorkerInterface bwInterf,
 state RevokeBlobRangeRequest revokeReq = _req;
 --self->stats.numRangesAssigned;
 if (BW_DEBUG) {
-printf("Worker %s revoked range [%s - %s) @ (%lld, %lld):\n dispose=%s\n",
+printf("Worker %s revoked range [%s - %s) @ (%ld, %ld):\n dispose=%s\n",
 self->id.toString().c_str(),
 revokeReq.keyRange.begin.printable().c_str(),
 revokeReq.keyRange.end.printable().c_str(),
@@ -221,7 +221,9 @@ class TestConfig {
 }

 if (attrib == "configureLocked") {
-sscanf(value.c_str(), "%d", &configureLocked);
+int configureLockedInt;
+sscanf(value.c_str(), "%d", &configureLockedInt);
+configureLocked = (configureLockedInt != 0);
 }

 if (attrib == "startIncompatibleProcess") {
@@ -2301,4 +2303,4 @@ ACTOR void setupAndRun(std::string dataFolder,
 destructed = true;
 wait(Never());
 ASSERT(false);
 }
 }
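The configureLocked hunk above is the scanf-side variant of the same class of warning: "%d" requires an int*, so scanning directly into a bool is flagged (and writes through the wrong type). A small standalone sketch of the intermediate-variable pattern the diff adopts; parseFlag and its argument are illustrative, not the actual TestConfig code:

#include <cstdio>
#include <string>

bool parseFlag(const std::string& value) {
    int flagInt = 0;
    // sscanf(value.c_str(), "%d", &someBool);  // -Wformat: "%d" expects int*, not bool* (someBool is a hypothetical bool)
    sscanf(value.c_str(), "%d", &flagInt);      // scan into a real int first...
    return flagInt != 0;                        // ...then convert explicitly
}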
@@ -7929,7 +7929,7 @@ TEST_CASE("/redwood/correctness/unit/RedwoodRecordRef") {
 ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[2] == 6);
 ASSERT(RedwoodRecordRef::Delta::LengthFormatSizes[3] == 8);

-printf("sizeof(RedwoodRecordRef) = %d\n", sizeof(RedwoodRecordRef));
+printf("sizeof(RedwoodRecordRef) = %lu\n", sizeof(RedwoodRecordRef));

 // Test pageID stuff.
 {
@@ -8862,7 +8862,7 @@ TEST_CASE("/redwood/correctness/unit/deltaTree/IntIntPair") {
 pos = newPos;
 }
 double elapsed = timer() - start;
-printf("Seek/skip test, count=%d jumpMax=%d, items=%d, oldSeek=%d useHint=%d: Elapsed %f seconds %.2f M/s\n",
+printf("Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds %.2f M/s\n",
 count,
 jumpMax,
 items.size(),
@@ -8905,7 +8905,7 @@ TEST_CASE("/redwood/correctness/unit/deltaTree/IntIntPair") {
 pos = newPos;
 }
 double elapsed = timer() - start;
-printf("DeltaTree2 Seek/skip test, count=%d jumpMax=%d, items=%d, oldSeek=%d useHint=%d: Elapsed %f seconds "
+printf("DeltaTree2 Seek/skip test, count=%d jumpMax=%d, items=%lu, oldSeek=%d useHint=%d: Elapsed %f seconds "
 "%.2f M/s\n",
 count,
 jumpMax,
@@ -8983,7 +8983,7 @@ TEST_CASE(":/redwood/performance/mutationBuffer") {
 strings.push_back(randomString(arena, 5));
 }

-printf("Inserting and then finding each string...\n", count);
+printf("Inserting %d elements and then finding each string...\n", count);
 double start = timer();
 VersionedBTree::MutationBuffer m;
 for (int i = 0; i < count; ++i) {
@@ -9254,7 +9254,7 @@ TEST_CASE("/redwood/correctness/btree") {
 commit = map(btree->commit(version), [=, &ops = totalPageOps, v = version](Void) {
 // Update pager ops before clearing metrics
 ops += g_redwoodMetrics.pageOps();
-printf("Committed %s PageOps %" PRId64 "/%" PRId64 " (%.2f%%) VerificationMapEntries %d/%d (%.2f%%)\n",
+printf("Committed %s PageOps %" PRId64 "/%" PRId64 " (%.2f%%) VerificationMapEntries %lu/%d (%.2f%%)\n",
 toString(v).c_str(),
 ops,
 targetPageOps,
@@ -9508,7 +9508,7 @@ TEST_CASE(":/redwood/performance/extentQueue") {
 for (v = 1; v <= numEntries; ++v) {
 // Sometimes do a commit
 if (currentCommitSize >= targetCommitSize) {
-printf("currentCommitSize: %d, cumulativeCommitSize: %d, pageCacheCount: %d\n",
+printf("currentCommitSize: %d, cumulativeCommitSize: %ld, pageCacheCount: %ld\n",
 currentCommitSize,
 cumulativeCommitSize,
 pager->getPageCacheCount());
@@ -9531,7 +9531,7 @@ TEST_CASE(":/redwood/performance/extentQueue") {
 }
 cumulativeCommitSize += currentCommitSize;
 printf(
-"Final cumulativeCommitSize: %d, pageCacheCount: %d\n", cumulativeCommitSize, pager->getPageCacheCount());
+"Final cumulativeCommitSize: %ld, pageCacheCount: %ld\n", cumulativeCommitSize, pager->getPageCacheCount());
 wait(m_extentQueue.flush());
 extentQueueState = m_extentQueue.getState();
 printf("Commit ExtentQueue getState(): %s\n", extentQueueState.toString().c_str());
@@ -9592,7 +9592,7 @@ TEST_CASE(":/redwood/performance/extentQueue") {
 entriesRead,
 cumulativeCommitSize / elapsed / 1e6);

-printf("pageCacheCount: %d extentCacheCount: %d\n", pager->getPageCacheCount(), pager->getExtentCacheCount());
+printf("pageCacheCount: %ld extentCacheCount: %ld\n", pager->getPageCacheCount(), pager->getExtentCacheCount());

 pager->extentCacheClear();
 m_extentQueue.resetHeadReader();
@@ -9985,7 +9985,7 @@ ACTOR Future<Void> prefixClusteredInsert(IKeyValueStore* kvs,
 state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize;
 state int recordsPerPrefix = recordCountTarget / source.numPrefixes();

-printf("\nstoreType: %d\n", kvs->getType());
+printf("\nstoreType: %d\n", static_cast<int>(kvs->getType()));
 printf("commitTarget: %d\n", commitTarget);
 printf("prefixSource: %s\n", source.toString().c_str());
 printf("usePrefixesInOrder: %d\n", usePrefixesInOrder);
@@ -10074,7 +10074,7 @@ ACTOR Future<Void> sequentialInsert(IKeyValueStore* kvs, int prefixLen, int valu
 state int recordSize = source.prefixLen + sizeof(uint64_t) + valueSize;
 state int64_t kvBytesTarget = (int64_t)recordCountTarget * recordSize;

-printf("\nstoreType: %d\n", kvs->getType());
+printf("\nstoreType: %d\n", static_cast<int>(kvs->getType()));
 printf("commitTarget: %d\n", commitTarget);
 printf("valueSize: %d\n", valueSize);
 printf("recordSize: %d\n", recordSize);
@@ -10208,7 +10208,7 @@ ACTOR Future<Void> randomRangeScans(IKeyValueStore* kvs,
 int recordCountTarget,
 bool singlePrefix,
 int rowLimit) {
-printf("\nstoreType: %d\n", kvs->getType());
+printf("\nstoreType: %d\n", static_cast<int>(kvs->getType()));
 printf("prefixSource: %s\n", source.toString().c_str());
 printf("suffixSize: %d\n", suffixSize);
 printf("recordCountTarget: %d\n", recordCountTarget);
@@ -10224,7 +10224,7 @@ ACTOR Future<Void> randomRangeScans(IKeyValueStore* kvs,
 state double start = timer();
 state std::function<void()> stats = [&]() {
 double elapsed = timer() - start;
-printf("Cumulative stats: %.2f seconds %d queries %.2f MB %d records %.2f qps %.2f MB/s %.2f rec/s\r\n",
+printf("Cumulative stats: %.2f seconds %d queries %.2f MB %ld records %.2f qps %.2f MB/s %.2f rec/s\r\n",
 elapsed,
 queries,
 bytesRead / 1e6,
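The storeType lines above show a third flavor of the warning: kvs->getType() is not a plain int (KeyValueStoreType behaves like an enumeration wrapper), so handing it straight to a %d conversion is flagged, and the diff adds an explicit cast. A generic, standalone sketch of the pattern; StoreType here is illustrative:

#include <cstdio>

enum class StoreType : int { SSD_BTREE = 0, MEMORY = 1 };

int main() {
    StoreType t = StoreType::MEMORY;
    // printf("storeType: %d\n", t);                 // -Wformat: argument has enum type, %d expects int
    printf("storeType: %d\n", static_cast<int>(t));  // explicit conversion matches the specifier
    return 0;
}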
@@ -529,7 +529,7 @@ static void printOptionUsage(std::string option, std::string description) {

 std::stringstream sstream(description);
 if (sstream.eof()) {
-printf(result.c_str());
+printf("%s", result.c_str());
 return;
 }

@@ -552,7 +552,7 @@ static void printOptionUsage(std::string option, std::string description) {
 }
 result += currLine + '\n';

-printf(result.c_str());
+printf("%s", result.c_str());
 }

 static void printUsage(const char* name, bool devhelp) {
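The printOptionUsage fix above concerns the format string itself rather than the arguments: printf(result.c_str()) hands runtime text to printf as the format, so any stray '%' in an option description would be parsed as a conversion specifier (compilers flag this under the format-string checks that build on -Wformat, e.g. -Wformat-security). The safe pattern is to print the text through an explicit "%s". A minimal sketch; printMessage is illustrative:

#include <cstdio>
#include <string>

void printMessage(const std::string& result) {
    // printf(result.c_str());     // non-literal format string; "100% done" would be misparsed
    printf("%s", result.c_str());  // prints the text verbatim, no format interpretation
}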
@@ -584,7 +584,7 @@ struct P2PNetworkTest {

 self->startTime = now();

-printf("%d listeners, %d remotes, %d outgoing connections\n",
+printf("%lu listeners, %lu remotes, %d outgoing connections\n",
 self->listeners.size(),
 self->remotes.size(),
 self->connectionsOut);
@@ -422,7 +422,7 @@ void printSimulatedTopology() {
 printf("%smachineId: %s\n", indent.c_str(), p->locality.describeMachineId().c_str());
 }
 indent += " ";
-printf("%sAddress: %s\n", indent.c_str(), p->address.toString().c_str(), p->name);
+printf("%sAddress: %s\n", indent.c_str(), p->address.toString().c_str());
 indent += " ";
 printf("%sClass: %s\n", indent.c_str(), p->startingClass.toString().c_str());
 printf("%sName: %s\n", indent.c_str(), p->name);
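The printSimulatedTopology change above drops an argument (p->name) that has no matching conversion in the format string; -Wformat also reports arguments the format never consumes (the name is already printed on its own line). A minimal sketch of that diagnostic; the variables are illustrative:

#include <cstdio>

int main() {
    const char* address = "1.2.3.4:4500";
    const char* name = "worker-1";
    // printf("Address: %s\n", address, name);  // -Wformat-extra-args: data argument not used by format string
    printf("Address: %s\n", address);           // drop the unused argument
    printf("Name: %s\n", name);                 // print it with its own specifier instead
    return 0;
}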
@@ -237,7 +237,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 .detail("BlobSize", blob.first.size());

 if (BGV_DEBUG) {
-printf("\nMismatch for [%s - %s) @ %lld (%s). F(%d) B(%d):\n",
+printf("\nMismatch for [%s - %s) @ %ld (%s). F(%d) B(%d):\n",
 range.begin.printable().c_str(),
 range.end.printable().c_str(),
 v,
@@ -291,11 +291,11 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 }
 printf(" Deltas: (%d)", chunk.newDeltas.size());
 if (chunk.newDeltas.size() > 0) {
-printf(" with version [%lld - %lld]",
+printf(" with version [%ld - %ld]",
 chunk.newDeltas[0].version,
 chunk.newDeltas[chunk.newDeltas.size() - 1].version);
 }
-printf(" IncludedVersion: %lld\n", chunk.includedVersion);
+printf(" IncludedVersion: %ld\n", chunk.includedVersion);
 }
 printf("\n");
 }
@@ -416,7 +416,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 state KeyRange r = range;
 state PromiseStream<Standalone<BlobGranuleChunkRef>> chunkStream;
 if (BGV_DEBUG) {
-printf("Final availability check [%s - %s) @ %lld\n",
+printf("Final availability check [%s - %s) @ %ld\n",
 r.begin.printable().c_str(),
 r.end.printable().c_str(),
 readVersion);
@@ -435,7 +435,7 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 break;
 }
 if (BGV_DEBUG) {
-printf("BG Verifier failed final availability check for [%s - %s) @ %lld with error %s. Last "
+printf("BG Verifier failed final availability check for [%s - %s) @ %ld with error %s. Last "
 "Success=[%s - %s)\n",
 r.begin.printable().c_str(),
 r.end.printable().c_str(),
@@ -452,13 +452,13 @@ struct BlobGranuleVerifierWorkload : TestWorkload {
 printf("Blob Granule Verifier finished with:\n");
 printf(" %d successful final granule checks\n", checks);
 printf(" %d failed final granule checks\n", availabilityPassed ? 0 : 1);
-printf(" %lld mismatches\n", self->mismatches);
-printf(" %lld time travel too old\n", self->timeTravelTooOld);
-printf(" %lld errors\n", self->errors);
-printf(" %lld initial reads\n", self->initialReads);
-printf(" %lld time travel reads\n", self->timeTravelReads);
-printf(" %lld rows\n", self->rowsRead);
-printf(" %lld bytes\n", self->bytesRead);
+printf(" %ld mismatches\n", self->mismatches);
+printf(" %ld time travel too old\n", self->timeTravelTooOld);
+printf(" %ld errors\n", self->errors);
+printf(" %ld initial reads\n", self->initialReads);
+printf(" %ld time travel reads\n", self->timeTravelReads);
+printf(" %ld rows\n", self->rowsRead);
+printf(" %ld bytes\n", self->bytesRead);
 // FIXME: add above as details
 TraceEvent("BlobGranuleVerifierChecked");
 return availabilityPassed && self->mismatches == 0 && checks > 0 && self->timeTravelTooOld == 0;
@@ -299,14 +299,14 @@ struct RyowCorrectnessWorkload : ApiWorkload {
 printable(op.beginKey).c_str(),
 printable(op.endKey).c_str(),
 op.limit,
-op.reverse);
+static_cast<bool>(op.reverse));
 break;
 case Operation::GET_RANGE_SELECTOR:
 printf("Operation GET_RANGE_SELECTOR failed: begin = %s, end = %s, limit = %d, reverse = %d\n",
 op.beginSelector.toString().c_str(),
 op.endSelector.toString().c_str(),
 op.limit,
-op.reverse);
+static_cast<bool>(op.reverse));
 break;
 case Operation::GET_KEY:
 printf("Operation GET_KEY failed: selector = %s\n", op.beginSelector.toString().c_str());
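The RyowCorrectness change above is the same idea for a strongly typed flag: op.reverse is presumably not a plain bool or int, so it does not match the %d conversion until it is converted explicitly; once cast to bool it goes through the usual integer promotion. A generic standalone sketch, with Reverse as an illustrative stand-in for such a wrapper type:

#include <cstdio>

// Illustrative stand-in for a strongly typed boolean parameter.
struct Reverse {
    bool value;
    explicit operator bool() const { return value; }
};

int main() {
    Reverse reverse{ true };
    // printf("reverse = %d\n", reverse);                  // flagged: class-type argument does not match %d
    printf("reverse = %d\n", static_cast<bool>(reverse));  // bool promotes to int, matching %d
    return 0;
}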