Merge remote-tracking branch 'upstream/main' into add_intel_compiler_support

Andrei Gorneanu 2022-07-07 17:30:09 +02:00
commit 3a0444b569
599 changed files with 7851 additions and 6173 deletions

.git-blame-ignore-revs (new file, +4 lines)
View File

@ -0,0 +1,4 @@
# clang-format the entire codebase
df90cc89de67ea4748c8cadd18e6fc4ce7fda12e
2c788c233db56ccec4ed90d7da31887487b9f3b7
69508b980f3cc5aabea6322f292e53b07bb27544

View File

@ -105,15 +105,12 @@ set(FDB_PACKAGE_NAME "${FDB_MAJOR}.${FDB_MINOR}")
configure_file(${CMAKE_SOURCE_DIR}/versions.target.cmake ${CMAKE_CURRENT_BINARY_DIR}/versions.target)
file(WRITE ${CMAKE_BINARY_DIR}/version.txt ${FDB_VERSION})
message(STATUS "FDB version is ${FDB_VERSION}")
message(STATUS "FDB package name is ${FDB_PACKAGE_NAME}")
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/versions.h.cmake ${CMAKE_CURRENT_BINARY_DIR}/fdbclient/versions.h)
################################################################################
# Flow
################################################################################
include(utils)
# Flow and other tools are written in C# - so we need that dependency
include(EnableCsharp)
@ -203,12 +200,6 @@ if(CMAKE_SYSTEM_NAME STREQUAL "FreeBSD")
add_link_options(-lexecinfo)
endif()
################################################################################
# Build information
################################################################################
configure_file(${CMAKE_CURRENT_SOURCE_DIR}/fdbclient/BuildFlags.h.in ${CMAKE_CURRENT_BINARY_DIR}/fdbclient/BuildFlags.h)
################################################################################
# process compile commands for IDE
################################################################################

View File

@ -288,7 +288,7 @@ class TestRunner(object):
tr = self.db.create_transaction()
try:
tr.options.set_special_key_space_enable_writes()
del tr[b'\xff\xff/management/tenant_map/' : b'\xff\xff/management/tenant_map0']
del tr[b'\xff\xff/management/tenant/map/' : b'\xff\xff/management/tenant/map0']
tr.commit().wait()
break
except fdb.FDBError as e:

View File

@ -119,6 +119,7 @@ if(NOT WIN32)
set(API_TESTER_SRCS
test/apitester/fdb_c_api_tester.cpp
test/apitester/TesterAtomicOpsCorrectnessWorkload.cpp
test/apitester/TesterApiWorkload.cpp
test/apitester/TesterApiWorkload.h
test/apitester/TesterTestSpec.cpp
@ -137,7 +138,6 @@ if(NOT WIN32)
test/apitester/TesterUtil.h
test/apitester/TesterWorkload.cpp
test/apitester/TesterWorkload.h
../../flow/SimpleOpt.h
)
if(OPEN_FOR_IDE)
@ -180,7 +180,7 @@ if(NOT WIN32)
add_dependencies(disconnected_timeout_unit_tests doctest)
target_include_directories(fdb_c_setup_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
target_include_directories(fdb_c_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR})
target_include_directories(fdb_c_unit_tests_version_510 PUBLIC ${DOCTEST_INCLUDE_DIR} ${CMAKE_BINARY_DIR}/flow/include)
target_include_directories(disconnected_timeout_unit_tests PUBLIC ${DOCTEST_INCLUDE_DIR})
target_link_libraries(fdb_c_setup_tests PRIVATE fdb_c Threads::Threads)
target_link_libraries(fdb_c_unit_tests PRIVATE fdb_c Threads::Threads fdbclient rapidjson)
@ -188,18 +188,20 @@ if(NOT WIN32)
target_link_libraries(trace_partial_file_suffix_test PRIVATE fdb_c Threads::Threads flow)
target_link_libraries(disconnected_timeout_unit_tests PRIVATE fdb_c Threads::Threads)
if(USE_SANITIZER)
target_link_libraries(fdb_c_api_tester PRIVATE fdb_c fdb_cpp toml11_target Threads::Threads fmt::fmt boost_asan)
else()
target_link_libraries(fdb_c_api_tester PRIVATE fdb_c fdb_cpp toml11_target Threads::Threads fmt::fmt boost_target)
endif()
if(USE_SANITIZER)
target_link_libraries(fdb_c_api_tester PRIVATE fdb_c fdb_cpp toml11_target Threads::Threads fmt::fmt boost_asan)
else()
target_link_libraries(fdb_c_api_tester PRIVATE fdb_c fdb_cpp toml11_target Threads::Threads fmt::fmt boost_target)
endif()
target_include_directories(fdb_c_api_tester PRIVATE "${CMAKE_SOURCE_DIR}/flow/include" "${CMAKE_BINARY_DIR}/flow/include")
target_link_libraries(fdb_c_api_tester PRIVATE SimpleOpt)
# do not set RPATH for mako
set_property(TARGET mako PROPERTY SKIP_BUILD_RPATH TRUE)
if (USE_SANITIZER)
target_link_libraries(mako PRIVATE fdb_c fdbclient fmt::fmt Threads::Threads fdb_cpp boost_asan)
target_link_libraries(mako PRIVATE fdb_c fdbclient fmt::fmt Threads::Threads fdb_cpp boost_asan rapidjson)
else()
target_link_libraries(mako PRIVATE fdb_c fdbclient fmt::fmt Threads::Threads fdb_cpp boost_target)
target_link_libraries(mako PRIVATE fdb_c fdbclient fmt::fmt Threads::Threads fdb_cpp boost_target rapidjson)
endif()
if(NOT OPEN_FOR_IDE)

View File

@ -382,6 +382,12 @@ extern "C" DLLEXPORT fdb_error_t fdb_create_database(const char* cluster_file_pa
return fdb_create_database_impl(cluster_file_path, out_database);
}
extern "C" DLLEXPORT fdb_error_t fdb_create_database_from_connection_string(const char* connection_string,
FDBDatabase** out_database) {
CATCH_AND_RETURN(*out_database =
(FDBDatabase*)API->createDatabaseFromConnectionString(connection_string).extractPtr(););
}
extern "C" DLLEXPORT fdb_error_t fdb_database_set_option(FDBDatabase* d,
FDBDatabaseOption option,
uint8_t const* value,

View File

@ -46,6 +46,9 @@ DLLEXPORT void fdb_database_set_shared_state(FDBDatabase* db, DatabaseSharedStat
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_future_get_shared_state(FDBFuture* f, DatabaseSharedState** outPtr);
DLLEXPORT WARN_UNUSED_RESULT fdb_error_t fdb_create_database_from_connection_string(const char* connection_string,
FDBDatabase** out_database);
#ifdef __cplusplus
}
#endif
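The new `fdb_create_database_from_connection_string` entry point mirrors `fdb_create_database`, but takes the connection string itself rather than a cluster-file path. A minimal usage sketch follows; the API version, connection-string value, and the assumption that the network thread is already set up and running are illustrative and not part of this diff.

#define FDB_API_VERSION 720        // assumption: version exposing this entry point
#include <foundationdb/fdb_c.h>
#include <cstdio>

// Sketch only: assumes fdb_setup_network() was called and fdb_run_network()
// is running on another thread before this function is used.
int openDatabaseFromConnectionString(FDBDatabase** out) {
    // "desc:id@127.0.0.1:4500" is a placeholder connection string.
    fdb_error_t err = fdb_create_database_from_connection_string("desc:id@127.0.0.1:4500", out);
    if (err) {
        std::fprintf(stderr, "fdb_create_database_from_connection_string: %s\n", fdb_get_error(err));
        return -1;
    }
    return 0;
}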

View File

@ -0,0 +1,330 @@
/*
* TesterAtomicOpsCorrectnessWorkload.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "TesterApiWorkload.h"
#include "TesterUtil.h"
#include "fdb_c_options.g.h"
#include "fmt/core.h"
#include "test/fdb_api.hpp"
#include <cctype>
#include <memory>
#include <fmt/format.h>
namespace FdbApiTester {
using fdb::Key;
using fdb::Value;
using fdb::ValueRef;
class AtomicOpsCorrectnessWorkload : public ApiWorkload {
public:
AtomicOpsCorrectnessWorkload(const WorkloadConfig& config) : ApiWorkload(config) {}
private:
typedef std::function<uint64_t(uint64_t, uint64_t)> IntAtomicOpFunction;
typedef std::function<Value(ValueRef, ValueRef)> AtomicOpFunction;
enum OpType {
OP_ATOMIC_ADD,
OP_ATOMIC_BIT_AND,
OP_ATOMIC_BIT_OR,
OP_ATOMIC_BIT_XOR,
OP_ATOMIC_APPEND_IF_FITS,
OP_ATOMIC_MAX,
OP_ATOMIC_MIN,
OP_ATOMIC_VERSIONSTAMPED_KEY,
OP_ATOMIC_VERSIONSTAMPED_VALUE,
OP_ATOMIC_BYTE_MIN,
OP_ATOMIC_BYTE_MAX,
OP_ATOMIC_COMPARE_AND_CLEAR,
OP_LAST = OP_ATOMIC_COMPARE_AND_CLEAR
};
void randomOperation(TTaskFct cont) override {
OpType txType = (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
case OP_ATOMIC_ADD:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_ADD, [](uint64_t val1, uint64_t val2) { return val1 + val2; }, cont);
break;
case OP_ATOMIC_BIT_AND:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_BIT_AND,
[](uint64_t val1, uint64_t val2) { return val1 & val2; },
cont);
break;
case OP_ATOMIC_BIT_OR:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_BIT_OR,
[](uint64_t val1, uint64_t val2) { return val1 | val2; },
cont);
break;
case OP_ATOMIC_BIT_XOR:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_BIT_XOR,
[](uint64_t val1, uint64_t val2) { return val1 ^ val2; },
cont);
break;
case OP_ATOMIC_APPEND_IF_FITS: {
Value val1 = randomValue();
Value val2 = randomValue();
testAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_APPEND_IF_FITS,
val1,
val2,
[](ValueRef val1, ValueRef val2) { return Value(val1) + Value(val2); },
cont);
break;
}
case OP_ATOMIC_MAX:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_MAX,
[](uint64_t val1, uint64_t val2) { return std::max(val1, val2); },
cont);
break;
case OP_ATOMIC_MIN:
testIntAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_MIN,
[](uint64_t val1, uint64_t val2) { return std::min(val1, val2); },
cont);
break;
case OP_ATOMIC_VERSIONSTAMPED_KEY:
testAtomicVersionstampedKeyOp(cont);
break;
case OP_ATOMIC_VERSIONSTAMPED_VALUE:
testAtomicVersionstampedValueOp(cont);
break;
case OP_ATOMIC_BYTE_MIN: {
Value val1 = randomValue();
Value val2 = randomValue();
testAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_BYTE_MIN,
val1,
val2,
[](ValueRef val1, ValueRef val2) { return Value(std::min(val1, val2)); },
cont);
break;
}
case OP_ATOMIC_BYTE_MAX: {
Value val1 = randomValue();
Value val2 = randomValue();
testAtomicOp(
FDBMutationType::FDB_MUTATION_TYPE_BYTE_MAX,
val1,
val2,
[](ValueRef val1, ValueRef val2) { return Value(std::max(val1, val2)); },
cont);
break;
}
case OP_ATOMIC_COMPARE_AND_CLEAR:
testAtomicCompareAndClearOp(cont);
break;
}
}
void testIntAtomicOp(FDBMutationType opType, IntAtomicOpFunction opFunc, TTaskFct cont) {
uint64_t intValue1 = Random::get().randomInt(0, 10000000);
uint64_t intValue2 = Random::get().randomInt(0, 10000000);
Value val1 = toByteString(intValue1);
Value val2 = toByteString(intValue2);
testAtomicOp(
opType,
val1,
val2,
[opFunc](ValueRef val1, ValueRef val2) {
return toByteString(opFunc(toInteger<uint64_t>(val1), toInteger<uint64_t>(val2)));
},
cont);
}
void testAtomicOp(FDBMutationType opType, Value val1, Value val2, AtomicOpFunction opFunc, TTaskFct cont) {
Key key(randomKeyName());
execTransaction(
// 1. Set the key to val1
[key, val1](auto ctx) {
ctx->tx().set(key, val1);
ctx->commit();
},
[this, opType, opFunc, key, val1, val2, cont]() {
execTransaction(
// 2. Perform the given atomic operation to val2, but only if it hasn't been applied yet, otherwise
// retries of commit_unknown_result would cause the operation to be applied multiple times, see
// https://github.com/apple/foundationdb/issues/1321.
[key, opType, val1, val2](auto ctx) {
auto f = ctx->tx().get(key, false);
ctx->continueAfter(f, [ctx, f, opType, key, val1, val2]() {
auto outputVal = f.get();
ASSERT(outputVal.has_value());
if (outputVal.value() == val1) {
ctx->tx().atomicOp(key, val2, opType);
ctx->commit();
} else {
ctx->done();
}
});
},
[this, opFunc, key, val1, val2, cont]() {
auto result = std::make_shared<Value>();
execTransaction(
// 3. Fetch the final value.
[key, result](auto ctx) {
auto f = ctx->tx().get(key, false);
ctx->continueAfter(
f,
[ctx, f, result]() {
auto outputVal = f.get();
ASSERT(outputVal.has_value());
*result = outputVal.value();
ctx->done();
},
true);
},
[this, opFunc, key, val1, val2, result, cont]() {
// 4. Assert expectation.
auto expected = opFunc(val1, val2);
if (*result != expected) {
error(fmt::format("testAtomicOp expected: {} actual: {}",
fdb::toCharsRef(expected),
fdb::toCharsRef(*result)));
ASSERT(false);
}
schedule(cont);
});
});
});
}
void testAtomicVersionstampedKeyOp(TTaskFct cont) {
Key keyPrefix(randomKeyName());
Key key = keyPrefix + fdb::ByteString(10, '\0') + toByteString((uint32_t)keyPrefix.size());
Value val = randomValue();
auto versionstamp_f = std::make_shared<fdb::TypedFuture<fdb::future_var::KeyRef>>();
execTransaction(
// 1. Perform SetVersionstampedKey operation.
[key, val, versionstamp_f](auto ctx) {
ctx->tx().atomicOp(key, val, FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_KEY);
*versionstamp_f = ctx->tx().getVersionstamp();
ctx->commit();
},
[this, keyPrefix, val, versionstamp_f, cont]() {
ASSERT(versionstamp_f->ready());
auto resultKey = keyPrefix + Key(versionstamp_f->get());
auto resultVal = std::make_shared<Value>();
execTransaction(
// 2. Fetch the resulting versionstamped key and value.
[keyPrefix, resultKey, resultVal](auto ctx) {
auto fv = ctx->tx().get(resultKey, false);
ctx->continueAfter(fv, [ctx, fv, resultVal]() {
auto outputVal = fv.get();
ASSERT(outputVal.has_value());
*resultVal = outputVal.value();
ctx->done();
});
},
[this, keyPrefix, val, resultVal, cont]() {
// 3. Assert expectation.
ASSERT(*resultVal == val);
schedule(cont);
});
});
}
void testAtomicVersionstampedValueOp(TTaskFct cont) {
Key key(randomKeyName());
Value valPrefix = randomValue();
Value val = valPrefix + fdb::ByteString(10, '\0') + toByteString((uint32_t)valPrefix.size());
auto versionstamp_f = std::make_shared<fdb::TypedFuture<fdb::future_var::KeyRef>>();
execTransaction(
// 1. Perform SetVersionstampedValue operation.
[key, val, versionstamp_f](auto ctx) {
ctx->tx().atomicOp(key, val, FDBMutationType::FDB_MUTATION_TYPE_SET_VERSIONSTAMPED_VALUE);
*versionstamp_f = ctx->tx().getVersionstamp();
ctx->commit();
},
[this, key, valPrefix, versionstamp_f, cont]() {
versionstamp_f->blockUntilReady();
auto versionstamp = Key(versionstamp_f->get());
auto result = std::make_shared<Value>();
execTransaction(
// 2. Fetch the resulting versionstamped value.
[key, result](auto ctx) {
auto f = ctx->tx().get(key, false);
ctx->continueAfter(
f,
[ctx, f, result]() {
auto outputVal = f.get();
ASSERT(outputVal.has_value());
*result = outputVal.value();
ctx->done();
},
true);
},
[this, key, valPrefix, result, versionstamp, cont]() {
// 3. Assert expectation.
ASSERT(*result == valPrefix + versionstamp);
schedule(cont);
});
});
}
void testAtomicCompareAndClearOp(TTaskFct cont) {
Key key(randomKeyName());
Value val = randomValue();
execTransaction(
// 1. Set the key to initial value
[key, val](auto ctx) {
ctx->tx().set(key, val);
ctx->commit();
},
[this, key, val, cont]() {
execTransaction(
// 2. Perform CompareAndClear operation.
[key, val](auto ctx) {
ctx->tx().atomicOp(key, val, FDBMutationType::FDB_MUTATION_TYPE_COMPARE_AND_CLEAR);
ctx->commit();
},
[this, key, cont]() {
execTransaction(
// 3. Verify that the key was cleared.
[key](auto ctx) {
auto f = ctx->tx().get(key, false);
ctx->continueAfter(
f,
[ctx, f]() {
auto outputVal = f.get();
ASSERT(!outputVal.has_value());
ctx->done();
},
true);
},
[this, cont]() { schedule(cont); });
});
});
}
};
WorkloadFactory<AtomicOpsCorrectnessWorkload> AtomicOpsCorrectnessWorkloadFactory("AtomicOpsCorrectness");
} // namespace FdbApiTester
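For reference, `testAtomicVersionstampedKeyOp` above builds its key as prefix + 10 zero bytes + a 4-byte little-endian offset equal to the prefix length; per the atomic-op contract this code relies on, the server reads the trailing 4 bytes as the position at which to splice the 10-byte commit versionstamp and then strips them from the key (the value variant uses the same trailing-offset layout). A standalone sketch of that layout, independent of the tester classes and assuming a little-endian host as the workload does:

#include <cstdint>
#include <cstring>
#include <string>

// Mirrors the key construction in testAtomicVersionstampedKeyOp:
// <prefix> + <10-byte placeholder> + <4-byte little-endian offset of the placeholder>.
std::string makeVersionstampedKey(const std::string& prefix) {
    std::string key = prefix + std::string(10, '\0');
    uint32_t offset = static_cast<uint32_t>(prefix.size()); // where the versionstamp is written
    char offsetBytes[4];
    std::memcpy(offsetBytes, &offset, sizeof(offset));      // little-endian on supported platforms
    return key + std::string(offsetBytes, sizeof(offsetBytes));
}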

View File

@ -30,7 +30,16 @@ public:
ApiCorrectnessWorkload(const WorkloadConfig& config) : ApiWorkload(config) {}
private:
enum OpType { OP_INSERT, OP_GET, OP_CLEAR, OP_CLEAR_RANGE, OP_COMMIT_READ, OP_LAST = OP_COMMIT_READ };
enum OpType {
OP_INSERT,
OP_GET,
OP_GET_KEY,
OP_CLEAR,
OP_GET_RANGE,
OP_CLEAR_RANGE,
OP_COMMIT_READ,
OP_LAST = OP_COMMIT_READ
};
void randomCommitReadOp(TTaskFct cont) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
@ -125,6 +134,135 @@ private:
});
}
void randomGetKeyOp(TTaskFct cont) {
int numKeys = Random::get().randomInt(1, maxKeysPerTransaction);
auto keysWithSelectors = std::make_shared<std::vector<std::pair<fdb::Key, fdb::KeySelector>>>();
auto results = std::make_shared<std::vector<fdb::Key>>();
keysWithSelectors->reserve(numKeys);
for (int i = 0; i < numKeys; i++) {
auto key = randomKey(readExistingKeysRatio);
fdb::KeySelector selector;
selector.keyLength = key.size();
selector.orEqual = Random::get().randomBool(0.5);
selector.offset = Random::get().randomInt(0, 4);
keysWithSelectors->emplace_back(std::move(key), std::move(selector));
// We would ideally do the following above:
// selector.key = key.data();
// but key.data() may become invalid after the key is moved to the vector.
// So instead, we update the pointer here to the string already in the vector.
keysWithSelectors->back().second.key = keysWithSelectors->back().first.data();
}
execTransaction(
[keysWithSelectors, results](auto ctx) {
auto futures = std::make_shared<std::vector<fdb::Future>>();
for (const auto& keyWithSelector : *keysWithSelectors) {
auto key = keyWithSelector.first;
auto selector = keyWithSelector.second;
futures->push_back(ctx->tx().getKey(selector, false));
}
ctx->continueAfterAll(*futures, [ctx, futures, results]() {
results->clear();
for (auto& f : *futures) {
results->push_back(fdb::Key(f.get<fdb::future_var::KeyRef>()));
}
ASSERT(results->size() == futures->size());
ctx->done();
});
},
[this, keysWithSelectors, results, cont]() {
ASSERT(results->size() == keysWithSelectors->size());
for (int i = 0; i < keysWithSelectors->size(); i++) {
auto const& key = (*keysWithSelectors)[i].first;
auto const& selector = (*keysWithSelectors)[i].second;
auto expected = store.getKey(key, selector.orEqual, selector.offset);
auto actual = (*results)[i];
// Local store only contains data for the current client, while fdb contains data from multiple
// clients. If getKey returned a key outside of the range for the current client, adjust the result
// to match what would be expected in the local store.
if (actual.substr(0, keyPrefix.size()) < keyPrefix) {
actual = store.startKey();
} else if ((*results)[i].substr(0, keyPrefix.size()) > keyPrefix) {
actual = store.endKey();
}
if (actual != expected) {
error(fmt::format("randomGetKeyOp mismatch. key: {}, orEqual: {}, offset: {}, expected: {} "
"actual: {}",
fdb::toCharsRef(key),
selector.orEqual,
selector.offset,
fdb::toCharsRef(expected),
fdb::toCharsRef(actual)));
}
}
schedule(cont);
});
}
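One detail worth noting in `randomGetKeyOp` above: each selector stores a raw `key` pointer, so it must point at the string that actually lives inside the vector, and the up-front `reserve(numKeys)` is what keeps those pointers stable while further pairs are appended. A reduced sketch of the pattern; the `Selector` struct below is an illustrative stand-in for `fdb::KeySelector`:

#include <string>
#include <utility>
#include <vector>

struct Selector {             // stand-in for fdb::KeySelector
    const char* key = nullptr;
    int keyLength = 0;
};

int main() {
    std::vector<std::pair<std::string, Selector>> pairs;
    pairs.reserve(3); // no reallocation below, so data() pointers stay valid
    for (int i = 0; i < 3; ++i) {
        std::string k = "key" + std::to_string(i);
        Selector sel;
        sel.keyLength = static_cast<int>(k.size());
        pairs.emplace_back(std::move(k), sel);
        // Point at the copy that now lives in the vector, not at the moved-from local.
        pairs.back().second.key = pairs.back().first.data();
    }
}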
void getRangeLoop(std::shared_ptr<ITransactionContext> ctx,
fdb::KeySelector begin,
fdb::KeySelector end,
std::shared_ptr<std::vector<fdb::KeyValue>> results) {
auto f = ctx->tx().getRange(begin,
end,
0 /*limit*/,
0 /*target_bytes*/,
FDB_STREAMING_MODE_WANT_ALL,
0 /*iteration*/,
false /*snapshot*/,
false /*reverse*/);
ctx->continueAfter(f, [this, ctx, f, end, results]() {
auto out = copyKeyValueArray(f.get());
results->insert(results->end(), out.first.begin(), out.first.end());
const bool more = out.second;
if (more) {
// Fetch the remaining results.
getRangeLoop(ctx, fdb::key_select::firstGreaterThan(results->back().key), end, results);
} else {
ctx->done();
}
});
}
void randomGetRangeOp(TTaskFct cont) {
auto begin = randomKey(readExistingKeysRatio);
auto end = randomKey(readExistingKeysRatio);
auto results = std::make_shared<std::vector<fdb::KeyValue>>();
execTransaction(
[this, begin, end, results](auto ctx) {
// Clear the results vector, in case the transaction is retried.
results->clear();
getRangeLoop(ctx,
fdb::key_select::firstGreaterOrEqual(begin),
fdb::key_select::firstGreaterOrEqual(end),
results);
},
[this, begin, end, results, cont]() {
auto expected = store.getRange(begin, end, results->size() + 10, false);
if (results->size() != expected.size()) {
error(fmt::format("randomGetRangeOp mismatch. expected {} keys, actual {} keys",
expected.size(),
results->size()));
} else {
auto expected_kv = expected.begin();
for (auto actual_kv : *results) {
if (actual_kv.key != expected_kv->key || actual_kv.value != expected_kv->value) {
error(fmt::format(
"randomGetRangeOp mismatch. expected key: {} actual key: {} expected value: "
"{:.80} actual value: {:.80}",
fdb::toCharsRef(expected_kv->key),
fdb::toCharsRef(actual_kv.key),
fdb::toCharsRef(expected_kv->value),
fdb::toCharsRef(actual_kv.value)));
}
expected_kv++;
}
}
schedule(cont);
});
}
void randomOperation(TTaskFct cont) {
OpType txType = (store.size() == 0) ? OP_INSERT : (OpType)Random::get().randomInt(0, OP_LAST);
switch (txType) {
@ -134,9 +272,15 @@ private:
case OP_GET:
randomGetOp(cont);
break;
case OP_GET_KEY:
randomGetKeyOp(cont);
break;
case OP_CLEAR:
randomClearOp(cont);
break;
case OP_GET_RANGE:
randomGetRangeOp(cont);
break;
case OP_CLEAR_RANGE:
randomClearRangeOp(cont);
break;

View File

@ -120,6 +120,25 @@ KeyValueArray copyKeyValueArray(fdb::future_var::KeyValueRefArray::Type array);
using KeyRangeArray = std::vector<fdb::KeyRange>;
KeyRangeArray copyKeyRangeArray(fdb::future_var::KeyRangeRefArray::Type array);
static_assert(__BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__, "Do not support non-little-endian systems");
// Converts a little-endian encoded number into an integral type.
template <class T, typename = std::enable_if_t<std::is_integral<T>::value>>
static T toInteger(fdb::BytesRef value) {
ASSERT(value.size() == sizeof(T));
T output;
memcpy(&output, value.data(), value.size());
return output;
}
// Converts an integral type to a little-endian encoded byte string.
template <class T, typename = std::enable_if_t<std::is_integral<T>::value>>
static fdb::ByteString toByteString(T value) {
fdb::ByteString output(sizeof(T), 0);
memcpy(output.data(), (const uint8_t*)&value, sizeof(value));
return output;
}
} // namespace FdbApiTester
#endif
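The `toByteString`/`toInteger` helpers above simply memcpy the integer's native byte representation (little-endian, per the static_assert) into and out of a byte string. A standalone round-trip sketch of the same idea using `std::string` and C++17:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

// Little-endian encode/decode, mirroring toByteString/toInteger in TesterUtil.h.
static std::string encodeLE(uint64_t value) {
    std::string out(sizeof(value), '\0');
    std::memcpy(out.data(), &value, sizeof(value));
    return out;
}

static uint64_t decodeLE(const std::string& bytes) {
    assert(bytes.size() == sizeof(uint64_t));
    uint64_t out;
    std::memcpy(&out, bytes.data(), sizeof(out));
    return out;
}

int main() {
    uint64_t v = 0x0102030405060708ULL;
    std::string s = encodeLE(v);
    assert(s[0] == '\x08');   // least-significant byte first on little-endian hosts
    assert(decodeLE(s) == v); // round trip
}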

View File

@ -24,7 +24,7 @@
#include "TesterTransactionExecutor.h"
#include "TesterTestSpec.h"
#include "TesterUtil.h"
#include "flow/SimpleOpt.h"
#include "SimpleOpt/SimpleOpt.h"
#include "test/fdb_api.hpp"
#include <memory>

View File

@ -12,14 +12,18 @@ maxClientThreads = 8
minClients = 2
maxClients = 8
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100

View File

@ -15,10 +15,15 @@ maxClients = 8
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100

View File

@ -15,10 +15,15 @@ maxClients = 8
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100

View File

@ -14,10 +14,15 @@ maxClients = 8
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100

View File

@ -7,10 +7,15 @@ multiThreaded = false
[[test.workload]]
name = 'ApiCorrectness'
minKeyLength = 1
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
maxKeyLength = 64
minValueLength = 1
maxValueLength = 1000
maxKeysPerTransaction = 50
initialSize = 100
numRandomOperations = 100
readExistingKeysRatio = 0.9
[[test.workload]]
name = 'AtomicOpsCorrectness'
initialSize = 0
numRandomOperations = 100

View File

@ -91,6 +91,21 @@ inline int intSize(BytesRef b) {
return static_cast<int>(b.size());
}
template <template <class...> class StringLike, class Char>
ByteString strinc(const StringLike<Char>& s) {
int index;
for (index = s.size() - 1; index >= 0; index--)
if (s[index] != 255)
break;
// Must not be called with a string that consists only of zero or more '\xff' bytes.
assert(index >= 0);
ByteString byteResult(s.substr(0, index + 1));
byteResult[byteResult.size() - 1]++;
return byteResult;
}
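`strinc` above computes the shortest byte string that sorts after every key beginning with `s`: trailing 0xff bytes are dropped and the last remaining byte is incremented. A quick standalone illustration of the same logic with `std::string`; the example inputs are arbitrary:

#include <cassert>
#include <string>

// Same idea as fdb::strinc: strip trailing 0xff bytes, then increment the last byte.
static std::string strincExample(std::string s) {
    int index = static_cast<int>(s.size()) - 1;
    while (index >= 0 && static_cast<unsigned char>(s[index]) == 0xff)
        --index;
    assert(index >= 0); // not defined for strings that are empty or all 0xff
    std::string result = s.substr(0, index + 1);
    result.back() = static_cast<char>(static_cast<unsigned char>(result.back()) + 1);
    return result;
}

int main() {
    assert(strincExample("abc") == "abd");
    assert(strincExample("a\xff\xff") == "b"); // trailing 0xff bytes are discarded first
}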
class Error {
public:
using CodeType = native::fdb_error_t;
@ -197,16 +212,6 @@ inline int maxApiVersion() {
return native::fdb_get_max_api_version();
}
inline Error selectApiVersionNothrow(int version) {
return Error(native::fdb_select_api_version(version));
}
inline void selectApiVersion(int version) {
if (auto err = selectApiVersionNothrow(version)) {
throwError(fmt::format("ERROR: fdb_select_api_version({}): ", version), err);
}
}
namespace network {
inline Error setOptionNothrow(FDBNetworkOption option, BytesRef str) noexcept {
@ -397,6 +402,7 @@ template <typename VarTraits>
class TypedFuture : public Future {
friend class Future;
friend class Transaction;
friend class Tenant;
using SelfType = TypedFuture<VarTraits>;
using Future::Future;
// hide type-unsafe inherited functions
@ -516,6 +522,8 @@ public:
return out;
}
TypedFuture<future_var::KeyRef> getVersionstamp() { return native::fdb_transaction_get_versionstamp(tr.get()); }
TypedFuture<future_var::KeyRef> getKey(KeySelector sel, bool snapshot) {
return native::fdb_transaction_get_key(tr.get(), sel.key, sel.keyLength, sel.orEqual, sel.offset, snapshot);
}
@ -577,6 +585,11 @@ public:
native::fdb_transaction_set(tr.get(), key.data(), intSize(key), value.data(), intSize(value));
}
void atomicOp(KeyRef key, ValueRef param, FDBMutationType operationType) {
native::fdb_transaction_atomic_op(
tr.get(), key.data(), intSize(key), param.data(), intSize(param), operationType);
}
void clear(KeyRef key) { native::fdb_transaction_clear(tr.get(), key.data(), intSize(key)); }
void clearRange(KeyRef begin, KeyRef end) {
@ -588,14 +601,15 @@ class Tenant final {
friend class Database;
std::shared_ptr<native::FDBTenant> tenant;
static constexpr CharsRef tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
explicit Tenant(native::FDBTenant* tenant_raw) {
if (tenant_raw)
tenant = std::shared_ptr<native::FDBTenant>(tenant_raw, &native::fdb_tenant_destroy);
}
public:
// This should only be mutated by API versioning
static inline CharsRef tenantManagementMapPrefix = "\xff\xff/management/tenant/map/";
Tenant(const Tenant&) noexcept = default;
Tenant& operator=(const Tenant&) noexcept = default;
Tenant() noexcept : tenant(nullptr) {}
@ -613,6 +627,13 @@ public:
tr.clear(toBytesRef(fmt::format("{}{}", tenantManagementMapPrefix, toCharsRef(name))));
}
static TypedFuture<future_var::ValueRef> getTenant(Transaction tr, BytesRef name) {
tr.setOption(FDBTransactionOption::FDB_TR_OPTION_READ_SYSTEM_KEYS, BytesRef());
tr.setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE, BytesRef());
tr.setOption(FDBTransactionOption::FDB_TR_OPTION_RAW_ACCESS, BytesRef());
return tr.get(toBytesRef(fmt::format("{}{}", tenantManagementMapPrefix, toCharsRef(name))), false);
}
Transaction createTransaction() {
auto tx_native = static_cast<native::FDBTransaction*>(nullptr);
auto err = Error(native::fdb_tenant_create_transaction(tenant.get(), &tx_native));
@ -684,6 +705,19 @@ public:
}
};
inline Error selectApiVersionNothrow(int version) {
if (version < 720) {
Tenant::tenantManagementMapPrefix = "\xff\xff/management/tenant_map/";
}
return Error(native::fdb_select_api_version(version));
}
inline void selectApiVersion(int version) {
if (auto err = selectApiVersionNothrow(version)) {
throwError(fmt::format("ERROR: fdb_select_api_version({}): ", version), err);
}
}
} // namespace fdb
template <>

View File

@ -66,7 +66,8 @@ namespace mako {
struct alignas(64) ThreadArgs {
int worker_id;
int thread_id;
int tenants;
int active_tenants;
int total_tenants;
pid_t parent_id;
Arguments const* args;
shared_memory::Access shm;
@ -82,11 +83,11 @@ thread_local Logger logr = Logger(MainProcess{}, VERBOSE_DEFAULT);
Transaction createNewTransaction(Database db, Arguments const& args, int id = -1, Tenant* tenants = nullptr) {
// No tenants specified
if (args.tenants <= 0) {
if (args.active_tenants <= 0) {
return db.createTransaction();
}
// Create Tenant Transaction
int tenant_id = (id == -1) ? urand(0, args.tenants - 1) : id;
int tenant_id = (id == -1) ? urand(0, args.active_tenants - 1) : id;
// If provided tenants array, use it
if (tenants) {
return tenants[tenant_id].createTransaction();
@ -97,6 +98,23 @@ Transaction createNewTransaction(Database db, Arguments const& args, int id = -1
return t.createTransaction();
}
uint64_t byteswapHelper(uint64_t input) {
uint64_t output = 0;
for (int i = 0; i < 8; ++i) {
output <<= 8;
output += input & 0xFF;
input >>= 8;
}
return output;
}
void computeTenantPrefix(ByteString& s, uint64_t id) {
uint64_t swapped = byteswapHelper(id);
BytesRef temp = reinterpret_cast<const uint8_t*>(&swapped);
memcpy(&s[0], temp.data(), 8);
}
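`byteswapHelper` reverses the byte order of the 64-bit tenant id, so `computeTenantPrefix` writes the id big-endian into the 8-byte prefix; the tenant cleanup code below uses this to rebuild the tenant's key prefix from the `id` field of the tenant-map JSON. A standalone sketch of that computation, assuming a little-endian host as mako does; the id value is illustrative:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

// Mirrors byteswapHelper + computeTenantPrefix: place the tenant id
// big-endian into the first 8 bytes of the prefix.
static std::string tenantPrefixFromId(uint64_t id) {
    uint64_t swapped = 0;
    for (int i = 0; i < 8; ++i) { // byte-reverse the little-endian representation
        swapped <<= 8;
        swapped += id & 0xFF;
        id >>= 8;
    }
    std::string prefix(8, '\0');
    std::memcpy(&prefix[0], &swapped, 8);
    return prefix;
}

int main() {
    std::string p = tenantPrefixFromId(5);
    assert(p == std::string("\x00\x00\x00\x00\x00\x00\x00\x05", 8)); // id 5 -> 00..05
}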
/* cleanup database */
int cleanup(Database db, Arguments const& args) {
const auto prefix_len = args.prefixpadding ? args.key_length - args.row_digits : intSize(KEY_PREFIX);
@ -117,12 +135,8 @@ int cleanup(Database db, Arguments const& args) {
auto watch = Stopwatch(StartAtCtor{});
int num_iterations = (args.tenants > 1) ? args.tenants : 1;
for (int i = 0; i < num_iterations; ++i) {
// If args.tenants is zero, this will use a non-tenant txn and perform a single range clear.
// If 1, it will use a tenant txn and do a single range clear instead.
// If > 1, it will perform a range clear with a different tenant txn per iteration.
Transaction tx = createNewTransaction(db, args, i);
Transaction tx = db.createTransaction();
if (args.total_tenants == 0) {
while (true) {
tx.clearRange(beginstr, endstr);
auto future_commit = tx.commit();
@ -136,23 +150,89 @@ int cleanup(Database db, Arguments const& args) {
return -1;
}
}
// If tenants are specified, also delete the tenant after clearing out its keyspace
if (args.tenants > 0) {
Transaction systemTx = db.createTransaction();
while (true) {
Tenant::deleteTenant(systemTx, toBytesRef("tenant" + std::to_string(i)));
auto future_commit = systemTx.commit();
const auto rc = waitAndHandleError(systemTx, future_commit, "DELETE_TENANT");
if (rc == FutureRC::OK) {
break;
} else if (rc == FutureRC::RETRY || rc == FutureRC::CONFLICT) {
// tx already reset
continue;
} else {
return -1;
} else {
int batch_size = args.tenant_batch_size;
int batches = (args.total_tenants + batch_size - 1) / batch_size;
// First loop to clear all tenant key ranges
for (int batch = 0; batch < batches; ++batch) {
fdb::TypedFuture<fdb::future_var::ValueRef> tenantResults[batch_size];
// Issue all tenant reads first
Transaction getTx = db.createTransaction();
for (int i = batch * batch_size; i < args.total_tenants && i < (batch + 1) * batch_size; ++i) {
std::string tenant_name = "tenant" + std::to_string(i);
tenantResults[i - (batch * batch_size)] = Tenant::getTenant(getTx, toBytesRef(tenant_name));
}
tx.setOption(FDBTransactionOption::FDB_TR_OPTION_LOCK_AWARE, BytesRef());
tx.setOption(FDBTransactionOption::FDB_TR_OPTION_RAW_ACCESS, BytesRef());
for (int i = batch * batch_size; i < args.total_tenants && i < (batch + 1) * batch_size; ++i) {
std::string tenant_name = "tenant" + std::to_string(i);
while (true) {
const auto rc = waitAndHandleError(getTx, tenantResults[i - (batch * batch_size)], "GET_TENANT");
if (rc == FutureRC::OK) {
// Read the tenant metadata for the prefix and issue a range clear
if (tenantResults[i - (batch * batch_size)].get().has_value()) {
ByteString val(tenantResults[i - (batch * batch_size)].get().value());
rapidjson::Document doc;
const char* metadata = reinterpret_cast<const char*>(val.c_str());
doc.Parse(metadata);
if (!doc.HasParseError()) {
// rapidjson does not decode the prefix as the same byte string that
// was passed as input. This is because we use a non-standard encoding.
// The encoding will likely change in the future.
// For a workaround, we take the id and compute the prefix on our own
rapidjson::Value& docVal = doc["id"];
uint64_t id = docVal.GetUint64();
ByteString tenantPrefix(8, '\0');
computeTenantPrefix(tenantPrefix, id);
ByteString tenantPrefixEnd = strinc(tenantPrefix);
tx.clearRange(toBytesRef(tenantPrefix), toBytesRef(tenantPrefixEnd));
}
}
break;
} else if (rc == FutureRC::RETRY) {
continue;
} else {
// Abort
return -1;
}
}
}
auto future_commit = tx.commit();
const auto rc = waitAndHandleError(tx, future_commit, "TENANT_COMMIT_CLEANUP");
if (rc == FutureRC::OK) {
// Keep going with reset transaction if commit was successful
tx.reset();
} else if (rc == FutureRC::RETRY) {
// We want to retry this batch, so decrement the number
// and go back through the loop to get the same value
// Transaction is already reset
--batch;
} else {
// Abort
return -1;
}
}
// Second loop to delete the tenants
tx.reset();
for (int batch = 0; batch < batches; ++batch) {
for (int i = batch * batch_size; i < args.total_tenants && i < (batch + 1) * batch_size; ++i) {
std::string tenant_name = "tenant" + std::to_string(i);
Tenant::deleteTenant(tx, toBytesRef(tenant_name));
}
auto future_commit = tx.commit();
const auto rc = waitAndHandleError(tx, future_commit, "DELETE_TENANT");
if (rc == FutureRC::OK) {
// Keep going with reset transaction if commit was successful
tx.reset();
} else if (rc == FutureRC::RETRY) {
// We want to retry this batch, so decrement the number
// and go back through the loop to get the same value
// Transaction is already reset
--batch;
} else {
// Abort
return -1;
}
}
}
@ -168,7 +248,6 @@ int populate(Database db,
int thread_tps,
ThreadStatistics& stats) {
auto xacts = 0;
auto keystr = ByteString{};
auto valstr = ByteString{};
keystr.resize(args.key_length);
@ -180,39 +259,65 @@ int populate(Database db,
auto watch_tx = Stopwatch(watch_total.getStart());
auto watch_trace = Stopwatch(watch_total.getStart());
Transaction systemTx = db.createTransaction();
for (int i = 0; i < args.tenants; ++i) {
while (true) {
// Until this issue https://github.com/apple/foundationdb/issues/7260 is resolved
// we have to commit each tenant creation transaction one-by-one
// while (i % 10 == 9 || i == args.tenants - 1) {
std::string tenant_name = "tenant" + std::to_string(i);
Tenant::createTenant(systemTx, toBytesRef(tenant_name));
auto future_commit = systemTx.commit();
const auto rc = waitAndHandleError(systemTx, future_commit, "CREATE_TENANT");
if (rc == FutureRC::RETRY) {
continue;
} else {
// Keep going if commit was successful (FutureRC::OK)
// If not a retryable error, expected to be the error
// tenant_already_exists, meaning another thread finished creating it
systemTx.reset();
break;
if (args.total_tenants > 0) {
Transaction systemTx = db.createTransaction();
// Have one thread create all the tenants, then let the rest help with data population
if (worker_id == 0 && thread_id == 0) {
int batch_size = args.tenant_batch_size;
int batches = (args.total_tenants + batch_size - 1) / batch_size;
for (int batch = 0; batch < batches; ++batch) {
for (int i = batch * batch_size; i < args.total_tenants && i < (batch + 1) * batch_size; ++i) {
std::string tenant_name = "tenant" + std::to_string(i);
Tenant::createTenant(systemTx, toBytesRef(tenant_name));
}
auto future_commit = systemTx.commit();
const auto rc = waitAndHandleError(systemTx, future_commit, "CREATE_TENANT");
if (rc == FutureRC::OK) {
// Keep going with reset transaction if commit was successful
systemTx.reset();
} else if (rc == FutureRC::RETRY) {
// We want to retry this batch, so decrement the number
// and go back through the loop to get the same value
// Transaction is already reset
--batch;
} else {
// Abort
return -1;
}
}
} else {
std::string last_tenant_name = "tenant" + std::to_string(args.total_tenants - 1);
while (true) {
auto result = Tenant::getTenant(systemTx, toBytesRef(last_tenant_name));
const auto rc = waitAndHandleError(systemTx, result, "GET_TENANT");
if (rc == FutureRC::OK) {
// If we get valid tenant metadata, the main thread has finished
if (result.get().has_value()) {
break;
}
systemTx.reset();
} else if (rc == FutureRC::RETRY) {
continue;
} else {
// Abort
return -1;
}
usleep(1000);
}
}
}
// mimic typical tenant usage: keep tenants in memory
// and create transactions as needed
Tenant tenants[args.tenants];
for (int i = 0; i < args.tenants; ++i) {
Tenant tenants[args.active_tenants];
for (int i = 0; i < args.active_tenants; ++i) {
std::string tenantStr = "tenant" + std::to_string(i);
BytesRef tenant_name = toBytesRef(tenantStr);
tenants[i] = db.openTenant(tenant_name);
}
int populate_iters = args.tenants > 0 ? args.tenants : 1;
int populate_iters = args.active_tenants > 0 ? args.active_tenants : 1;
// Each tenant should have the same range populated
for (auto t_id = 0; t_id < populate_iters; ++t_id) {
Transaction tx = createNewTransaction(db, args, t_id, args.tenants > 0 ? tenants : nullptr);
Transaction tx = createNewTransaction(db, args, t_id, args.active_tenants > 0 ? tenants : nullptr);
const auto key_begin = insertBegin(args.rows, worker_id, thread_id, args.num_processes, args.num_threads);
const auto key_end = insertEnd(args.rows, worker_id, thread_id, args.num_processes, args.num_threads);
auto key_checkpoint = key_begin; // in case of commit failure, restart from this key
@ -261,7 +366,7 @@ int populate(Database db,
auto tx_restarter = ExitGuard([&watch_tx]() { watch_tx.startFromStop(); });
if (rc == FutureRC::OK) {
key_checkpoint = i + 1; // restart on failures from next key
tx = createNewTransaction(db, args, t_id, args.tenants > 0 ? tenants : nullptr);
tx = createNewTransaction(db, args, t_id, args.active_tenants > 0 ? tenants : nullptr);
} else if (rc == FutureRC::ABORT) {
return -1;
} else {
@ -440,8 +545,8 @@ int runWorkload(Database db,
// mimic typical tenant usage: keep tenants in memory
// and create transactions as needed
Tenant tenants[args.tenants];
for (int i = 0; i < args.tenants; ++i) {
Tenant tenants[args.active_tenants];
for (int i = 0; i < args.active_tenants; ++i) {
std::string tenantStr = "tenant" + std::to_string(i);
BytesRef tenant_name = toBytesRef(tenantStr);
tenants[i] = db.openTenant(tenant_name);
@ -449,7 +554,7 @@ int runWorkload(Database db,
/* main transaction loop */
while (1) {
Transaction tx = createNewTransaction(db, args, -1, args.tenants > 0 ? tenants : nullptr);
Transaction tx = createNewTransaction(db, args, -1, args.active_tenants > 0 ? tenants : nullptr);
while ((thread_tps > 0) && (xacts >= current_tps)) {
/* throttle on */
const auto time_now = steady_clock::now();
@ -809,7 +914,8 @@ int workerProcessMain(Arguments const& args, int worker_id, shared_memory::Acces
this_args.worker_id = worker_id;
this_args.thread_id = i;
this_args.parent_id = pid_main;
this_args.tenants = args.tenants;
this_args.active_tenants = args.active_tenants;
this_args.total_tenants = args.total_tenants;
this_args.args = &args;
this_args.shm = shm;
this_args.database = databases[i % args.num_databases];
@ -878,7 +984,9 @@ int initArguments(Arguments& args) {
args.sampling = 1000;
args.key_length = 32;
args.value_length = 16;
args.tenants = 0;
args.active_tenants = 0;
args.total_tenants = 0;
args.tenant_batch_size = 10000;
args.zipf = 0;
args.commit_get = 0;
args.verbose = 1;
@ -1053,7 +1161,9 @@ void usage() {
printf("%-24s %s\n", "", "This option cannot be specified with --seconds.");
printf("%-24s %s\n", " --keylen=LENGTH", "Specify the key lengths");
printf("%-24s %s\n", " --vallen=LENGTH", "Specify the value lengths");
printf("%-24s %s\n", " --tenants=TENANTS", "Specify the number of tenants to use");
printf("%-24s %s\n", " --active_tenants=ACTIVE_TENANTS", "Specify the number of tenants to use");
printf("%-24s %s\n", " --total_tenants=TOTAL_TENANTS", "Specify the number of tenants to create");
printf("%-24s %s\n", " --tenant_batch_size=SIZE", "Specify how many tenants to create/delete per transaction");
printf("%-24s %s\n", "-x, --transaction=SPEC", "Transaction specification");
printf("%-24s %s\n", " --tps|--tpsmax=TPS", "Specify the target max TPS");
printf("%-24s %s\n", " --tpsmin=TPS", "Specify the target min TPS");
@ -1109,7 +1219,9 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
{ "iteration", required_argument, NULL, 'i' },
{ "keylen", required_argument, NULL, ARG_KEYLEN },
{ "vallen", required_argument, NULL, ARG_VALLEN },
{ "tenants", required_argument, NULL, ARG_TENANTS },
{ "active_tenants", required_argument, NULL, ARG_ACTIVE_TENANTS },
{ "total_tenants", required_argument, NULL, ARG_TOTAL_TENANTS },
{ "tenant_batch_size", required_argument, NULL, ARG_TENANT_BATCH_SIZE },
{ "transaction", required_argument, NULL, 'x' },
{ "tps", required_argument, NULL, ARG_TPS },
{ "tpsmax", required_argument, NULL, ARG_TPSMAX },
@ -1226,8 +1338,14 @@ int parseArguments(int argc, char* argv[], Arguments& args) {
case ARG_VALLEN:
args.value_length = atoi(optarg);
break;
case ARG_TENANTS:
args.tenants = atoi(optarg);
case ARG_ACTIVE_TENANTS:
args.active_tenants = atoi(optarg);
break;
case ARG_TOTAL_TENANTS:
args.total_tenants = atoi(optarg);
break;
case ARG_TENANT_BATCH_SIZE:
args.tenant_batch_size = atoi(optarg);
break;
case ARG_TPS:
case ARG_TPSMAX:
@ -1418,6 +1536,14 @@ int validateArguments(Arguments const& args) {
4 + args.row_digits);
return -1;
}
if (args.active_tenants > args.total_tenants) {
logr.error("--active_tenants must be less than or equal to --total_tenants");
return -1;
}
if (args.tenant_batch_size < 1) {
logr.error("--tenant_batch_size must be at least 1");
return -1;
}
if (args.mode == MODE_RUN) {
if ((args.seconds > 0) && (args.iteration > 0)) {
logr.error("Cannot specify seconds and iteration together");
@ -1984,7 +2110,8 @@ int statsProcessMain(Arguments const& args,
fmt::fprintf(fp, "\"sampling\": %d,", args.sampling);
fmt::fprintf(fp, "\"key_length\": %d,", args.key_length);
fmt::fprintf(fp, "\"value_length\": %d,", args.value_length);
fmt::fprintf(fp, "\"tenants\": %d,", args.tenants);
fmt::fprintf(fp, "\"active_tenants\": %d,", args.active_tenants);
fmt::fprintf(fp, "\"total_tenants\": %d,", args.total_tenants);
fmt::fprintf(fp, "\"commit_get\": %d,", args.commit_get);
fmt::fprintf(fp, "\"verbose\": %d,", args.verbose);
fmt::fprintf(fp, "\"cluster_files\": \"%s\",", args.cluster_files[0]);
@ -2108,11 +2235,16 @@ int main(int argc, char* argv[]) {
/* usage printed */
return 0;
}
if (args.tenants > 1) {
args.rows = args.rows / args.tenants;
if (args.active_tenants > 1) {
args.rows = args.rows / args.active_tenants;
args.row_digits = digits(args.rows);
}
// Allow specifying only the number of active tenants, in which case # active = # total
if (args.active_tenants > 0 && args.total_tenants == 0) {
args.total_tenants = args.active_tenants;
}
rc = validateArguments(args);
if (rc < 0)
return -1;

View File

@ -50,7 +50,9 @@ constexpr const int MODE_REPORT = 3;
enum ArgKind {
ARG_KEYLEN,
ARG_VALLEN,
ARG_TENANTS,
ARG_ACTIVE_TENANTS,
ARG_TOTAL_TENANTS,
ARG_TENANT_BATCH_SIZE,
ARG_TPS,
ARG_ASYNC,
ARG_COMMITGET,
@ -145,7 +147,9 @@ struct Arguments {
int sampling;
int key_length;
int value_length;
int tenants;
int active_tenants;
int total_tenants;
int tenant_batch_size;
int zipf;
int commit_get;
int verbose;

View File

@ -35,10 +35,10 @@
#include "operations.hpp"
#include "time.hpp"
#include "ddsketch.hpp"
#include "contrib/rapidjson/rapidjson/document.h"
#include "contrib/rapidjson/rapidjson/rapidjson.h"
#include "contrib/rapidjson/rapidjson/stringbuffer.h"
#include "contrib/rapidjson/rapidjson/writer.h"
#include "rapidjson/document.h"
#include "rapidjson/rapidjson.h"
#include "rapidjson/stringbuffer.h"
#include "rapidjson/writer.h"
#include <iostream>
#include <sstream>
#include <vector>

View File

@ -2624,7 +2624,7 @@ TEST_CASE("Tenant create, access, and delete") {
fdb::Transaction tr(db);
while (1) {
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
tr.set("\xff\xff/management/tenant_map/" + tenantName, "");
tr.set("\xff\xff/management/tenant/map/" + tenantName, "");
fdb::EmptyFuture commitFuture = tr.commit();
fdb_error_t err = wait_future(commitFuture);
if (err) {
@ -2637,8 +2637,8 @@ TEST_CASE("Tenant create, access, and delete") {
}
while (1) {
StringRef begin = "\xff\xff/management/tenant_map/"_sr;
StringRef end = "\xff\xff/management/tenant_map0"_sr;
StringRef begin = "\xff\xff/management/tenant/map/"_sr;
StringRef end = "\xff\xff/management/tenant/map0"_sr;
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
fdb::KeyValueArrayFuture f = tr.get_range(FDB_KEYSEL_FIRST_GREATER_OR_EQUAL(begin.begin(), begin.size()),
@ -2716,7 +2716,7 @@ TEST_CASE("Tenant create, access, and delete") {
while (1) {
fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
tr.clear("\xff\xff/management/tenant_map/" + tenantName);
tr.clear("\xff\xff/management/tenant/map/" + tenantName);
fdb::EmptyFuture commitFuture = tr.commit();
fdb_error_t err = wait_future(commitFuture);
if (err) {

View File

@ -20,6 +20,7 @@ add_flow_target(STATIC_LIBRARY NAME fdb_flow SRCS ${SRCS})
target_link_libraries(fdb_flow PUBLIC fdb_c)
target_link_libraries(fdb_flow PUBLIC fdbclient)
target_include_directories(fdb_flow PUBLIC
"${CMAKE_SOURCE_DIR}"
"${CMAKE_CURRENT_BINARY_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}"
"${CMAKE_CURRENT_SOURCE_DIR}/tester"

View File

@ -24,7 +24,7 @@
#include <stdio.h>
#include <cinttypes>
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "flow/DeterministicRandom.h"
#include "flow/SystemMonitor.h"
#include "flow/TLSConfig.actor.h"

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#include "Tester.actor.h"
#include "tester/Tester.actor.h"
#include <cinttypes>
#ifdef __linux__
#include <string.h>

View File

@ -22,7 +22,7 @@
// version.
#if defined(NO_INTELLISENSE) && !defined(FDB_FLOW_TESTER_TESTER_ACTOR_G_H)
#define FDB_FLOW_TESTER_TESTER_ACTOR_G_H
#include "Tester.actor.g.h"
#include "tester/Tester.actor.g.h"
#elif !defined(FDB_FLOW_TESTER_TESTER_ACTOR_H)
#define FDB_FLOW_TESTER_TESTER_ACTOR_H

View File

@ -27,6 +27,8 @@ import java.util.concurrent.Semaphore;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import com.apple.foundationdb.tuple.ByteArrayUtil;
/**
* The starting point for accessing FoundationDB.
* <br>
@ -189,6 +191,11 @@ public class FDB {
Select_API_version(version);
singleton = new FDB(version);
if (version < 720) {
TenantManagement.TENANT_MAP_PREFIX = ByteArrayUtil.join(new byte[] { (byte)255, (byte)255 },
"/management/tenant_map/".getBytes());
}
return singleton;
}

View File

@ -40,8 +40,8 @@ import com.apple.foundationdb.tuple.Tuple;
* The FoundationDB API includes function to manage the set of tenants in a cluster.
*/
public class TenantManagement {
static final byte[] TENANT_MAP_PREFIX = ByteArrayUtil.join(new byte[] { (byte)255, (byte)255 },
"/management/tenant_map/".getBytes());
static byte[] TENANT_MAP_PREFIX = ByteArrayUtil.join(new byte[] { (byte)255, (byte)255 },
"/management/tenant/map/".getBytes());
/**
* Creates a new tenant in the cluster. If the tenant already exists, this operation will complete

View File

@ -102,6 +102,8 @@ def api_version(ver):
if ver >= 710:
import fdb.tenant_management
if ver < 720:
fdb.tenant_management._tenant_map_prefix = b'\xff\xff/management/tenant_map/'
if ver < 610:
globals()["init"] = getattr(fdb.impl, "init")

View File

@ -25,7 +25,7 @@ https://apple.github.io/foundationdb/api-python.html"""
from fdb import impl as _impl
_tenant_map_prefix = b'\xff\xff/management/tenant_map/'
_tenant_map_prefix = b'\xff\xff/management/tenant/map/'
# If the existence_check_marker is an empty list, then check whether the tenant exists.
# After the check, append an item to the existence_check_marker list so that subsequent

View File

@ -620,6 +620,15 @@ def tenants(logger):
assert lines[0].strip().startswith('id: ')
assert lines[1].strip().startswith('prefix: ')
output = run_fdbcli_command('gettenant tenant JSON')
json_output = json.loads(output, strict=False)
assert(len(json_output) == 2)
assert('tenant' in json_output)
assert(json_output['type'] == 'success')
assert(len(json_output['tenant']) == 2)
assert('id' in json_output['tenant'])
assert('prefix' in json_output['tenant'])
output = run_fdbcli_command('usetenant')
assert output == 'Using the default tenant'

View File

@ -78,12 +78,12 @@ def test_tenant_operations(db):
tenant1[b'tenant_test_key'] = b'tenant1'
tenant2[b'tenant_test_key'] = b'tenant2'
tenant1_entry = db[b'\xff\xff/management/tenant_map/tenant1']
tenant1_entry = db[b'\xff\xff/management/tenant/map/tenant1']
tenant1_json = json.loads(tenant1_entry)
prefix1 = tenant1_json['prefix'].encode('utf8')
assert prefix1 == p1
tenant2_entry = db[b'\xff\xff/management/tenant_map/tenant2']
tenant2_entry = db[b'\xff\xff/management/tenant/map/tenant2']
tenant2_json = json.loads(tenant2_entry)
prefix2 = tenant2_json['prefix'].encode('utf8')
assert prefix2 == p2

View File

@ -64,9 +64,6 @@ add_compile_definitions(BOOST_ERROR_CODE_HEADER_ONLY BOOST_SYSTEM_NO_DEPRECATED)
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads REQUIRED)
include_directories(${CMAKE_SOURCE_DIR})
include_directories(${CMAKE_BINARY_DIR})
if(WIN32)
add_definitions(-DBOOST_USE_WINDOWS_H)
add_definitions(-DWIN32_LEAN_AND_MEAN)

View File

@ -193,10 +193,9 @@ endif()
find_package(toml11 QUIET)
if(toml11_FOUND)
add_library(toml11_target INTERFACE)
add_dependencies(toml11_target INTERFACE toml11::toml11)
target_link_libraries(toml11_target INTERFACE toml11::toml11)
else()
include(ExternalProject)
ExternalProject_add(toml11Project
URL "https://github.com/ToruNiina/toml11/archive/v3.4.0.tar.gz"
URL_HASH SHA256=bc6d733efd9216af8c119d8ac64a805578c79cc82b813e4d1d880ca128bd154d

View File

@ -64,9 +64,8 @@ function(generate_coverage_xml)
WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}
COMMENT "Generate coverage xml")
endif()
add_custom_target(coverage_${target_name} DEPENDS ${target_file})
add_custom_target(coverage_${target_name} ALL DEPENDS ${target_file})
add_dependencies(coverage_${target_name} coveragetool)
add_dependencies(${target_name} coverage_${target_name})
endfunction()
# This function asserts that `versions.h` does not exist in the source
@ -147,6 +146,47 @@ function(strip_debug_symbols target)
add_dependencies(strip_targets strip_${target})
endfunction()
# This will copy the header from a flow target into ${CMAKE_BINARY_DIR}/include/target-name
# We're doing this to enforce proper dependencies. In the past we simply added the source
# and binary dir to the include list, which means that for example a compilation unit in
# flow could include a header file that lives in fdbserver. This is a somewhat hacky solution
# but due to our directory structure it seems to be the least invasive one.
function(copy_headers)
set(options)
set(oneValueArgs NAME OUT_DIR INC_DIR)
set(multiValueArgs SRCS)
cmake_parse_arguments(CP "${options}" "${oneValueArgs}" "${multiValueArgs}" "${ARGN}")
get_filename_component(dir_name ${CMAKE_CURRENT_SOURCE_DIR} NAME)
set(include_dir "${CMAKE_CURRENT_BINARY_DIR}/include")
set(incl_dir "${include_dir}/${dir_name}")
make_directory("${incl_dir}")
foreach(f IN LISTS CP_SRCS)
is_prefix(bd "${CMAKE_CURRENT_BINARY_DIR}" "${f}")
is_prefix(sd "${CMAKE_CURRENT_SOURCE_DIR}" "${f}")
if (bd OR sd)
continue()
endif()
is_header(hdr "${f}")
if(NOT hdr)
continue()
endif()
get_filename_component(fname ${f} NAME)
get_filename_component(dname ${f} DIRECTORY)
if (dname)
make_directory(${incl_dir}/${dname})
endif()
set(fpath "${incl_dir}/${dname}/${fname}")
add_custom_command(OUTPUT "${fpath}"
DEPENDS "${f}"
COMMAND "${CMAKE_COMMAND}" -E copy "${f}" "${fpath}"
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
list(APPEND out_files "${fpath}")
endforeach()
add_custom_target("${CP_NAME}_incl" DEPENDS ${out_files})
set("${CP_OUT_DIR}" "${incl_dir}" PARENT_SCOPE)
set("${CP_INC_DIR}" ${include_dir} PARENT_SCOPE)
endfunction()
function(add_flow_target)
set(options EXECUTABLE STATIC_LIBRARY
DYNAMIC_LIBRARY)
@ -159,42 +199,64 @@ function(add_flow_target)
if(NOT AFT_SRCS)
message(FATAL_ERROR "No sources provided")
endif()
#foreach(src IN LISTS AFT_SRCS)
# is_header(h "${src}")
# if(NOT h)
# list(SRCS "${CMAKE_CURRENT_SOURCE_DIR}/${src}")
# endif()
#endforeach()
if(OPEN_FOR_IDE)
# Intentionally omit ${AFT_DISABLE_ACTOR_DIAGNOSTICS} since we don't want diagnostics
set(sources ${AFT_SRCS} ${AFT_ADDL_SRCS})
add_library(${AFT_NAME} OBJECT ${sources})
else()
create_build_dirs(${AFT_SRCS} ${AFT_DISABLE_ACTOR_DIAGNOSTICS})
foreach(src IN LISTS AFT_SRCS AFT_DISABLE_ACTOR_DIAGNOSTICS)
set(actor_compiler_flags "")
is_header(hdr ${src})
set(in_filename "${src}")
if(${src} MATCHES ".*\\.actor\\.(h|cpp)")
list(APPEND actors ${src})
list(APPEND actor_compiler_flags "--generate-probes")
set(is_actor_file YES)
if(${src} MATCHES ".*\\.h")
string(REPLACE ".actor.h" ".actor.g.h" generated ${src})
string(REPLACE ".actor.h" ".actor.g.h" out_filename ${in_filename})
else()
string(REPLACE ".actor.cpp" ".actor.g.cpp" generated ${src})
string(REPLACE ".actor.cpp" ".actor.g.cpp" out_filename ${in_filename})
endif()
else()
set(is_actor_file NO)
set(out_filename "${src}")
endif()
set(in_file "${CMAKE_CURRENT_SOURCE_DIR}/${in_filename}")
if(is_actor_file)
set(out_file "${CMAKE_CURRENT_BINARY_DIR}/${out_filename}")
else()
set(out_file "${in_file}")
endif()
list(APPEND sources ${out_file})
set(actor_compiler_flags "")
if(is_actor_file)
list(APPEND actors ${in_file})
list(APPEND actor_compiler_flags "--generate-probes")
foreach(s IN LISTS AFT_DISABLE_ACTOR_DIAGNOSTICS)
if("${s}" STREQUAL "${src}")
list(APPEND actor_compiler_flags "--disable-diagnostics")
break()
endif()
endforeach()
list(APPEND sources ${generated})
list(APPEND generated_files ${CMAKE_CURRENT_BINARY_DIR}/${generated})
list(APPEND generated_files ${out_file})
if(WIN32)
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
COMMAND $<TARGET_FILE:actorcompiler> "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags}
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" ${actor_exe}
add_custom_command(OUTPUT "${out_file}"
COMMAND $<TARGET_FILE:actorcompiler> "${in_file}" "${out_file}" ${actor_compiler_flags}
DEPENDS "${in_file}" ${actor_exe}
COMMENT "Compile actor: ${src}")
else()
add_custom_command(OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/${generated}"
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${CMAKE_CURRENT_SOURCE_DIR}/${src}" "${CMAKE_CURRENT_BINARY_DIR}/${generated}" ${actor_compiler_flags} > /dev/null
DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${src}" ${actor_exe}
add_custom_command(OUTPUT "${out_file}"
COMMAND ${MONO_EXECUTABLE} ${actor_exe} "${in_file}" "${out_file}" ${actor_compiler_flags} > /dev/null
DEPENDS "${in_file}" ${actor_exe}
COMMENT "Compile actor: ${src}")
endif()
else()
list(APPEND sources ${src})
endif()
endforeach()
if(AFT_EXECUTABLE)
@ -227,7 +289,6 @@ function(add_flow_target)
set_property(TARGET ${AFT_NAME} PROPERTY COVERAGE_FILTERS ${AFT_SRCS})
add_custom_target(${AFT_NAME}_actors DEPENDS ${generated_files})
add_dependencies(${AFT_NAME}_actors actorcompiler)
add_dependencies(${AFT_NAME} ${AFT_NAME}_actors)
if(NOT WIN32)
assert_no_version_h(${AFT_NAME}_actors)

View File

@ -221,16 +221,10 @@ configure_file(${CMAKE_SOURCE_DIR}/LICENSE ${CMAKE_BINARY_DIR}/License.txt COPYO
################################################################################
# Filename of packages
################################################################################
set(CPACK_RPM_PACKAGE_RELEASE 1)
if(NOT FDB_RELEASE)
if(CURRENT_GIT_VERSION)
string(SUBSTRING ${CURRENT_GIT_VERSION} 0 9 git_hash)
endif()
set(CPACK_RPM_PACKAGE_RELEASE 0)
set(package_version_postfix "-0.${git_hash}.SNAPSHOT")
set(git_string ".${git_hash}")
set(package_version_postfix "-1.SNAPSHOT")
else()
set(CPACK_RPM_PACKAGE_RELEASE 1)
set(package_version_postfix "-1")
endif()
@ -239,6 +233,7 @@ endif()
################################################################################
string(REPLACE "-" "_" FDB_PACKAGE_VERSION ${FDB_VERSION})
set(CPACK_RPM_PACKAGE_GROUP ${CURRENT_GIT_VERSION})
set(CPACK_RPM_PACKAGE_LICENSE "Apache 2.0")
set(CPACK_RPM_PACKAGE_NAME "foundationdb")
set(CPACK_RPM_CLIENTS-EL7_PACKAGE_NAME "${CPACK_RPM_PACKAGE_NAME}-clients")
@ -260,14 +255,14 @@ set(CPACK_RPM_SERVER-EL7_USER_FILELIST "%config(noreplace) /
"%attr(0700,foundationdb,foundationdb) /var/lib/foundationdb")
set(CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME "${CPACK_RPM_PACKAGE_NAME}${FDB_PACKAGE_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients")
set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME}${git_string}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME}${git_string}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_CLIENTS-VERSIONED_FILE_NAME "${CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME}-${CPACK_RPM_PACKAGE_RELEASE}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_CLIENTS-VERSIONED_DEBUGINFO_FILE_NAME "${CPACK_RPM_CLIENTS-VERSIONED_PACKAGE_NAME}-${CPACK_RPM_PACKAGE_RELEASE}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_CLIENTS-VERSIONED_POST_INSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/postinst-el7)
set(CPACK_RPM_CLIENTS-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/packaging/multiversion/clients/prerm)
set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME "${CPACK_RPM_PACKAGE_NAME}${FDB_PACKAGE_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-server")
set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME}${git_string}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME}${git_string}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_SERVER-VERSIONED_FILE_NAME "${CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME}-${CPACK_RPM_PACKAGE_RELEASE}.versioned.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_SERVER-VERSIONED_DEBUGINFO_FILE_NAME "${CPACK_RPM_SERVER-VERSIONED_PACKAGE_NAME}-${CPACK_RPM_PACKAGE_RELEASE}.versioned-debuginfo.${CMAKE_SYSTEM_PROCESSOR}.rpm")
set(CPACK_RPM_SERVER-VERSIONED_PACKAGE_REQUIRES "${CPACK_COMPONENT_CLIENTS-VERSIONED_DISPLAY_NAME}")
set(CPACK_RPM_SERVER-VERSIONED_POST_INSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/packaging/multiversion/server/postinst-rpm)
set(CPACK_RPM_SERVER-VERSIONED_PRE_UNINSTALL_SCRIPT_FILE ${CMAKE_BINARY_DIR}/packaging/multiversion/server/prerm)
@ -309,13 +304,13 @@ set(CPACK_RPM_COMPONENT_INSTALL ON)
if (CMAKE_SYSTEM_PROCESSOR MATCHES "x86_64")
set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "foundationdb-clients_${FDB_VERSION}${package_version_postfix}_amd64.deb")
set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "foundationdb-server_${FDB_VERSION}${package_version_postfix}_amd64.deb")
set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients${git_string}.versioned_amd64.deb")
set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-server${git_string}.versioned_amd64.deb")
set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients-${CPACK_RPM_PACKAGE_RELEASE}.versioned_amd64.deb")
set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-server-${CPACK_RPM_PACKAGE_RELEASE}.versioned_amd64.deb")
else()
set(CPACK_DEBIAN_CLIENTS-DEB_FILE_NAME "foundationdb-clients_${FDB_VERSION}${package_version_postfix}_${CMAKE_SYSTEM_PROCESSOR}.deb")
set(CPACK_DEBIAN_SERVER-DEB_FILE_NAME "foundationdb-server_${FDB_VERSION}${package_version_postfix}_${CMAKE_SYSTEM_PROCESSOR}.deb")
set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients${git_string}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-server${git_string}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
set(CPACK_DEBIAN_CLIENTS-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-clients-${CPACK_RPM_PACKAGE_RELEASE}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
set(CPACK_DEBIAN_SERVER-VERSIONED_FILE_NAME "foundationdb${FDB_VERSION}${FDB_BUILDTIME_STRING}${PRERELEASE_TAG}-server-${CPACK_RPM_PACKAGE_RELEASE}.versioned_${CMAKE_SYSTEM_PROCESSOR}.deb")
endif()
set(CPACK_DEB_COMPONENT_INSTALL ON)

68
cmake/utils.cmake Normal file
View File

@ -0,0 +1,68 @@
# sets out_var to YES if filename has extension .h or .hpp, NO otherwise
function(is_header out_var filename)
set(res "NO")
get_filename_component(ext "${filename}" LAST_EXT)
if((ext STREQUAL ".h") OR (ext STREQUAL ".hpp"))
set(res "YES")
endif()
set("${out_var}" "${res}" PARENT_SCOPE)
endfunction()
function(remove_prefix out prefix str)
string(LENGTH "${prefix}" len)
string(SUBSTRING "${str}" ${len} -1 res)
set("${out}" "${res}" PARENT_SCOPE)
endfunction()
function(is_prefix out prefix str)
string(LENGTH "${prefix}" plen)
string(LENGTH "${str}" slen)
if(plen GREATER slen)
set(res NO)
else()
string(SUBSTRING "${str}" 0 ${plen} pstr)
if(pstr STREQUAL prefix)
set(res YES)
else()
set(res NO)
endif()
endif()
set(${out} ${res} PARENT_SCOPE)
endfunction()
function(create_build_dirs)
foreach(src IN LISTS ARGV)
get_filename_component(d "${src}" DIRECTORY)
if(IS_ABSOLUTE "${d}")
file(RELATIVE_PATH d "${CMAKE_CURRENT_SOURCE_DIR}" "${src}")
endif()
list(APPEND dirs "${d}")
endforeach()
list(REMOVE_DUPLICATES dirs)
foreach(dir IN LISTS dirs)
make_directory("${CMAKE_CURRENT_BINARY_DIR}/${dir}")
endforeach()
endfunction()
function(fdb_find_sources out)
file(GLOB res
LIST_DIRECTORIES false
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
CONFIGURE_DEPENDS "*.cpp" "*.c" "*.h" "*.hpp")
file(GLOB_RECURSE res_includes
LIST_DIRECTORIES false
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/include"
CONFIGURE_DEPENDS "include/*.cpp" "include/*.c" "include/*.h" "include/*.hpp")
file(GLOB_RECURSE res_workloads
LIST_DIRECTORIES false
RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/workloads"
CONFIGURE_DEPENDS "workloads/*.cpp" "workloads/*.c" "workloads/*.h" "workloads/*.hpp")
foreach(f IN LISTS res_includes)
list(APPEND res "include/${f}")
endforeach()
foreach(f IN LISTS res_workloads)
list(APPEND res "workloads/${f}")
endforeach()
set(${out} "${res}" PARENT_SCOPE)
endfunction()

View File

@ -1,8 +1,15 @@
add_library(rapidjson INTERFACE)
target_include_directories(rapidjson INTERFACE ${CMAKE_CURRENT_SOURCE_DIR}/rapidjson)
add_subdirectory(crc32)
add_subdirectory(stacktrace)
add_subdirectory(folly_memcpy)
add_subdirectory(rapidxml)
add_subdirectory(sqlite)
add_subdirectory(SimpleOpt)
add_subdirectory(fmt-8.1.1)
if(NOT WIN32)
add_subdirectory(linenoise)
add_subdirectory(debug_determinism)
add_subdirectory(monitoring)
add_subdirectory(TraceLogHelper)

View File

@ -0,0 +1,2 @@
add_library(SimpleOpt INTERFACE)
target_include_directories(SimpleOpt INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/include")

View File

@ -78,7 +78,7 @@
<li> Include the SimpleOpt.h header file
<pre>
\#include "flow/SimpleOpt.h"
\#include "SimpleOpt/SimpleOpt.h"
</pre>
<li> Define an array of valid options for your program.

View File

@ -0,0 +1,2 @@
add_library(crc32 STATIC crc32.S crc32_wrapper.c crc32c.cpp)
target_include_directories(crc32 PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")

View File

@ -58,7 +58,7 @@
#ifdef CRC32_CONSTANTS_HEADER
#include CRC32_CONSTANTS_HEADER
#else
#include "crc32_constants.h"
#include "crc32/crc32_constants.h"
#endif
.text

View File

@ -15,7 +15,7 @@
#ifdef CRC32_CONSTANTS_HEADER
#include CRC32_CONSTANTS_HEADER
#else
#include "crc32_constants.h"
#include "crc32/crc32_constants.h"
#endif
#define VMX_ALIGN 16

View File

@ -25,7 +25,21 @@
#define _CRT_SECURE_NO_WARNINGS
#endif
#include "flow/crc32c.h"
#if (defined(__linux__) || defined(__APPLE__) || defined(__FreeBSD__))
#define __unixish__ 1
#endif
#ifdef __unixish__
#if !defined(__aarch64__) && !defined(__powerpc64__)
#include <cpuid.h>
#endif
#endif
#ifdef _WIN32
#include <intrin.h>
#endif
#include "crc32/crc32c.h"
#if !defined(__aarch64__) && !defined(__powerpc64__)
#include <nmmintrin.h>
@ -34,9 +48,40 @@
#include <stdlib.h>
#include <random>
#include <algorithm>
#include "flow/Platform.h"
#include "crc32c-generated-constants.cpp"
// CRC32C
#ifdef __aarch64__
// aarch64
#include <inttypes.h>
static inline uint32_t hwCrc32cU8(unsigned int crc, unsigned char v) {
uint32_t ret;
asm volatile("crc32cb %w[r], %w[c], %w[v]" : [r] "=r"(ret) : [c] "r"(crc), [v] "r"(v));
return ret;
}
static inline uint32_t hwCrc32cU32(unsigned int crc, unsigned int v) {
uint32_t ret;
asm volatile("crc32cw %w[r], %w[c], %w[v]" : [r] "=r"(ret) : [c] "r"(crc), [v] "r"(v));
return ret;
}
#ifdef _M_X64
static inline uint64_t hwCrc32cU64(uint64_t crc, uint64_t v) {
uint64_t ret;
asm volatile("crc32cx %w[r], %w[c], %x[v]" : [r] "=r"(ret) : [c] "r"(crc), [v] "r"(v));
return ret;
}
#endif
#else
#ifndef __powerpc64__
// Intel
#define hwCrc32cU8(c, v) _mm_crc32_u8(c, v)
#define hwCrc32cU32(c, v) _mm_crc32_u32(c, v)
#ifdef _M_X64
#define hwCrc32cU64(c, v) _mm_crc32_u64(c, v)
#endif
#endif
#endif
[[maybe_unused]] static uint32_t append_trivial(uint32_t crc, const uint8_t* input, size_t length) {
for (size_t i = 0; i < length; ++i) {
crc = crc ^ input[i];
@ -278,7 +323,25 @@ uint32_t ppc_hw(uint32_t crc, const uint8_t* input, size_t length) {
}
#endif
static bool hw_available = platform::isHwCrcSupported();
bool isHwCrcSupported() {
#if defined(_WIN32)
int info[4];
__cpuid(info, 1);
return (info[2] & (1 << 20)) != 0;
#elif defined(__aarch64__)
return true; /* force to use crc instructions */
#elif defined(__powerpc64__)
return false; /* force not to use crc instructions */
#elif defined(__unixish__)
uint32_t eax, ebx, ecx, edx, level = 1, count = 0;
__cpuid_count(level, count, eax, ebx, ecx, edx);
return ((ecx >> 20) & 1) != 0;
#else
#error Port me!
#endif
}
static bool hw_available = isHwCrcSupported();
extern "C" uint32_t crc32c_append(uint32_t crc, const uint8_t* input, size_t length) {
if (hw_available) {

View File

@ -0,0 +1,4 @@
if(UNIX AND NOT APPLE)
add_library(folly_memcpy STATIC folly_memcpy.S)
target_include_directories(folly_memcpy PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}")
endif()

View File

@ -0,0 +1,2 @@
add_library(linenoise STATIC linenoise.c)
target_include_directories(linenoise PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")

View File

@ -115,7 +115,7 @@
#include <sys/types.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "linenoise.h"
#include "linenoise/linenoise.h"
#define LINENOISE_DEFAULT_HISTORY_MAX_LEN 100
#define LINENOISE_MAX_LINE 4096

View File

@ -0,0 +1,2 @@
add_library(rapidxml INTERFACE)
target_include_directories(rapidxml INTERFACE "${CMAKE_CURRENT_SOURCE_DIR}/include")

View File

@ -0,0 +1,16 @@
add_library(sqlite STATIC
btree.h
hash.h
sqlite3.h
sqlite3ext.h
sqliteInt.h
sqliteLimit.h
sqlite3.amalgamation.c)
target_include_directories(sqlite PUBLIC ${CMAKE_CURRENT_SOURCE_DIR})
# Suppress warnings in sqlite since it's third party
if(NOT WIN32)
target_compile_definitions(sqlite PRIVATE $<$<CONFIG:Debug>:NDEBUG>)
target_compile_options(sqlite BEFORE PRIVATE -w) # disable warnings for third party
endif()

View File

@ -0,0 +1,11 @@
add_library(stacktrace STATIC stacktrace.amalgamation.cpp)
target_include_directories(stacktrace PUBLIC "${CMAKE_CURRENT_SOURCE_DIR}/include")
if (USE_ASAN)
target_compile_definitions(stacktrace PRIVATE ADDRESS_SANITIZER)
elseif(USE_MSAN)
target_compile_definitions(stacktrace PRIVATE MEMORY_SANITIZER)
elseif(USE_UBSAN)
target_compile_definitions(stacktrace PRIVATE UNDEFINED_BEHAVIOR_SANITIZER)
elseif(USE_TSAN)
target_compile_definitions(stacktrace PRIVATE THREAD_SANITIZER DYNAMIC_ANNOTATIONS_EXTERNAL_IMPL=1)
endif()

View File

@ -234,12 +234,27 @@ Note that :ref:`characters can be escaped <cli-escaping>` when specifying keys (
gettenant
---------
The ``gettenant`` command fetches metadata for a given tenant and displays it. Its syntax is ``gettenant <TENANT_NAME>``.
The ``gettenant`` command fetches metadata for a given tenant and displays it. Its syntax is ``gettenant <TENANT_NAME> [JSON]``.
Included in the output of this command are the ``id`` and ``prefix`` assigned to the tenant. If the tenant does not exist, ``fdbcli`` will report an error.
Included in the output of this command are the ``id`` and ``prefix`` assigned to the tenant. If the tenant does not exist, ``fdbcli`` will report an error. If ``JSON`` is specified, then the output will be written as a JSON document::
getversion
----------
{
"tenant": {
"id": 0,
"prefix": "\u0000\u0000\u0000\u0000\u0000\u0000\u0000\u0000"
},
"type": "success"
}
In the event of an error, the output will include an error message::
{
"error": "...",
"type": "error"
}
getversion
----------
The ``getversion`` command fetches the current read version of the cluster or currently running transaction.
@ -399,6 +414,13 @@ heap
Enables heap profiling for the specified process.
renametenant
------------
The ``renametenant`` command can rename an existing tenant to a new name. Its syntax is ``renametenant <OLD_NAME> <NEW_NAME>``.
This command requires that ``OLD_NAME`` be a tenant that already exists on the cluster and that ``NEW_NAME`` not already be in use by another tenant in the cluster.
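For example, renaming a tenant ``old_tenant`` to ``new_tenant`` (illustrative names) looks like this in ``fdbcli``::
    fdb> renametenant old_tenant new_tenant
    The tenant `old_tenant' has been renamed to `new_tenant'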
reset
-----

View File

@ -4,6 +4,18 @@
Release Notes
#############
7.1.11
======
* Same as 7.1.10 release with AVX enabled.
7.1.10
======
* Released with AVX disabled.
* Fixed a sequencer crash when DC ID is a string. `(PR #7393) <https://github.com/apple/foundationdb/pull/7393>`_
* Fixed a client performance regression by removing unnecessary transaction initialization. `(PR #7365) <https://github.com/apple/foundationdb/pull/7365>`_
* Safely removed fdb_transaction_get_range_and_flat_map C API. `(PR #7379) <https://github.com/apple/foundationdb/pull/7379>`_
* Fixed an unknown error bug when hostname resolution fails. `(PR #7380) <https://github.com/apple/foundationdb/pull/7380>`_
7.1.9
=====
* Same as 7.1.8 release with AVX enabled.
@ -15,7 +27,7 @@ Release Notes
* Added RSS bytes for processes in status json output and corrected available_bytes calculation. `(PR #7348) <https://github.com/apple/foundationdb/pull/7348>`_
* Added versionstamp support in tuples. `(PR #7313) <https://github.com/apple/foundationdb/pull/7313>`_
* Fixed some spammy trace events. `(PR #7300) <https://github.com/apple/foundationdb/pull/7300>`_
* Fixed a memory corruption bug for using streaming peeks. `(PR #7288) <https://github.com/apple/foundationdb/pull/7288>`_
* Avoided a memory corruption bug by disabling streaming peeks. `(PR #7288) <https://github.com/apple/foundationdb/pull/7288>`_
* Fixed a hang bug in fdbcli exclude command. `(PR #7268) <https://github.com/apple/foundationdb/pull/7268>`_
* Fixed an issue that a remote TLog blocks peeks. `(PR #7255) <https://github.com/apple/foundationdb/pull/7255>`_
* Fixed a connection issue using hostnames. `(PR #7264) <https://github.com/apple/foundationdb/pull/7264>`_

View File

@ -17,18 +17,22 @@ Users will also (by default) see a ``special_keys_cross_module_read`` error if t
The error is to save the user from the surprise of seeing the behavior of multiple modules in the same read.
Users may opt out of these restrictions by setting the ``special_key_space_relaxed`` transaction option.
Each special key that existed before api version 630 is its own module. These are
Each special key that existed before api version 630 is its own module. These are:
#. ``\xff\xff/cluster_file_path`` See :ref:`cluster file client access <cluster-file-client-access>`
#. ``\xff\xff/status/json`` See :doc:`Machine-readable status <mr-status>`
#. ``\xff\xff/cluster_file_path`` - See :ref:`cluster file client access <cluster-file-client-access>`
#. ``\xff\xff/status/json`` - See :doc:`Machine-readable status <mr-status>`
Prior to api version 630, it was also possible to read a range starting at
``\xff\xff/worker_interfaces``. This is mostly an implementation detail of fdbcli,
Prior to api version 630, it was also possible to read a range starting at ``\xff\xff/worker_interfaces``. This is mostly an implementation detail of fdbcli,
but it's available in api version 630 as a module with prefix ``\xff\xff/worker_interfaces/``.
Api version 630 includes two new modules with prefixes
``\xff\xff/transaction/`` (information about the current transaction), and
``\xff\xff/metrics/`` (various metrics, not transactional).
Api version 630 includes two new modules:
#. ``\xff\xff/transaction/`` - information about the current transaction
#. ``\xff\xff/metrics/`` - various metrics, not transactional
Api version 720 includes one new module:
#. ``\xff\xff/clusterId`` - returns an immutable unique ID for a cluster
Transaction module
------------------
@ -204,7 +208,7 @@ that process, and wait for necessary data to be moved away.
#. ``\xff\xff/management/failed_locality/<locality>`` Read/write. Indicates that the cluster should consider matching processes as permanently failed. This allows the cluster to avoid maintaining extra state and doing extra work in the hope that these processes come back. See :ref:`removing machines from a cluster <removing-machines-from-a-cluster>` for documentation for the corresponding fdbcli command.
#. ``\xff\xff/management/options/excluded_locality/force`` Read/write. Setting this key disables safety checks for writes to ``\xff\xff/management/excluded_locality/<locality>``. Setting this key only has an effect in the current transaction and is not persisted on commit.
#. ``\xff\xff/management/options/failed_locality/force`` Read/write. Setting this key disables safety checks for writes to ``\xff\xff/management/failed_locality/<locality>``. Setting this key only has an effect in the current transaction and is not persisted on commit.
#. ``\xff\xff/management/tenant_map/<tenant>`` Read/write. Setting a key in this range to any value will result in a tenant being created with name ``<tenant>``. Clearing a key in this range will delete the tenant with name ``<tenant>``. Reading all or a portion of this range will return the list of tenants currently present in the cluster, excluding any changes in this transaction. Values read in this range will be JSON objects containing the metadata for the associated tenants.
#. ``\xff\xff/management/tenant/map/<tenant>`` Read/write. Setting a key in this range to any value will result in a tenant being created with name ``<tenant>``. Clearing a key in this range will delete the tenant with name ``<tenant>``. Reading all or a portion of this range will return the list of tenants currently present in the cluster, excluding any changes in this transaction. Values read in this range will be JSON objects containing the metadata for the associated tenants.
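As a minimal sketch (Python bindings; the tenant name ``my_tenant`` is illustrative), a tenant can be created and deleted through this range once special-key-space writes are enabled for the transaction::

    import fdb
    fdb.api_version(720)
    db = fdb.open()

    # Create a tenant named "my_tenant" by setting its key in the management range.
    tr = db.create_transaction()
    tr.options.set_special_key_space_enable_writes()
    tr[b'\xff\xff/management/tenant/map/my_tenant'] = b''
    tr.commit().wait()

    # Delete the same tenant by clearing its key.
    tr = db.create_transaction()
    tr.options.set_special_key_space_enable_writes()
    del tr[b'\xff\xff/management/tenant/map/my_tenant']
    tr.commit().wait()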
An exclusion is syntactically either an ip address (e.g. ``127.0.0.1``), or
an ip address and port (e.g. ``127.0.0.1:4500``) or any locality (e.g ``locality_dcid:primary-satellite`` or
@ -270,7 +274,8 @@ Deprecated Keys
Listed below are the special keys that have been deprecated. Special key(s) will no longer be accessible when the client specifies an API version equal to or larger than the version where they were deprecated. Clients specifying older API versions will be able to continue using the deprecated key(s).
#. ``\xff\xff/management/profiling/<client_txn_sample_rate|client_txn_size_limit>`` Deprecated as of API version 7.2. The corresponding functionalities are now covered by the global configuration module. For details, see :doc:`global-configuration`. Read/write. Changing these two keys will change the corresponding system keys ``\xff\x02/fdbClientInfo/<client_txn_sample_rate|client_txn_size_limit>``, respectively. The value of ``\xff\xff/management/client_txn_sample_rate`` is a literal text of ``double``, and the value of ``\xff\xff/management/client_txn_size_limit`` is a literal text of ``int64_t``. A special value ``default`` can be set to or read from these two keys, indicating that client profiling is disabled. In addition, ``clear`` in this range is not allowed. For more details, see the help text of the ``fdbcli`` command ``profile client``.
#. ``\xff\xff/management/profiling/<client_txn_sample_rate|client_txn_size_limit>`` Deprecated as of API version 720. The corresponding functionalities are now covered by the global configuration module. For details, see :doc:`global-configuration`. Read/write. Changing these two keys will change the corresponding system keys ``\xff\x02/fdbClientInfo/<client_txn_sample_rate|client_txn_size_limit>``, respectively. The value of ``\xff\xff/management/client_txn_sample_rate`` is a literal text of ``double``, and the value of ``\xff\xff/management/client_txn_size_limit`` is a literal text of ``int64_t``. A special value ``default`` can be set to or read from these two keys, indicating that client profiling is disabled. In addition, ``clear`` in this range is not allowed. For more details, see the help text of the ``fdbcli`` command ``profile client``.
#. ``\xff\xff/management/tenant_map/<tenant>`` Removed as of API version 720 and renamed to ``\xff\xff/management/tenant/map/<tenant>``.
Versioning
==========

View File

@ -31,7 +31,7 @@ FoundationDB clusters support the following tenant modes:
Creating and deleting tenants
=============================
Tenants can be created and deleted using the ``\xff\xff/management/tenant_map/<tenant_name>`` :doc:`special key <special-keys>` range as well as by using APIs provided in some language bindings.
Tenants can be created and deleted using the ``\xff\xff/management/tenant/map/<tenant_name>`` :doc:`special key <special-keys>` range as well as by using APIs provided in some language bindings.
Tenants can be created with any byte-string name that does not begin with the ``\xff`` character. Once created, a tenant will be assigned an ID and a prefix where its data will reside.
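A minimal sketch (Python bindings; names illustrative) that lists the existing tenants and their JSON metadata by reading this special key range::

    import fdb
    fdb.api_version(720)
    db = fdb.open()

    @fdb.transactional
    def list_tenants(tr):
        begin = b'\xff\xff/management/tenant/map/'
        end = b'\xff\xff/management/tenant/map0'
        # Each value is a JSON document with the tenant's metadata (id, prefix, ...).
        return [(k[len(begin):], v) for k, v in tr.get_range(begin, end)]

    for name, metadata in list_tenants(db):
        print(name, metadata)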

View File

@ -19,7 +19,7 @@
* limitations under the License.
*/
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "flow/flow.h"
#include "flow/Platform.h"
#include "flow/DeterministicRandom.h"

View File

@ -1,23 +1,22 @@
set(FDBBACKUP_SRCS
BackupTLSConfig.h
BackupTLSConfig.cpp
backup.actor.cpp)
add_flow_target(EXECUTABLE NAME fdbbackup SRCS ${FDBBACKUP_SRCS})
target_include_directories(fdbbackup PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include")
target_link_libraries(fdbbackup PRIVATE fdbclient)
set(FDBCONVERT_SRCS
FileConverter.actor.cpp
FileConverter.h)
FileConverter.actor.cpp)
add_flow_target(EXECUTABLE NAME fdbconvert SRCS ${FDBCONVERT_SRCS})
target_include_directories(fdbconvert PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include")
target_link_libraries(fdbconvert PRIVATE fdbclient)
set(FDBDECODE_SRCS
BackupTLSConfig.h
BackupTLSConfig.cpp
FileDecoder.actor.cpp
FileConverter.h)
FileDecoder.actor.cpp)
add_flow_target(EXECUTABLE NAME fdbdecode SRCS ${FDBDECODE_SRCS})
target_include_directories(fdbdecode PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include")
target_link_libraries(fdbdecode PRIVATE fdbclient)
if(NOT OPEN_FOR_IDE)

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbbackup/BackupTLSConfig.h"
#include "fdbclient/JsonBuilder.h"
#include "flow/Arena.h"
@ -74,7 +74,7 @@
#include "fdbclient/versions.h"
#include "fdbclient/BuildFlags.h"
#include "flow/SimpleOpt.h"
#include "SimpleOpt/SimpleOpt.h"
#include "flow/actorcompiler.h" // This must be the last #include.
// Type of program being executed
@ -1920,11 +1920,11 @@ ACTOR Future<Void> submitBackup(Database db,
if (dryRun) {
state KeyBackedTag tag = makeBackupTag(tagName);
Optional<UidAndAbortedFlagT> uidFlag = wait(tag.get(db));
Optional<UidAndAbortedFlagT> uidFlag = wait(tag.get(db.getReference()));
if (uidFlag.present()) {
BackupConfig config(uidFlag.get().first);
EBackupState backupStatus = wait(config.stateEnum().getOrThrow(db));
EBackupState backupStatus = wait(config.stateEnum().getOrThrow(db.getReference()));
// Throw error if a backup is currently running until we support parallel backups
if (BackupAgentBase::isRunnable(backupStatus)) {
@ -2883,7 +2883,7 @@ ACTOR Future<Void> modifyBackup(Database db, std::string tagName, BackupModifyOp
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
state Optional<UidAndAbortedFlagT> uidFlag = wait(tag.get(db));
state Optional<UidAndAbortedFlagT> uidFlag = wait(tag.get(db.getReference()));
if (!uidFlag.present()) {
fprintf(stderr, "No backup exists on tag '%s'\n", tagName.c_str());

View File

@ -23,7 +23,7 @@
#pragma once
#include <cinttypes>
#include "flow/SimpleOpt.h"
#include "SimpleOpt/SimpleOpt.h"
#include "flow/TLSConfig.actor.h"
namespace file_converter {

View File

@ -19,7 +19,7 @@
*/
#include "boost/lexical_cast.hpp"
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/IClientApi.h"

View File

@ -1,43 +1,12 @@
set(FDBCLI_SRCS
fdbcli.actor.cpp
fdbcli.actor.h
AdvanceVersionCommand.actor.cpp
BlobRangeCommand.actor.cpp
CacheRangeCommand.actor.cpp
ConfigureCommand.actor.cpp
ConsistencyCheckCommand.actor.cpp
CoordinatorsCommand.actor.cpp
DataDistributionCommand.actor.cpp
ExcludeCommand.actor.cpp
ExpensiveDataCheckCommand.actor.cpp
FileConfigureCommand.actor.cpp
FlowLineNoise.actor.cpp
FlowLineNoise.h
ForceRecoveryWithDataLossCommand.actor.cpp
IncludeCommand.actor.cpp
KillCommand.actor.cpp
LockCommand.actor.cpp
ChangeFeedCommand.actor.cpp
MaintenanceCommand.actor.cpp
ProfileCommand.actor.cpp
SetClassCommand.actor.cpp
SnapshotCommand.actor.cpp
StatusCommand.actor.cpp
SuspendCommand.actor.cpp
TenantCommands.actor.cpp
ThrottleCommand.actor.cpp
TriggerDDTeamInfoLogCommand.actor.cpp
TssqCommand.actor.cpp
Util.actor.cpp
VersionEpochCommand.actor.cpp
linenoise/linenoise.h)
if(NOT WIN32)
list(APPEND FDBCLI_SRCS linenoise/linenoise.c)
endif()
fdb_find_sources(FDBCLI_SRCS)
add_flow_target(EXECUTABLE NAME fdbcli SRCS ${FDBCLI_SRCS})
target_link_libraries(fdbcli PRIVATE fdbclient)
target_include_directories(fdbcli PRIVATE "${CMAKE_CURRENT_SOURCE_DIR}/include" "${CMAKE_CURRENT_BINARY_DIR}/include")
target_link_libraries(fdbcli PRIVATE fdbclient SimpleOpt)
if(NOT WIN32)
target_link_libraries(fdbcli PRIVATE linenoise)
endif()
if(NOT OPEN_FOR_IDE)
if(GENERATE_DEBUG_PACKAGES)

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbcli/fdbcli.actor.h"

View File

@ -30,7 +30,7 @@
#if __unixish__
#define HAVE_LINENOISE 1
#include "fdbcli/linenoise/linenoise.h"
#include "linenoise/linenoise.h"
#else
#define HAVE_LINENOISE 0
#endif

View File

@ -21,7 +21,7 @@
#include <cinttypes>
#include "boost/lexical_cast.hpp"
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbcli/fdbcli.actor.h"

View File

@ -0,0 +1,178 @@
/*
* QuotaCommand.actor.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2022 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "fdbcli/fdbcli.actor.h"
#include "flow/actorcompiler.h" // This must be the last include
namespace {
enum class LimitType { RESERVED, TOTAL };
enum class OpType { READ, WRITE };
Optional<TransactionTag> parseTag(StringRef token) {
if (token.size() > CLIENT_KNOBS->MAX_TRANSACTION_TAG_LENGTH) {
return {};
} else {
return token;
}
}
Optional<LimitType> parseLimitType(StringRef token) {
if (token == "reserved"_sr) {
return LimitType::RESERVED;
} else if (token == "total"_sr) {
return LimitType::TOTAL;
} else {
return {};
}
}
Optional<OpType> parseOpType(StringRef token) {
if (token == "read"_sr) {
return OpType::READ;
} else if (token == "write"_sr) {
return OpType::WRITE;
} else {
return {};
}
}
Optional<double> parseLimitValue(StringRef token) {
try {
return std::stod(token.toString());
} catch (...) {
return {};
}
}
ACTOR Future<Void> getQuota(Reference<IDatabase> db, TransactionTag tag, LimitType limitType, OpType opType) {
state Reference<ITransaction> tr = db->createTransaction();
loop {
tr->setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
try {
state ThreadFuture<Optional<Value>> resultFuture = tr->get(tag.withPrefix(tagQuotaPrefix));
Optional<Value> v = wait(safeThreadFutureToFuture(resultFuture));
if (!v.present()) {
fmt::print("<empty>\n");
} else {
auto const quota = ThrottleApi::TagQuotaValue::fromValue(v.get());
if (limitType == LimitType::TOTAL && opType == OpType::READ) {
fmt::print("{}\n", quota.totalReadQuota);
} else if (limitType == LimitType::TOTAL && opType == OpType::WRITE) {
fmt::print("{}\n", quota.totalWriteQuota);
} else if (limitType == LimitType::RESERVED && opType == OpType::READ) {
fmt::print("{}\n", quota.reservedReadQuota);
} else if (limitType == LimitType::RESERVED && opType == OpType::WRITE) {
fmt::print("{}\n", quota.reservedWriteQuota);
}
}
return Void();
} catch (Error& e) {
wait(safeThreadFutureToFuture(tr->onError(e)));
}
}
}
ACTOR Future<Void> setQuota(Reference<IDatabase> db,
TransactionTag tag,
LimitType limitType,
OpType opType,
double value) {
state Reference<ITransaction> tr = db->createTransaction();
state Key key = tag.withPrefix(tagQuotaPrefix);
loop {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
try {
state ThreadFuture<Optional<Value>> resultFuture = tr->get(key);
Optional<Value> v = wait(safeThreadFutureToFuture(resultFuture));
ThrottleApi::TagQuotaValue quota;
if (v.present()) {
quota = ThrottleApi::TagQuotaValue::fromValue(v.get());
}
if (limitType == LimitType::TOTAL && opType == OpType::READ) {
quota.totalReadQuota = value;
} else if (limitType == LimitType::TOTAL && opType == OpType::WRITE) {
quota.totalWriteQuota = value;
} else if (limitType == LimitType::RESERVED && opType == OpType::READ) {
quota.reservedReadQuota = value;
} else if (limitType == LimitType::RESERVED && opType == OpType::WRITE) {
quota.reservedWriteQuota = value;
}
ThrottleApi::setTagQuota(tr,
tag,
quota.reservedReadQuota,
quota.totalReadQuota,
quota.reservedWriteQuota,
quota.totalWriteQuota);
wait(safeThreadFutureToFuture(tr->commit()));
return Void();
} catch (Error& e) {
wait(safeThreadFutureToFuture(tr->onError(e)));
}
}
}
constexpr auto usage =
"quota [get <tag> [reserved|total] [read|write]|set <tag> [reserved|total] [read|write] <value>]";
bool exitFailure() {
fmt::print(usage);
return false;
}
} // namespace
namespace fdb_cli {
ACTOR Future<bool> quotaCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
state bool result = true;
if (tokens.size() != 5 && tokens.size() != 6) {
return exitFailure();
} else {
auto tag = parseTag(tokens[2]);
auto limitType = parseLimitType(tokens[3]);
auto opType = parseOpType(tokens[4]);
if (!tag.present() || !limitType.present() || !opType.present()) {
return exitFailure();
}
if (tokens[1] == "get"_sr) {
if (tokens.size() != 5) {
return exitFailure();
}
wait(getQuota(db, tag.get(), limitType.get(), opType.get()));
return true;
} else if (tokens[1] == "set"_sr) {
if (tokens.size() != 6) {
return exitFailure();
}
auto const limitValue = parseLimitValue(tokens[5]);
if (!limitValue.present()) {
return exitFailure();
}
wait(setQuota(db, tag.get(), limitType.get(), opType.get(), limitValue.get()));
return true;
} else {
return exitFailure();
}
}
}
} // namespace fdb_cli

View File

@ -18,7 +18,7 @@
* limitations under the License.
*/
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbcli/fdbcli.actor.h"

View File

@ -21,9 +21,11 @@
#include "fdbcli/fdbcli.actor.h"
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/GenericManagementAPI.actor.h"
#include "fdbclient/IClientApi.h"
#include "fdbclient/Knobs.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/TenantManagement.actor.h"
#include "fdbclient/Schemas.h"
#include "flow/Arena.h"
@ -33,8 +35,8 @@
namespace fdb_cli {
const KeyRangeRef tenantSpecialKeyRange(LiteralStringRef("\xff\xff/management/tenant_map/"),
LiteralStringRef("\xff\xff/management/tenant_map0"));
const KeyRangeRef tenantSpecialKeyRange(LiteralStringRef("\xff\xff/management/tenant/map/"),
LiteralStringRef("\xff\xff/management/tenant/map0"));
// createtenant command
ACTOR Future<bool> createTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
@ -209,11 +211,12 @@ CommandFactory listTenantsFactory(
// gettenant command
ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 2) {
if (tokens.size() < 2 || tokens.size() > 3 || (tokens.size() == 3 && tokens[2] != "JSON"_sr)) {
printUsage(tokens[0]);
return false;
}
state bool useJson = tokens.size() == 3;
state Key tenantNameKey = fdb_cli::tenantSpecialKeyRange.begin.withSuffix(tokens[1]);
state Reference<ITransaction> tr = db->createTransaction();
@ -228,30 +231,79 @@ ACTOR Future<bool> getTenantCommandActor(Reference<IDatabase> db, std::vector<St
json_spirit::mValue jsonObject;
json_spirit::read_string(tenant.get().toString(), jsonObject);
JSONDoc doc(jsonObject);
int64_t id;
std::string prefix;
doc.get("id", id);
doc.get("prefix", prefix);
if (useJson) {
json_spirit::mObject resultObj;
resultObj["tenant"] = jsonObject;
resultObj["type"] = "success";
printf("%s\n",
json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print).c_str());
} else {
JSONDoc doc(jsonObject);
int64_t id;
std::string prefix;
doc.get("id", id);
doc.get("prefix", prefix);
printf(" id: %" PRId64 "\n", id);
printf(" prefix: %s\n", printable(prefix).c_str());
}
printf(" id: %" PRId64 "\n", id);
printf(" prefix: %s\n", printable(prefix).c_str());
return true;
} catch (Error& e) {
state Error err(e);
if (e.code() == error_code_special_keys_api_failure) {
std::string errorMsgStr = wait(fdb_cli::getSpecialKeysFailureErrorMessage(tr));
fprintf(stderr, "ERROR: %s\n", errorMsgStr.c_str());
try {
wait(safeThreadFutureToFuture(tr->onError(e)));
} catch (Error& finalErr) {
state std::string errorStr;
if (finalErr.code() == error_code_special_keys_api_failure) {
std::string str = wait(getSpecialKeysFailureErrorMessage(tr));
errorStr = str;
} else if (useJson) {
errorStr = finalErr.what();
} else {
throw finalErr;
}
if (useJson) {
json_spirit::mObject resultObj;
resultObj["type"] = "error";
resultObj["error"] = errorStr;
printf(
"%s\n",
json_spirit::write_string(json_spirit::mValue(resultObj), json_spirit::pretty_print).c_str());
} else {
fprintf(stderr, "ERROR: %s\n", errorStr.c_str());
}
return false;
}
wait(safeThreadFutureToFuture(tr->onError(err)));
}
}
}
CommandFactory getTenantFactory("gettenant",
CommandHelp("gettenant <TENANT_NAME>",
"prints the metadata for a tenant",
"Prints the metadata for a tenant."));
CommandFactory getTenantFactory(
"gettenant",
CommandHelp("gettenant <TENANT_NAME> [JSON]",
"prints the metadata for a tenant",
"Prints the metadata for a tenant. If JSON is specified, then the output will be in JSON format."));
// renametenant command
ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens) {
if (tokens.size() != 3) {
printUsage(tokens[0]);
return false;
}
wait(safeThreadFutureToFuture(TenantAPI::renameTenant(db, tokens[1], tokens[2])));
printf("The tenant `%s' has been renamed to `%s'\n", printable(tokens[1]).c_str(), printable(tokens[2]).c_str());
return true;
}
CommandFactory renameTenantFactory(
"renametenant",
CommandHelp(
"renametenant <OLD_NAME> <NEW_NAME>",
"renames a tenant in the cluster.",
"Renames a tenant in the cluster. The old name must exist and the new name must not exist in the cluster."));
} // namespace fdb_cli

View File

@ -19,7 +19,7 @@
*/
#include "boost/lexical_cast.hpp"
#include "contrib/fmt-8.1.1/include/fmt/format.h"
#include "fmt/format.h"
#include "fdbclient/ClusterConnectionFile.h"
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/FDBTypes.h"
@ -39,6 +39,7 @@
#include "fdbclient/FDBOptions.g.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/TagThrottle.actor.h"
#include "fdbclient/TenantManagement.actor.h"
#include "fdbclient/Tuple.h"
#include "fdbclient/ThreadSafeTransaction.h"
@ -51,7 +52,7 @@
#include "flow/TLSConfig.actor.h"
#include "flow/ThreadHelper.actor.h"
#include "flow/SimpleOpt.h"
#include "SimpleOpt/SimpleOpt.h"
#include "fdbcli/FlowLineNoise.h"
#include "fdbcli/fdbcli.actor.h"
@ -63,7 +64,7 @@
#ifdef __unixish__
#include <stdio.h>
#include "fdbcli/linenoise/linenoise.h"
#include "linenoise/linenoise.h"
#endif
#include "fdbclient/versions.h"
@ -508,6 +509,10 @@ void initHelp() {
CommandHelp("getversion",
"Fetch the current read version",
"Displays the current read version of the database or currently running transaction.");
helpMap["quota"] =
CommandHelp("quota",
"quota [get <tag> [reserved|total] [read|write]|set <tag> [reserved|total] [read|write] <value>]",
"Get or modify the throughput quota for the specified tag.");
helpMap["reset"] =
CommandHelp("reset",
"reset the current transaction",
@ -1049,7 +1054,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
state Database localDb;
state Reference<IDatabase> db;
state Reference<ITenant> tenant;
state Optional<Standalone<StringRef>> tenantName;
state Optional<TenantName> tenantName;
state Optional<TenantMapEntry> tenantEntry;
// This tenant is kept empty for operations that perform management tasks (e.g. killing a process)
@ -1467,6 +1472,14 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
if (tokencmp(tokens[0], "quota")) {
bool _result = wait(makeInterruptable(quotaCommandActor(db, tokens)));
if (!_result) {
is_error = true;
}
continue;
}
if (tokencmp(tokens[0], "reset")) {
if (tokens.size() != 1) {
printUsage(tokens[0]);
@ -1840,7 +1853,7 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
}
} else {
Optional<TenantMapEntry> entry =
wait(makeInterruptable(ManagementAPI::tryGetTenant(db, tokens[1])));
wait(makeInterruptable(TenantAPI::tryGetTenant(db, tokens[1])));
if (!entry.present()) {
fprintf(stderr, "ERROR: Tenant `%s' does not exist\n", printable(tokens[1]).c_str());
is_error = true;
@ -1905,6 +1918,13 @@ ACTOR Future<int> cli(CLIOptions opt, LineNoise* plinenoise) {
continue;
}
if (tokencmp(tokens[0], "renametenant")) {
bool _result = wait(makeInterruptable(renameTenantCommandActor(db, tokens)));
if (!_result)
is_error = true;
continue;
}
fprintf(stderr, "ERROR: Unknown command `%s'. Try `help'?\n", formatStringRef(tokens[0]).c_str());
is_error = true;
}

View File

@ -218,6 +218,10 @@ ACTOR Future<bool> profileCommandActor(Database db,
Reference<ITransaction> tr,
std::vector<StringRef> tokens,
bool intrans);
// renametenant command
ACTOR Future<bool> renameTenantCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// quota command
ACTOR Future<bool> quotaCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// setclass command
ACTOR Future<bool> setClassCommandActor(Reference<IDatabase> db, std::vector<StringRef> tokens);
// snapshot command

View File

@ -20,7 +20,7 @@
#include "flow/flow.h"
#include "flow/singleton.h"
#include "fdbrpc/IAsyncFile.h"
#include "flow/IAsyncFile.h"
#include "fdbclient/ActorLineageProfiler.h"
#include "fdbclient/NameLineage.h"
#include <msgpack.hpp>

Some files were not shown because too many files have changed in this diff