Merge branch 'master' into bit-flipping-workload
commit 035e0d6e52
Changed files:

bindings/c
bindings/java
  JavaWorkload.cpp, fdbJNI.cpp
  src/integration/com/apple/foundationdb
  src/junit/com/apple/foundationdb
  src/main/com/apple/foundationdb
  tests.cmake
cmake
design
documentation/sphinx/source
  api-common.rst.inc, api-python.rst, mr-status-json-schemas.rst.inc
  release-notes
  request-tracing.rst, transaction-tagging.rst, tss.rst
fdbcli
fdbclient
  ActorLineageProfiler.h, CMakeLists.txt, ClientKnobs.cpp, ClientKnobs.h, ClientLibManagement.actor.cpp,
  ClientLibManagement.actor.h, ClientVersion.h, ClusterInterface.h, CommitProxyInterface.h,
  ConfigTransactionInterface.h, DatabaseConfiguration.cpp, DatabaseContext.h, FDBTypes.cpp, FDBTypes.h,
  FileBackupAgent.actor.cpp, IClientApi.h, IKnobCollection.cpp, IKnobCollection.h,
  ISingleThreadTransaction.h, KeyBackedTypes.h, LocalClientAPI.cpp, LocalClientAPI.h,
  ManagementAPI.actor.cpp, MonitorLeader.actor.cpp, MultiVersionTransaction.actor.cpp,
  MultiVersionTransaction.h, NativeAPI.actor.cpp, NativeAPI.actor.h, PaxosConfigTransaction.h,
  ReadYourWrites.actor.cpp, ReadYourWrites.h, Schemas.cpp, ServerKnobs.cpp, ServerKnobs.h,
  SimpleConfigTransaction.h, SpecialKeySpace.actor.cpp, StorageServerInterface.cpp,
  StorageServerInterface.h, SystemData.cpp, SystemData.h, ThreadSafeTransaction.cpp,
  ThreadSafeTransaction.h, VersionedMap.h
  json_spirit
  vexillographer
fdbkubernetesmonitor
fdbrpc
  FlowTests.actor.cpp, FlowTransport.actor.cpp, FlowTransport.h, SimExternalConnection.actor.cpp,
  SimExternalConnection.h, TSSComparison.h, fdbrpc.h, genericactors.actor.cpp, sim2.actor.cpp
fdbserver
bindings/c/CMakeLists.txt
@@ -7,18 +7,26 @@ file(MAKE_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR}/foundationdb)
 set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.S)
-set(platform "linux")
+set(os "linux")
+set(cpu "intel")
 if(APPLE)
-  set(platform "osx")
+  set(os "osx")
 elseif(WIN32)
-  set(platform "windows")
+  set(os "windows")
   set(asm_file ${CMAKE_CURRENT_BINARY_DIR}/fdb_c.g.asm)
-elseif(CMAKE_SYSTEM_NAME MATCHES "Linux" AND CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64")
-  set(platform "linux-aarch64")
 endif()
+
+if(CMAKE_SYSTEM_PROCESSOR MATCHES "aarch64" OR CMAKE_SYSTEM_PROCESSOR MATCHES "arm64")
+  set(cpu "aarch64")
+endif()
+
+set(IS_ARM_MAC NO)
+if(APPLE AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
+  set(IS_ARM_MAC YES)
+endif()
+
 add_custom_command(OUTPUT ${asm_file} ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
-  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${platform}
+  COMMAND $<TARGET_FILE:Python::Interpreter> ${CMAKE_CURRENT_SOURCE_DIR}/generate_asm.py ${os} ${cpu}
           ${CMAKE_CURRENT_SOURCE_DIR}/fdb_c.cpp
           ${asm_file}
           ${CMAKE_CURRENT_BINARY_DIR}/fdb_c_function_pointers.g.h
@@ -66,8 +74,10 @@ if(WIN32)
   set_property(SOURCE ${asm_file} PROPERTY LANGUAGE ASM_MASM)
 endif()
 
-# The tests don't build on windows
-if(NOT WIN32)
+# The tests don't build on windows and ARM macs
+# doctest doesn't seem to compile on ARM macs, we should
+# check later whether this works
+if(NOT WIN32 AND NOT IS_ARM_MAC)
   set(MAKO_SRCS
       test/mako/mako.c
       test/mako/mako.h
bindings/c/fdb_c.cpp
@@ -436,21 +436,12 @@ extern "C" DLLEXPORT FDBFuture* fdb_transaction_get_addresses_for_key(FDBTransaction* tr,
 	return (FDBFuture*)(TXN(tr)->getAddressesForKey(KeyRef(key_name, key_name_length)).extractPtr());
 }
 
-FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
-                                          uint8_t const* begin_key_name,
-                                          int begin_key_name_length,
-                                          fdb_bool_t begin_or_equal,
-                                          int begin_offset,
-                                          uint8_t const* end_key_name,
-                                          int end_key_name_length,
-                                          fdb_bool_t end_or_equal,
-                                          int end_offset,
-                                          int limit,
-                                          int target_bytes,
+// Set to the actual limit, target_bytes, and reverse.
+FDBFuture* validate_and_update_parameters(int& limit,
+                                          int& target_bytes,
                                           FDBStreamingMode mode,
                                           int iteration,
-                                          fdb_bool_t snapshot,
-                                          fdb_bool_t reverse) {
+                                          fdb_bool_t& reverse) {
 	/* This method may be called with a runtime API version of 13, in
 	   which negative row limits are a reverse range read */
 	if (g_api_version <= 13 && limit < 0) {
@@ -500,6 +491,27 @@ FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
 	else if (mode_bytes != GetRangeLimits::BYTE_LIMIT_UNLIMITED)
 		target_bytes = std::min(target_bytes, mode_bytes);
 
+	return nullptr;
+}
+
+FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
+                                          uint8_t const* begin_key_name,
+                                          int begin_key_name_length,
+                                          fdb_bool_t begin_or_equal,
+                                          int begin_offset,
+                                          uint8_t const* end_key_name,
+                                          int end_key_name_length,
+                                          fdb_bool_t end_or_equal,
+                                          int end_offset,
+                                          int limit,
+                                          int target_bytes,
+                                          FDBStreamingMode mode,
+                                          int iteration,
+                                          fdb_bool_t snapshot,
+                                          fdb_bool_t reverse) {
+	FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);
+	if (r != nullptr)
+		return r;
 	return (
 	    FDBFuture*)(TXN(tr)
 	                    ->getRange(
@@ -511,6 +523,60 @@ FDBFuture* fdb_transaction_get_range_impl(FDBTransaction* tr,
 	                    .extractPtr());
 }
 
+FDBFuture* fdb_transaction_get_range_and_flat_map_impl(FDBTransaction* tr,
+                                                       uint8_t const* begin_key_name,
+                                                       int begin_key_name_length,
+                                                       fdb_bool_t begin_or_equal,
+                                                       int begin_offset,
+                                                       uint8_t const* end_key_name,
+                                                       int end_key_name_length,
+                                                       fdb_bool_t end_or_equal,
+                                                       int end_offset,
+                                                       uint8_t const* mapper_name,
+                                                       int mapper_name_length,
+                                                       int limit,
+                                                       int target_bytes,
+                                                       FDBStreamingMode mode,
+                                                       int iteration,
+                                                       fdb_bool_t snapshot,
+                                                       fdb_bool_t reverse) {
+	FDBFuture* r = validate_and_update_parameters(limit, target_bytes, mode, iteration, reverse);
+	if (r != nullptr)
+		return r;
+	return (
+	    FDBFuture*)(TXN(tr)
+	                    ->getRangeAndFlatMap(
+	                        KeySelectorRef(KeyRef(begin_key_name, begin_key_name_length), begin_or_equal, begin_offset),
+	                        KeySelectorRef(KeyRef(end_key_name, end_key_name_length), end_or_equal, end_offset),
+	                        StringRef(mapper_name, mapper_name_length),
+	                        GetRangeLimits(limit, target_bytes),
+	                        snapshot,
+	                        reverse)
+	                    .extractPtr());
+}
+
+// TODO: Support FDB_API_ADDED in generate_asm.py and then this can be replaced with fdb_api_ptr_unimpl.
+FDBFuture* fdb_transaction_get_range_and_flat_map_v699(FDBTransaction* tr,
+                                                       uint8_t const* begin_key_name,
+                                                       int begin_key_name_length,
+                                                       fdb_bool_t begin_or_equal,
+                                                       int begin_offset,
+                                                       uint8_t const* end_key_name,
+                                                       int end_key_name_length,
+                                                       fdb_bool_t end_or_equal,
+                                                       int end_offset,
+                                                       uint8_t const* mapper_name,
+                                                       int mapper_name_length,
+                                                       int limit,
+                                                       int target_bytes,
+                                                       FDBStreamingMode mode,
+                                                       int iteration,
+                                                       fdb_bool_t snapshot,
+                                                       fdb_bool_t reverse) {
+	fprintf(stderr, "UNIMPLEMENTED FDB API FUNCTION\n");
+	abort();
+}
+
 FDBFuture* fdb_transaction_get_range_selector_v13(FDBTransaction* tr,
                                                   uint8_t const* begin_key_name,
                                                   int begin_key_name_length,
@@ -702,6 +768,7 @@ extern "C" DLLEXPORT fdb_error_t fdb_select_api_version_impl(int runtime_version,
 // WARNING: use caution when implementing removed functions by calling public API functions. This can lead to
 // undesired behavior when using the multi-version API. Instead, it is better to have both the removed and public
 // functions call an internal implementation function. See fdb_create_database_impl for an example.
+FDB_API_CHANGED(fdb_transaction_get_range_and_flat_map, 700);
 FDB_API_REMOVED(fdb_future_get_version, 620);
 FDB_API_REMOVED(fdb_create_cluster, 610);
 FDB_API_REMOVED(fdb_cluster_create_database, 610);
bindings/c/foundationdb/fdb_c.h
@@ -244,6 +244,24 @@ DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range(FDBTransaction* tr,
                                                                   fdb_bool_t reverse);
 #endif
 
+DLLEXPORT WARN_UNUSED_RESULT FDBFuture* fdb_transaction_get_range_and_flat_map(FDBTransaction* tr,
+                                                                               uint8_t const* begin_key_name,
+                                                                               int begin_key_name_length,
+                                                                               fdb_bool_t begin_or_equal,
+                                                                               int begin_offset,
+                                                                               uint8_t const* end_key_name,
+                                                                               int end_key_name_length,
+                                                                               fdb_bool_t end_or_equal,
+                                                                               int end_offset,
+                                                                               uint8_t const* mapper_name,
+                                                                               int mapper_name_length,
+                                                                               int limit,
+                                                                               int target_bytes,
+                                                                               FDBStreamingMode mode,
+                                                                               int iteration,
+                                                                               fdb_bool_t snapshot,
+                                                                               fdb_bool_t reverse);
+
 DLLEXPORT void fdb_transaction_set(FDBTransaction* tr,
                                    uint8_t const* key_name,
                                    int key_name_length,
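A usage sketch of the declaration above, not taken from the commit: it assumes an existing FDBTransaction* with read-your-writes disabled, and the begin/end/mapper byte strings are illustrative placeholders (in real use the mapper is a packed tuple, as in the tests added later in this commit).

    /* Hypothetical usage sketch of the new C API; key and mapper literals are illustrative only. */
    #include <string.h>
    #define FDB_API_VERSION 710
    #include <foundationdb/fdb_c.h>

    FDBFuture* example_get_range_and_flat_map(FDBTransaction* tr) {
        const char* begin = "index-begin";
        const char* end = "index-end";
        const char* mapper = "mapper"; /* a packed mapper tuple in real use */
        return fdb_transaction_get_range_and_flat_map(
            tr,
            FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)begin, (int)strlen(begin)),
            FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)end, (int)strlen(end)),
            (const uint8_t*)mapper, (int)strlen(mapper),
            /* limit */ 0, /* target_bytes */ 0,
            FDB_STREAMING_MODE_WANT_ALL, /* iteration */ 0,
            /* snapshot */ 1, /* reverse */ 0);
    }

The returned FDBFuture* is then waited on and read with fdb_future_get_keyvalue_array, exactly as for fdb_transaction_get_range.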
bindings/c/generate_asm.py
@@ -23,7 +23,7 @@
 import re
 import sys
 
-(platform, source, asm, h) = sys.argv[1:]
+(os, cpu, source, asm, h) = sys.argv[1:]
 
 functions = {}
 
@@ -59,17 +59,18 @@ def write_windows_asm(asmfile, functions):
 
 
 def write_unix_asm(asmfile, functions, prefix):
-    if platform != "linux-aarch64":
+    if cpu != "aarch64":
         asmfile.write(".intel_syntax noprefix\n")
 
-    if platform.startswith('linux') or platform == "freebsd":
+    if cpu == 'aarch64' or os == 'linux' or os == 'freebsd':
         asmfile.write("\n.data\n")
         for f in functions:
             asmfile.write("\t.extern fdb_api_ptr_%s\n" % f)
 
-        asmfile.write("\n.text\n")
-        for f in functions:
-            asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f))
+    if os == 'linux' or os == 'freebsd':
+        asmfile.write("\n.text\n")
+        for f in functions:
+            asmfile.write("\t.global %s\n\t.type %s, @function\n" % (f, f))
 
     for f in functions:
         asmfile.write("\n.globl %s%s\n" % (prefix, f))
@@ -104,10 +105,16 @@ def write_unix_asm(asmfile, functions, prefix):
     # .size g, .-g
     # .ident "GCC: (GNU) 8.3.1 20190311 (Red Hat 8.3.1-3)"
 
-        if platform == "linux-aarch64":
-            asmfile.write("\tadrp x8, :got:fdb_api_ptr_%s\n" % (f))
-            asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f))
-            asmfile.write("\tldr x8, [x8]\n")
+        p = ''
+        if os == 'osx':
+            p = '_'
+        if cpu == "aarch64":
+            asmfile.write("\tldr x16, =%sfdb_api_ptr_%s\n" % (p, f))
+            if os == 'osx':
+                asmfile.write("\tldr x16, [x16]\n")
+            else:
+                asmfile.write("\tldr x8, [x8, #:got_lo12:fdb_api_ptr_%s]\n" % (f))
+                asmfile.write("\tldr x8, [x8]\n")
             asmfile.write("\tbr x8\n")
         else:
             asmfile.write(
@@ -123,15 +130,15 @@ with open(asm, 'w') as asmfile:
     hfile.write(
         "void fdb_api_ptr_removed() { fprintf(stderr, \"REMOVED FDB API FUNCTION\\n\"); abort(); }\n\n")
 
-    if platform.startswith('linux'):
+    if os == 'linux':
         write_unix_asm(asmfile, functions, '')
-    elif platform == "osx":
+    elif os == "osx":
         write_unix_asm(asmfile, functions, '_')
-    elif platform == "windows":
+    elif os == "windows":
         write_windows_asm(asmfile, functions)
 
     for f in functions:
-        if platform == "windows":
+        if os == "windows":
             hfile.write("extern \"C\" ")
         hfile.write("void* fdb_api_ptr_%s = (void*)&fdb_api_ptr_unimpl;\n" % f)
         for v in functions[f]:
bindings/c/test/mako/mako.c
@@ -1297,12 +1297,15 @@ int worker_process_main(mako_args_t* args, int worker_id, mako_shmhdr_t* shm, pi
 
 	if (args->client_threads_per_version > 0) {
 		err = fdb_network_set_option(
-		    FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, (uint8_t*)&args->client_threads_per_version, sizeof(uint32_t));
+		    FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION, (uint8_t*)&args->client_threads_per_version, sizeof(int64_t));
 		if (err) {
 			fprintf(stderr,
 			        "ERROR: fdb_network_set_option (FDB_NET_OPTION_CLIENT_THREADS_PER_VERSION) (%d): %s\n",
 			        args->client_threads_per_version,
 			        fdb_get_error(err));
+			// let's exit here since we do not want to confuse users
+			// that mako is running with multi-threaded client enabled
+			return -1;
 		}
 	}
 
@@ -2815,6 +2818,7 @@ failExit:
 	if (shmfd) {
 		close(shmfd);
 		shm_unlink(shmpath);
+		unlink(shmpath);
 	}
 
 	return 0;
bindings/c/test/mako/mako.h
@@ -143,7 +143,7 @@ typedef struct {
 	int txntagging;
 	char txntagging_prefix[TAGPREFIXLENGTH_MAX];
 	FDBStreamingMode streaming_mode;
-	uint32_t client_threads_per_version;
+	int client_threads_per_version;
 	int disable_ryw;
 	char json_output_path[PATH_MAX];
 } mako_args_t;
bindings/c/test/unit/fdb_api.cpp
@@ -193,6 +193,41 @@ KeyValueArrayFuture Transaction::get_range(const uint8_t* begin_key_name,
 	                                           reverse));
 }
 
+KeyValueArrayFuture Transaction::get_range_and_flat_map(const uint8_t* begin_key_name,
+                                                        int begin_key_name_length,
+                                                        fdb_bool_t begin_or_equal,
+                                                        int begin_offset,
+                                                        const uint8_t* end_key_name,
+                                                        int end_key_name_length,
+                                                        fdb_bool_t end_or_equal,
+                                                        int end_offset,
+                                                        const uint8_t* mapper_name,
+                                                        int mapper_name_length,
+                                                        int limit,
+                                                        int target_bytes,
+                                                        FDBStreamingMode mode,
+                                                        int iteration,
+                                                        fdb_bool_t snapshot,
+                                                        fdb_bool_t reverse) {
+	return KeyValueArrayFuture(fdb_transaction_get_range_and_flat_map(tr_,
+	                                                                  begin_key_name,
+	                                                                  begin_key_name_length,
+	                                                                  begin_or_equal,
+	                                                                  begin_offset,
+	                                                                  end_key_name,
+	                                                                  end_key_name_length,
+	                                                                  end_or_equal,
+	                                                                  end_offset,
+	                                                                  mapper_name,
+	                                                                  mapper_name_length,
+	                                                                  limit,
+	                                                                  target_bytes,
+	                                                                  mode,
+	                                                                  iteration,
+	                                                                  snapshot,
+	                                                                  reverse));
+}
+
 EmptyFuture Transaction::watch(std::string_view key) {
 	return EmptyFuture(fdb_transaction_watch(tr_, (const uint8_t*)key.data(), key.size()));
 }
bindings/c/test/unit/fdb_api.hpp
@@ -219,6 +219,25 @@ public:
 	                               fdb_bool_t snapshot,
 	                               fdb_bool_t reverse);
 
+	// WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation
+	// AND disabling read-your-writes. Returns a future which will be set to an FDBKeyValue array.
+	KeyValueArrayFuture get_range_and_flat_map(const uint8_t* begin_key_name,
+	                                           int begin_key_name_length,
+	                                           fdb_bool_t begin_or_equal,
+	                                           int begin_offset,
+	                                           const uint8_t* end_key_name,
+	                                           int end_key_name_length,
+	                                           fdb_bool_t end_or_equal,
+	                                           int end_offset,
+	                                           const uint8_t* mapper_name,
+	                                           int mapper_name_length,
+	                                           int limit,
+	                                           int target_bytes,
+	                                           FDBStreamingMode mode,
+	                                           int iteration,
+	                                           fdb_bool_t snapshot,
+	                                           fdb_bool_t reverse);
+
 	// Wrapper around fdb_transaction_watch. Returns a future representing an
 	// empty value.
 	EmptyFuture watch(std::string_view key);
bindings/c/test/unit/unit_tests.cpp
@@ -40,6 +40,7 @@
 #define DOCTEST_CONFIG_IMPLEMENT
 #include "doctest.h"
 #include "fdbclient/rapidjson/document.h"
+#include "fdbclient/Tuple.h"
 
 #include "flow/config.h"
 
@@ -76,7 +77,7 @@ fdb_error_t wait_future(fdb::Future& f) {
 // Given a string s, returns the "lowest" string greater than any string that
 // starts with s. Taken from
 // https://github.com/apple/foundationdb/blob/e7d72f458c6a985fdfa677ae021f357d6f49945b/flow/flow.cpp#L223.
-std::string strinc(const std::string& s) {
+std::string strinc_str(const std::string& s) {
 	int index = -1;
 	for (index = s.size() - 1; index >= 0; --index) {
 		if ((uint8_t)s[index] != 255) {
@@ -92,16 +93,16 @@ std::string strinc_str(const std::string& s) {
 	return r;
 }
 
-TEST_CASE("strinc") {
-	CHECK(strinc("a").compare("b") == 0);
-	CHECK(strinc("y").compare("z") == 0);
-	CHECK(strinc("!").compare("\"") == 0);
-	CHECK(strinc("*").compare("+") == 0);
-	CHECK(strinc("fdb").compare("fdc") == 0);
-	CHECK(strinc("foundation database 6").compare("foundation database 7") == 0);
+TEST_CASE("strinc_str") {
+	CHECK(strinc_str("a").compare("b") == 0);
+	CHECK(strinc_str("y").compare("z") == 0);
+	CHECK(strinc_str("!").compare("\"") == 0);
+	CHECK(strinc_str("*").compare("+") == 0);
+	CHECK(strinc_str("fdb").compare("fdc") == 0);
+	CHECK(strinc_str("foundation database 6").compare("foundation database 7") == 0);
 
 	char terminated[] = { 'a', 'b', '\xff' };
-	CHECK(strinc(std::string(terminated, 3)).compare("ac") == 0);
+	CHECK(strinc_str(std::string(terminated, 3)).compare("ac") == 0);
 }
 
 // Helper function to add `prefix` to all keys in the given map. Returns a new
@@ -117,7 +118,7 @@ std::map<std::string, std::string> create_data(std::map<std::string, std::string
 // Clears all data in the database, then inserts the given key value pairs.
 void insert_data(FDBDatabase* db, const std::map<std::string, std::string>& data) {
 	fdb::Transaction tr(db);
-	auto end_key = strinc(prefix);
+	auto end_key = strinc_str(prefix);
 	while (1) {
 		tr.clear_range(prefix, end_key);
 		for (const auto& [key, val] : data) {
@@ -224,6 +225,59 @@ GetRangeResult get_range(fdb::Transaction& tr,
 	return GetRangeResult{ results, out_more != 0, 0 };
 }
 
+GetRangeResult get_range_and_flat_map(fdb::Transaction& tr,
+                                      const uint8_t* begin_key_name,
+                                      int begin_key_name_length,
+                                      fdb_bool_t begin_or_equal,
+                                      int begin_offset,
+                                      const uint8_t* end_key_name,
+                                      int end_key_name_length,
+                                      fdb_bool_t end_or_equal,
+                                      int end_offset,
+                                      const uint8_t* mapper_name,
+                                      int mapper_name_length,
+                                      int limit,
+                                      int target_bytes,
+                                      FDBStreamingMode mode,
+                                      int iteration,
+                                      fdb_bool_t snapshot,
+                                      fdb_bool_t reverse) {
+	fdb::KeyValueArrayFuture f1 = tr.get_range_and_flat_map(begin_key_name,
+	                                                        begin_key_name_length,
+	                                                        begin_or_equal,
+	                                                        begin_offset,
+	                                                        end_key_name,
+	                                                        end_key_name_length,
+	                                                        end_or_equal,
+	                                                        end_offset,
+	                                                        mapper_name,
+	                                                        mapper_name_length,
+	                                                        limit,
+	                                                        target_bytes,
+	                                                        mode,
+	                                                        iteration,
+	                                                        snapshot,
+	                                                        reverse);
+
+	fdb_error_t err = wait_future(f1);
+	if (err) {
+		return GetRangeResult{ {}, false, err };
+	}
+
+	const FDBKeyValue* out_kv;
+	int out_count;
+	fdb_bool_t out_more;
+	fdb_check(f1.get(&out_kv, &out_count, &out_more));
+
+	std::vector<std::pair<std::string, std::string>> results;
+	for (int i = 0; i < out_count; ++i) {
+		std::string key((const char*)out_kv[i].key, out_kv[i].key_length);
+		std::string value((const char*)out_kv[i].value, out_kv[i].value_length);
+		results.emplace_back(key, value);
+	}
+	return GetRangeResult{ results, out_more != 0, 0 };
+}
+
 // Clears all data in the database.
 void clear_data(FDBDatabase* db) {
 	insert_data(db, {});
@@ -819,6 +873,86 @@ TEST_CASE("fdb_transaction_set_read_version future_version") {
 	CHECK(err == 1009); // future_version
 }
 
+const std::string EMPTY = Tuple().pack().toString();
+const KeyRef RECORD = "RECORD"_sr;
+const KeyRef INDEX = "INDEX"_sr;
+static Key primaryKey(const int i) {
+	return Key(format("primary-key-of-record-%08d", i));
+}
+static Key indexKey(const int i) {
+	return Key(format("index-key-of-record-%08d", i));
+}
+static Value dataOfRecord(const int i) {
+	return Value(format("data-of-record-%08d", i));
+}
+static std::string indexEntryKey(const int i) {
+	return Tuple().append(StringRef(prefix)).append(INDEX).append(indexKey(i)).append(primaryKey(i)).pack().toString();
+}
+static std::string recordKey(const int i) {
+	return Tuple().append(prefix).append(RECORD).append(primaryKey(i)).pack().toString();
+}
+static std::string recordValue(const int i) {
+	return Tuple().append(dataOfRecord(i)).pack().toString();
+}
+
+TEST_CASE("fdb_transaction_get_range_and_flat_map") {
+	// Note: The user-requested `prefix` should be added as the first element of the tuple that forms the key, rather
+	// than the prefix of the key. So we don't use key() or create_data() in this test.
+	std::map<std::string, std::string> data;
+	for (int i = 0; i < 3; i++) {
+		data[indexEntryKey(i)] = EMPTY;
+		data[recordKey(i)] = recordValue(i);
+	}
+	insert_data(db, data);
+
+	std::string mapper = Tuple().append(prefix).append(RECORD).append("{K[3]}"_sr).pack().toString();
+
+	fdb::Transaction tr(db);
+	// get_range_and_flat_map is only supported without RYW, so disabling it is required.
+	fdb_check(tr.set_option(FDB_TR_OPTION_READ_YOUR_WRITES_DISABLE, nullptr, 0));
+	while (1) {
+		auto result = get_range_and_flat_map(
+		    tr,
+		    // [0, 1]
+		    FDB_KEYSEL_FIRST_GREATER_OR_EQUAL((const uint8_t*)indexEntryKey(0).c_str(), indexEntryKey(0).size()),
+		    FDB_KEYSEL_FIRST_GREATER_THAN((const uint8_t*)indexEntryKey(1).c_str(), indexEntryKey(1).size()),
+		    (const uint8_t*)mapper.c_str(),
+		    mapper.size(),
+		    /* limit */ 0,
+		    /* target_bytes */ 0,
+		    /* FDBStreamingMode */ FDB_STREAMING_MODE_WANT_ALL,
+		    /* iteration */ 0,
+		    /* snapshot */ true,
+		    /* reverse */ 0);
+
+		if (result.err) {
+			fdb::EmptyFuture f1 = tr.on_error(result.err);
+			fdb_check(wait_future(f1));
+			continue;
+		}
+
+		// Only the first 2 records are supposed to be returned.
+		if (result.kvs.size() < 2) {
+			CHECK(result.more);
+			// Retry.
+			continue;
+		}
+
+		CHECK(result.kvs.size() == 2);
+		CHECK(!result.more);
+		for (int i = 0; i < 2; i++) {
+			const auto& [key, value] = result.kvs[i];
+			std::cout << "result[" << i << "]: key=" << key << ", value=" << value << std::endl;
+			// OUTPUT:
+			// result[0]: key=fdbRECORDprimary-key-of-record-00000000, value=data-of-record-00000000
+			// result[1]: key=fdbRECORDprimary-key-of-record-00000001, value=data-of-record-00000001
			CHECK(recordKey(i).compare(key) == 0);
+			CHECK(recordValue(i).compare(value) == 0);
+		}
+		break;
+	}
+}
+
 TEST_CASE("fdb_transaction_get_range reverse") {
 	std::map<std::string, std::string> data = create_data({ { "a", "1" }, { "b", "2" }, { "c", "3" }, { "d", "4" } });
 	insert_data(db, data);
@@ -1726,7 +1860,7 @@ TEST_CASE("fdb_transaction_add_conflict_range") {
 
 	fdb::Transaction tr2(db);
 	while (1) {
-		fdb_check(tr2.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
+		fdb_check(tr2.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
 		fdb::EmptyFuture f1 = tr2.commit();
 
 		fdb_error_t err = wait_future(f1);
@@ -1739,8 +1873,8 @@ TEST_CASE("fdb_transaction_add_conflict_range") {
 	}
 
 	while (1) {
-		fdb_check(tr.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_READ));
-		fdb_check(tr.add_conflict_range(key("a"), strinc(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
+		fdb_check(tr.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_READ));
+		fdb_check(tr.add_conflict_range(key("a"), strinc_str(key("a")), FDB_CONFLICT_RANGE_TYPE_WRITE));
 		fdb::EmptyFuture f1 = tr.commit();
 
 		fdb_error_t err = wait_future(f1);
@@ -1828,41 +1962,6 @@ TEST_CASE("special-key-space set transaction ID after write") {
 	}
 }
 
-TEST_CASE("special-key-space set token after write") {
-	fdb::Transaction tr(db);
-	fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
-	while (1) {
-		tr.set(key("foo"), "bar");
-		tr.set("\xff\xff/tracing/token", "false");
-		fdb::ValueFuture f1 = tr.get("\xff\xff/tracing/token",
-		                             /* snapshot */ false);
-
-		fdb_error_t err = wait_future(f1);
-		if (err) {
-			fdb::EmptyFuture f2 = tr.on_error(err);
-			fdb_check(wait_future(f2));
-			continue;
-		}
-
-		int out_present;
-		char* val;
-		int vallen;
-		fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
-
-		REQUIRE(out_present);
-		uint64_t token = std::stoul(std::string(val, vallen));
-		CHECK(token != 0);
-		break;
-	}
-}
-
-TEST_CASE("special-key-space valid token") {
-	auto value = get_value("\xff\xff/tracing/token", /* snapshot */ false, {});
-	REQUIRE(value.has_value());
-	uint64_t token = std::stoul(value.value());
-	CHECK(token > 0);
-}
-
 TEST_CASE("special-key-space disable tracing") {
 	fdb::Transaction tr(db);
 	fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
@@ -1890,48 +1989,6 @@ TEST_CASE("special-key-space disable tracing") {
 	}
 }
 
-TEST_CASE("FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE") {
-	fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE, nullptr, 0));
-
-	auto value = get_value("\xff\xff/tracing/token", /* snapshot */ false, {});
-	REQUIRE(value.has_value());
-	uint64_t token = std::stoul(value.value());
-	CHECK(token == 0);
-
-	fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_ENABLE, nullptr, 0));
-}
-
-TEST_CASE("FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE enable tracing for transaction") {
-	fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_DISABLE, nullptr, 0));
-
-	fdb::Transaction tr(db);
-	fdb_check(tr.set_option(FDB_TR_OPTION_SPECIAL_KEY_SPACE_ENABLE_WRITES, nullptr, 0));
-	while (1) {
-		tr.set("\xff\xff/tracing/token", "true");
-		fdb::ValueFuture f1 = tr.get("\xff\xff/tracing/token",
-		                             /* snapshot */ false);
-
-		fdb_error_t err = wait_future(f1);
-		if (err) {
-			fdb::EmptyFuture f2 = tr.on_error(err);
-			fdb_check(wait_future(f2));
-			continue;
-		}
-
-		int out_present;
-		char* val;
-		int vallen;
-		fdb_check(f1.get(&out_present, (const uint8_t**)&val, &vallen));
-
-		REQUIRE(out_present);
-		uint64_t token = std::stoul(std::string(val, vallen));
-		CHECK(token > 0);
-		break;
-	}
-
-	fdb_check(fdb_database_set_option(db, FDB_DB_OPTION_DISTRIBUTED_TRANSACTION_TRACE_ENABLE, nullptr, 0));
-}
-
 TEST_CASE("special-key-space tracing get range") {
 	std::string tracingBegin = "\xff\xff/tracing/";
 	std::string tracingEnd = "\xff\xff/tracing0";
@@ -1964,8 +2021,6 @@ TEST_CASE("special-key-space tracing get range") {
 		CHECK(!out_more);
 		CHECK(out_count == 2);
 
-		CHECK(std::string((char*)out_kv[0].key, out_kv[0].key_length) == tracingBegin + "token");
-		CHECK(std::stoul(std::string((char*)out_kv[0].value, out_kv[0].value_length)) > 0);
 		CHECK(std::string((char*)out_kv[1].key, out_kv[1].key_length) == tracingBegin + "transaction_id");
 		CHECK(std::stoul(std::string((char*)out_kv[1].value, out_kv[1].value_length)) > 0);
 		break;
@@ -2217,7 +2272,7 @@ TEST_CASE("commit_does_not_reset") {
 		continue;
 	}
 
-	fdb_check(tr2.add_conflict_range(key("foo"), strinc(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
+	fdb_check(tr2.add_conflict_range(key("foo"), strinc_str(key("foo")), FDB_CONFLICT_RANGE_TYPE_READ));
	tr2.set(key("foo"), "bar");
	fdb::EmptyFuture tr2CommitFuture = tr2.commit();
	err = wait_future(tr2CommitFuture);
bindings/java/fdbJNI.cpp
@@ -176,9 +176,9 @@ void promiseSend(JNIEnv, jclass, jlong self, jboolean value) {
 
 struct JNIError {
 	JNIEnv* env;
-	jthrowable throwable = nullptr;
-	const char* file;
-	int line;
+	jthrowable throwable{ nullptr };
+	const char* file{ nullptr };
+	int line{ 0 };
 
 	std::string location() const {
 		if (file == nullptr) {
@@ -756,6 +756,76 @@ JNIEXPORT jlong JNICALL Java_com_apple_foundationdb_FDBTransaction_Transaction_1getRange(
 	return (jlong)f;
 }
 
+JNIEXPORT jlong JNICALL
+Java_com_apple_foundationdb_FDBTransaction_Transaction_1getRangeAndFlatMap(JNIEnv* jenv,
+                                                                           jobject,
+                                                                           jlong tPtr,
+                                                                           jbyteArray keyBeginBytes,
+                                                                           jboolean orEqualBegin,
+                                                                           jint offsetBegin,
+                                                                           jbyteArray keyEndBytes,
+                                                                           jboolean orEqualEnd,
+                                                                           jint offsetEnd,
+                                                                           jbyteArray mapperBytes,
+                                                                           jint rowLimit,
+                                                                           jint targetBytes,
+                                                                           jint streamingMode,
+                                                                           jint iteration,
+                                                                           jboolean snapshot,
+                                                                           jboolean reverse) {
+	if (!tPtr || !keyBeginBytes || !keyEndBytes || !mapperBytes) {
+		throwParamNotNull(jenv);
+		return 0;
+	}
+	FDBTransaction* tr = (FDBTransaction*)tPtr;
+
+	uint8_t* barrBegin = (uint8_t*)jenv->GetByteArrayElements(keyBeginBytes, JNI_NULL);
+	if (!barrBegin) {
+		if (!jenv->ExceptionOccurred())
+			throwRuntimeEx(jenv, "Error getting handle to native resources");
+		return 0;
+	}
+
+	uint8_t* barrEnd = (uint8_t*)jenv->GetByteArrayElements(keyEndBytes, JNI_NULL);
+	if (!barrEnd) {
+		jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
+		if (!jenv->ExceptionOccurred())
+			throwRuntimeEx(jenv, "Error getting handle to native resources");
+		return 0;
+	}
+
+	uint8_t* barrMapper = (uint8_t*)jenv->GetByteArrayElements(mapperBytes, JNI_NULL);
+	if (!barrMapper) {
+		jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
+		jenv->ReleaseByteArrayElements(keyEndBytes, (jbyte*)barrEnd, JNI_ABORT);
+		if (!jenv->ExceptionOccurred())
+			throwRuntimeEx(jenv, "Error getting handle to native resources");
+		return 0;
+	}
+
+	FDBFuture* f = fdb_transaction_get_range_and_flat_map(tr,
+	                                                      barrBegin,
+	                                                      jenv->GetArrayLength(keyBeginBytes),
+	                                                      orEqualBegin,
+	                                                      offsetBegin,
+	                                                      barrEnd,
+	                                                      jenv->GetArrayLength(keyEndBytes),
+	                                                      orEqualEnd,
+	                                                      offsetEnd,
+	                                                      barrMapper,
+	                                                      jenv->GetArrayLength(mapperBytes),
+	                                                      rowLimit,
+	                                                      targetBytes,
+	                                                      (FDBStreamingMode)streamingMode,
+	                                                      iteration,
+	                                                      snapshot,
+	                                                      reverse);
+	jenv->ReleaseByteArrayElements(keyBeginBytes, (jbyte*)barrBegin, JNI_ABORT);
+	jenv->ReleaseByteArrayElements(keyEndBytes, (jbyte*)barrEnd, JNI_ABORT);
+	jenv->ReleaseByteArrayElements(mapperBytes, (jbyte*)barrMapper, JNI_ABORT);
+	return (jlong)f;
+}
+
 JNIEXPORT void JNICALL Java_com_apple_foundationdb_FutureResults_FutureResults_1getDirect(JNIEnv* jenv,
                                                                                           jobject,
                                                                                           jlong future,
bindings/java/src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java (new file, 256 lines)
@@ -0,0 +1,256 @@
/*
 * RangeAndFlatMapQueryIntegrationTest.java
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2018 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.apple.foundationdb;

import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicReference;

import com.apple.foundationdb.async.AsyncIterable;
import com.apple.foundationdb.async.AsyncUtil;
import com.apple.foundationdb.tuple.ByteArrayUtil;
import com.apple.foundationdb.tuple.Tuple;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.Assertions;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;

@ExtendWith(RequiresDatabase.class)
class RangeAndFlatMapQueryIntegrationTest {
	private static final FDB fdb = FDB.selectAPIVersion(710);
	public String databaseArg = null;
	private Database openFDB() { return fdb.open(databaseArg); }

	@BeforeEach
	@AfterEach
	void clearDatabase() throws Exception {
		/*
		 * Empty the database before and after each run, just in case
		 */
		try (Database db = openFDB()) {
			db.run(tr -> {
				tr.clear(Range.startsWith(new byte[] { (byte)0x00 }));
				return null;
			});
		}
	}

	static private final byte[] EMPTY = Tuple.from().pack();
	static private final String PREFIX = "prefix";
	static private final String RECORD = "RECORD";
	static private final String INDEX = "INDEX";
	static private String primaryKey(int i) { return String.format("primary-key-of-record-%08d", i); }
	static private String indexKey(int i) { return String.format("index-key-of-record-%08d", i); }
	static private String dataOfRecord(int i) { return String.format("data-of-record-%08d", i); }

	static byte[] MAPPER = Tuple.from(PREFIX, RECORD, "{K[3]}").pack();
	static private byte[] indexEntryKey(final int i) {
		return Tuple.from(PREFIX, INDEX, indexKey(i), primaryKey(i)).pack();
	}
	static private byte[] recordKey(final int i) { return Tuple.from(PREFIX, RECORD, primaryKey(i)).pack(); }
	static private byte[] recordValue(final int i) { return Tuple.from(dataOfRecord(i)).pack(); }

	static private void insertRecordWithIndex(final Transaction tr, final int i) {
		tr.set(indexEntryKey(i), EMPTY);
		tr.set(recordKey(i), recordValue(i));
	}

	private static String getArgFromEnv() {
		String[] clusterFiles = MultiClientHelper.readClusterFromEnv();
		String cluster = clusterFiles[0];
		System.out.printf("Using Cluster: %s\n", cluster);
		return cluster;
	}
	public static void main(String[] args) throws Exception {
		final RangeAndFlatMapQueryIntegrationTest test = new RangeAndFlatMapQueryIntegrationTest();
		test.databaseArg = getArgFromEnv();
		test.clearDatabase();
		test.comparePerformance();
		test.clearDatabase();
	}

	int numRecords = 10000;
	int numQueries = 10000;
	int numRecordsPerQuery = 100;
	boolean validate = false;
	@Test
	void comparePerformance() {
		FDB fdb = FDB.selectAPIVersion(710);
		try (Database db = openFDB()) {
			insertRecordsWithIndexes(numRecords, db);
			instrument(rangeQueryAndGet, "rangeQueryAndGet", db);
			instrument(rangeQueryAndFlatMap, "rangeQueryAndFlatMap", db);
		}
	}

	private void instrument(final RangeQueryWithIndex query, final String name, final Database db) {
		System.out.printf("Starting %s (numQueries:%d, numRecordsPerQuery:%d)\n", name, numQueries, numRecordsPerQuery);
		long startTime = System.currentTimeMillis();
		for (int queryId = 0; queryId < numQueries; queryId++) {
			int begin = ThreadLocalRandom.current().nextInt(numRecords - numRecordsPerQuery);
			query.run(begin, begin + numRecordsPerQuery, db);
		}
		long time = System.currentTimeMillis() - startTime;
		System.out.printf("Finished %s, it takes %d ms for %d queries (%d qps)\n", name, time, numQueries,
		                  numQueries * 1000L / time);
	}

	static private final int RECORDS_PER_TXN = 100;
	static private void insertRecordsWithIndexes(int n, Database db) {
		int i = 0;
		while (i < n) {
			int begin = i;
			int end = Math.min(n, i + RECORDS_PER_TXN);
			// insert [begin, end) in one transaction
			db.run(tr -> {
				for (int t = begin; t < end; t++) {
					insertRecordWithIndex(tr, t);
				}
				return null;
			});
			i = end;
		}
	}

	public interface RangeQueryWithIndex {
		void run(int begin, int end, Database db);
	}

	RangeQueryWithIndex rangeQueryAndGet = (int begin, int end, Database db) -> db.run(tr -> {
		try {
			List<KeyValue> kvs = tr.getRange(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
			                                 KeySelector.firstGreaterOrEqual(indexEntryKey(end)),
			                                 ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
			                        .asList()
			                        .get();
			Assertions.assertEquals(end - begin, kvs.size());

			// Get the records of each index entry IN PARALLEL.
			List<CompletableFuture<byte[]>> resultFutures = new ArrayList<>();
			// In reality, we need to get the record key by parsing the index entry key. But considering this is a
			// performance test, we just ignore the returned key and simply generate it from recordKey.
			for (int id = begin; id < end; id++) {
				resultFutures.add(tr.get(recordKey(id)));
			}
			AsyncUtil.whenAll(resultFutures).get();

			if (validate) {
				final Iterator<KeyValue> indexes = kvs.iterator();
				final Iterator<CompletableFuture<byte[]>> records = resultFutures.iterator();
				for (int id = begin; id < end; id++) {
					Assertions.assertTrue(indexes.hasNext());
					assertByteArrayEquals(indexEntryKey(id), indexes.next().getKey());
					Assertions.assertTrue(records.hasNext());
					assertByteArrayEquals(recordValue(id), records.next().get());
				}
				Assertions.assertFalse(indexes.hasNext());
				Assertions.assertFalse(records.hasNext());
			}
		} catch (Exception e) {
			Assertions.fail("Unexpected exception", e);
		}
		return null;
	});

	RangeQueryWithIndex rangeQueryAndFlatMap = (int begin, int end, Database db) -> db.run(tr -> {
		try {
			tr.options().setReadYourWritesDisable();
			List<KeyValue> kvs =
			    tr.snapshot()
			        .getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(begin)),
			                            KeySelector.firstGreaterOrEqual(indexEntryKey(end)), MAPPER,
			                            ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
			        .asList()
			        .get();
			Assertions.assertEquals(end - begin, kvs.size());

			if (validate) {
				final Iterator<KeyValue> results = kvs.iterator();
				for (int id = begin; id < end; id++) {
					Assertions.assertTrue(results.hasNext());
					assertByteArrayEquals(recordValue(id), results.next().getValue());
				}
				Assertions.assertFalse(results.hasNext());
			}
		} catch (Exception e) {
			Assertions.fail("Unexpected exception", e);
		}
		return null;
	});

	void assertByteArrayEquals(byte[] expected, byte[] actual) {
		Assertions.assertEquals(ByteArrayUtil.printable(expected), ByteArrayUtil.printable(actual));
	}

	@Test
	void rangeAndFlatMapQueryOverMultipleRows() throws Exception {
		try (Database db = openFDB()) {
			insertRecordsWithIndexes(3, db);

			List<byte[]> expected_data_of_records = new ArrayList<>();
			for (int i = 0; i <= 1; i++) {
				expected_data_of_records.add(recordValue(i));
			}

			db.run(tr -> {
				// getRangeAndFlatMap is only supported without RYW, so this option is required.
				tr.options().setReadYourWritesDisable();

				// getRangeAndFlatMap is only supported with snapshot.
				Iterator<KeyValue> kvs =
				    tr.snapshot()
				        .getRangeAndFlatMap(KeySelector.firstGreaterOrEqual(indexEntryKey(0)),
				                            KeySelector.firstGreaterThan(indexEntryKey(1)), MAPPER,
				                            ReadTransaction.ROW_LIMIT_UNLIMITED, false, StreamingMode.WANT_ALL)
				        .iterator();
				Iterator<byte[]> expected_data_of_records_iter = expected_data_of_records.iterator();
				while (expected_data_of_records_iter.hasNext()) {
					Assertions.assertTrue(kvs.hasNext(), "iterator ended too early");
					KeyValue kv = kvs.next();
					byte[] actual_data_of_record = kv.getValue();
					byte[] expected_data_of_record = expected_data_of_records_iter.next();

					// System.out.println("result key:" + ByteArrayUtil.printable(kv.getKey()) + " value:" +
					// ByteArrayUtil.printable(kv.getValue()));
					// Output:
					// result
					// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-0\x00\x02primary-key-of-record-0\x00
					// value:\x02data-of-record-0\x00
					// result
					// key:\x02prefix\x00\x02INDEX\x00\x02index-key-of-record-1\x00\x02primary-key-of-record-1\x00
					// value:\x02data-of-record-1\x00

					// For now, we don't guarantee what the returned keys mean.
					Assertions.assertArrayEquals(expected_data_of_record, actual_data_of_record,
					                             "Incorrect data of record!");
				}
				Assertions.assertFalse(kvs.hasNext(), "Iterator returned too much data");

				return null;
			});
		}
	}
}
bindings/java/src/junit/com/apple/foundationdb/FakeFDBTransaction.java
@@ -88,8 +88,11 @@ public class FakeFDBTransaction extends FDBTransaction {
 	public int getNumRangeCalls() { return numRangeCalls; }
 
 	@Override
-	protected FutureResults getRange_internal(KeySelector begin, KeySelector end, int rowLimit, int targetBytes,
-	                                          int streamingMode, int iteration, boolean isSnapshot, boolean reverse) {
+	protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
+	                                          // TODO: map is not supported in FakeFDBTransaction yet.
+	                                          byte[] mapper, // Nullable
+	                                          int rowLimit, int targetBytes, int streamingMode, int iteration,
+	                                          boolean isSnapshot, boolean reverse) {
 		numRangeCalls++;
 		// TODO this is probably not correct for all KeySelector instances--we'll want to match with real behavior
 		NavigableMap<byte[], byte[]> range =
bindings/java/src/main/com/apple/foundationdb/FDBTransaction.java
@@ -91,6 +91,15 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {
 			return FDBTransaction.this.getRangeSplitPoints(range, chunkSize);
 		}
 
+		@Override
+		public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
+		                                                  boolean reverse, StreamingMode mode) {
+			if (mapper == null) {
+				throw new IllegalArgumentException("Mapper must be non-null");
+			}
+			return new RangeQuery(FDBTransaction.this, true, begin, end, mapper, limit, reverse, mode, eventKeeper);
+		}
+
 		///////////////////
 		//  getRange -> KeySelectors
 		///////////////////
@@ -338,6 +347,12 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {
 		return this.getRangeSplitPoints(range.begin, range.end, chunkSize);
 	}
 
+	@Override
+	public AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
+	                                                  boolean reverse, StreamingMode mode) {
+		throw new UnsupportedOperationException("getRangeAndFlatMap is only supported in snapshot");
+	}
+
 	///////////////////
 	//  getRange -> KeySelectors
 	///////////////////
@@ -415,10 +430,10 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {
 	}
 
 	// Users of this function must close the returned FutureResults when finished
-	protected FutureResults getRange_internal(
-	    KeySelector begin, KeySelector end,
-	    int rowLimit, int targetBytes, int streamingMode,
-	    int iteration, boolean isSnapshot, boolean reverse) {
+	protected FutureResults getRange_internal(KeySelector begin, KeySelector end,
+	                                          byte[] mapper, // Nullable
+	                                          int rowLimit, int targetBytes, int streamingMode, int iteration,
+	                                          boolean isSnapshot, boolean reverse) {
 		if (eventKeeper != null) {
 			eventKeeper.increment(Events.JNI_CALL);
 		}
@@ -429,10 +444,14 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {
 			    begin.toString(), end.toString(), rowLimit, targetBytes, streamingMode,
 			    iteration, Boolean.toString(isSnapshot), Boolean.toString(reverse)));*/
 			return new FutureResults(
-			    Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
-			                         end.getKey(), end.orEqual(), end.getOffset(), rowLimit, targetBytes,
-			                         streamingMode, iteration, isSnapshot, reverse),
-			    FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
+			    mapper == null
+			        ? Transaction_getRange(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(), end.getKey(),
+			                               end.orEqual(), end.getOffset(), rowLimit, targetBytes, streamingMode,
			                               iteration, isSnapshot, reverse)
+			        : Transaction_getRangeAndFlatMap(getPtr(), begin.getKey(), begin.orEqual(), begin.getOffset(),
+			                                         end.getKey(), end.orEqual(), end.getOffset(), mapper, rowLimit,
+			                                         targetBytes, streamingMode, iteration, isSnapshot, reverse),
+			    FDB.instance().isDirectBufferQueriesEnabled(), executor, eventKeeper);
 		} finally {
 			pointerReadLock.unlock();
 		}
@@ -771,6 +790,12 @@ class FDBTransaction extends NativeObjectWrapper implements Transaction, OptionConsumer {
 	                                          byte[] keyEnd, boolean orEqualEnd, int offsetEnd,
 	                                          int rowLimit, int targetBytes, int streamingMode, int iteration,
 	                                          boolean isSnapshot, boolean reverse);
+	private native long Transaction_getRangeAndFlatMap(long cPtr, byte[] keyBegin, boolean orEqualBegin,
+	                                                   int offsetBegin, byte[] keyEnd, boolean orEqualEnd,
+	                                                   int offsetEnd,
+	                                                   byte[] mapper, // Nonnull
+	                                                   int rowLimit, int targetBytes, int streamingMode, int iteration,
+	                                                   boolean isSnapshot, boolean reverse);
 	private native void Transaction_addConflictRange(long cPtr,
 	                                                 byte[] keyBegin, byte[] keyEnd, int conflictRangeType);
 	private native void Transaction_set(long cPtr, byte[] key, byte[] value);
bindings/java/src/main/com/apple/foundationdb/RangeQuery.java
@@ -49,17 +49,19 @@ class RangeQuery implements AsyncIterable<KeyValue> {
 	private final FDBTransaction tr;
 	private final KeySelector begin;
 	private final KeySelector end;
+	private final byte[] mapper; // Nullable
 	private final boolean snapshot;
 	private final int rowLimit;
 	private final boolean reverse;
 	private final StreamingMode streamingMode;
 	private final EventKeeper eventKeeper;
 
-	RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, int rowLimit,
-	           boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
+	RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, byte[] mapper,
+	           int rowLimit, boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
 		this.tr = transaction;
 		this.begin = begin;
 		this.end = end;
+		this.mapper = mapper;
 		this.snapshot = isSnapshot;
 		this.rowLimit = rowLimit;
 		this.reverse = reverse;
@@ -67,6 +69,12 @@ class RangeQuery implements AsyncIterable<KeyValue> {
 		this.eventKeeper = eventKeeper;
 	}
 
+	// RangeQueryAndFlatMap
+	RangeQuery(FDBTransaction transaction, boolean isSnapshot, KeySelector begin, KeySelector end, int rowLimit,
+	           boolean reverse, StreamingMode streamingMode, EventKeeper eventKeeper) {
+		this(transaction, isSnapshot, begin, end, null, rowLimit, reverse, streamingMode, eventKeeper);
+	}
+
 	/**
 	 * Returns all the results from the range requested as a {@code List}. If there were no
 	 * limits on the original query and there is a large amount of data in the database
@@ -83,16 +91,16 @@ class RangeQuery implements AsyncIterable<KeyValue> {
 
 		// if the streaming mode is EXACT, try and grab things as one chunk
 		if(mode == StreamingMode.EXACT) {
-			FutureResults range = tr.getRange_internal(
-			    this.begin, this.end, this.rowLimit, 0, StreamingMode.EXACT.code(),
-			    1, this.snapshot, this.reverse);
+			FutureResults range = tr.getRange_internal(this.begin, this.end, this.mapper, this.rowLimit, 0,
+			                                           StreamingMode.EXACT.code(), 1, this.snapshot, this.reverse);
 
 			return range.thenApply(result -> result.get().values)
 			            .whenComplete((result, e) -> range.close());
 		}
 
 		// If the streaming mode is not EXACT, simply collect the results of an
 		// iteration into a list
-		return AsyncUtil.collect(new RangeQuery(tr, snapshot, begin, end, rowLimit, reverse, mode, eventKeeper),
+		return AsyncUtil.collect(new RangeQuery(tr, snapshot, begin, end, mapper, rowLimit, reverse, mode, eventKeeper),
 		                         tr.getExecutor());
 	}
@@ -221,8 +229,8 @@ class RangeQuery implements AsyncIterable<KeyValue> {
 
 			nextFuture = new CompletableFuture<>();
 			final long sTime = System.nanoTime();
-			fetchingChunk = tr.getRange_internal(begin, end, rowsLimited ? rowsRemaining : 0, 0, streamingMode.code(),
-			                                     ++iteration, snapshot, reverse);
+			fetchingChunk = tr.getRange_internal(begin, end, mapper, rowsLimited ? rowsRemaining : 0, 0,
+			                                     streamingMode.code(), ++iteration, snapshot, reverse);
 
 			BiConsumer<RangeResultInfo,Throwable> cons = new FetchComplete(fetchingChunk,nextFuture);
 			if(eventKeeper!=null){
bindings/java/src/main/com/apple/foundationdb/ReadTransaction.java
@@ -424,6 +424,41 @@ public interface ReadTransaction extends ReadTransactionContext {
 	AsyncIterable<KeyValue> getRange(Range range,
 	                                 int limit, boolean reverse, StreamingMode mode);
 
+	/**
+	 * WARNING: This feature is considered experimental at this time. It is only allowed when using snapshot isolation
+	 * AND disabling read-your-writes.
+	 *
+	 * @see KeySelector
+	 * @see AsyncIterator
+	 *
+	 * @param begin the beginning of the range (inclusive)
+	 * @param end the end of the range (exclusive)
+	 * @param mapper TODO
+	 * @param limit the maximum number of results to return. Limits results to the
+	 *  <i>first</i> keys in the range. Pass {@link #ROW_LIMIT_UNLIMITED} if this query
+	 *  should not limit the number of results. If {@code reverse} is {@code true} rows
+	 *  will be limited starting at the end of the range.
+	 * @param reverse return results starting at the end of the range in reverse order.
+	 *  Reading ranges in reverse is supported natively by the database and should
+	 *  have minimal extra cost.
+	 * @param mode provide a hint about how the results are to be used. This
+	 *  can provide speed improvements or efficiency gains based on the caller's
+	 *  knowledge of the upcoming access pattern.
+	 *
+	 * <p>
+	 *     When converting the result of this query to a list using {@link AsyncIterable#asList()} with the {@code
+	 *     ITERATOR} streaming mode, the query is automatically modified to fetch results in larger batches. This is done
+	 *     because it is known in advance that the {@link AsyncIterable#asList()} function will fetch all results in the
+	 *     range. If a limit is specified, the {@code EXACT} streaming mode will be used, and otherwise it will use {@code
+	 *     WANT_ALL}.
+	 *
+	 *     To achieve comparable performance when iterating over an entire range without using {@link
+	 *     AsyncIterable#asList()}, the same streaming mode would need to be used.
+	 * </p>
+	 * @return a handle to access the results of the asynchronous call
+	 */
+	AsyncIterable<KeyValue> getRangeAndFlatMap(KeySelector begin, KeySelector end, byte[] mapper, int limit,
+	                                           boolean reverse, StreamingMode mode);
+
 	/**
 	 * Gets an estimate for the number of bytes stored in the given range.
bindings/java/CMakeLists.txt
@@ -52,6 +52,7 @@ set(JAVA_INTEGRATION_TESTS
   src/integration/com/apple/foundationdb/CycleMultiClientIntegrationTest.java
   src/integration/com/apple/foundationdb/SidebandMultiThreadClientTest.java
   src/integration/com/apple/foundationdb/RepeatableReadMultiThreadClientTest.java
+  src/integration/com/apple/foundationdb/RangeAndFlatMapQueryIntegrationTest.java
 )
 
 # Resources that are used in integration testing, but are not explicitly test files (JUnit rules,
cmake/CompileBoost.cmake
@@ -49,8 +49,8 @@ function(compile_boost)
   include(ExternalProject)
   set(BOOST_INSTALL_DIR "${CMAKE_BINARY_DIR}/boost_install")
   ExternalProject_add("${COMPILE_BOOST_TARGET}Project"
-    URL "https://boostorg.jfrog.io/artifactory/main/release/1.72.0/source/boost_1_72_0.tar.bz2"
-    URL_HASH SHA256=59c9b274bc451cf91a9ba1dd2c7fdcaf5d60b1b3aa83f2c9fa143417cc660722
+    URL "https://boostorg.jfrog.io/artifactory/main/release/1.77.0/source/boost_1_77_0.tar.bz2"
+    URL_HASH SHA256=fc9f85fc030e233142908241af7a846e60630aa7388de9a5fafb1f3a26840854
     CONFIGURE_COMMAND ${BOOTSTRAP_COMMAND} ${BOOTSTRAP_ARGS} --with-libraries=${BOOTSTRAP_LIBRARIES} --with-toolset=${BOOST_TOOLSET}
     BUILD_COMMAND ${B2_COMMAND} link=static ${COMPILE_BOOST_BUILD_ARGS} --prefix=${BOOST_INSTALL_DIR} ${USER_CONFIG_FLAG} install
     BUILD_IN_SOURCE ON
@@ -113,7 +113,7 @@ if(WIN32)
   return()
 endif()
 
-find_package(Boost 1.72.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS})
+find_package(Boost 1.77.0 EXACT QUIET COMPONENTS context CONFIG PATHS ${BOOST_HINT_PATHS})
 set(FORCE_BOOST_BUILD OFF CACHE BOOL "Forces cmake to build boost and ignores any installed boost")
 
 if(Boost_FOUND AND NOT FORCE_BOOST_BUILD)
cmake/ConfigureCompiler.cmake
@@ -282,18 +282,11 @@ else()
 -Woverloaded-virtual
 -Wshift-sign-overflow
 # Here's the current set of warnings we need to explicitly disable to compile warning-free with clang 11
 -Wno-comment
 -Wno-delete-non-virtual-dtor
 -Wno-format
 -Wno-mismatched-tags
 -Wno-missing-field-initializers
 -Wno-sign-compare
 -Wno-tautological-pointer-compare
 -Wno-undefined-var-template
 -Wno-unknown-pragmas
 -Wno-unknown-warning-option
 -Wno-unused-function
 -Wno-unused-local-typedef
 -Wno-unused-parameter
 )
 if (USE_CCACHE)
@@ -320,7 +313,7 @@ else()
   -fvisibility=hidden
   -Wreturn-type
   -fPIC)
-  if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86")
+  if (CMAKE_HOST_SYSTEM_PROCESSOR MATCHES "^x86" AND NOT CLANG)
     add_compile_options($<$<COMPILE_LANGUAGE:CXX>:-Wclass-memaccess>)
   endif()
   if (GPERFTOOLS_FOUND AND GCC)
design/transaction-state-store.md (new file, 111 lines)
@@ -0,0 +1,111 @@
# Transaction State Store (txnStateStore)

This document describes the transaction state store (often referred to as `txnStateStore` in the code) in FDB. The transaction state store keeps important metadata about the database that is used to bootstrap the database, to guide the transaction system in persisting writes (i.e., to help assign storage tags to mutations at commit proxies), and to manage data (i.e., shard) movement metadata. This is a critical piece of information that has to be consistent across many processes and persistent for recovery.

Acknowledgment: much of this content is taken from [Evan's FDB brownbag talk](https://drive.google.com/file/d/15UvKiNc-jSFfDGygNmLQP_d4b14X3DAS/).

## What information is stored in transaction state store?

The information includes: shard mapping (key range to storage server mapping, i.e.,
`keyServers`), storage server tags (`serverTags`), tagLocalityList, storage server tag
history, database locked flag, metadata version, mustContainSystemMutations, coordinators,
storage server interface (`serverList`), database configurations, TSS mappings and
quarantines, backup apply mutation ranges and log ranges, etc.

The transaction state store's data is kept in the system key space, i.e., under the
`\xff` prefix. Note that all data in the system key space are saved on storage servers. The
`txnStateStore` is only a part of the `\xff` key space, and is additionally kept in the
memory of commit proxies as well as on the disks of the log system (i.e., TLogs). Changes to
the `txnStateStore` are special mutations to the `\xff` key space, which are (inconsistently)
called "metadata mutations" in commit proxies and "state mutations" in Resolvers.
## Why do we need transaction state store?
|
||||
|
||||
When bootstraping an FDB cluster, the new master (i.e., the sequencer) role recruits a
|
||||
new transaction system and initializes them. In particular, the transaction state store
|
||||
is first read by the master from previous generation's log system, and then broadcast to
|
||||
all commit proxies of the new transaction system. After initializing `txnStateStore`, these
|
||||
commit proxies know how to assign mutations with storage server tags: `txnStateStore`
|
||||
contains the shard map from key range to storage servers; commit proxies use the shard
|
||||
map to find and attach the destination storage tags for each mutation.
|
||||
|
||||
## How is transaction state store replicated?
|
||||
|
||||
The `txnStateStore` is replicated in all commit proxies' memories. It is very important
|
||||
that `txnStateStore` data are consistent, otherwise, a shard change issued by one commit
|
||||
proxy could result in a situation where different proxies think they should send a
|
||||
mutation to different storage servers, thus causing data corruptions.
|
||||
|
||||
FDB solves this problem by state machine replication: all commit proxies start with the
|
||||
same `txnStateStore` data (from master broadcast), and apply the same sequence of mutations.
|
||||
Because commits happen at all proxies, it is difficult to maintain the same order as well
|
||||
as minimize the communication among them. Fortunately, every transaction has to send a
|
||||
conflict resolution request to all Resolvers and they process transactions in strict order
|
||||
of commit versions. Leveraging this mechanism, each commit proxy sends all metadata
|
||||
(i.e., system key) mutations to all Resolvers. Resolvers keep these mutations in memory
|
||||
and forward to other commit proxies in separate resolution response. Each commit proxy
|
||||
receive resolution response, along with metadata mutations happend at other proxies before
|
||||
its commit version, and apply all these metadata mutations in the commit order.
|
||||
Finally, this proxy only writes metadata mutations in its own transaction batch to TLogs,
|
||||
i.e., do not write other proxies' metadata mutations to TLogs to avoid repeated writes.
|
||||
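To make the ordering argument concrete, here is a minimal sketch of the idea (hypothetical types, not the actual proxy code):

```cpp
#include <map>
#include <vector>

struct Mutation {}; // stand-in for a metadata mutation
using Version = long long;

struct TxnStateStoreReplica {
	std::map<Version, std::vector<Mutation>> pending; // keyed by commit version
	Version lastApplied = 0;

	// Called with the metadata mutations carried in a resolution response.
	void onResolution(Version commitVersion, std::vector<Mutation> metadataMutations) {
		pending.emplace(commitVersion, std::move(metadataMutations));
		// Apply strictly in ascending commit-version order; since every proxy
		// applies the same sequence, all in-memory copies stay identical.
		while (!pending.empty()) {
			auto it = pending.begin();
			for (const Mutation& m : it->second)
				apply(m);
			lastApplied = it->first;
			pending.erase(it);
		}
	}

	void apply(const Mutation&) { /* mutate the in-memory txnStateStore */ }
};
```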
It's worth calling out that everything in the `txnStateStore` is stored on some storage
servers, and a client (e.g., `fdbcli`) can read from these storage servers. During the
commit process, commit proxies parse all mutations in a batch of transactions, and apply
the changes (i.e., metadata mutations) to their in-memory copies of `txnStateStore`. Later, the
same changes are applied at storage servers for persistence. Additionally, the process
of storing `txnStateStore` at the log system is described below.

Notably, `applyMetadataMutations()` is the function that commit proxies use to make changes
to `txnStateStore`. The key ranges stored in `txnStateStore` include `[\xff, \xff\x02)` and
`[\xff\x03, \xff\xff)`, but not everything in these ranges. No data in the range
`[\xff\x02, \xff\x03)` belongs to `txnStateStore`; e.g., the `\xff\x02` prefix is used for
backup data, and mutations to it are *NOT* metadata mutations.

## How is the transaction state store persisted at the log system?

When a commit proxy writes metadata mutations to the log system, the proxy assigns a
"txs" tag to each mutation. Depending on the FDB version, the "txs" tag can be the one special
tag `txsTag{ tagLocalitySpecial, 1 }` for `TLogVersion::V3` (FDB 6.1), or a randomized
"txs" tag for `TLogVersion::V4` (FDB 6.2) and later. The idea of the randomized
"txs" tag is to spread metadata mutations across all TLogs for faster parallel recovery of
`txnStateStore`.
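A minimal sketch of the randomization idea (the constant and helper below are hypothetical, for illustration only):

```cpp
#include <cstdint>
#include <random>

struct Tag {
	int8_t locality;
	uint16_t id;
};
constexpr int8_t tagLocalityTxs = -6; // hypothetical value, for illustration

// Picking a random id out of nTxsTags spreads "txs" data over all TLogs,
// so recovery can peek the ids in parallel instead of from one tag.
Tag randomTxsTag(int nTxsTags, std::mt19937& rng) {
	std::uniform_int_distribution<int> dist(0, nTxsTags - 1);
	return Tag{ tagLocalityTxs, static_cast<uint16_t>(dist(rng)) };
}
```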
At TLogs, all mutation data is indexed by tags. "txs" tag data is special, since it is
only peeked by the master during transaction system recovery.
See the [TLog Spilling doc](tlog-spilling.md.html) for a more detailed discussion of the
topic of spilling "txs" data. In short, `txsTag` is spilled by value.
"txs" tag data is indexed and stored on both primary TLogs and satellite TLogs.
Note that satellite TLogs only index log router tags and "txs" tags.

## How is the transaction state store implemented?

`txnStateStore` is kept in memory at commit proxies using `KeyValueStoreMemory`, which
uses `LogSystemDiskQueueAdapter` to be durable with the log system. As a result, reading
from `txnStateStore` never blocks, which means the futures returned by read calls should
always be ready. Writes to `txnStateStore` are first buffered in memory by the
`LogSystemDiskQueueAdapter`. After a commit proxy pushes transaction data to the log system
and the data becomes durable, the proxy clears the buffered data in `LogSystemDiskQueueAdapter`.
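A sketch of this buffer-then-clear pattern (hypothetical types; the real adapter also hands the buffered data to the commit path):

```cpp
#include <string>
#include <utility>
#include <vector>

// Hypothetical illustration of the LogSystemDiskQueueAdapter buffering idea.
struct BufferedMetadataWrites {
	std::vector<std::pair<std::string, std::string>> buffered; // key/value pairs

	void set(std::string key, std::string value) {
		// Writes are only buffered; durability comes from the log system push.
		buffered.emplace_back(std::move(key), std::move(value));
	}

	void onLogSystemPushDurable() {
		// Once the commit batch carrying these mutations is durable on the
		// TLogs, the in-memory buffer can be dropped.
		buffered.clear();
	}
};
```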
* Master reads `txnStateStore` from the old log system: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/masterserver.actor.cpp#L928-L931

* Master broadcasts `txnStateStore` to commit proxies: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/masterserver.actor.cpp#L940-L968

* Commit proxies receive the txnStateStore broadcast and build the `keyInfo` map: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1886-L1927
  * Look up the `keyInfo` map for `GetKeyServerLocationsRequest`: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1464
  * Look up the `keyInfo` map to assign storage tags to mutations: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L926 and https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L965-L1010

* Commit proxies recover the database lock flag and metadata version: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1942-L1944

* Commit proxies add metadata mutations to the Resolver request: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L137-L140

* Resolvers keep these mutations in memory: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/Resolver.actor.cpp#L220-L230

* Resolvers copy metadata mutations into the resolution reply message: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/Resolver.actor.cpp#L244-L249

* Commit proxies apply all metadata mutations (including those from other proxies) in commit order: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L740-L770

* Commit proxies only write the metadata mutations of their own transaction batch to TLogs: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L772-L774 adds mutations to `storeCommits`. Later, in `postResolution()`, https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1162-L1176, only the last entry in `storeCommits` is sent to TLogs.

* Commit proxies clear the buffered data in `LogSystemDiskQueueAdapter` after the TLog push: https://github.com/apple/foundationdb/blob/6281e647784e74dccb3a6cb88efb9d8b9cccd376/fdbserver/CommitProxyServer.actor.cpp#L1283-L1287
@ -588,3 +588,43 @@
.. |locality-get-addresses-for-key-blurb| replace::

    Returns a list of public network addresses as strings, one for each of the storage servers responsible for storing ``key`` and its associated value.

.. |option-knob| replace::

    Sets internal tuning or debugging knobs. The argument to this function should be a string representing the knob name and the value, e.g. "transaction_size_limit=1000".

.. |option-tls-verify-peers| replace::

    Sets the peer certificate field verification criteria.

.. |option-tls-ca-bytes| replace::

    Sets the certificate authority bundle.

.. |option-tls-ca-path| replace::

    Sets the file from which to load the certificate authority bundle.

.. |option-tls-password| replace::

    Sets the passphrase for the encrypted private key. The password should be set before setting the key in order to be used.

.. |option-set-disable-local-client| replace::

    Prevents connections through the local client, allowing only connections through externally loaded client libraries.

.. |option-set-client-threads-per-version| replace::

    Spawns multiple worker threads for each version of the client that is loaded. Setting this to a number greater than one implies disable_local_client.

.. |option-disable-client-statistics-logging| replace::

    Disables logging of client statistics, such as sampled transaction activity.

.. |option-enable-run-loop-profiling| replace::

    Enables a debugging feature to perform run loop profiling. Requires trace logging to be enabled. WARNING: this feature is not recommended for use in production.

.. |option-set-distributed-client-tracer| replace::

    Sets a tracer to run on the client. Should be set to the same value as the tracer set on the server.
@ -125,6 +125,10 @@ After importing the ``fdb`` module and selecting an API version, you probably wa

.. note:: |network-options-warning|

.. method :: fdb.options.set_knob(knob)

   |option-knob|

.. method :: fdb.options.set_trace_enable( output_directory=None )

   |option-trace-enable-blurb|
@ -188,6 +192,48 @@ After importing the ``fdb`` module and selecting an API version, you probably wa
.. method :: fdb.options.set_tls_key_bytes(bytes)

   |option-tls-key-bytes|

.. method :: fdb.options.set_tls_verify_peers(verification_pattern)

   |option-tls-verify-peers|

.. method :: fdb.options.set_tls_ca_bytes(ca_bundle)

   |option-tls-ca-bytes|

.. method :: fdb.options.set_tls_ca_path(path)

   |option-tls-ca-path|

.. method :: fdb.options.set_tls_password(password)

   |option-tls-password|

.. method :: fdb.options.set_disable_multi_version_client_api()

   |option-disable-multi-version-client-api|

.. method :: fdb.options.set_disable_local_client()

   |option-set-disable-local-client|

.. method :: fdb.options.set_client_threads_per_version(number)

   |option-set-client-threads-per-version|

.. method :: fdb.options.set_disable_client_statistics_logging()

   |option-disable-client-statistics-logging|

.. method :: fdb.options.set_enable_run_loop_profiling()

   |option-enable-run-loop-profiling|

.. method :: fdb.options.set_distributed_client_tracer(tracer_type)

   |option-set-distributed-client-tracer|

Please refer to fdboptions.py (generated) for a comprehensive list of options.

.. _api-python-keys:
@ -700,7 +700,7 @@
"ssd",
"ssd-1",
"ssd-2",
"ssd-redwood-experimental",
"ssd-redwood-1-experimental",
"ssd-rocksdb-experimental",
"memory",
"memory-1",
@ -713,7 +713,7 @@
"ssd",
"ssd-1",
"ssd-2",
"ssd-redwood-experimental",
"ssd-redwood-1-experimental",
"ssd-rocksdb-experimental",
"memory",
"memory-1",
@ -30,6 +30,7 @@ Features
* Improved the efficiency with which storage servers replicate data between themselves. `(PR #5017) <https://github.com/apple/foundationdb/pull/5017>`_
* Added support to ``exclude command`` to exclude based on locality match. `(PR #5113) <https://github.com/apple/foundationdb/pull/5113>`_
* Add the ``trace_partial_file_suffix`` network option. This option will give unfinished trace files a special suffix to indicate they're not complete yet. When the trace file is complete, it is renamed to remove the suffix. `(PR #5328) <https://github.com/apple/foundationdb/pull/5328>`_
* Added a "get range and flat map" feature with new APIs (see the Bindings section). Storage servers are able to generate the keys in the queries based on another query. With this, an upper layer can push some computations down to FDB to improve latency and bandwidth when reading. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_

Performance
-----------
@ -86,6 +87,8 @@ Bindings
* C: Added a function, ``fdb_database_create_snapshot``, to create a snapshot of the database. `(PR #4241) <https://github.com/apple/foundationdb/pull/4241/files>`_
* C: Added ``fdb_database_get_main_thread_busyness`` function to report how busy a client's main thread is. `(PR #4504) <https://github.com/apple/foundationdb/pull/4504>`_
* Java: Added ``Database.getMainThreadBusyness`` function to report how busy a client's main thread is. `(PR #4564) <https://github.com/apple/foundationdb/pull/4564>`_
* C: Added ``fdb_transaction_get_range_and_flat_map`` function to support running queries based on another query in one request. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_
* Java: Added ``Transaction.getRangeAndFlatMap`` function to support running queries based on another query in one request. `(PR #5609) <https://github.com/apple/foundationdb/pull/5609>`_

Other Changes
-------------
@ -85,23 +85,12 @@ Control options
In addition to the command line parameter described above, tracing can be set
at a database and transaction level.

Tracing can be globally disabled by setting the
``distributed_transaction_trace_disable`` database option. It can be enabled by
setting the ``distributed_transaction_trace_enable`` database option. If
neither option is specified but a tracer option is set as described above,
tracing will be enabled.
Tracing can be controlled on a global level by setting the
``TRACING_SAMPLE_RATE`` knob. Set the knob to 0.0 to record no traces, to 1.0
to record all traces, or somewhere in the middle. Traces are sampled as a unit.
All individual spans in the trace will be included in the sample.

Tracing can be enabled or disabled for individual transactions. The special key
space exposes an API to set a custom trace ID for a transaction, or to disable
tracing for the transaction. See the special key space :ref:`tracing module
documentation <special-key-space-tracing-module>` to learn more.

^^^^^^^^^^^^^^
Trace sampling
^^^^^^^^^^^^^^

By default, all traces are recorded. If tracing is producing too much data,
adjust the trace sample rate with the ``TRACING_SAMPLE_RATE`` knob. Set the
knob to 0.0 to record no traces, to 1.0 to record all traces, or somewhere in
the middle. Traces are sampled as a unit. All individual spans in the trace
will be included in the sample.
@ -13,14 +13,14 @@ Adding tags to transactions

Tags are added to a transaction by using transaction options. Each transaction can include up to five tags, and each tag must not exceed 16 characters. There are two options that can be used to add tags:

* ``TAG`` - Adds a tag to the transaction. This tag will not be used for auto-throttling and is not included with read requests. Tags set in this way can only be throttled manually.
* ``AUTO_THROTTLE_TAG`` - Adds a tag to the transaction that can be both automatically and manually throttled. To support busy tag detection, these tags may be sent as part of read requests.
* ``TAG`` - Adds a tag to the transaction. This tag will not be used for auto-throttling and is not included with read or commit requests. Tags set in this way can only be throttled manually.
* ``AUTO_THROTTLE_TAG`` - Adds a tag to the transaction that can be both automatically and manually throttled. To support busy tag detection, these tags may be sent as part of read or commit requests.

See the documentation for your particular language binding for details about setting this option; a sketch for the C++ client follows.
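As a minimal sketch with the C++ native client (the two option names are from the list above; the tag strings are invented examples):

	// Assumes fdbclient/NativeAPI.actor.h and an open Database db.
	Transaction tr(db);
	tr.setOption(FDBTransactionOptions::TAG, LiteralStringRef("nightly-backup"));
	tr.setOption(FDBTransactionOptions::AUTO_THROTTLE_TAG, LiteralStringRef("web-frontend"));
	// Reads and writes issued through tr are now attributed to these tags.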
.. note:: If setting hierarchical tags, it is recommended that you not use auto-throttle tags at multiple levels of the hierarchy. Otherwise, the cluster will favor throttling those tags set at higher levels, as they will include more transactions.

.. note:: Tags must be included as part of all get read version requests, and a sample of read requests will include auto-throttled tags. Additionally, each tag imposes additional costs with those requests. It is therefore recommended that you not use excessive numbers or lengths of transaction tags.
.. note:: Tags must be included as part of all get read version requests, and a sample of read and commit requests will include auto-throttled tags. Additionally, each tag imposes additional costs with those requests. It is therefore recommended that you not use excessive numbers or lengths of transaction tags.

Tag throttling overview
=======================
@ -48,7 +48,7 @@ When a transaction tag is throttled, this information will be communicated to th
Automatic transaction tag throttling
====================================

When using the ``AUTO_THROTTLE_TAG`` transaction option, the cluster will monitor read activity for the chosen tags and may choose to reduce a tag's transaction rate limit if a storage server gets busy enough and has a sufficient portion of its read traffic coming from that one tag.
When using the ``AUTO_THROTTLE_TAG`` transaction option, the cluster will monitor activity for the chosen tags and may choose to reduce a tag's transaction rate limit if a storage server gets busy enough and has a sufficient portion of its traffic coming from that one tag.

When a tag is auto-throttled, the default priority transaction rate will be decreased to reduce the percentage of traffic attributable to that tag to a reasonable amount of total traffic on the affected storage server(s), and batch priority transactions for that tag will be stopped completely.
@ -31,7 +31,7 @@ Because TSS recruitment only pairs *new* storage processes, you must add process
Example commands
----------------

Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-experimental count=4``.
Set the desired TSS processes count to 4, using the redwood storage engine: ``configure tss ssd-redwood-1-experimental count=4``.

Change the desired TSS process count to 2: ``configure tss count=2``.
@ -62,6 +62,17 @@ ACTOR Future<Void> changeFeedList(Database db) {

namespace fdb_cli {

ACTOR Future<Void> requestVersionUpdate(Database localDb, Reference<ChangeFeedData> feedData) {
	loop {
		wait(delay(5.0));
		Transaction tr(localDb);
		state Version ver = wait(tr.getReadVersion());
		printf("Requesting version %d\n", ver);
		wait(feedData->whenAtLeast(ver));
		printf("Feed at version %d\n", ver);
	}
}

ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRef> tokens, Future<Void> warn) {
	if (tokens.size() == 1) {
		printUsage(tokens[0]);
@ -117,14 +128,16 @@ ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRe
	if (warn.isValid()) {
		warn.cancel();
	}
	state PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> feedResults;
	state Future<Void> feed = localDb->getChangeFeedStream(feedResults, tokens[2], begin, end);
	state Reference<ChangeFeedData> feedData = makeReference<ChangeFeedData>();
	state Future<Void> feed = localDb->getChangeFeedStream(feedData, tokens[2], begin, end);
	state Future<Void> versionUpdates = requestVersionUpdate(localDb, feedData);
	printf("\n");
	try {
		state Future<Void> feedInterrupt = LineNoise::onKeyboardInterrupt();
		loop {
			choose {
				when(Standalone<VectorRef<MutationsAndVersionRef>> res = waitNext(feedResults.getFuture())) {
				when(Standalone<VectorRef<MutationsAndVersionRef>> res =
				         waitNext(feedData->mutations.getFuture())) {
					for (auto& it : res) {
						for (auto& it2 : it.mutations) {
							printf("%lld %s\n", it.version, it2.toString().c_str());
@ -134,7 +147,7 @@ ACTOR Future<bool> changeFeedCommandActor(Database localDb, std::vector<StringRe
				when(wait(feedInterrupt)) {
					feedInterrupt = Future<Void>();
					feed.cancel();
					feedResults = PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>();
					feedData = makeReference<ChangeFeedData>();
					break;
				}
			}
@ -96,7 +96,7 @@ struct ConfigError {
class ProfilerConfigT {
private: // private types
	using Lock = std::unique_lock<std::mutex>;
	friend class crossbow::create_static<ProfilerConfigT>;
	friend struct crossbow::create_static<ProfilerConfigT>;

private: // members
	std::shared_ptr<SampleIngestor> ingestor = std::make_shared<NoneIngestor>();
@ -30,6 +30,7 @@ set(FDBCLIENT_SRCS
  ClientKnobs.cpp
  ClientKnobs.h
  ClientLogEvents.h
  ClientVersion.h
  ClientWorkerInterface.h
  ClusterConnectionFile.actor.cpp
  ClusterConnectionFile.h
@ -73,6 +74,8 @@ set(FDBCLIENT_SRCS
  Knobs.h
  IKnobCollection.cpp
  IKnobCollection.h
  LocalClientAPI.cpp
  LocalClientAPI.h
  ManagementAPI.actor.cpp
  ManagementAPI.actor.h
  MonitorLeader.actor.cpp
@ -100,6 +100,7 @@ void ClientKnobs::initialize(Randomize randomize) {
	init( RANGESTREAM_FRAGMENT_SIZE, 1e6 );
	init( RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT, 20 );
	init( QUARANTINE_TSS_ON_MISMATCH, true ); if( randomize && BUGGIFY ) QUARANTINE_TSS_ON_MISMATCH = false; // if true, a tss mismatch will put the offending tss in quarantine. If false, it will just be killed
	init( CHANGE_FEED_EMPTY_BATCH_TIME, 0.005 );

	//KeyRangeMap
	init( KRM_GET_RANGE_LIMIT, 1e5 ); if( randomize && BUGGIFY ) KRM_GET_RANGE_LIMIT = 10;
@ -100,6 +100,7 @@ public:
	int64_t RANGESTREAM_FRAGMENT_SIZE;
	int RANGESTREAM_BUFFERED_FRAGMENTS_LIMIT;
	bool QUARANTINE_TSS_ON_MISMATCH;
	double CHANGE_FEED_EMPTY_BATCH_TIME;

	// KeyRangeMap
	int KRM_GET_RANGE_LIMIT;
@ -23,6 +23,7 @@
#include "fdbclient/NativeAPI.actor.h"
#include "fdbclient/ManagementAPI.actor.h"
#include "fdbclient/ClientKnobs.h"
#include "fdbclient/SystemData.h"
#include "fdbclient/versions.h"
#include "fdbrpc/IAsyncFile.h"
#include "flow/Platform.h"
@ -46,7 +47,7 @@ struct ClientLibBinaryInfo {
#define ASSERT_INDEX_IN_RANGE(idx, arr) ASSERT(idx >= 0 && idx < sizeof(arr) / sizeof(arr[0]))

const std::string& getStatusName(ClientLibStatus status) {
	static const std::string statusNames[] = { "disabled", "available", "uploading" };
	static const std::string statusNames[] = { "disabled", "uploading", "download", "active" };
	int idx = static_cast<int>(status);
	ASSERT_INDEX_IN_RANGE(idx, statusNames);
	return statusNames[idx];
@ -123,7 +124,23 @@ ClientLibChecksumAlg getChecksumAlgByName(std::string_view checksumAlgName) {
namespace {

bool isValidTargetStatus(ClientLibStatus status) {
	return status == ClientLibStatus::AVAILABLE || status == ClientLibStatus::DISABLED;
	return status == ClientLibStatus::DISABLED || status == ClientLibStatus::DOWNLOAD ||
	       status == ClientLibStatus::ACTIVE;
}

bool isAvailableForDownload(ClientLibStatus status) {
	return status == ClientLibStatus::DOWNLOAD || status == ClientLibStatus::ACTIVE;
}

void updateClientLibChangeCounter(Transaction& tr, ClientLibStatus prevStatus, ClientLibStatus newStatus) {
	static const int64_t counterIncVal = 1;
	if ((prevStatus != newStatus) &&
	    (newStatus == ClientLibStatus::DOWNLOAD || newStatus == ClientLibStatus::ACTIVE ||
	     prevStatus == ClientLibStatus::DOWNLOAD || prevStatus == ClientLibStatus::ACTIVE)) {
		tr.atomicOp(clientLibChangeCounterKey,
		            StringRef(reinterpret_cast<const uint8_t*>(&counterIncVal), sizeof(counterIncVal)),
		            MutationRef::AddValue);
	}
}

json_spirit::mObject parseMetadataJson(StringRef metadataString) {
@ -198,7 +215,7 @@ KeyRef chunkKeyFromNo(StringRef clientLibBinPrefix, size_t chunkNo, Arena& arena
	return clientLibBinPrefix.withSuffix(format("%06zu", chunkNo), arena);
}

ClientLibPlatform getCurrentClientPlatform() {
[[maybe_unused]] ClientLibPlatform getCurrentClientPlatform() {
#ifdef __x86_64__
#if defined(_WIN32)
	return ClientLibPlatform::X86_64_WINDOWS;
@ -432,6 +449,7 @@ ACTOR Future<Void> uploadClientLibrary(Database db,
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			tr.set(clientLibMetaKey, ValueRef(jsStr));
			updateClientLibChangeCounter(tr, ClientLibStatus::DISABLED, targetStatus);
			wait(tr.commit());
			break;
		} catch (Error& e) {
@ -488,8 +506,8 @@ ACTOR Future<Void> downloadClientLibrary(Database db,
		}
	}

	// Allow downloading only libraries in the available state
	if (getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)) != ClientLibStatus::AVAILABLE) {
	// Prevent downloading not yet uploaded and disabled libraries
	if (!isAvailableForDownload(getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)))) {
		throw client_lib_not_available();
	}

@ -620,8 +638,11 @@ ACTOR Future<Void> deleteClientLibrary(Database db, Standalone<StringRef> client
				TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
				throw client_lib_not_found();
			}
			json_spirit::mObject metadataJson = parseMetadataJson(metadataOpt.get());
			ClientLibStatus status = getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
			tr.clear(prefixRange(chunkKeyPrefix));
			tr.clear(clientLibMetaKey);
			updateClientLibChangeCounter(tr, status, ClientLibStatus::DISABLED);
			wait(tr.commit());
			break;
		} catch (Error& e) {
@ -641,8 +662,8 @@ void applyClientLibFilter(const ClientLibFilter& filter,
	for (const auto& [k, v] : scanResults) {
		try {
			json_spirit::mObject metadataJson = parseMetadataJson(v);
			if (filter.matchAvailableOnly && getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)) !=
			                                     ClientLibStatus::AVAILABLE) {
			if (filter.matchAvailableOnly &&
			    !isAvailableForDownload(getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS)))) {
				continue;
			}
			if (filter.matchCompatibleAPI &&
@ -707,4 +728,74 @@ ACTOR Future<Standalone<VectorRef<StringRef>>> listClientLibraries(Database db,
	return result;
}

} // namespace ClientLibManagement
ACTOR Future<ClientLibStatus> getClientLibraryStatus(Database db, Standalone<StringRef> clientLibId) {
	state Key clientLibMetaKey = metadataKeyFromId(clientLibId);
	state Transaction tr(db);
	loop {
		try {
			tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);
			Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
			if (!metadataOpt.present()) {
				TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
				throw client_lib_not_found();
			}
			json_spirit::mObject metadataJson = parseMetadataJson(metadataOpt.get());
			return getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
		} catch (Error& e) {
			wait(tr.onError(e));
		}
	}
}

ACTOR Future<Void> changeClientLibraryStatus(Database db,
                                             Standalone<StringRef> clientLibId,
                                             ClientLibStatus newStatus) {
	state Key clientLibMetaKey = metadataKeyFromId(clientLibId);
	state json_spirit::mObject metadataJson;
	state std::string jsStr;
	state Transaction tr;

	if (!isValidTargetStatus(newStatus)) {
		TraceEvent(SevWarnAlways, "ClientLibraryInvalidMetadata")
		    .detail("Reason", "InvalidTargetStatus")
		    .detail("Status", getStatusName(newStatus));
		throw client_lib_invalid_metadata();
	}

	loop {
		tr = Transaction(db);
		try {
			tr.setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
			tr.setOption(FDBTransactionOptions::LOCK_AWARE);
			Optional<Value> metadataOpt = wait(tr.get(clientLibMetaKey));
			if (!metadataOpt.present()) {
				TraceEvent(SevWarnAlways, "ClientLibraryNotFound").detail("Key", clientLibMetaKey);
				throw client_lib_not_found();
			}
			metadataJson = parseMetadataJson(metadataOpt.get());
			ClientLibStatus prevStatus = getStatusByName(getMetadataStrAttr(metadataJson, CLIENTLIB_ATTR_STATUS));
			if (prevStatus == newStatus) {
				return Void();
			}
			metadataJson[CLIENTLIB_ATTR_STATUS] = getStatusName(newStatus);
			jsStr = json_spirit::write_string(json_spirit::mValue(metadataJson));
			tr.set(clientLibMetaKey, ValueRef(jsStr));

			updateClientLibChangeCounter(tr, prevStatus, newStatus);

			wait(tr.commit());
			break;
		} catch (Error& e) {
			if (e.code() == error_code_client_lib_not_found) {
				throw;
			}
			wait(tr.onError(e));
		}
	}

	TraceEvent("ClientLibraryStatusChanged").detail("Key", clientLibMetaKey).detail("Status", getStatusName(newStatus));
	return Void();
}

} // namespace ClientLibManagement
@ -35,8 +35,9 @@ namespace ClientLibManagement {

enum class ClientLibStatus {
	DISABLED = 0,
	AVAILABLE, // 1
	UPLOADING, // 2
	UPLOADING, // 1
	DOWNLOAD, // 2
	ACTIVE, // 3
	COUNT // must be the last one
};

@ -133,6 +134,12 @@ ACTOR Future<Void> deleteClientLibrary(Database db, Standalone<StringRef> client
// Returns metadata JSON of each library
ACTOR Future<Standalone<VectorRef<StringRef>>> listClientLibraries(Database db, ClientLibFilter filter);

// Get the current status of an uploaded client library
ACTOR Future<ClientLibStatus> getClientLibraryStatus(Database db, Standalone<StringRef> clientLibId);

// Change client library metadata status
ACTOR Future<Void> changeClientLibraryStatus(Database db, Standalone<StringRef> clientLibId, ClientLibStatus newStatus);

} // namespace ClientLibManagement

#include "flow/unactorcompiler.h"
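A sketch of how the two new calls might be combined (hypothetical flow, not code from this commit; assumes a connected `Database db` and a library previously uploaded with target status DOWNLOAD):

	ACTOR Future<Void> activateClientLibrary(Database db, Standalone<StringRef> clientLibId) {
		ClientLibManagement::ClientLibStatus status =
		    wait(ClientLibManagement::getClientLibraryStatus(db, clientLibId));
		if (status == ClientLibManagement::ClientLibStatus::DOWNLOAD) {
			// Moving to ACTIVE bumps clientLibChangeCounter, notifying clients.
			wait(ClientLibManagement::changeClientLibraryStatus(
			    db, clientLibId, ClientLibManagement::ClientLibStatus::ACTIVE));
		}
		return Void();
	}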
77  fdbclient/ClientVersion.h  Normal file
@ -0,0 +1,77 @@
/*
 * ClientVersion.h
 *
 * This source file is part of the FoundationDB open source project
 *
 * Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef FDBCLIENT_CLIENT_VERSION_H
#define FDBCLIENT_CLIENT_VERSION_H
#pragma once

#include "flow/Arena.h"

struct ClientVersionRef {
	StringRef clientVersion;
	StringRef sourceVersion;
	StringRef protocolVersion;

	ClientVersionRef() { initUnknown(); }

	ClientVersionRef(Arena& arena, ClientVersionRef const& cv)
	  : clientVersion(arena, cv.clientVersion), sourceVersion(arena, cv.sourceVersion),
	    protocolVersion(arena, cv.protocolVersion) {}
	ClientVersionRef(StringRef clientVersion, StringRef sourceVersion, StringRef protocolVersion)
	  : clientVersion(clientVersion), sourceVersion(sourceVersion), protocolVersion(protocolVersion) {}
	ClientVersionRef(StringRef versionString) {
		std::vector<StringRef> parts = versionString.splitAny(LiteralStringRef(","));
		if (parts.size() != 3) {
			initUnknown();
			return;
		}
		clientVersion = parts[0];
		sourceVersion = parts[1];
		protocolVersion = parts[2];
	}

	void initUnknown() {
		clientVersion = LiteralStringRef("Unknown");
		sourceVersion = LiteralStringRef("Unknown");
		protocolVersion = LiteralStringRef("Unknown");
	}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, clientVersion, sourceVersion, protocolVersion);
	}

	size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }

	bool operator<(const ClientVersionRef& rhs) const {
		if (protocolVersion != rhs.protocolVersion) {
			return protocolVersion < rhs.protocolVersion;
		}

		// These comparisons are arbitrary because they aren't ordered
		if (clientVersion != rhs.clientVersion) {
			return clientVersion < rhs.clientVersion;
		}

		return sourceVersion < rhs.sourceVersion;
	}
};

#endif
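For reference, a small usage sketch of the string constructor above (illustrative values, not part of the commit):

	// The input must be "clientVersion,sourceVersion,protocolVersion";
	// any other shape falls back to the "Unknown" sentinel values.
	ClientVersionRef v(LiteralStringRef("7.0.0,abcdef0123456789,fdb00b070000000"));
	ASSERT(v.clientVersion == LiteralStringRef("7.0.0"));
	ClientVersionRef bad(LiteralStringRef("not-a-version-string"));
	ASSERT(bad.protocolVersion == LiteralStringRef("Unknown"));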
@ -27,6 +27,7 @@
#include "fdbclient/Status.h"
#include "fdbclient/CommitProxyInterface.h"
#include "fdbclient/ClientWorkerInterface.h"
#include "fdbclient/ClientVersion.h"

struct ClusterInterface {
	constexpr static FileIdentifier file_identifier = 15888863;
@ -38,6 +39,7 @@ struct ClusterInterface {
	RequestStream<struct ForceRecoveryRequest> forceRecovery;
	RequestStream<struct MoveShardRequest> moveShard;
	RequestStream<struct RepairSystemDataRequest> repairSystemData;
	RequestStream<struct SplitShardRequest> splitShard;

	bool operator==(ClusterInterface const& r) const { return id() == r.id(); }
	bool operator!=(ClusterInterface const& r) const { return id() != r.id(); }
@ -48,7 +50,8 @@ struct ClusterInterface {
		return openDatabase.getFuture().isReady() || failureMonitoring.getFuture().isReady() ||
		       databaseStatus.getFuture().isReady() || ping.getFuture().isReady() ||
		       getClientWorkers.getFuture().isReady() || forceRecovery.getFuture().isReady() ||
		       moveShard.getFuture().isReady() || repairSystemData.getFuture().isReady();
		       moveShard.getFuture().isReady() || repairSystemData.getFuture().isReady() ||
		       splitShard.getFuture().isReady();
	}

	void initEndpoints() {
@ -60,6 +63,7 @@ struct ClusterInterface {
		forceRecovery.getEndpoint(TaskPriority::ClusterController);
		moveShard.getEndpoint(TaskPriority::ClusterController);
		repairSystemData.getEndpoint(TaskPriority::ClusterController);
		splitShard.getEndpoint(TaskPriority::ClusterController);
	}

	template <class Ar>
@ -72,7 +76,8 @@ struct ClusterInterface {
		           getClientWorkers,
		           forceRecovery,
		           moveShard,
		           repairSystemData);
		           repairSystemData,
		           splitShard);
	}
};

@ -93,56 +98,6 @@ struct ClusterControllerClientInterface {
	}
};

struct ClientVersionRef {
	StringRef clientVersion;
	StringRef sourceVersion;
	StringRef protocolVersion;

	ClientVersionRef() { initUnknown(); }

	ClientVersionRef(Arena& arena, ClientVersionRef const& cv)
	  : clientVersion(arena, cv.clientVersion), sourceVersion(arena, cv.sourceVersion),
	    protocolVersion(arena, cv.protocolVersion) {}
	ClientVersionRef(StringRef clientVersion, StringRef sourceVersion, StringRef protocolVersion)
	  : clientVersion(clientVersion), sourceVersion(sourceVersion), protocolVersion(protocolVersion) {}
	ClientVersionRef(StringRef versionString) {
		std::vector<StringRef> parts = versionString.splitAny(LiteralStringRef(","));
		if (parts.size() != 3) {
			initUnknown();
			return;
		}
		clientVersion = parts[0];
		sourceVersion = parts[1];
		protocolVersion = parts[2];
	}

	void initUnknown() {
		clientVersion = LiteralStringRef("Unknown");
		sourceVersion = LiteralStringRef("Unknown");
		protocolVersion = LiteralStringRef("Unknown");
	}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, clientVersion, sourceVersion, protocolVersion);
	}

	size_t expectedSize() const { return clientVersion.size() + sourceVersion.size() + protocolVersion.size(); }

	bool operator<(const ClientVersionRef& rhs) const {
		if (protocolVersion != rhs.protocolVersion) {
			return protocolVersion < rhs.protocolVersion;
		}

		// These comparisons are arbitrary because they aren't ordered
		if (clientVersion != rhs.clientVersion) {
			return clientVersion < rhs.clientVersion;
		}

		return sourceVersion < rhs.sourceVersion;
	}
};

template <class T>
struct ItemWithExamples {
	T item;
@ -337,4 +292,35 @@ struct RepairSystemDataRequest {
		serializer(ar, reply);
	}
};

// Returns the actual shards generated by the SplitShardRequest.
struct SplitShardReply {
	constexpr static FileIdentifier file_identifier = 1384440;
	std::vector<KeyRange> shards;

	SplitShardReply() {}
	explicit SplitShardReply(std::vector<KeyRange> shards) : shards{ std::move(shards) } {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, shards);
	}
};

// Split keyrange [shard.begin, shard.end) into num shards.
// Split points are chosen as the arithmetically equal division points of the given range.
struct SplitShardRequest {
	constexpr static FileIdentifier file_identifier = 1384443;
	KeyRange shard;
	int num;
	ReplyPromise<SplitShardReply> reply;

	SplitShardRequest() : num(0) {}
	SplitShardRequest(KeyRange shard, int num) : shard{ std::move(shard) }, num(num) {}

	template <class Ar>
	void serialize(Ar& ar) {
		serializer(ar, shard, num, reply);
	}
};
#endif
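The "arithmetically equal division points" are easy to picture with integers standing in for key positions; a hypothetical helper (not cluster code):

	#include <cstdint>
	#include <utility>
	#include <vector>

	// Split [begin, end) into num equal-width shards, mirroring what a
	// SplitShardRequest{shard, num} asks the cluster to do. Real split
	// points are byte-string keys; integers stand in for them here.
	std::vector<std::pair<int64_t, int64_t>> splitEvenly(int64_t begin, int64_t end, int num) {
		std::vector<std::pair<int64_t, int64_t>> shards;
		for (int i = 0; i < num; ++i) {
			int64_t lo = begin + (end - begin) * i / num;
			int64_t hi = begin + (end - begin) * (i + 1) / num;
			shards.emplace_back(lo, hi); // shard i covers [lo, hi)
		}
		return shards;
	}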
@ -115,6 +115,9 @@ struct ClientDBInfo {
	    firstCommitProxy; // not serialized, used for commitOnFirstProxy when the commit proxies vector has been shrunk
	Optional<Value> forward;
	std::vector<VersionHistory> history;
	// a counter increased every time a change of the uploaded client libraries
	// happens; the clients need to be aware of it
	uint64_t clientLibChangeCounter = 0;

	ClientDBInfo() {}

@ -126,7 +129,7 @@ struct ClientDBInfo {
		if constexpr (!is_fb_function<Archive>) {
			ASSERT(ar.protocolVersion().isValid());
		}
		serializer(ar, grvProxies, commitProxies, id, forward, history);
		serializer(ar, grvProxies, commitProxies, id, forward, history, clientLibChangeCounter);
	}
};
@ -188,11 +188,11 @@ struct ConfigTransactionInterface {

public:
	static constexpr FileIdentifier file_identifier = 982485;
	struct RequestStream<ConfigTransactionGetGenerationRequest> getGeneration;
	struct RequestStream<ConfigTransactionGetRequest> get;
	struct RequestStream<ConfigTransactionGetConfigClassesRequest> getClasses;
	struct RequestStream<ConfigTransactionGetKnobsRequest> getKnobs;
	struct RequestStream<ConfigTransactionCommitRequest> commit;
	class RequestStream<ConfigTransactionGetGenerationRequest> getGeneration;
	class RequestStream<ConfigTransactionGetRequest> get;
	class RequestStream<ConfigTransactionGetConfigClassesRequest> getClasses;
	class RequestStream<ConfigTransactionGetKnobsRequest> getKnobs;
	class RequestStream<ConfigTransactionCommitRequest> commit;

	ConfigTransactionInterface();
	void setupWellKnownEndpoints();
@ -288,7 +288,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
		result["storage_engine"] = "ssd-2";
	} else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 &&
	           storageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) {
		result["storage_engine"] = "ssd-redwood-experimental";
		result["storage_engine"] = "ssd-redwood-1-experimental";
	} else if (tLogDataStoreType == KeyValueStoreType::SSD_BTREE_V2 &&
	           storageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) {
		result["storage_engine"] = "ssd-rocksdb-experimental";
@ -311,7 +311,7 @@ StatusObject DatabaseConfiguration::toJSON(bool noPolicies) const {
	} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_BTREE_V2) {
		result["tss_storage_engine"] = "ssd-2";
	} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_REDWOOD_V1) {
		result["tss_storage_engine"] = "ssd-redwood-experimental";
		result["tss_storage_engine"] = "ssd-redwood-1-experimental";
	} else if (testingStorageServerStoreType == KeyValueStoreType::SSD_ROCKSDB_V1) {
		result["tss_storage_engine"] = "ssd-rocksdb-experimental";
	} else if (testingStorageServerStoreType == KeyValueStoreType::MEMORY_RADIXTREE) {
@ -578,9 +578,6 @@ bool DatabaseConfiguration::setInternal(KeyRef key, ValueRef value) {
	return true; // All of the above options currently require recovery to take effect
}

static KeyValueRef* lower_bound(VectorRef<KeyValueRef>& config, KeyRef const& key) {
	return std::lower_bound(config.begin(), config.end(), KeyValueRef(key, ValueRef()), KeyValueRef::OrderByKey());
}
static KeyValueRef const* lower_bound(VectorRef<KeyValueRef> const& config, KeyRef const& key) {
	return std::lower_bound(config.begin(), config.end(), KeyValueRef(key, ValueRef()), KeyValueRef::OrderByKey());
}
@ -20,6 +20,7 @@

#ifndef DatabaseContext_h
#define DatabaseContext_h
#include "fdbclient/Notified.h"
#include "flow/FastAlloc.h"
#include "flow/FastRef.h"
#include "fdbclient/StorageServerInterface.h"
@ -146,6 +147,25 @@ public:
	WatchMetadata(Key key, Optional<Value> value, Version version, TransactionInfo info, TagSet tags);
};

struct ChangeFeedStorageData : ReferenceCounted<ChangeFeedStorageData> {
	UID id;
	Future<Void> updater;
	NotifiedVersion version;
	NotifiedVersion desired;
};

struct ChangeFeedData : ReferenceCounted<ChangeFeedData> {
	PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> mutations;

	Version getVersion();
	Future<Void> whenAtLeast(Version version);

	NotifiedVersion lastReturnedVersion;
	std::vector<Reference<ChangeFeedStorageData>> storageData;
	AsyncVar<int> notAtLatest;
	Promise<Void> refresh;
};

class DatabaseContext : public ReferenceCounted<DatabaseContext>, public FastAllocated<DatabaseContext>, NonCopyable {
public:
	static DatabaseContext* allocateOnForeignThread() {
@ -197,6 +217,7 @@ public:
	Future<Reference<CommitProxyInfo>> getCommitProxiesFuture(bool useProvisionalProxies);
	Reference<GrvProxyInfo> getGrvProxies(bool useProvisionalProxies);
	Future<Void> onProxiesChanged() const;
	Future<Void> onClientLibStatusChanged() const;
	Future<HealthMetrics> getHealthMetrics(bool detailed);

	// Returns the protocol version reported by the coordinator this client is connected to
@ -252,7 +273,7 @@ public:
	// Management API, create snapshot
	Future<Void> createSnapshot(StringRef uid, StringRef snapshot_command);

	Future<Void> getChangeFeedStream(const PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>& results,
	Future<Void> getChangeFeedStream(Reference<ChangeFeedData> results,
	                                 Key rangeID,
	                                 Version begin = 0,
	                                 Version end = std::numeric_limits<Version>::max(),
@ -287,7 +308,8 @@ public:
	// Key DB-specific information
	Reference<AsyncVar<Reference<IClusterConnectionRecord>>> connectionRecord;
	AsyncTrigger proxiesChangeTrigger;
	Future<Void> monitorProxiesInfoChange;
	AsyncTrigger clientLibChangeTrigger;
	Future<Void> clientDBInfoMonitor;
	Future<Void> monitorTssInfoChange;
	Future<Void> tssMismatchHandler;
	PromiseStream<std::pair<UID, std::vector<DetailedTSSMismatch>>> tssMismatchStream;
@ -345,6 +367,9 @@ public:
	std::unordered_map<UID, Reference<TSSMetrics>> tssMetrics;
	// map from changeFeedId -> changeFeedRange
	std::unordered_map<Key, KeyRange> changeFeedCache;
	std::unordered_map<UID, Reference<ChangeFeedStorageData>> changeFeedUpdaters;

	Reference<ChangeFeedStorageData> getStorageData(StorageServerInterface interf);

	UID dbId;
	IsInternal internal; // Only contexts created through the C client and fdbcli are non-internal
@ -369,6 +394,7 @@ public:
	Counter transactionGetKeyRequests;
	Counter transactionGetValueRequests;
	Counter transactionGetRangeRequests;
	Counter transactionGetRangeAndFlatMapRequests;
	Counter transactionGetRangeStreamRequests;
	Counter transactionWatchRequests;
	Counter transactionGetAddressesForKeyRequests;
@ -404,7 +430,7 @@ public:

	int snapshotRywEnabled;

	int transactionTracingEnabled;
	bool transactionTracingSample;
	double verifyCausalReadsProp = 0.0;

	Future<Void> logger;
@ -65,3 +65,11 @@ std::string KeySelectorRef::toString() const {
		return format("%d+lastLessThan(%s)", offset, printable(key).c_str());
	}
}

std::string describe(const std::string& s) {
	return s;
}

std::string describe(UID const& item) {
	return item.shortString();
}
@ -188,18 +188,14 @@ inline std::string describe(const int item) {
}

// Allows describeList to work on a vector of std::string
static std::string describe(const std::string& s) {
	return s;
}
std::string describe(const std::string& s);

template <class T>
std::string describe(Reference<T> const& item) {
	return item->toString();
}

static std::string describe(UID const& item) {
	return item.shortString();
}
std::string describe(UID const& item);

template <class T>
std::string describe(T const& item) {
@ -710,7 +706,7 @@ struct KeyValueStoreType {
		case SSD_BTREE_V2:
			return "ssd-2";
		case SSD_REDWOOD_V1:
			return "ssd-redwood-experimental";
			return "ssd-redwood-1-experimental";
		case SSD_ROCKSDB_V1:
			return "ssd-rocksdb-experimental";
		case MEMORY:
@ -193,10 +193,10 @@ public:
	struct RestoreFile {
		Version version;
		std::string fileName;
		bool isRange; // false for log file
		int64_t blockSize;
		int64_t fileSize;
		Version endVersion; // not meaningful for range files
		bool isRange{ false }; // false for log file
		int64_t blockSize{ 0 };
		int64_t fileSize{ 0 };
		Version endVersion{ ::invalidVersion }; // not meaningful for range files

		Tuple pack() const {
			return Tuple()
@ -59,6 +59,12 @@ public:
	                                           GetRangeLimits limits,
	                                           bool snapshot = false,
	                                           bool reverse = false) = 0;
	virtual ThreadFuture<RangeResult> getRangeAndFlatMap(const KeySelectorRef& begin,
	                                                     const KeySelectorRef& end,
	                                                     const StringRef& mapper,
	                                                     GetRangeLimits limits,
	                                                     bool snapshot = false,
	                                                     bool reverse = false) = 0;
	virtual ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) = 0;
	virtual ThreadFuture<Standalone<StringRef>> getVersionstamp() = 0;
@ -73,20 +73,25 @@ KnobValue IKnobCollection::parseKnobValue(std::string const& knobName, std::stri
	UNSTOPPABLE_ASSERT(false);
}

std::unique_ptr<IKnobCollection> IKnobCollection::globalKnobCollection =
    IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
std::unique_ptr<IKnobCollection>& IKnobCollection::globalKnobCollection() {
	static std::unique_ptr<IKnobCollection> res;
	if (!res) {
		res = IKnobCollection::create(IKnobCollection::Type::CLIENT, Randomize::False, IsSimulated::False);
	}
	return res;
}

void IKnobCollection::setGlobalKnobCollection(Type type, Randomize randomize, IsSimulated isSimulated) {
	globalKnobCollection = create(type, randomize, isSimulated);
	FLOW_KNOBS = &globalKnobCollection->getFlowKnobs();
	globalKnobCollection() = create(type, randomize, isSimulated);
	FLOW_KNOBS = &globalKnobCollection()->getFlowKnobs();
}

IKnobCollection const& IKnobCollection::getGlobalKnobCollection() {
	return *globalKnobCollection;
	return *globalKnobCollection();
}

IKnobCollection& IKnobCollection::getMutableGlobalKnobCollection() {
	return *globalKnobCollection;
	return *globalKnobCollection();
}

ConfigMutationRef IKnobCollection::createSetMutation(Arena arena, KeyRef key, ValueRef value) {
@ -36,7 +36,7 @@
 * - TestKnobs
 */
class IKnobCollection {
	static std::unique_ptr<IKnobCollection> globalKnobCollection;
	static std::unique_ptr<IKnobCollection>& globalKnobCollection();

public:
	virtual ~IKnobCollection() = default;
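The data-member-to-function change above is the classic construct-on-first-use pattern; a standalone sketch of why it sidesteps the static initialization order problem:

	#include <memory>

	struct Knobs { int limit = 0; };

	// A function-local static is initialized on first call, so any static
	// initializer in another translation unit that calls knobs() receives a
	// fully constructed object regardless of link order.
	std::unique_ptr<Knobs>& knobs() {
		static std::unique_ptr<Knobs> instance;
		if (!instance) {
			instance = std::make_unique<Knobs>();
		}
		return instance;
	}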
@ -63,6 +63,12 @@ public:
	                                     GetRangeLimits limits,
	                                     Snapshot = Snapshot::False,
	                                     Reverse = Reverse::False) = 0;
	virtual Future<RangeResult> getRangeAndFlatMap(KeySelector begin,
	                                               KeySelector end,
	                                               Key mapper,
	                                               GetRangeLimits limits,
	                                               Snapshot = Snapshot::False,
	                                               Reverse = Reverse::False) = 0;
	virtual Future<Standalone<VectorRef<const char*>>> getAddressesForKey(Key const& key) = 0;
	virtual Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(KeyRange const& range, int64_t chunkSize) = 0;
	virtual Future<int64_t> getEstimatedRangeSizeBytes(KeyRange const& keys) = 0;
@ -26,7 +26,9 @@
|
||||
#include "fdbclient/IClientApi.h"
|
||||
#include "fdbclient/ReadYourWrites.h"
|
||||
#include "fdbclient/Subspace.h"
|
||||
#include "flow/ObjectSerializer.h"
|
||||
#include "flow/genericactors.actor.h"
|
||||
#include "flow/serialize.h"
|
||||
|
||||
// Codec is a utility struct to convert a type to and from a Tuple. It is used by the template
|
||||
// classes below like KeyBackedProperty and KeyBackedMap to convert key parts and values
|
||||
@ -168,14 +170,8 @@ public:
Future<T> getOrThrow(Reference<ReadYourWritesTransaction> tr,
Snapshot snapshot = Snapshot::False,
Error err = key_not_found()) const {
auto keyCopy = key;
auto backtrace = platform::get_backtrace();
return map(get(tr, snapshot), [=](Optional<T> val) -> T {
if (!val.present()) {
TraceEvent(SevInfo, "KeyBackedProperty_KeyNotFound")
.detail("Key", keyCopy)
.detail("Err", err.code())
.detail("ParentTrace", backtrace.c_str());
throw err;
}

@ -184,45 +180,39 @@ public:
}

Future<Optional<T>> get(Database cx, Snapshot snapshot = Snapshot::False) const {
auto& copy = *this;
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return copy.get(tr, snapshot);
return self.get(tr, snapshot);
});
}

Future<T> getD(Database cx, Snapshot snapshot = Snapshot::False, T defaultValue = T()) const {
auto& copy = *this;
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return copy.getD(tr, snapshot, defaultValue);
return self.getD(tr, snapshot, defaultValue);
});
}

Future<T> getOrThrow(Database cx, Snapshot snapshot = Snapshot::False, Error err = key_not_found()) const {
auto& copy = *this;
return runRYWTransaction(cx, [=](Reference<ReadYourWritesTransaction> tr) {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return copy.getOrThrow(tr, snapshot, err);
return self.getOrThrow(tr, snapshot, err);
});
}

void set(Reference<ReadYourWritesTransaction> tr, T const& val) { return tr->set(key, Codec<T>::pack(val).pack()); }

Future<Void> set(Database cx, T const& val) {
auto _key = key;
Value _val = Codec<T>::pack(val).pack();
return runRYWTransaction(cx, [_key, _val](Reference<ReadYourWritesTransaction> tr) {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
tr->set(_key, _val);

self->set(tr, val);
return Future<Void>(Void());
});
}
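Note: the recurring change in this hunk swaps `auto& copy = *this` plus a default capture for an explicit init-capture. A standalone C++ sketch (not FDB code) of why the by-value `self = *this` capture is the safer idiom when a closure can outlive the enclosing object, as it can inside runRYWTransaction's retry loop:

#include <functional>
#include <string>

struct Prop {
	std::string key;
	// The returned callback may run long after this Prop is destroyed.
	// Capturing a copy keeps the closure self-contained; capturing `this`
	// or a reference member would dangle.
	std::function<std::string()> deferredRead() const {
		return [self = *this]() { return self.key; };
	}
};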
@ -262,12 +252,12 @@ public:
Key key;
};

// Convenient read/write access to a sorted map of KeyType to ValueType that has key as its prefix
// Convenient read/write access to a sorted map of KeyType to ValueType under prefix
// Even though 'this' is not actually mutated, methods that change db keys are not const.
template <typename _KeyType, typename _ValueType>
class KeyBackedMap {
public:
KeyBackedMap(KeyRef key) : space(key) {}
KeyBackedMap(KeyRef prefix) : space(prefix) {}

typedef _KeyType KeyType;
typedef _ValueType ValueType;
@ -336,6 +326,164 @@ public:
Subspace space;
};

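Note: for orientation, a hedged usage sketch of KeyBackedMap; the prefix, the element types, and the set() signature are assumed from the surrounding class, not quoted from this diff:

// Hypothetical: a map of shard id -> name stored under a system-key prefix.
KeyBackedMap<int64_t, std::string> shardNames(LiteralStringRef("\xff/shardNames"));

Future<Void> rename(Reference<ReadYourWritesTransaction> tr) {
	shardNames.set(tr, 42, "primary"); // writes to space.pack(Codec<int64_t>::pack(42))
	return Void();
}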
// Convenient read/write access to a single value of type T stored at key
// Even though 'this' is not actually mutated, methods that change the db key are not const.
template <typename T, typename VersionOptions>
class KeyBackedObjectProperty {
public:
KeyBackedObjectProperty(KeyRef key, VersionOptions versionOptions) : key(key), versionOptions(versionOptions) {}
Future<Optional<T>> get(Reference<ReadYourWritesTransaction> tr, Snapshot snapshot = Snapshot::False) const {

return map(tr->get(key, snapshot), [vo = versionOptions](Optional<Value> const& val) -> Optional<T> {
if (val.present())
return ObjectReader::fromStringRef<T>(val.get(), vo);
return {};
});
}

// Get property's value or defaultValue if it doesn't exist
Future<T> getD(Reference<ReadYourWritesTransaction> tr,
Snapshot snapshot = Snapshot::False,
T defaultValue = T()) const {
return map(get(tr, snapshot), [=](Optional<T> val) -> T { return val.present() ? val.get() : defaultValue; });
}
// Get property's value or throw error if it doesn't exist
Future<T> getOrThrow(Reference<ReadYourWritesTransaction> tr,
Snapshot snapshot = Snapshot::False,
Error err = key_not_found()) const {
return map(get(tr, snapshot), [=](Optional<T> val) -> T {
if (!val.present()) {
throw err;
}

return val.get();
});
}

Future<Optional<T>> get(Database cx, Snapshot snapshot = Snapshot::False) const {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return self.get(tr, snapshot);
});
}

Future<T> getD(Database cx, Snapshot snapshot = Snapshot::False, T defaultValue = T()) const {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return self.getD(tr, snapshot, defaultValue);
});
}

Future<T> getOrThrow(Database cx, Snapshot snapshot = Snapshot::False, Error err = key_not_found()) const {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);

return self.getOrThrow(tr, snapshot, err);
});
}

void set(Reference<ReadYourWritesTransaction> tr, T const& val) {
return tr->set(key, ObjectWriter::toValue(val, versionOptions));
}

Future<Void> set(Database cx, T const& val) {
return runRYWTransaction(cx, [=, self = *this](Reference<ReadYourWritesTransaction> tr) {
tr->setOption(FDBTransactionOptions::ACCESS_SYSTEM_KEYS);
tr->setOption(FDBTransactionOptions::LOCK_AWARE);
self.set(tr, val);
return Future<Void>(Void());
});
}

void clear(Reference<ReadYourWritesTransaction> tr) { return tr->clear(key); }

Key key;
VersionOptions versionOptions;
};

// Convenient read/write access to a sorted map of KeyType to ValueType under key prefix
// ValueType is encoded / decoded with ObjectWriter/ObjectReader
// Even though 'this' is not actually mutated, methods that change db keys are not const.
template <typename _KeyType, typename _ValueType, typename VersionOptions>
class KeyBackedObjectMap {
public:
KeyBackedObjectMap(KeyRef prefix, VersionOptions versionOptions) : space(prefix), versionOptions(versionOptions) {}

typedef _KeyType KeyType;
typedef _ValueType ValueType;
typedef std::pair<KeyType, ValueType> PairType;
typedef std::vector<PairType> PairsType;

// If end is not present, one key past the end of the map is used.
Future<PairsType> getRange(Reference<ReadYourWritesTransaction> tr,
KeyType const& begin,
Optional<KeyType> const& end,
int limit,
Snapshot snapshot = Snapshot::False,
Reverse reverse = Reverse::False) const {
Key endKey = end.present() ? space.pack(Codec<KeyType>::pack(end.get())) : space.range().end;
return map(
tr->getRange(
KeyRangeRef(space.pack(Codec<KeyType>::pack(begin)), endKey), GetRangeLimits(limit), snapshot, reverse),
[self = *this](RangeResult const& kvs) -> PairsType {
PairsType results;
for (int i = 0; i < kvs.size(); ++i) {
KeyType key = Codec<KeyType>::unpack(self.space.unpack(kvs[i].key));
ValueType val = ObjectReader::fromStringRef<ValueType>(kvs[i].value, self.versionOptions);
results.push_back(PairType(key, val));
}
return results;
});
}

Future<Optional<ValueType>> get(Reference<ReadYourWritesTransaction> tr,
KeyType const& key,
Snapshot snapshot = Snapshot::False) const {
return map(tr->get(space.pack(Codec<KeyType>::pack(key)), snapshot),
[vo = versionOptions](Optional<Value> const& val) -> Optional<ValueType> {
if (val.present())
return ObjectReader::fromStringRef<ValueType>(val.get(), vo);
return {};
});
}

// Returns a Property that can be get/set that represents key's entry in this map.
KeyBackedObjectProperty<ValueType, VersionOptions> getProperty(KeyType const& key) const {
return KeyBackedObjectProperty<ValueType, VersionOptions>(space.pack(Codec<KeyType>::pack(key)),
versionOptions);
}

// Returns the expectedSize of the set key
int set(Reference<ReadYourWritesTransaction> tr, KeyType const& key, ValueType const& val) {
Key k = space.pack(Codec<KeyType>::pack(key));
Value v = ObjectWriter::toValue(val, versionOptions);
tr->set(k, v);
return k.expectedSize() + v.expectedSize();
}

void erase(Reference<ReadYourWritesTransaction> tr, KeyType const& key) {
return tr->clear(space.pack(Codec<KeyType>::pack(key)));
}

void erase(Reference<ITransaction> tr, KeyType const& key) {
return tr->clear(space.pack(Codec<KeyType>::pack(key)));
}

void erase(Reference<ReadYourWritesTransaction> tr, KeyType const& begin, KeyType const& end) {
return tr->clear(KeyRangeRef(space.pack(Codec<KeyType>::pack(begin)), space.pack(Codec<KeyType>::pack(end))));
}

void clear(Reference<ReadYourWritesTransaction> tr) { return tr->clear(space.range()); }

Subspace space;
VersionOptions versionOptions;
};

template <typename _ValueType>
class KeyBackedSet {
public:

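Note: a hedged sketch of how getProperty composes the two classes above. WorkerMetadata is a hypothetical value type; IncludeVersion stands in for the VersionOptions parameter in the style ObjectWriter uses elsewhere:

// Hypothetical: update one entry of an object map as a standalone property.
KeyBackedObjectMap<UID, WorkerMetadata, IncludeVersion> workers(LiteralStringRef("\xff/workers"), IncludeVersion());

Future<Void> touch(Reference<ReadYourWritesTransaction> tr, UID id, WorkerMetadata md) {
	workers.getProperty(id).set(tr, md); // writes the same key as workers.set(tr, id, md)
	return Void();
}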
27 fdbclient/LocalClientAPI.cpp Normal file
@ -0,0 +1,27 @@
/*
* LocalClientAPI.cpp
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#include "fdbclient/LocalClientAPI.h"
#include "fdbclient/ThreadSafeTransaction.h"

IClientApi* getLocalClientAPI() {
static IClientApi* api = new ThreadSafeApi();
return api;
}
28 fdbclient/LocalClientAPI.h Normal file
@ -0,0 +1,28 @@
/*
* LocalClientAPI.h
*
* This source file is part of the FoundationDB open source project
*
* Copyright 2013-2021 Apple Inc. and the FoundationDB project authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/

#ifndef FDBCLIENT_LOCALCLIENTAPI_H
#define FDBCLIENT_LOCALCLIENTAPI_H
#pragma once

#include "fdbclient/IClientApi.h"

IClientApi* getLocalClientAPI();
#endif
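Note: getLocalClientAPI() leans on C++11's thread-safe initialization of function-local statics. A standalone sketch of the idiom with a toy type (not FDB code):

struct Service {};

// The initializer runs exactly once even under concurrent first calls, and
// the deliberate leak avoids static-destruction-order hazards at process
// exit, the same trade-offs getLocalClientAPI() makes.
Service* getService() {
	static Service* s = new Service();
	return s;
}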
@ -184,7 +184,7 @@ std::map<std::string, std::string> configForToken(std::string const& mode) {
} else if (mode == "ssd" || mode == "ssd-2") {
logType = KeyValueStoreType::SSD_BTREE_V2;
storeType = KeyValueStoreType::SSD_BTREE_V2;
} else if (mode == "ssd-redwood-experimental") {
} else if (mode == "ssd-redwood-1-experimental") {
logType = KeyValueStoreType::SSD_BTREE_V2;
storeType = KeyValueStoreType::SSD_REDWOOD_V1;
} else if (mode == "ssd-rocksdb-experimental") {

@ -48,6 +48,14 @@ std::string trim(std::string const& connectionString) {
return trimmed;
}

std::string trimFromHostname(std::string const& networkAddress) {
const auto& pos = networkAddress.find("(fromHostname)");
if (pos != std::string::npos) {
return networkAddress.substr(0, pos);
}
return networkAddress;
}

} // namespace

FDB_DEFINE_BOOLEAN_PARAM(ConnectionStringNeedsPersisted);
@ -269,9 +277,10 @@ std::string ClusterConnectionString::toString() const {
std::string s = key.toString();
s += '@';
for (int i = 0; i < coord.size(); i++) {
if (i)
if (i) {
s += ',';
s += coord[i].toString();
}
s += trimFromHostname(coord[i].toString());
}
return s;
}

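Note: a worked example of the new trimFromHostname helper. Coordinator addresses resolved from hostnames carry a "(fromHostname)" suffix, which must not leak into the serialized connection string:

std::string a = trimFromHostname("10.0.0.1:4500(fromHostname)"); // "10.0.0.1:4500"
std::string b = trimFromHostname("10.0.0.2:4500");               // unchanged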
@ -18,10 +18,10 @@
* limitations under the License.
*/

#include "fdbclient/CoordinationInterface.h"
#include "fdbclient/MultiVersionTransaction.h"
#include "fdbclient/MultiVersionAssignmentVars.h"
#include "fdbclient/ThreadSafeTransaction.h"
#include "fdbclient/ClientVersion.h"
#include "fdbclient/LocalClientAPI.h"

#include "flow/network.h"
#include "flow/Platform.h"
@ -30,6 +30,10 @@

#include "flow/actorcompiler.h" // This must be the last #include.

#ifdef FDBCLIENT_NATIVEAPI_ACTOR_H
#error "MVC should not depend on the Native API"
#endif

void throwIfError(FdbCApi::fdb_error_t e) {
if (e) {
throw Error(e);
@ -141,6 +145,41 @@ ThreadFuture<RangeResult> DLTransaction::getRange(const KeyRangeRef& keys,
return getRange(firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), limits, snapshot, reverse);
}

ThreadFuture<RangeResult> DLTransaction::getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) {
FdbCApi::FDBFuture* f = api->transactionGetRangeAndFlatMap(tr,
begin.getKey().begin(),
begin.getKey().size(),
begin.orEqual,
begin.offset,
end.getKey().begin(),
end.getKey().size(),
end.orEqual,
end.offset,
mapper.begin(),
mapper.size(),
limits.rows,
limits.bytes,
FDB_STREAMING_MODE_EXACT,
0,
snapshot,
reverse);
return toThreadFuture<RangeResult>(api, f, [](FdbCApi::FDBFuture* f, FdbCApi* api) {
const FdbCApi::FDBKeyValue* kvs;
int count;
FdbCApi::fdb_bool_t more;
FdbCApi::fdb_error_t error = api->futureGetKeyValueArray(f, &kvs, &count, &more);
ASSERT(!error);

// The memory for this is stored in the FDBFuture and is released when the future gets destroyed
return RangeResult(RangeResultRef(VectorRef<KeyValueRef>((KeyValueRef*)kvs, count), more), Arena());
});
}

ThreadFuture<Standalone<VectorRef<const char*>>> DLTransaction::getAddressesForKey(const KeyRef& key) {
FdbCApi::FDBFuture* f = api->transactionGetAddressesForKey(tr, key.begin(), key.size());

@ -452,6 +491,7 @@ void DLApi::init() {
loadClientFunction(&api->transactionGetKey, lib, fdbCPath, "fdb_transaction_get_key");
loadClientFunction(&api->transactionGetAddressesForKey, lib, fdbCPath, "fdb_transaction_get_addresses_for_key");
loadClientFunction(&api->transactionGetRange, lib, fdbCPath, "fdb_transaction_get_range");
loadClientFunction(&api->transactionGetRangeAndFlatMap, lib, fdbCPath, "fdb_transaction_get_range_and_flat_map");
loadClientFunction(
&api->transactionGetVersionstamp, lib, fdbCPath, "fdb_transaction_get_versionstamp", headerVersion >= 410);
loadClientFunction(&api->transactionSet, lib, fdbCPath, "fdb_transaction_set");
@ -731,6 +771,18 @@ ThreadFuture<RangeResult> MultiVersionTransaction::getRange(const KeyRangeRef& k
return abortableFuture(f, tr.onChange);
}

ThreadFuture<RangeResult> MultiVersionTransaction::getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) {
auto tr = getTransaction();
auto f = tr.transaction ? tr.transaction->getRangeAndFlatMap(begin, end, mapper, limits, snapshot, reverse)
: makeTimeout<RangeResult>();
return abortableFuture(f, tr.onChange);
}

ThreadFuture<Standalone<StringRef>> MultiVersionTransaction::getVersionstamp() {
auto tr = getTransaction();
auto f = tr.transaction ? tr.transaction->getVersionstamp() : makeTimeout<Standalone<StringRef>>();
@ -888,6 +940,30 @@ ACTOR Future<Void> timeoutImpl(Reference<ThreadSingleAssignmentVar<Void>> tsav,
return Void();
}

namespace {

void validateOptionValuePresent(Optional<StringRef> value) {
if (!value.present()) {
throw invalid_option_value();
}
}

int64_t extractIntOption(Optional<StringRef> value, int64_t minValue, int64_t maxValue) {
validateOptionValuePresent(value);
if (value.get().size() != 8) {
throw invalid_option_value();
}

int64_t passed = *((int64_t*)(value.get().begin()));
if (passed > maxValue || passed < minValue) {
throw invalid_option_value();
}

return passed;
}

} // namespace

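Note: extractIntOption expects the option payload to be exactly eight bytes holding a 64-bit integer. A hedged sketch of the caller-side encoding (native endianness, matching the raw pointer read above):

int64_t threads = 4;
Optional<StringRef> value = StringRef(reinterpret_cast<const uint8_t*>(&threads), sizeof(threads));
int64_t parsed = extractIntOption(value, 1, 1024); // == 4; out-of-range values throw invalid_option_value()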
// Configure a timeout based on the options set for this transaction. This timeout only applies
// if we don't have an underlying database object to connect with.
void MultiVersionTransaction::setTimeout(Optional<StringRef> value) {
@ -1467,7 +1543,8 @@ Reference<ClientInfo> MultiVersionApi::getLocalClient() {

void MultiVersionApi::selectApiVersion(int apiVersion) {
if (!localClient) {
localClient = makeReference<ClientInfo>(ThreadSafeApi::api);
localClient = makeReference<ClientInfo>(getLocalClientAPI());
ASSERT(localClient);
}

if (this->apiVersion != 0 && this->apiVersion != apiVersion) {
@ -1482,6 +1559,8 @@ const char* MultiVersionApi::getClientVersion() {
return localClient->api->getClientVersion();
}

namespace {

void validateOption(Optional<StringRef> value, bool canBePresent, bool canBeAbsent, bool canBeEmpty = true) {
ASSERT(canBePresent || canBeAbsent);

@ -1493,6 +1572,8 @@ void validateOption(Optional<StringRef> value, bool canBePresent, bool canBeAbse
}
}

} // namespace

void MultiVersionApi::disableMultiVersionClientApi() {
MutexHolder holder(lock);
if (networkStartSetup || localClientDisabled) {
@ -1703,16 +1784,15 @@ void MultiVersionApi::setNetworkOptionInternal(FDBNetworkOptions::Option option,
} else if (option == FDBNetworkOptions::CLIENT_THREADS_PER_VERSION) {
MutexHolder holder(lock);
validateOption(value, true, false, false);
ASSERT(!networkStartSetup);
if (networkStartSetup) {
throw invalid_option();
}
#if defined(__unixish__)
threadCount = extractIntOption(value, 1, 1024);
#else
// multiple client threads are not supported on windows.
threadCount = extractIntOption(value, 1, 1);
#endif
if (threadCount > 1) {
disableLocalClient();
}
} else {
MutexHolder holder(lock);
localClient->api->setNetworkOption(option, value);
@ -1740,6 +1820,10 @@ void MultiVersionApi::setupNetwork() {
throw network_already_setup();
}

if (threadCount > 1) {
disableLocalClient();
}

for (auto i : externalClientDescriptions) {
std::string path = i.second.libPath;
std::string filename = basename(path);

@ -118,6 +118,23 @@ struct FdbCApi : public ThreadSafeReferenceCounted<FdbCApi> {
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse);
FDBFuture* (*transactionGetRangeAndFlatMap)(FDBTransaction* tr,
uint8_t const* beginKeyName,
int beginKeyNameLength,
fdb_bool_t beginOrEqual,
int beginOffset,
uint8_t const* endKeyName,
int endKeyNameLength,
fdb_bool_t endOrEqual,
int endOffset,
uint8_t const* mapper_name,
int mapper_name_length,
int limit,
int targetBytes,
FDBStreamingMode mode,
int iteration,
fdb_bool_t snapshot,
fdb_bool_t reverse);
FDBFuture* (*transactionGetVersionstamp)(FDBTransaction* tr);

void (*transactionSet)(FDBTransaction* tr,
@ -219,6 +236,12 @@ public:
GetRangeLimits limits,
bool snapshot = false,
bool reverse = false) override;
ThreadFuture<RangeResult> getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) override;
ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) override;
ThreadFuture<Standalone<StringRef>> getVersionstamp() override;
ThreadFuture<int64_t> getEstimatedRangeSizeBytes(const KeyRangeRef& keys) override;
@ -360,6 +383,12 @@ public:
GetRangeLimits limits,
bool snapshot = false,
bool reverse = false) override;
ThreadFuture<RangeResult> getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) override;
ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) override;
ThreadFuture<Standalone<StringRef>> getVersionstamp() override;

@ -65,6 +65,7 @@
#include "flow/ActorCollection.h"
#include "flow/DeterministicRandom.h"
#include "flow/Error.h"
#include "flow/FastRef.h"
#include "flow/IRandom.h"
#include "flow/flow.h"
#include "flow/genericactors.actor.h"
@ -160,6 +161,8 @@ void DatabaseContext::addTssMapping(StorageServerInterface const& ssi, StorageSe
TSSEndpointData(tssi.id(), tssi.getKey.getEndpoint(), metrics));
queueModel.updateTssEndpoint(ssi.getKeyValues.getEndpoint().token.first(),
TSSEndpointData(tssi.id(), tssi.getKeyValues.getEndpoint(), metrics));
queueModel.updateTssEndpoint(ssi.getKeyValuesAndFlatMap.getEndpoint().token.first(),
TSSEndpointData(tssi.id(), tssi.getKeyValuesAndFlatMap.getEndpoint(), metrics));
queueModel.updateTssEndpoint(ssi.getKeyValuesStream.getEndpoint().token.first(),
TSSEndpointData(tssi.id(), tssi.getKeyValuesStream.getEndpoint(), metrics));

@ -183,6 +186,7 @@ void DatabaseContext::removeTssMapping(StorageServerInterface const& ssi) {
queueModel.removeTssEndpoint(ssi.getValue.getEndpoint().token.first());
queueModel.removeTssEndpoint(ssi.getKey.getEndpoint().token.first());
queueModel.removeTssEndpoint(ssi.getKeyValues.getEndpoint().token.first());
queueModel.removeTssEndpoint(ssi.getKeyValuesAndFlatMap.getEndpoint().token.first());
queueModel.removeTssEndpoint(ssi.getKeyValuesStream.getEndpoint().token.first());

queueModel.removeTssEndpoint(ssi.watchValue.getEndpoint().token.first());
@ -717,14 +721,17 @@ Future<Void> attemptGRVFromOldProxies(std::vector<GrvProxyInterface> oldProxies,
return waitForAll(replies);
}

ACTOR static Future<Void> monitorProxiesChange(DatabaseContext* cx,
Reference<AsyncVar<ClientDBInfo> const> clientDBInfo,
AsyncTrigger* triggerVar) {
ACTOR static Future<Void> monitorClientDBInfoChange(DatabaseContext* cx,
Reference<AsyncVar<ClientDBInfo> const> clientDBInfo,
AsyncTrigger* proxyChangeTrigger,
AsyncTrigger* clientLibChangeTrigger) {
state std::vector<CommitProxyInterface> curCommitProxies;
state std::vector<GrvProxyInterface> curGrvProxies;
state ActorCollection actors(false);
state uint64_t curClientLibChangeCounter;
curCommitProxies = clientDBInfo->get().commitProxies;
curGrvProxies = clientDBInfo->get().grvProxies;
curClientLibChangeCounter = clientDBInfo->get().clientLibChangeCounter;

loop {
choose {
@ -745,7 +752,10 @@ ACTOR static Future<Void> monitorProxiesChange(DatabaseContext* cx,
}
curCommitProxies = clientDBInfo->get().commitProxies;
curGrvProxies = clientDBInfo->get().grvProxies;
triggerVar->trigger();
proxyChangeTrigger->trigger();
}
if (curClientLibChangeCounter != clientDBInfo->get().clientLibChangeCounter) {
clientLibChangeTrigger->trigger();
}
}
when(wait(actors.getResult())) { UNSTOPPABLE_ASSERT(false); }
@ -1196,6 +1206,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<IClusterConnection
transactionPhysicalReadsCompleted("PhysicalReadRequestsCompleted", cc),
transactionGetKeyRequests("GetKeyRequests", cc), transactionGetValueRequests("GetValueRequests", cc),
transactionGetRangeRequests("GetRangeRequests", cc),
transactionGetRangeAndFlatMapRequests("GetRangeAndFlatMapRequests", cc),
transactionGetRangeStreamRequests("GetRangeStreamRequests", cc), transactionWatchRequests("WatchRequests", cc),
transactionGetAddressesForKeyRequests("GetAddressesForKeyRequests", cc), transactionBytesRead("BytesRead", cc),
transactionKeysRead("KeysRead", cc), transactionMetadataVersionReads("MetadataVersionReads", cc),
@ -1212,7 +1223,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<IClusterConnection
transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
bytesPerCommit(1000), outstandingWatches(0), transactionTracingEnabled(true), taskID(taskID),
bytesPerCommit(1000), outstandingWatches(0), transactionTracingSample(false), taskID(taskID),
clientInfo(clientInfo), clientInfoMonitor(clientInfoMonitor), coordinator(coordinator), apiVersion(apiVersion),
mvCacheInsertLocation(0), healthMetricsLastUpdated(0), detailedHealthMetricsLastUpdated(0),
smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT),
@ -1234,7 +1245,7 @@ DatabaseContext::DatabaseContext(Reference<AsyncVar<Reference<IClusterConnection
getValueSubmitted.init(LiteralStringRef("NativeAPI.GetValueSubmitted"));
getValueCompleted.init(LiteralStringRef("NativeAPI.GetValueCompleted"));

monitorProxiesInfoChange = monitorProxiesChange(this, clientInfo, &proxiesChangeTrigger);
clientDBInfoMonitor = monitorClientDBInfoChange(this, clientInfo, &proxiesChangeTrigger, &clientLibChangeTrigger);
tssMismatchHandler = handleTssMismatches(this);
clientStatusUpdater.actor = clientStatusUpdateActor(this);
cacheListMonitor = monitorCacheList(this);
@ -1451,6 +1462,7 @@ DatabaseContext::DatabaseContext(const Error& err)
transactionPhysicalReadsCompleted("PhysicalReadRequestsCompleted", cc),
transactionGetKeyRequests("GetKeyRequests", cc), transactionGetValueRequests("GetValueRequests", cc),
transactionGetRangeRequests("GetRangeRequests", cc),
transactionGetRangeAndFlatMapRequests("GetRangeAndFlatMapRequests", cc),
transactionGetRangeStreamRequests("GetRangeStreamRequests", cc), transactionWatchRequests("WatchRequests", cc),
transactionGetAddressesForKeyRequests("GetAddressesForKeyRequests", cc), transactionBytesRead("BytesRead", cc),
transactionKeysRead("KeysRead", cc), transactionMetadataVersionReads("MetadataVersionReads", cc),
@ -1467,7 +1479,7 @@ DatabaseContext::DatabaseContext(const Error& err)
transactionsExpensiveClearCostEstCount("ExpensiveClearCostEstCount", cc),
transactionGrvFullBatches("NumGrvFullBatches", cc), transactionGrvTimedOutBatches("NumGrvTimedOutBatches", cc),
latencies(1000), readLatencies(1000), commitLatencies(1000), GRVLatencies(1000), mutationsPerCommit(1000),
bytesPerCommit(1000), transactionTracingEnabled(true), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT) {}
bytesPerCommit(1000), transactionTracingSample(false), smoothMidShardSize(CLIENT_KNOBS->SHARD_STAT_SMOOTH_AMOUNT) {}

// Static constructor used by server processes to create a DatabaseContext
// For internal (fdbserver) use only
@ -1494,7 +1506,7 @@ Database DatabaseContext::create(Reference<AsyncVar<ClientDBInfo>> clientInfo,

DatabaseContext::~DatabaseContext() {
cacheListMonitor.cancel();
monitorProxiesInfoChange.cancel();
clientDBInfoMonitor.cancel();
monitorTssInfoChange.cancel();
tssMismatchHandler.cancel();
for (auto it = server_interf.begin(); it != server_interf.end(); it = server_interf.erase(it))
@ -1583,6 +1595,10 @@ Future<Void> DatabaseContext::onProxiesChanged() const {
return this->proxiesChangeTrigger.onTrigger();
}

Future<Void> DatabaseContext::onClientLibStatusChanged() const {
return this->clientLibChangeTrigger.onTrigger();
}

bool DatabaseContext::sampleReadTags() const {
double sampleRate = GlobalConfig::globalConfig().get(transactionTagSampleRate, CLIENT_KNOBS->READ_TAG_SAMPLE_RATE);
return sampleRate > 0 && deterministicRandom()->random01() <= sampleRate;
@ -1666,14 +1682,6 @@ void DatabaseContext::setOption(FDBDatabaseOptions::Option option, Optional<Stri
validateOptionValueNotPresent(value);
snapshotRywEnabled--;
break;
case FDBDatabaseOptions::DISTRIBUTED_TRANSACTION_TRACE_ENABLE:
validateOptionValueNotPresent(value);
transactionTracingEnabled++;
break;
case FDBDatabaseOptions::DISTRIBUTED_TRANSACTION_TRACE_DISABLE:
validateOptionValueNotPresent(value);
transactionTracingEnabled--;
break;
case FDBDatabaseOptions::USE_CONFIG_DATABASE:
validateOptionValueNotPresent(value);
useConfigDatabase = true;
@ -3029,7 +3037,8 @@ ACTOR Future<Void> watchValueMap(Future<Version> version,
return Void();
}

void transformRangeLimits(GetRangeLimits limits, Reverse reverse, GetKeyValuesRequest& req) {
template <class GetKeyValuesFamilyRequest>
void transformRangeLimits(GetRangeLimits limits, Reverse reverse, GetKeyValuesFamilyRequest& req) {
if (limits.bytes != 0) {
if (!limits.hasRowLimit())
req.limit = CLIENT_KNOBS->REPLY_BYTE_LIMIT; // Can't get more than this many rows anyway
@ -3049,26 +3058,47 @@ void transformRangeLimits(GetRangeLimits limits, Reverse reverse, GetKeyValuesRe
}
}

ACTOR Future<RangeResult> getExactRange(Database cx,
Version version,
KeyRange keys,
GetRangeLimits limits,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
template <class GetKeyValuesFamilyRequest>
RequestStream<GetKeyValuesFamilyRequest> StorageServerInterface::*getRangeRequestStream() {
if constexpr (std::is_same<GetKeyValuesFamilyRequest, GetKeyValuesRequest>::value) {
return &StorageServerInterface::getKeyValues;
} else if (std::is_same<GetKeyValuesFamilyRequest, GetKeyValuesAndFlatMapRequest>::value) {
return &StorageServerInterface::getKeyValuesAndFlatMap;
} else {
UNREACHABLE();
}
}

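Note: getRangeRequestStream maps a request type to the StorageServerInterface member that serves it, resolved at compile time. A standalone sketch of the same pointer-to-member dispatch with toy types (not FDB code):

#include <type_traits>

struct ReqA {};
struct ReqB {};
struct Iface { int streamA; int streamB; };

// Toy version of getRangeRequestStream: pick the member that serves the
// request type; an unsupported type fails to compile instead of at runtime.
template <class Req>
int Iface::*pickStream() {
	if constexpr (std::is_same_v<Req, ReqA>) {
		return &Iface::streamA;
	} else {
		static_assert(std::is_same_v<Req, ReqB>, "unsupported request type");
		return &Iface::streamB;
	}
}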
ACTOR template <class GetKeyValuesFamilyRequest, class GetKeyValuesFamilyReply>
Future<RangeResult> getExactRange(Database cx,
Version version,
KeyRange keys,
Key mapper,
GetRangeLimits limits,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
state RangeResult output;
state Span span("NAPI:getExactRange"_loc, info.spanID);

// printf("getExactRange( '%s', '%s' )\n", keys.begin.toString().c_str(), keys.end.toString().c_str());
loop {
state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations = wait(getKeyRangeLocations(
cx, keys, CLIENT_KNOBS->GET_RANGE_SHARD_LIMIT, reverse, &StorageServerInterface::getKeyValues, info));
state std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
wait(getKeyRangeLocations(cx,
keys,
CLIENT_KNOBS->GET_RANGE_SHARD_LIMIT,
reverse,
getRangeRequestStream<GetKeyValuesFamilyRequest>(),
info));
ASSERT(locations.size());
state int shard = 0;
loop {
const KeyRangeRef& range = locations[shard].first;

GetKeyValuesRequest req;
GetKeyValuesFamilyRequest req;
req.mapper = mapper;
req.arena.dependsOn(mapper.arena());

req.version = version;
req.begin = firstGreaterOrEqual(range.begin);
req.end = firstGreaterOrEqual(range.end);
@ -3098,14 +3128,14 @@ ACTOR Future<RangeResult> getExactRange(Database cx,
.detail("Servers", locations[shard].second->description());*/
}
++cx->transactionPhysicalReads;
state GetKeyValuesReply rep;
state GetKeyValuesFamilyReply rep;
try {
choose {
when(wait(cx->connectionFileChanged())) { throw transaction_too_old(); }
when(GetKeyValuesReply _rep =
when(GetKeyValuesFamilyReply _rep =
wait(loadBalance(cx.getPtr(),
locations[shard].second,
&StorageServerInterface::getKeyValues,
getRangeRequestStream<GetKeyValuesFamilyRequest>(),
req,
TaskPriority::DefaultPromiseEndpoint,
AtMostOnce::False,
@ -3155,7 +3185,7 @@ ACTOR Future<RangeResult> getExactRange(Database cx,
.detail("BlockBytes", rep.data.expectedSize());
ASSERT(false);
}
TEST(true); // GetKeyValuesReply.more in getExactRange
TEST(true); // GetKeyValuesFamilyReply.more in getExactRange
// Make next request to the same shard with a beginning key just after the last key returned
if (reverse)
locations[shard].first =
@ -3231,14 +3261,16 @@ Future<Key> resolveKey(Database const& cx,
return getKey(cx, key, version, info, tags);
}

ACTOR Future<RangeResult> getRangeFallback(Database cx,
Version version,
KeySelector begin,
KeySelector end,
GetRangeLimits limits,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
ACTOR template <class GetKeyValuesFamilyRequest, class GetKeyValuesFamilyReply>
Future<RangeResult> getRangeFallback(Database cx,
Version version,
KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
if (version == latestVersion) {
state Transaction transaction(cx);
transaction.setOption(FDBTransactionOptions::CAUSAL_READ_RISKY);
@ -3261,7 +3293,8 @@ ACTOR Future<RangeResult> getRangeFallback(Database cx,
// if b is allKeys.begin, we have either read through the beginning of the database,
// or allKeys.begin exists in the database and will be part of the conflict range anyways

RangeResult _r = wait(getExactRange(cx, version, KeyRangeRef(b, e), limits, reverse, info, tags));
RangeResult _r = wait(getExactRange<GetKeyValuesFamilyRequest, GetKeyValuesFamilyReply>(
cx, version, KeyRangeRef(b, e), mapper, limits, reverse, info, tags));
RangeResult r = _r;

if (b == allKeys.begin && ((reverse && !r.more) || !reverse))
@ -3286,6 +3319,7 @@ ACTOR Future<RangeResult> getRangeFallback(Database cx,
return r;
}

// TODO: Client should add mapped keys to conflict ranges.
void getRangeFinished(Database cx,
Reference<TransactionLogInfo> trLogInfo,
double startTime,
@ -3340,17 +3374,23 @@ void getRangeFinished(Database cx,
}
}

ACTOR Future<RangeResult> getRange(Database cx,
Reference<TransactionLogInfo> trLogInfo,
Future<Version> fVersion,
KeySelector begin,
KeySelector end,
GetRangeLimits limits,
Promise<std::pair<Key, Key>> conflictRange,
Snapshot snapshot,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
// GetKeyValuesFamilyRequest: GetKeyValuesRequest or GetKeyValuesAndFlatMapRequest
// GetKeyValuesFamilyReply: GetKeyValuesReply or GetKeyValuesAndFlatMapReply
// Sadly we need GetKeyValuesFamilyReply because we cannot do something like: state
// REPLY_TYPE(GetKeyValuesFamilyRequest) rep;
ACTOR template <class GetKeyValuesFamilyRequest, class GetKeyValuesFamilyReply>
Future<RangeResult> getRange(Database cx,
Reference<TransactionLogInfo> trLogInfo,
Future<Version> fVersion,
KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Promise<std::pair<Key, Key>> conflictRange,
Snapshot snapshot,
Reverse reverse,
TransactionInfo info,
TagSet tags) {
state GetRangeLimits originalLimits(limits);
state KeySelector originalBegin = begin;
state KeySelector originalEnd = end;
@ -3384,11 +3424,13 @@ ACTOR Future<RangeResult> getRange(Database cx,

Key locationKey = reverse ? Key(end.getKey(), end.arena()) : Key(begin.getKey(), begin.arena());
Reverse locationBackward{ reverse ? (end - 1).isBackward() : begin.isBackward() };
state std::pair<KeyRange, Reference<LocationInfo>> beginServer =
wait(getKeyLocation(cx, locationKey, &StorageServerInterface::getKeyValues, info, locationBackward));
state std::pair<KeyRange, Reference<LocationInfo>> beginServer = wait(getKeyLocation(
cx, locationKey, getRangeRequestStream<GetKeyValuesFamilyRequest>(), info, locationBackward));
state KeyRange shard = beginServer.first;
state bool modifiedSelectors = false;
state GetKeyValuesRequest req;
state GetKeyValuesFamilyRequest req;
req.mapper = mapper;
req.arena.dependsOn(mapper.arena());

req.isFetchKeys = (info.taskID == TaskPriority::FetchKeys);
req.version = readVersion;
@ -3447,17 +3489,17 @@ ACTOR Future<RangeResult> getRange(Database cx,
}

++cx->transactionPhysicalReads;
state GetKeyValuesReply rep;
state GetKeyValuesFamilyReply rep;
try {
if (CLIENT_BUGGIFY_WITH_PROB(.01)) {
throw deterministicRandom()->randomChoice(
std::vector<Error>{ transaction_too_old(), future_version() });
}
// state AnnotateActor annotation(currentLineage);
GetKeyValuesReply _rep =
GetKeyValuesFamilyReply _rep =
wait(loadBalance(cx.getPtr(),
beginServer.second,
&StorageServerInterface::getKeyValues,
getRangeRequestStream<GetKeyValuesFamilyRequest>(),
req,
TaskPriority::DefaultPromiseEndpoint,
AtMostOnce::False,
@ -3557,11 +3599,11 @@ ACTOR Future<RangeResult> getRange(Database cx,

if (!rep.more) {
ASSERT(modifiedSelectors);
TEST(true); // !GetKeyValuesReply.more and modifiedSelectors in getRange
TEST(true); // !GetKeyValuesFamilyReply.more and modifiedSelectors in getRange

if (!rep.data.size()) {
RangeResult result = wait(getRangeFallback(
cx, version, originalBegin, originalEnd, originalLimits, reverse, info, tags));
RangeResult result = wait(getRangeFallback<GetKeyValuesFamilyRequest, GetKeyValuesFamilyReply>(
cx, version, originalBegin, originalEnd, mapper, originalLimits, reverse, info, tags));
getRangeFinished(cx,
trLogInfo,
startTime,
@ -3579,7 +3621,7 @@ ACTOR Future<RangeResult> getRange(Database cx,
else
begin = firstGreaterOrEqual(shard.end);
} else {
TEST(true); // GetKeyValuesReply.more in getRange
TEST(true); // GetKeyValuesFamilyReply.more in getRange
if (reverse)
end = firstGreaterOrEqual(output[output.size() - 1].key);
else
@ -3597,8 +3639,8 @@ ACTOR Future<RangeResult> getRange(Database cx,
Reverse{ reverse ? (end - 1).isBackward() : begin.isBackward() });

if (e.code() == error_code_wrong_shard_server) {
RangeResult result = wait(getRangeFallback(
cx, version, originalBegin, originalEnd, originalLimits, reverse, info, tags));
RangeResult result = wait(getRangeFallback<GetKeyValuesFamilyRequest, GetKeyValuesFamilyReply>(
cx, version, originalBegin, originalEnd, mapper, originalLimits, reverse, info, tags));
getRangeFinished(cx,
trLogInfo,
startTime,
@ -4164,17 +4206,18 @@ Future<RangeResult> getRange(Database const& cx,
Reverse const& reverse,
TransactionInfo const& info,
TagSet const& tags) {
return getRange(cx,
Reference<TransactionLogInfo>(),
fVersion,
begin,
end,
limits,
Promise<std::pair<Key, Key>>(),
Snapshot::True,
reverse,
info,
tags);
return getRange<GetKeyValuesRequest, GetKeyValuesReply>(cx,
Reference<TransactionLogInfo>(),
fVersion,
begin,
end,
""_sr,
limits,
Promise<std::pair<Key, Key>>(),
Snapshot::True,
reverse,
info,
tags);
}

bool DatabaseContext::debugUseTags = false;
@ -4205,9 +4248,15 @@ void debugAddTags(Transaction* tr) {
}
}

SpanID generateSpanID(int transactionTracingEnabled) {
SpanID generateSpanID(bool transactionTracingSample, SpanID parentContext = SpanID()) {
uint64_t txnId = deterministicRandom()->randomUInt64();
if (transactionTracingEnabled > 0) {
if (parentContext.isValid()) {
if (parentContext.first() > 0) {
txnId = parentContext.first();
}
uint64_t tokenId = parentContext.second() > 0 ? deterministicRandom()->randomUInt64() : 0;
return SpanID(txnId, tokenId);
} else if (transactionTracingSample) {
uint64_t tokenId = deterministicRandom()->random01() <= FLOW_KNOBS->TRACING_SAMPLE_RATE
? deterministicRandom()->randomUInt64()
: 0;
@ -4217,10 +4266,10 @@ SpanID generateSpanID(int transactionTracingEnabled) {
}
}

Transaction::Transaction() : info(TaskPriority::DefaultEndpoint, generateSpanID(true)) {}
Transaction::Transaction() : info(TaskPriority::DefaultEndpoint, generateSpanID(false)) {}

Transaction::Transaction(Database const& cx)
: info(cx->taskID, generateSpanID(cx->transactionTracingEnabled)), numErrors(0), options(cx),
: info(cx->taskID, generateSpanID(cx->transactionTracingSample)), numErrors(0), options(cx),
span(info.spanID, "Transaction"_loc), trLogInfo(createTrLogInfoProbabilistically(cx)), cx(cx),
backoff(CLIENT_KNOBS->DEFAULT_BACKOFF), committedVersion(invalidVersion), tr(info.spanID) {
if (DatabaseContext::debugUseTags) {
@ -4469,13 +4518,26 @@ Future<Key> Transaction::getKey(const KeySelector& key, Snapshot snapshot) {
return getKeyAndConflictRange(cx, key, getReadVersion(), conflictRange, info, options.readTags);
}

Future<RangeResult> Transaction::getRange(const KeySelector& begin,
const KeySelector& end,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse) {
template <class GetKeyValuesFamilyRequest>
void increaseCounterForRequest(Database cx) {
if constexpr (std::is_same<GetKeyValuesFamilyRequest, GetKeyValuesRequest>::value) {
++cx->transactionGetRangeRequests;
} else if (std::is_same<GetKeyValuesFamilyRequest, GetKeyValuesAndFlatMapRequest>::value) {
++cx->transactionGetRangeAndFlatMapRequests;
} else {
UNREACHABLE();
}
}

template <class GetKeyValuesFamilyRequest, class GetKeyValuesFamilyReply>
Future<RangeResult> Transaction::getRangeInternal(const KeySelector& begin,
const KeySelector& end,
const Key& mapper,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse) {
++cx->transactionLogicalReads;
++cx->transactionGetRangeRequests;
increaseCounterForRequest<GetKeyValuesFamilyRequest>(cx);

if (limits.isReached())
return RangeResult();
@ -4507,8 +4569,37 @@ Future<RangeResult> Transaction::getRange(const KeySelector& begin,
extraConflictRanges.push_back(conflictRange.getFuture());
}

return ::getRange(
cx, trLogInfo, getReadVersion(), b, e, limits, conflictRange, snapshot, reverse, info, options.readTags);
return ::getRange<GetKeyValuesFamilyRequest, GetKeyValuesFamilyReply>(cx,
trLogInfo,
getReadVersion(),
b,
e,
mapper,
limits,
conflictRange,
snapshot,
reverse,
info,
options.readTags);
}

Future<RangeResult> Transaction::getRange(const KeySelector& begin,
const KeySelector& end,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse) {
return getRangeInternal<GetKeyValuesRequest, GetKeyValuesReply>(begin, end, ""_sr, limits, snapshot, reverse);
}

Future<RangeResult> Transaction::getRangeAndFlatMap(const KeySelector& begin,
const KeySelector& end,
const Key& mapper,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse) {

return getRangeInternal<GetKeyValuesAndFlatMapRequest, GetKeyValuesAndFlatMapReply>(
begin, end, mapper, limits, snapshot, reverse);
}

Future<RangeResult> Transaction::getRange(const KeySelector& begin,
@ -4845,8 +4936,8 @@ void Transaction::reset() {

void Transaction::fullReset() {
reset();
span = Span(span.location);
info.spanID = span.context;
info.spanID = generateSpanID(cx->transactionTracingSample);
span = Span(info.spanID, "Transaction"_loc);
backoff = CLIENT_KNOBS->DEFAULT_BACKOFF;
}

@ -5368,6 +5459,9 @@ ACTOR Future<Void> commitAndWatch(Transaction* self) {
try {
wait(self->commitMutations());

self->getDatabase()->transactionTracingSample =
(self->getCommittedVersion() % 60000000) < (60000000 * FLOW_KNOBS->TRACING_SAMPLE_RATE);

if (!self->watches.empty()) {
self->setupWatches();
}
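Note: the sampling switch above replaces the old enable/disable counter. After each commit, transactionTracingSample is derived from the committed version; since versions advance at roughly one million per second, `committedVersion % 60000000` sweeps 0..60M about once a minute, so the predicate holds during roughly the first fraction TRACING_SAMPLE_RATE of each window. A worked instance, with an assumed knob value of 0.01:

// committedVersion = 7'200'000'123
// 7'200'000'123 % 60'000'000 = 123        -> 123 < 600'000, sampling on
// committedVersion = 7'230'000'000
// 7'230'000'000 % 60'000'000 = 30'000'000 -> not < 600'000, sampling off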
@ -5871,7 +5965,7 @@ Future<Version> Transaction::getReadVersion(uint32_t flags) {
|
||||
}
|
||||
|
||||
Location location = "NAPI:getReadVersion"_loc;
|
||||
UID spanContext = generateSpanID(cx->transactionTracingEnabled);
|
||||
UID spanContext = generateSpanID(cx->transactionTracingSample, info.spanID);
|
||||
auto const req = DatabaseContext::VersionRequest(spanContext, options.tags, info.debugID);
|
||||
batcher.stream.send(req);
|
||||
startTime = now();
|
||||
@ -6234,7 +6328,7 @@ ACTOR Future<std::pair<Optional<StorageMetrics>, int>> waitStorageMetrics(Databa
|
||||
StorageMetrics permittedError,
|
||||
int shardLimit,
|
||||
int expectedShardCount) {
|
||||
state Span span("NAPI:WaitStorageMetrics"_loc, generateSpanID(cx->transactionTracingEnabled));
|
||||
state Span span("NAPI:WaitStorageMetrics"_loc, generateSpanID(cx->transactionTracingSample));
|
||||
loop {
|
||||
std::vector<std::pair<KeyRange, Reference<LocationInfo>>> locations =
|
||||
wait(getKeyRangeLocations(cx,
|
||||
@ -6663,12 +6757,101 @@ Future<Void> DatabaseContext::createSnapshot(StringRef uid, StringRef snapshot_c
|
||||
return createSnapshotActor(this, UID::fromString(uid_str), snapshot_command);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> storageFeedVersionUpdater(StorageServerInterface interf, ChangeFeedStorageData* self) {
|
||||
loop {
|
||||
if (self->version.get() < self->desired.get()) {
|
||||
wait(delay(CLIENT_KNOBS->CHANGE_FEED_EMPTY_BATCH_TIME) || self->version.whenAtLeast(self->desired.get()));
|
||||
if (self->version.get() < self->desired.get()) {
|
||||
ChangeFeedVersionUpdateReply rep = wait(brokenPromiseToNever(
|
||||
interf.changeFeedVersionUpdate.getReply(ChangeFeedVersionUpdateRequest(self->desired.get()))));
|
||||
if (rep.version > self->version.get()) {
|
||||
self->version.set(rep.version);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
wait(self->desired.whenAtLeast(self->version.get() + 1));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Reference<ChangeFeedStorageData> DatabaseContext::getStorageData(StorageServerInterface interf) {
|
||||
auto it = changeFeedUpdaters.find(interf.id());
|
||||
if (it == changeFeedUpdaters.end()) {
|
||||
Reference<ChangeFeedStorageData> newStorageUpdater = makeReference<ChangeFeedStorageData>();
|
||||
newStorageUpdater->id = interf.id();
|
||||
newStorageUpdater->updater = storageFeedVersionUpdater(interf, newStorageUpdater.getPtr());
|
||||
changeFeedUpdaters[interf.id()] = newStorageUpdater;
|
||||
return newStorageUpdater;
|
||||
}
|
||||
return it->second;
|
||||
}
|
||||
|
||||
Version ChangeFeedData::getVersion() {
|
||||
if (notAtLatest.get() == 0 && mutations.isEmpty()) {
|
||||
Version v = storageData[0]->version.get();
|
||||
for (int i = 1; i < storageData.size(); i++) {
|
||||
if (storageData[i]->version.get() < v) {
|
||||
v = storageData[i]->version.get();
|
||||
}
|
||||
}
|
||||
return std::max(v, lastReturnedVersion.get());
|
||||
}
|
||||
return lastReturnedVersion.get();
|
||||
}
|
||||
|
||||
ACTOR Future<Void> changeFeedWhenAtLatest(ChangeFeedData* self, Version version) {
|
||||
state Future<Void> lastReturned = self->lastReturnedVersion.whenAtLeast(version);
|
||||
loop {
|
||||
if (self->notAtLatest.get() == 0) {
|
||||
std::vector<Future<Void>> allAtLeast;
|
||||
for (auto& it : self->storageData) {
|
||||
if (it->version.get() < version) {
|
||||
if (version > it->desired.get()) {
|
||||
it->desired.set(version);
|
||||
}
|
||||
allAtLeast.push_back(it->version.whenAtLeast(version));
|
||||
}
|
||||
}
|
||||
choose {
|
||||
when(wait(lastReturned)) { return Void(); }
|
||||
when(wait(waitForAll(allAtLeast))) {
|
||||
if (self->mutations.isEmpty()) {
|
||||
return Void();
|
||||
}
|
||||
choose {
|
||||
when(wait(self->mutations.onEmpty())) {
|
||||
wait(delay(0));
|
||||
return Void();
|
||||
}
|
||||
when(wait(lastReturned)) { return Void(); }
|
||||
when(wait(self->refresh.getFuture())) {}
|
||||
}
|
||||
}
|
||||
when(wait(self->refresh.getFuture())) {}
|
||||
}
|
||||
} else {
|
||||
choose {
|
||||
when(wait(lastReturned)) { return Void(); }
|
||||
when(wait(self->notAtLatest.onChange())) {}
|
||||
when(wait(self->refresh.getFuture())) {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Future<Void> ChangeFeedData::whenAtLeast(Version version) {
|
||||
return changeFeedWhenAtLatest(this, version);
|
||||
}
|
||||
|
||||
ACTOR Future<Void> singleChangeFeedStream(StorageServerInterface interf,
|
||||
PromiseStream<Standalone<MutationsAndVersionRef>> results,
|
||||
Key rangeID,
|
||||
Version begin,
|
||||
Version end,
|
||||
KeyRange range) {
|
||||
KeyRange range,
|
||||
Reference<ChangeFeedData> feedData,
|
||||
Reference<ChangeFeedStorageData> storageData) {
|
||||
state bool atLatestVersion = false;
|
||||
loop {
|
||||
try {
|
||||
state Version lastEmpty = invalidVersion;
|
||||
@ -6699,6 +6882,13 @@ ACTOR Future<Void> singleChangeFeedStream(StorageServerInterface interf,
|
||||
results.sendError(end_of_stream());
|
||||
return Void();
|
||||
}
|
||||
if (!atLatestVersion && rep.atLatestVersion) {
|
||||
atLatestVersion = true;
|
||||
feedData->notAtLatest.set(feedData->notAtLatest.get() - 1);
|
||||
}
|
||||
if (rep.minStreamVersion > storageData->version.get()) {
|
||||
storageData->version.set(rep.minStreamVersion);
|
||||
}
|
||||
}
|
||||
} catch (Error& e) {
|
||||
if (e.code() == error_code_actor_cancelled) {
|
||||
@ -6716,17 +6906,39 @@ struct MutationAndVersionStream {
|
||||
bool operator<(MutationAndVersionStream const& rhs) const { return next.version > rhs.next.version; }
|
||||
};
|
||||
|
||||
ACTOR Future<Void> mergeChangeFeedStream(std::vector<std::pair<StorageServerInterface, KeyRange>> interfs,
|
||||
PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results,
|
||||
ACTOR Future<Void> mergeChangeFeedStream(Reference<DatabaseContext> db,
|
||||
std::vector<std::pair<StorageServerInterface, KeyRange>> interfs,
|
||||
Reference<ChangeFeedData> results,
|
||||
Key rangeID,
|
||||
Version* begin,
|
||||
Version end) {
|
||||
state std::priority_queue<MutationAndVersionStream, std::vector<MutationAndVersionStream>> mutations;
|
||||
state std::vector<Future<Void>> fetchers(interfs.size());
|
||||
state std::vector<MutationAndVersionStream> streams(interfs.size());
|
||||
|
||||
for (auto& it : results->storageData) {
|
||||
if (it->debugGetReferenceCount() == 2) {
|
||||
db->changeFeedUpdaters.erase(it->id);
|
||||
}
|
||||
}
|
||||
results->storageData.clear();
|
||||
Promise<Void> refresh = results->refresh;
|
||||
results->refresh = Promise<Void>();
|
||||
for (int i = 0; i < interfs.size(); i++) {
|
||||
fetchers[i] =
|
||||
singleChangeFeedStream(interfs[i].first, streams[i].results, rangeID, *begin, end, interfs[i].second);
results->storageData.push_back(db->getStorageData(interfs[i].first));
}
results->notAtLatest.set(interfs.size());
refresh.send(Void());

for (int i = 0; i < interfs.size(); i++) {
fetchers[i] = singleChangeFeedStream(interfs[i].first,
streams[i].results,
rangeID,
*begin,
end,
interfs[i].second,
results,
results->storageData[i]);
}
state int interfNum = 0;
while (interfNum < interfs.size()) {
@ -6750,7 +6962,8 @@ ACTOR Future<Void> mergeChangeFeedStream(std::vector<std::pair<StorageServerInte
if (nextStream.next.version != checkVersion) {
if (nextOut.size()) {
*begin = checkVersion + 1;
results.send(nextOut);
results->mutations.send(nextOut);
results->lastReturnedVersion.set(nextOut.back().version);
nextOut = Standalone<VectorRef<MutationsAndVersionRef>>();
}
checkVersion = nextStream.next.version;
@ -6775,7 +6988,8 @@ ACTOR Future<Void> mergeChangeFeedStream(std::vector<std::pair<StorageServerInte
}
}
if (nextOut.size()) {
results.send(nextOut);
results->mutations.send(nextOut);
results->lastReturnedVersion.set(nextOut.back().version);
}
throw end_of_stream();
}
@ -6814,7 +7028,7 @@ ACTOR Future<KeyRange> getChangeFeedRange(Reference<DatabaseContext> db, Databas
}

ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> results,
Reference<ChangeFeedData> results,
Key rangeID,
Version begin,
Version end,
@ -6887,32 +7101,57 @@ ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
interfs.push_back(std::make_pair(locations[i].second->getInterface(chosenLocations[i]),
locations[i].first & range));
}
wait(mergeChangeFeedStream(interfs, results, rangeID, &begin, end) || cx->connectionFileChanged());
wait(mergeChangeFeedStream(db, interfs, results, rangeID, &begin, end) || cx->connectionFileChanged());
} else {
state ChangeFeedStreamRequest req;
req.rangeID = rangeID;
req.begin = begin;
req.end = end;
req.range = range;

StorageServerInterface interf = locations[0].second->getInterface(chosenLocations[0]);
state ReplyPromiseStream<ChangeFeedStreamReply> replyStream =
locations[0]
.second->get(chosenLocations[0], &StorageServerInterface::changeFeedStream)
.getReplyStream(req);

interf.changeFeedStream.getReplyStream(req);
for (auto& it : results->storageData) {
if (it->debugGetReferenceCount() == 2) {
db->changeFeedUpdaters.erase(it->id);
}
}
results->storageData.clear();
results->storageData.push_back(db->getStorageData(interf));
Promise<Void> refresh = results->refresh;
results->refresh = Promise<Void>();
results->notAtLatest.set(1);
refresh.send(Void());
state bool atLatest = false;
loop {
wait(results.onEmpty());
wait(results->mutations.onEmpty());
choose {
when(wait(cx->connectionFileChanged())) { break; }
when(ChangeFeedStreamReply rep = waitNext(replyStream.getFuture())) {
begin = rep.mutations.back().version + 1;
results.send(Standalone<VectorRef<MutationsAndVersionRef>>(rep.mutations, rep.arena));
results->mutations.send(
Standalone<VectorRef<MutationsAndVersionRef>>(rep.mutations, rep.arena));
results->lastReturnedVersion.set(rep.mutations.back().version);
if (!atLatest && rep.atLatestVersion) {
atLatest = true;
results->notAtLatest.set(0);
}
if (rep.minStreamVersion > results->storageData[0]->version.get()) {
results->storageData[0]->version.set(rep.minStreamVersion);
}
}
}
}
}
} catch (Error& e) {
if (e.code() == error_code_actor_cancelled) {
for (auto& it : results->storageData) {
if (it->debugGetReferenceCount() == 2) {
db->changeFeedUpdaters.erase(it->id);
}
}
results->storageData.clear();
results->refresh.sendError(change_feed_cancelled());
throw;
}
if (e.code() == error_code_wrong_shard_server || e.code() == error_code_all_alternatives_failed ||
@ -6922,19 +7161,25 @@ ACTOR Future<Void> getChangeFeedStreamActor(Reference<DatabaseContext> db,
cx->invalidateCache(keys);
wait(delay(CLIENT_KNOBS->WRONG_SHARD_SERVER_DELAY));
} else {
results.sendError(e);
results->mutations.sendError(e);
results->refresh.sendError(change_feed_cancelled());
for (auto& it : results->storageData) {
if (it->debugGetReferenceCount() == 2) {
db->changeFeedUpdaters.erase(it->id);
}
}
results->storageData.clear();
return Void();
}
}
}
}

Future<Void> DatabaseContext::getChangeFeedStream(
const PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>& results,
Key rangeID,
Version begin,
Version end,
KeyRange range) {
Future<Void> DatabaseContext::getChangeFeedStream(Reference<ChangeFeedData> results,
Key rangeID,
Version begin,
Version end,
KeyRange range) {
return getChangeFeedStreamActor(Reference<DatabaseContext>::addRef(this), results, rangeID, begin, end, range);
}
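For orientation, here is a minimal consumer sketch for the new `ChangeFeedData`-based API above. The function and its processing loop are hypothetical (not part of this commit); it assumes only the `mutations` promise stream and the `end_of_stream` convention visible in this diff.

```cpp
// Hypothetical consumer: drains mutation batches from a change feed until the
// stream ends or a non-end_of_stream error is thrown.
ACTOR Future<Void> consumeChangeFeed(Database db, Key rangeID, Version begin, Version end, KeyRange range) {
	state Reference<ChangeFeedData> feed = makeReference<ChangeFeedData>();
	state Future<Void> stream = db->getChangeFeedStream(feed, rangeID, begin, end, range);
	try {
		loop {
			// Each batch carries mutations grouped by commit version.
			Standalone<VectorRef<MutationsAndVersionRef>> batch = waitNext(feed->mutations.getFuture());
			for (const auto& mv : batch) {
				// Process mv.mutations, all of which committed at mv.version.
			}
		}
	} catch (Error& e) {
		if (e.code() != error_code_end_of_stream) {
			throw;
		}
	}
	return Void();
}
```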

@ -289,6 +289,23 @@ public:
reverse);
}

[[nodiscard]] Future<RangeResult> getRangeAndFlatMap(const KeySelector& begin,
const KeySelector& end,
const Key& mapper,
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False);

private:
template <class GetKeyValuesFamilyRequest, class GetKeyValuesFamilyReply>
Future<RangeResult> getRangeInternal(const KeySelector& begin,
const KeySelector& end,
const Key& mapper,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse);

public:
// A method for streaming data from the storage server that is more efficient than getRange when reading large
// amounts of data
[[nodiscard]] Future<Void> getRangeStream(const PromiseStream<Standalone<RangeResultRef>>& results,
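The `getRangeStream` declaration above is cut off at the hunk boundary, but the comment describes its intent. As a hedged usage sketch (the helper name, and the assumption of a `KeyRange`-based overload, are mine): batches arrive on the promise stream and the stream terminates with `end_of_stream`.

```cpp
// Hypothetical helper: count rows in a range via the streaming read path.
ACTOR Future<int64_t> countRangeStreaming(Transaction* tr, KeyRange keys) {
	state PromiseStream<Standalone<RangeResultRef>> results;
	state Future<Void> stream = tr->getRangeStream(results, keys, GetRangeLimits());
	state int64_t rows = 0;
	try {
		loop {
			Standalone<RangeResultRef> batch = waitNext(results.getFuture());
			rows += batch.size();
		}
	} catch (Error& e) {
		if (e.code() != error_code_end_of_stream) {
			throw;
		}
	}
	return rows;
}
```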

@ -50,6 +50,14 @@ public:
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False) override;
Future<RangeResult> getRangeAndFlatMap(KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False) override {
throw client_invalid_operation();
}
void set(KeyRef const& key, ValueRef const& value) override;
void clear(KeyRangeRef const&) override { throw client_invalid_operation(); }
void clear(KeyRef const&) override;
@ -74,6 +74,16 @@ public:
using Result = RangeResult;
};

template <bool reverse>
struct GetRangeAndFlatMapReq {
GetRangeAndFlatMapReq(KeySelector begin, KeySelector end, Key mapper, GetRangeLimits limits)
: begin(begin), end(end), mapper(mapper), limits(limits) {}
KeySelector begin, end;
Key mapper;
GetRangeLimits limits;
using Result = RangeResult;
};

// read() performs a read (get, getKey, getRange, etc.) in the context of the given transaction. Snapshot or RYW
// reads are distinguished by the type Iter being SnapshotCache::iterator or RYWIterator. Fills in the snapshot cache
// as a side effect but does not affect conflict ranges. Some (indicated) overloads of read are required to update
@ -203,6 +213,36 @@ public:
return v;
}

ACTOR template <bool backwards>
static Future<RangeResult> readThroughAndFlatMap(ReadYourWritesTransaction* ryw,
GetRangeAndFlatMapReq<backwards> read,
Snapshot snapshot) {
if (backwards && read.end.offset > 1) {
// FIXME: Optimistically assume that this will not run into the system keys, and only reissue if the result
// actually does.
Key key = wait(ryw->tr.getKey(read.end, snapshot));
if (key > ryw->getMaxReadKey())
read.end = firstGreaterOrEqual(ryw->getMaxReadKey());
else
read.end = KeySelector(firstGreaterOrEqual(key), key.arena());
}

RangeResult v = wait(ryw->tr.getRangeAndFlatMap(
read.begin, read.end, read.mapper, read.limits, snapshot, backwards ? Reverse::True : Reverse::False));
KeyRef maxKey = ryw->getMaxReadKey();
if (v.size() > 0) {
if (!backwards && v[v.size() - 1].key >= maxKey) {
state RangeResult _v = v;
int i = _v.size() - 2;
for (; i >= 0 && _v[i].key >= maxKey; --i) {
}
return RangeResult(RangeResultRef(VectorRef<KeyValueRef>(&_v[0], i + 1), false), _v.arena());
}
}

return v;
}

// addConflictRange(ryw,read,result) is called after a serializable read and is responsible for adding the relevant
// conflict range

@ -309,6 +349,15 @@ public:
}
}
ACTOR template <class Req>
static Future<typename Req::Result> readWithConflictRangeThroughAndFlatMap(ReadYourWritesTransaction* ryw,
Req req,
Snapshot snapshot) {
choose {
when(typename Req::Result result = wait(readThroughAndFlatMap(ryw, req, snapshot))) { return result; }
when(wait(ryw->resetPromise.getFuture())) { throw internal_error(); }
}
}
ACTOR template <class Req>
static Future<typename Req::Result> readWithConflictRangeSnapshot(ReadYourWritesTransaction* ryw, Req req) {
state SnapshotCache::iterator it(&ryw->cache, &ryw->writes);
choose {
@ -344,6 +393,19 @@ public:
return readWithConflictRangeRYW(ryw, req, snapshot);
}

template <class Req>
static inline Future<typename Req::Result> readWithConflictRangeAndFlatMap(ReadYourWritesTransaction* ryw,
Req const& req,
Snapshot snapshot) {
// For now, getRangeAndFlatMap is only supported if the transaction uses snapshot isolation AND
// read-your-writes is disabled.
if (snapshot && ryw->options.readYourWritesDisabled) {
return readWithConflictRangeThroughAndFlatMap(ryw, req, snapshot);
}
TEST(true); // readWithConflictRangeRYW not supported for getRangeAndFlatMap
throw client_invalid_operation();
}

template <class Iter>
static void resolveKeySelectorFromCache(KeySelector& key,
Iter& it,
@ -1509,6 +1571,65 @@ Future<RangeResult> ReadYourWritesTransaction::getRange(const KeySelector& begin
return getRange(begin, end, GetRangeLimits(limit), snapshot, reverse);
}

Future<RangeResult> ReadYourWritesTransaction::getRangeAndFlatMap(KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Snapshot snapshot,
Reverse reverse) {
if (getDatabase()->apiVersionAtLeast(630)) {
if (specialKeys.contains(begin.getKey()) && specialKeys.begin <= end.getKey() &&
end.getKey() <= specialKeys.end) {
TEST(true); // Special key space get range (FlatMap)
throw client_invalid_operation(); // Special keys are not supported.
}
} else {
if (begin.getKey() == LiteralStringRef("\xff\xff/worker_interfaces")) {
throw client_invalid_operation(); // Special keys are not supported.
}
}

if (checkUsedDuringCommit()) {
return used_during_commit();
}

if (resetPromise.isSet())
return resetPromise.getFuture().getError();

KeyRef maxKey = getMaxReadKey();
if (begin.getKey() > maxKey || end.getKey() > maxKey)
return key_outside_legal_range();

// This optimization prevents nullptr operations from being added to the conflict range
if (limits.isReached()) {
TEST(true); // RYW range read limit 0 (FlatMap)
return RangeResult();
}

if (!limits.isValid())
return range_limits_invalid();

if (begin.orEqual)
begin.removeOrEqual(begin.arena());

if (end.orEqual)
end.removeOrEqual(end.arena());

if (begin.offset >= end.offset && begin.getKey() >= end.getKey()) {
TEST(true); // RYW range inverted (FlatMap)
return RangeResult();
}

Future<RangeResult> result =
reverse ? RYWImpl::readWithConflictRangeAndFlatMap(
this, RYWImpl::GetRangeAndFlatMapReq<true>(begin, end, mapper, limits), snapshot)
: RYWImpl::readWithConflictRangeAndFlatMap(
this, RYWImpl::GetRangeAndFlatMapReq<false>(begin, end, mapper, limits), snapshot);

reading.add(success(result));
return result;
}
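Putting the pieces together, a hedged usage sketch (the function and its keys are hypothetical): because `readWithConflictRangeAndFlatMap` only accepts snapshot reads on a transaction with read-your-writes disabled, a caller has to set both before issuing the read.

```cpp
// Hypothetical caller illustrating the current constraints on getRangeAndFlatMap:
// a snapshot read on a transaction with read-your-writes disabled.
ACTOR Future<Void> flatMapReadExample(Database db, Key beginKey, Key endKey, Key mapper) {
	state ReadYourWritesTransaction tr(db);
	tr.setOption(FDBTransactionOptions::READ_YOUR_WRITES_DISABLE);
	RangeResult r = wait(tr.getRangeAndFlatMap(firstGreaterOrEqual(beginKey),
	                                           firstGreaterOrEqual(endKey),
	                                           mapper,
	                                           GetRangeLimits(100),
	                                           Snapshot::True));
	// r holds the flat-mapped results; r.more indicates a continuation is needed.
	return Void();
}
```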

Future<Standalone<VectorRef<const char*>>> ReadYourWritesTransaction::getAddressesForKey(const Key& key) {
if (checkUsedDuringCommit()) {
return used_during_commit();

@ -104,6 +104,12 @@ public:
snapshot,
reverse);
}
Future<RangeResult> getRangeAndFlatMap(KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False) override;

[[nodiscard]] Future<Standalone<VectorRef<const char*>>> getAddressesForKey(const Key& key) override;
Future<Standalone<VectorRef<KeyRef>>> getRangeSplitPoints(const KeyRange& range, int64_t chunkSize) override;
@ -728,7 +728,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"ssd",
"ssd-1",
"ssd-2",
"ssd-redwood-experimental",
"ssd-redwood-1-experimental",
"ssd-rocksdb-experimental",
"memory",
"memory-1",
@ -741,7 +741,7 @@ const KeyRef JSONSchemas::statusSchema = LiteralStringRef(R"statusSchema(
"ssd",
"ssd-1",
"ssd-2",
"ssd-redwood-experimental",
"ssd-redwood-1-experimental",
"ssd-rocksdb-experimental",
"memory",
"memory-1",
@ -349,6 +349,11 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( ROCKSDB_READ_VALUE_TIMEOUT, 5.0 );
init( ROCKSDB_READ_VALUE_PREFIX_TIMEOUT, 5.0 );
init( ROCKSDB_READ_RANGE_TIMEOUT, 5.0 );
init( ROCKSDB_READ_QUEUE_WAIT, 1.0 );
init( ROCKSDB_READ_QUEUE_HARD_MAX, 1000 );
init( ROCKSDB_READ_QUEUE_SOFT_MAX, 500 );
init( ROCKSDB_FETCH_QUEUE_HARD_MAX, 100 );
init( ROCKSDB_FETCH_QUEUE_SOFT_MAX, 50 );

// Leader election
bool longLeaderElection = randomize && BUGGIFY;
@ -644,6 +649,8 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( MAX_STORAGE_COMMIT_TIME, 120.0 ); //The max fsync stall time on the storage server and tlog before marking a disk as failed
init( RANGESTREAM_LIMIT_BYTES, 2e6 ); if( randomize && BUGGIFY ) RANGESTREAM_LIMIT_BYTES = 1;
init( ENABLE_CLEAR_RANGE_EAGER_READS, true );
init( QUICK_GET_VALUE_FALLBACK, true );
init( QUICK_GET_KEY_VALUES_FALLBACK, true );

//Wait Failure
init( MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS, 250 ); if( randomize && BUGGIFY ) MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS = 2;
@ -764,6 +771,7 @@ void ServerKnobs::initialize(Randomize randomize, ClientKnobs* clientKnobs, IsSi
init( REDWOOD_LAZY_CLEAR_MAX_PAGES, 1e6 );
init( REDWOOD_REMAP_CLEANUP_WINDOW, 50 );
init( REDWOOD_REMAP_CLEANUP_LAG, 0.1 );
init( REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES, 20000 ); if( randomize && BUGGIFY ) { REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES = deterministicRandom()->randomInt(200, 1000); }
init( REDWOOD_METRICS_INTERVAL, 5.0 );
init( REDWOOD_HISTOGRAM_INTERVAL, 30.0 );

@ -281,6 +281,11 @@ public:
double ROCKSDB_READ_VALUE_TIMEOUT;
double ROCKSDB_READ_VALUE_PREFIX_TIMEOUT;
double ROCKSDB_READ_RANGE_TIMEOUT;
double ROCKSDB_READ_QUEUE_WAIT;
int ROCKSDB_READ_QUEUE_SOFT_MAX;
int ROCKSDB_READ_QUEUE_HARD_MAX;
int ROCKSDB_FETCH_QUEUE_SOFT_MAX;
int ROCKSDB_FETCH_QUEUE_HARD_MAX;

// Leader election
int MAX_NOTIFICATIONS;
@ -585,6 +590,8 @@ public:
double MAX_STORAGE_COMMIT_TIME;
int64_t RANGESTREAM_LIMIT_BYTES;
bool ENABLE_CLEAR_RANGE_EAGER_READS;
bool QUICK_GET_VALUE_FALLBACK;
bool QUICK_GET_KEY_VALUES_FALLBACK;

// Wait Failure
int MAX_OUTSTANDING_WAIT_FAILURE_REQUESTS;
@ -717,6 +724,7 @@ public:
int64_t REDWOOD_REMAP_CLEANUP_WINDOW; // Remap remover lag interval in which to coalesce page writes
double REDWOOD_REMAP_CLEANUP_LAG; // Maximum allowed remap remover lag behind the cleanup window as a multiple of
// the window size
int REDWOOD_PAGEFILE_GROWTH_SIZE_PAGES; // Number of pages to grow page file by
double REDWOOD_METRICS_INTERVAL;
double REDWOOD_HISTOGRAM_INTERVAL;

@ -59,6 +59,14 @@ public:
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False) override;
Future<RangeResult> getRangeAndFlatMap(KeySelector begin,
KeySelector end,
Key mapper,
GetRangeLimits limits,
Snapshot = Snapshot::False,
Reverse = Reverse::False) override {
throw client_invalid_operation();
}
Future<Void> commit() override;
Version getCommittedVersion() const override;
void setOption(FDBTransactionOptions::Option option, Optional<StringRef> value = Optional<StringRef>()) override;

@ -1961,7 +1961,7 @@ void parse(StringRef& val, WaitState& w) {
}

void parse(StringRef& val, time_t& t) {
struct tm tm = { 0 };
struct tm tm;
#ifdef _WIN32
std::istringstream s(val.toString());
s.imbue(std::locale(setlocale(LC_TIME, nullptr)));

@ -152,6 +152,45 @@ void TSS_traceMismatch(TraceEvent& event,
.detail("TSSReply", tssResultsString);
}

// range reads and flat map
template <>
bool TSS_doCompare(const GetKeyValuesAndFlatMapReply& src, const GetKeyValuesAndFlatMapReply& tss) {
return src.more == tss.more && src.data == tss.data;
}

template <>
const char* TSS_mismatchTraceName(const GetKeyValuesAndFlatMapRequest& req) {
return "TSSMismatchGetKeyValuesAndFlatMap";
}

template <>
void TSS_traceMismatch(TraceEvent& event,
const GetKeyValuesAndFlatMapRequest& req,
const GetKeyValuesAndFlatMapReply& src,
const GetKeyValuesAndFlatMapReply& tss) {
std::string ssResultsString = format("(%d)%s:\n", src.data.size(), src.more ? "+" : "");
for (auto& it : src.data) {
ssResultsString += "\n" + it.key.printable() + "=" + traceChecksumValue(it.value);
}

std::string tssResultsString = format("(%d)%s:\n", tss.data.size(), tss.more ? "+" : "");
for (auto& it : tss.data) {
tssResultsString += "\n" + it.key.printable() + "=" + traceChecksumValue(it.value);
}
event
.detail(
"Begin",
format("%s%s:%d", req.begin.orEqual ? "=" : "", req.begin.getKey().printable().c_str(), req.begin.offset))
.detail("End",
format("%s%s:%d", req.end.orEqual ? "=" : "", req.end.getKey().printable().c_str(), req.end.offset))
.detail("Version", req.version)
.detail("Limit", req.limit)
.detail("LimitBytes", req.limitBytes)
.setMaxFieldLength(FLOW_KNOBS->TSS_LARGE_TRACE_SIZE * 4 / 10)
.detail("SSReply", ssResultsString)
.detail("TSSReply", tssResultsString);
}

// streaming range reads
template <>
bool TSS_doCompare(const GetKeyValuesStreamReply& src, const GetKeyValuesStreamReply& tss) {
@ -356,6 +395,12 @@ void TSSMetrics::recordLatency(const GetKeyValuesRequest& req, double ssLatency,
TSSgetKeyValuesLatency.addSample(tssLatency);
}

template <>
void TSSMetrics::recordLatency(const GetKeyValuesAndFlatMapRequest& req, double ssLatency, double tssLatency) {
SSgetKeyValuesAndFlatMapLatency.addSample(ssLatency);
TSSgetKeyValuesAndFlatMapLatency.addSample(tssLatency);
}

template <>
void TSSMetrics::recordLatency(const WatchValueRequest& req, double ssLatency, double tssLatency) {}

@ -22,6 +22,7 @@
#define FDBCLIENT_STORAGESERVERINTERFACE_H
#pragma once

#include <ostream>
#include "fdbclient/FDBTypes.h"
#include "fdbrpc/Locality.h"
#include "fdbrpc/QueueModel.h"
@ -65,6 +66,7 @@ struct StorageServerInterface {
// Throws a wrong_shard_server if the keys in the request or result depend on data outside this server OR if a large
// selector offset prevents all data from being read in one range read
RequestStream<struct GetKeyValuesRequest> getKeyValues;
RequestStream<struct GetKeyValuesAndFlatMapRequest> getKeyValuesAndFlatMap;

RequestStream<struct GetShardStateRequest> getShardState;
RequestStream<struct WaitMetricsRequest> waitMetrics;
@ -81,6 +83,7 @@ struct StorageServerInterface {
RequestStream<struct ChangeFeedStreamRequest> changeFeedStream;
RequestStream<struct OverlappingChangeFeedsRequest> overlappingChangeFeeds;
RequestStream<struct ChangeFeedPopRequest> changeFeedPop;
RequestStream<struct ChangeFeedVersionUpdateRequest> changeFeedVersionUpdate;

explicit StorageServerInterface(UID uid) : uniqueID(uid) {}
StorageServerInterface() : uniqueID(deterministicRandom()->randomUniqueID()) {}
@ -123,12 +126,16 @@ struct StorageServerInterface {
RequestStream<struct SplitRangeRequest>(getValue.getEndpoint().getAdjustedEndpoint(12));
getKeyValuesStream =
RequestStream<struct GetKeyValuesStreamRequest>(getValue.getEndpoint().getAdjustedEndpoint(13));
getKeyValuesAndFlatMap =
RequestStream<struct GetKeyValuesAndFlatMapRequest>(getValue.getEndpoint().getAdjustedEndpoint(14));
changeFeedStream =
RequestStream<struct ChangeFeedStreamRequest>(getValue.getEndpoint().getAdjustedEndpoint(14));
RequestStream<struct ChangeFeedStreamRequest>(getValue.getEndpoint().getAdjustedEndpoint(15));
overlappingChangeFeeds =
RequestStream<struct OverlappingChangeFeedsRequest>(getValue.getEndpoint().getAdjustedEndpoint(15));
RequestStream<struct OverlappingChangeFeedsRequest>(getValue.getEndpoint().getAdjustedEndpoint(16));
changeFeedPop =
RequestStream<struct ChangeFeedPopRequest>(getValue.getEndpoint().getAdjustedEndpoint(16));
RequestStream<struct ChangeFeedPopRequest>(getValue.getEndpoint().getAdjustedEndpoint(17));
changeFeedVersionUpdate = RequestStream<struct ChangeFeedVersionUpdateRequest>(
getValue.getEndpoint().getAdjustedEndpoint(18));
}
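One detail worth calling out in the hunk above: the adjusted endpoint indices are positional, so inserting `getKeyValuesAndFlatMap` at index 14 shifts the change feed streams to 15 through 17, with `changeFeedVersionUpdate` taking 18, and the `streams.push_back` order in the next hunk has to stay in step with these indices, since sender and receiver both derive endpoints from the same base.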
} else {
ASSERT(Ar::isDeserializing);
@ -171,9 +178,11 @@ struct StorageServerInterface {
streams.push_back(getReadHotRanges.getReceiver());
streams.push_back(getRangeSplitPoints.getReceiver());
streams.push_back(getKeyValuesStream.getReceiver(TaskPriority::LoadBalancedEndpoint));
streams.push_back(getKeyValuesAndFlatMap.getReceiver(TaskPriority::LoadBalancedEndpoint));
streams.push_back(changeFeedStream.getReceiver());
streams.push_back(overlappingChangeFeeds.getReceiver());
streams.push_back(changeFeedPop.getReceiver());
streams.push_back(changeFeedVersionUpdate.getReceiver());
FlowTransport::transport().addEndpoints(streams);
}
};
@ -296,6 +305,9 @@ struct GetKeyValuesRequest : TimedRequest {
SpanID spanContext;
Arena arena;
KeySelectorRef begin, end;
// This is a dummy field that has never been used.
// TODO: Get rid of this by constexpr or other template magic in getRange
KeyRef mapper = KeyRef();
Version version; // or latestVersion
int limit, limitBytes;
bool isFetchKeys;
@ -310,6 +322,43 @@ struct GetKeyValuesRequest : TimedRequest {
}
};

struct GetKeyValuesAndFlatMapReply : public LoadBalancedReply {
constexpr static FileIdentifier file_identifier = 1783067;
Arena arena;
VectorRef<KeyValueRef, VecSerStrategy::String> data;
Version version; // useful when latestVersion was requested
bool more;
bool cached = false;

GetKeyValuesAndFlatMapReply() : version(invalidVersion), more(false), cached(false) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, LoadBalancedReply::penalty, LoadBalancedReply::error, data, version, more, cached, arena);
}
};

struct GetKeyValuesAndFlatMapRequest : TimedRequest {
constexpr static FileIdentifier file_identifier = 6795747;
SpanID spanContext;
Arena arena;
KeySelectorRef begin, end;
KeyRef mapper;
Version version; // or latestVersion
int limit, limitBytes;
bool isFetchKeys;
Optional<TagSet> tags;
Optional<UID> debugID;
ReplyPromise<GetKeyValuesAndFlatMapReply> reply;

GetKeyValuesAndFlatMapRequest() : isFetchKeys(false) {}
template <class Ar>
void serialize(Ar& ar) {
serializer(
ar, begin, end, mapper, version, limit, limitBytes, isFetchKeys, tags, debugID, reply, spanContext, arena);
}
};
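As a rough illustration of how the new request pairs with the interface stream declared earlier (the helper and its limit values are hypothetical, and real clients go through NativeAPI's load-balanced read path rather than hitting the stream directly):

```cpp
// Hypothetical direct read against a single storage server, for illustration only.
// The KeySelectorRef/KeyRef fields alias the caller's memory, which stays alive
// for the duration of this actor.
ACTOR Future<GetKeyValuesAndFlatMapReply> flatMapReadDirect(StorageServerInterface ssi,
                                                            KeySelector begin,
                                                            KeySelector end,
                                                            Key mapper,
                                                            Version version) {
	state GetKeyValuesAndFlatMapRequest req;
	req.begin = begin;
	req.end = end;
	req.mapper = mapper;
	req.version = version;
	req.limit = 100; // illustrative limits
	req.limitBytes = 1000000;
	GetKeyValuesAndFlatMapReply rep = wait(ssi.getKeyValuesAndFlatMap.getReply(req));
	return rep;
}
```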

struct GetKeyValuesStreamReply : public ReplyPromiseStreamReply {
constexpr static FileIdentifier file_identifier = 1783066;
Arena arena;
@ -639,6 +688,8 @@ struct ChangeFeedStreamReply : public ReplyPromiseStreamReply {
constexpr static FileIdentifier file_identifier = 1783066;
Arena arena;
VectorRef<MutationsAndVersionRef> mutations;
bool atLatestVersion = false;
Version minStreamVersion = invalidVersion;

ChangeFeedStreamReply() {}

@ -646,7 +697,13 @@ struct ChangeFeedStreamReply : public ReplyPromiseStreamReply {

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, ReplyPromiseStreamReply::acknowledgeToken, ReplyPromiseStreamReply::sequence, mutations, arena);
serializer(ar,
ReplyPromiseStreamReply::acknowledgeToken,
ReplyPromiseStreamReply::sequence,
mutations,
atLatestVersion,
minStreamVersion,
arena);
}
};

@ -734,6 +791,33 @@ struct OverlappingChangeFeedsRequest {
}
};

struct ChangeFeedVersionUpdateReply {
constexpr static FileIdentifier file_identifier = 11815134;
Version version = 0;

ChangeFeedVersionUpdateReply() {}
explicit ChangeFeedVersionUpdateReply(Version version) : version(version) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, version);
}
};

struct ChangeFeedVersionUpdateRequest {
constexpr static FileIdentifier file_identifier = 6795746;
Version minVersion;
ReplyPromise<ChangeFeedVersionUpdateReply> reply;

ChangeFeedVersionUpdateRequest() {}
explicit ChangeFeedVersionUpdateRequest(Version minVersion) : minVersion(minVersion) {}

template <class Ar>
void serialize(Ar& ar) {
serializer(ar, minVersion, reply);
}
};

struct GetStorageMetricsReply {
constexpr static FileIdentifier file_identifier = 15491478;
StorageMetrics load;

@ -213,6 +213,8 @@ const KeyRangeRef writeConflictRangeKeysRange =
KeyRangeRef(LiteralStringRef("\xff\xff/transaction/write_conflict_range/"),
LiteralStringRef("\xff\xff/transaction/write_conflict_range/\xff\xff"));

const KeyRef clusterIdKey = LiteralStringRef("\xff/clusterId");

// "\xff/cacheServer/[[UID]] := StorageServerInterface"
const KeyRangeRef storageCacheServerKeys(LiteralStringRef("\xff/cacheServer/"), LiteralStringRef("\xff/cacheServer0"));
const KeyRef storageCacheServersPrefix = storageCacheServerKeys.begin;
@ -1033,6 +1035,8 @@ const KeyRangeRef clientLibBinaryKeys(LiteralStringRef("\xff\x02/clientlib/bin/"
LiteralStringRef("\xff\x02/clientlib/bin0"));
const KeyRef clientLibBinaryPrefix = clientLibBinaryKeys.begin;

const KeyRef clientLibChangeCounterKey = "\xff\x02/clientlib/changeCounter"_sr;

const KeyRangeRef testOnlyTxnStateStorePrefixRange(LiteralStringRef("\xff/TESTONLYtxnStateStore/"),
LiteralStringRef("\xff/TESTONLYtxnStateStore0"));

@ -67,6 +67,8 @@ void decodeKeyServersValue(std::map<Tag, UID> const& tag_uid,
std::vector<UID>& src,
std::vector<UID>& dest);

extern const KeyRef clusterIdKey;

// "\xff/storageCacheServer/[[UID]] := StorageServerInterface"
// This will be added by the cache server on initialization and removed by DD
// TODO[mpilman]: We will need a way to map uint16_t ids to UIDs in a future
@ -488,6 +490,8 @@ extern const KeyRef clientLibMetadataPrefix;
extern const KeyRangeRef clientLibBinaryKeys;
extern const KeyRef clientLibBinaryPrefix;

extern const KeyRef clientLibChangeCounterKey;

// All mutations done to this range are blindly copied into txnStateStore.
// Used to create artificially large txnStateStore instances in testing.
extern const KeyRangeRef testOnlyTxnStateStorePrefixRange;

@ -257,6 +257,23 @@ ThreadFuture<RangeResult> ThreadSafeTransaction::getRange(const KeySelectorRef&
});
}

ThreadFuture<RangeResult> ThreadSafeTransaction::getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) {
KeySelector b = begin;
KeySelector e = end;
Key h = mapper;

ISingleThreadTransaction* tr = this->tr;
return onMainThread([tr, b, e, h, limits, snapshot, reverse]() -> Future<RangeResult> {
tr->checkDeferredError();
return tr->getRangeAndFlatMap(b, e, h, limits, Snapshot{ snapshot }, Reverse{ reverse });
});
}
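A note on the pattern above, which mirrors the other methods in this file: `begin`, `end`, and `mapper` are copied into owning `KeySelector`/`Key` values before the `onMainThread` hop, because the lambda may run on the network thread after the caller's references are gone, so it has to capture types that own their memory.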

ThreadFuture<Standalone<VectorRef<const char*>>> ThreadSafeTransaction::getAddressesForKey(const KeyRef& key) {
Key k = key;

@ -490,5 +507,3 @@ void ThreadSafeApi::addNetworkThreadCompletionHook(void (*hook)(void*), void* ho
// upon return that the hook is set.
threadCompletionHooks.emplace_back(hook, hookParameter);
}

IClientApi* ThreadSafeApi::api = new ThreadSafeApi();

@ -106,6 +106,12 @@ public:
bool reverse = false) override {
return getRange(firstGreaterOrEqual(keys.begin), firstGreaterOrEqual(keys.end), limits, snapshot, reverse);
}
ThreadFuture<RangeResult> getRangeAndFlatMap(const KeySelectorRef& begin,
const KeySelectorRef& end,
const StringRef& mapper,
GetRangeLimits limits,
bool snapshot,
bool reverse) override;
ThreadFuture<Standalone<VectorRef<const char*>>> getAddressesForKey(const KeyRef& key) override;
ThreadFuture<Standalone<StringRef>> getVersionstamp() override;
ThreadFuture<int64_t> getEstimatedRangeSizeBytes(const KeyRangeRef& keys) override;
@ -166,9 +172,8 @@ public:

void addNetworkThreadCompletionHook(void (*hook)(void*), void* hookParameter) override;

static IClientApi* api;

private:
friend IClientApi* getLocalClientAPI();
ThreadSafeApi();

int apiVersion;

@ -38,7 +38,9 @@
// PTree also supports efficient finger searches.
namespace PTreeImpl {

#ifdef _MSC_VER
#pragma warning(disable : 4800)
#endif

template <class T>
struct PTree : public ReferenceCounted<PTree<T>>, FastAllocated<PTree<T>>, NonCopyable {

@ -32,8 +32,6 @@ inline char to_hex_char(unsigned int c) {

template <class String_type>
String_type non_printable_to_string(unsigned int c) {
typedef typename String_type::value_type Char_type;

String_type result(6, '\\');

result[1] = 'u';

@ -58,7 +58,7 @@ description is not currently required but encouraged.
paramType="String" paramDescription="The identifier that will be part of all trace file names"
description="Once provided, this string will be used to replace the port/PID in the log file names." />
<Option name="trace_partial_file_suffix" code="39"
paramType="String" paramDesciption="Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension."
paramType="String" paramDescription="Append this suffix to partially written log files. When a log file is complete, it is renamed to remove the suffix. No separator is added between the file and the suffix. If you want to add a file extension, you should include the separator - e.g. '.tmp' instead of 'tmp' to add the 'tmp' extension."
description="" />
<Option name="knob" code="40"
paramType="String" paramDescription="knob_name=knob_value"
@ -191,10 +191,6 @@ description is not currently required but encouraged.
<Option name="transaction_include_port_in_address" code="505"
description="Addresses returned by get_addresses_for_key include the port when enabled. As of api version 630, this option is enabled by default and setting this has no effect."
defaultFor="23"/>
<Option name="distributed_transaction_trace_enable" code="600"
description="Enable tracing for all transactions. This is the default." />
<Option name="distributed_transaction_trace_disable" code="601"
description="Disable tracing for all transactions." />
<Option name="transaction_bypass_unreadable" code="700"
description="Allows ``get`` operations to read from sections of keyspace that have become unreadable because of versionstamp operations. This sets the ``bypass_unreadable`` option of each transaction created by this database. See the transaction option description for more information."
defaultFor="1100"/>
37 fdbkubernetesmonitor/.testdata/default_config.json Normal file
@ -0,0 +1,37 @@
{
"version": "6.3.15",
"serverCount": 1,
"arguments": [
{"value": "--cluster_file"},
{"value": ".testdata/fdb.cluster"},
{"value": "--public_address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_PUBLIC_IP"},
{"value": ":"},
{"type": "ProcessNumber", "offset": 4499, "multiplier": 2}
]},
{"value": "--listen_address"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_POD_IP"},
{"value": ":"},
{"type": "ProcessNumber", "offset": 4499, "multiplier": 2}
]},
{"value": "--datadir"},
{"type": "Concatenate", "values": [
{"value": ".testdata/data/"},
{"type": "ProcessNumber"}
]},
{"value": "--class"},
{"value": "storage"},
{"value": "--locality_zoneid"},
{"type": "Environment", "source": "FDB_ZONE_ID"},
{"value": "--locality_instance-id"},
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "--locality_process-id"},
{"type": "Concatenate", "values": [
{"type": "Environment", "source": "FDB_INSTANCE_ID"},
{"value": "-"},
{"type": "ProcessNumber"}
]}
]
}
1 fdbkubernetesmonitor/.testdata/fdb.cluster Normal file
@ -0,0 +1 @@
test:test@127.0.0.1:4501
7 fdbkubernetesmonitor/.testdata/test_env.sh Normal file
@ -0,0 +1,7 @@
export FDB_PUBLIC_IP=127.0.0.1
export FDB_POD_IP=127.0.0.1
export FDB_ZONE_ID=localhost
export FDB_MACHINE_ID=localhost
export FDB_INSTANCE_ID=storage-1
export KUBERNETES_SERVICE_HOST=kubernetes.docker.internal
export KUBERNETES_SERVICE_PORT=6443
37 fdbkubernetesmonitor/README.md Normal file
@ -0,0 +1,37 @@
This package provides a launcher program for running FoundationDB in Kubernetes.

To test this, run the following commands from the root of the FoundationDB
repository:

```bash
docker build -t foundationdb/foundationdb-kubernetes:6.3.13-local --build-arg FDB_VERSION=6.3.13 --build-arg FDB_LIBRARY_VERSIONS="6.3.13 6.2.30 6.1.13" -f packaging/docker/kubernetes/Dockerfile .
docker build -t foundationdb/foundationdb-kubernetes:6.3.15-local --build-arg FDB_VERSION=6.3.15 --build-arg FDB_LIBRARY_VERSIONS="6.3.15 6.2.30 6.1.13" -f packaging/docker/kubernetes/Dockerfile .
kubectl apply -f packaging/docker/kubernetes/test_config.yaml
# Wait for the pods to become ready
ips=$(kubectl get pod -l app=fdb-kubernetes-example -o json | jq -j '[[.items|.[]|select(.status.podIP!="")]|limit(3;.[])|.status.podIP+":4501"]|join(",")')
sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"serverCount\": 0/\"serverCount\": 1/" packaging/docker/kubernetes/test_config.yaml | kubectl apply -f -
kubectl get pod -l app=fdb-kubernetes-example -o name | xargs -I {} kubectl annotate {} foundationdb.org/outdated-config-map-seen=$(date +%s) --overwrite
# Watch the logs for the fdb-kubernetes-example pods to confirm that they have launched the fdbserver processes.
kubectl exec -it sts/fdb-kubernetes-example -- fdbcli --exec "configure new double ssd"
```

This will set up a cluster in your Kubernetes environment using a StatefulSet, providing a simple subset of what the Kubernetes operator does to set up the cluster. Note: this assumes that you are running Docker Desktop on your local machine, with Kubernetes configured through Docker Desktop.

You can then make changes to the data in the config map and update the fdbserver processes:

```bash
sed -e "s/fdb.cluster: \"\"/fdb.cluster: \"test:test@$ips\"/" -e "s/\"serverCount\": 0/\"serverCount\": 1/" packaging/docker/kubernetes/test_config.yaml | kubectl apply -f -

# You can apply an annotation to speed up the propagation of the config
kubectl get pod -l app=fdb-kubernetes-example -o name | xargs -I {} kubectl annotate {} foundationdb.org/outdated-config-map-seen=$(date +%s) --overwrite

# Watch the logs for the fdb-kubernetes-example pods to confirm that they have reloaded their configuration, and then do a bounce.
kubectl exec -it sts/fdb-kubernetes-example -- fdbcli --exec "kill; kill all; status"
```

Once you are done, you can tear down the example with the following command:

```bash
kubectl delete -f packaging/docker/kubernetes/test_config.yaml; kubectl delete pvc -l app=fdb-kubernetes-example
```
145 fdbkubernetesmonitor/config.go Normal file
@ -0,0 +1,145 @@
// config.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
"fmt"
"os"
"strconv"
)

// ProcessConfiguration models the configuration for starting a FoundationDB
// process.
type ProcessConfiguration struct {
// Version provides the version of FoundationDB the process should run.
Version string `json:"version"`

// ServerCount defines the number of processes to start.
ServerCount int `json:"serverCount,omitempty"`

// BinaryPath provides the path to the binary to launch.
BinaryPath string `json:"-"`

// Arguments provides the arguments to the process.
Arguments []Argument `json:"arguments,omitempty"`
}

// Argument defines an argument to the process.
type Argument struct {
// ArgumentType determines how the value is generated.
ArgumentType ArgumentType `json:"type,omitempty"`

// Value provides the value for a Literal type argument.
Value string `json:"value,omitempty"`

// Values provides the sub-values for a Concatenate type argument.
Values []Argument `json:"values,omitempty"`

// Source provides the name of the environment variable to use for an
// Environment type argument.
Source string `json:"source,omitempty"`

// Multiplier provides a multiplier for the process number for ProcessNumber
// type arguments.
Multiplier int `json:"multiplier,omitempty"`

// Offset provides an offset to add to the process number for ProcessNumber
// type arguments.
Offset int `json:"offset,omitempty"`
}

// ArgumentType defines the types for arguments.
type ArgumentType string

const (
// LiteralArgumentType defines an argument with a literal string value.
LiteralArgumentType ArgumentType = "Literal"

// ConcatenateArgumentType defines an argument composed of other arguments.
ConcatenateArgumentType = "Concatenate"

// EnvironmentArgumentType defines an argument that is pulled from an
// environment variable.
EnvironmentArgumentType = "Environment"

// ProcessNumberArgumentType defines an argument that is calculated using
// the number of the process in the process list.
ProcessNumberArgumentType = "ProcessNumber"
)

// GenerateArgument processes an argument and generates its string
// representation.
func (argument Argument) GenerateArgument(processNumber int, env map[string]string) (string, error) {
switch argument.ArgumentType {
case "":
fallthrough
case LiteralArgumentType:
return argument.Value, nil
case ConcatenateArgumentType:
concatenated := ""
for _, childArgument := range argument.Values {
childValue, err := childArgument.GenerateArgument(processNumber, env)
if err != nil {
return "", err
}
concatenated += childValue
}
return concatenated, nil
case ProcessNumberArgumentType:
number := processNumber
if argument.Multiplier != 0 {
number = number * argument.Multiplier
}
number = number + argument.Offset
return strconv.Itoa(number), nil
case EnvironmentArgumentType:
var value string
var present bool
if env != nil {
value, present = env[argument.Source]
}
if !present {
value, present = os.LookupEnv(argument.Source)
}
if !present {
return "", fmt.Errorf("Missing environment variable %s", argument.Source)
}
return value, nil
default:
return "", fmt.Errorf("Unsupported argument type %s", argument.ArgumentType)
}
}

// GenerateArguments interprets the arguments in the process configuration and
// generates a command invocation.
func (configuration *ProcessConfiguration) GenerateArguments(processNumber int, env map[string]string) ([]string, error) {
results := make([]string, 0, len(configuration.Arguments)+1)
if configuration.BinaryPath != "" {
results = append(results, configuration.BinaryPath)
}
for _, argument := range configuration.Arguments {
result, err := argument.GenerateArgument(processNumber, env)
if err != nil {
return nil, err
}
results = append(results, result)
}
return results, nil
}
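As a worked example of the generation logic above, using the test fixture in `.testdata/default_config.json`: for process number 1, a `ProcessNumber` argument with `multiplier` 2 and `offset` 4499 yields 1 * 2 + 4499 = 4501, so the `--public_address` concatenation becomes `$FDB_PUBLIC_IP:4501` and the `--datadir` concatenation becomes `.testdata/data/1`, which is exactly what the tests below assert.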

129 fdbkubernetesmonitor/config_test.go Normal file
@ -0,0 +1,129 @@
// config_test.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
"encoding/json"
"os"
"reflect"
"testing"
)

func loadConfigFromFile(path string) (*ProcessConfiguration, error) {
file, err := os.Open(path)
if err != nil {
return nil, err
}
defer file.Close()
decoder := json.NewDecoder(file)
config := &ProcessConfiguration{}
err = decoder.Decode(config)
if err != nil {
return nil, err
}
return config, nil
}

func TestGeneratingArgumentsForDefaultConfig(t *testing.T) {
config, err := loadConfigFromFile(".testdata/default_config.json")
if err != nil {
t.Error(err)
return
}

arguments, err := config.GenerateArguments(1, map[string]string{
"FDB_PUBLIC_IP": "10.0.0.1",
"FDB_POD_IP": "192.168.0.1",
"FDB_ZONE_ID": "zone1",
"FDB_INSTANCE_ID": "storage-1",
})
if err != nil {
t.Error(err)
return
}

expectedArguments := []string{
"--cluster_file", ".testdata/fdb.cluster",
"--public_address", "10.0.0.1:4501", "--listen_address", "192.168.0.1:4501",
"--datadir", ".testdata/data/1", "--class", "storage",
"--locality_zoneid", "zone1", "--locality_instance-id", "storage-1",
"--locality_process-id", "storage-1-1",
}

if !reflect.DeepEqual(arguments, expectedArguments) {
t.Logf("Expected arguments %v, but got arguments %v", expectedArguments, arguments)
t.Fail()
}

config.BinaryPath = "/usr/bin/fdbserver"

arguments, err = config.GenerateArguments(1, map[string]string{
"FDB_PUBLIC_IP": "10.0.0.1",
"FDB_POD_IP": "192.168.0.1",
"FDB_ZONE_ID": "zone1",
"FDB_INSTANCE_ID": "storage-1",
})
if err != nil {
t.Error(err)
return
}

expectedArguments = []string{
"/usr/bin/fdbserver",
"--cluster_file", ".testdata/fdb.cluster",
"--public_address", "10.0.0.1:4501", "--listen_address", "192.168.0.1:4501",
"--datadir", ".testdata/data/1", "--class", "storage",
"--locality_zoneid", "zone1", "--locality_instance-id", "storage-1",
"--locality_process-id", "storage-1-1",
}

if !reflect.DeepEqual(arguments, expectedArguments) {
t.Logf("Expected arguments %v, but got arguments %v", expectedArguments, arguments)
t.Fail()
}
}

func TestGeneratingArgumentForEnvironmentVariable(t *testing.T) {
argument := Argument{ArgumentType: EnvironmentArgumentType, Source: "FDB_ZONE_ID"}

result, err := argument.GenerateArgument(1, map[string]string{"FDB_ZONE_ID": "zone1", "FDB_MACHINE_ID": "machine1"})
if err != nil {
t.Error(err)
return
}
if result != "zone1" {
t.Logf("Expected result zone1, but got result %v", result)
t.Fail()
return
}

_, err = argument.GenerateArgument(1, map[string]string{"FDB_MACHINE_ID": "machine1"})
if err == nil {
t.Logf("Expected error result, but did not get an error")
t.Fail()
return
}
expectedError := "Missing environment variable FDB_ZONE_ID"
if err.Error() != expectedError {
t.Logf("Expected error %s, but got error %s", expectedError, err)
t.Fail()
return
}
}
100 fdbkubernetesmonitor/copy.go Normal file
@ -0,0 +1,100 @@
// copy.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
"fmt"
"os"
"path"

"github.com/go-logr/logr"
)

// copyFile copies a file into the output directory.
func copyFile(logger logr.Logger, inputPath string, outputPath string, required bool) error {
logger.Info("Copying file", "inputPath", inputPath, "outputPath", outputPath)
inputFile, err := os.Open(inputPath)
if err != nil {
logger.Error(err, "Error opening file", "path", inputPath)
return err
}
defer inputFile.Close()

inputInfo, err := inputFile.Stat()
if err != nil {
logger.Error(err, "Error getting stats for file", "path", inputPath)
return err
}

if required && inputInfo.Size() == 0 {
return fmt.Errorf("File %s is empty", inputPath)
}

outputDir := path.Dir(outputPath)

tempFile, err := os.CreateTemp(outputDir, "")
if err != nil {
return err
}
defer tempFile.Close()

_, err = tempFile.ReadFrom(inputFile)
if err != nil {
return err
}

err = tempFile.Close()
if err != nil {
return err
}

err = os.Chmod(tempFile.Name(), inputInfo.Mode())
if err != nil {
return err
}

err = os.Rename(tempFile.Name(), outputPath)
if err != nil {
return err
}

return nil
}

// CopyFiles copies a list of files into the output directory.
func CopyFiles(logger logr.Logger, outputDir string, copyDetails map[string]string, requiredCopies map[string]bool) error {
for inputPath, outputSubpath := range copyDetails {
if outputSubpath == "" {
outputSubpath = path.Base(inputPath)
}
outputPath := path.Join(outputDir, outputSubpath)
err := os.MkdirAll(path.Dir(outputPath), os.ModeDir|os.ModePerm)
if err != nil {
return err
}

required := requiredCopies[inputPath]
err = copyFile(logger, inputPath, outputPath, required)
if err != nil {
return err
}
}
return nil
}
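A note on the copy strategy above: writing into a temporary file in the destination directory and then renaming it over the output path means readers never observe a partially written file, since the rename replaces the destination in a single step on POSIX filesystems.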

33 fdbkubernetesmonitor/go.mod Normal file
@ -0,0 +1,33 @@
// go.mod
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

module github.com/apple/foundationdb/fdbkubernetesmonitor

go 1.16

require (
github.com/fsnotify/fsnotify v1.5.0
github.com/go-logr/logr v0.4.0
github.com/go-logr/zapr v0.4.0
github.com/spf13/pflag v1.0.5
go.uber.org/zap v1.19.0
k8s.io/api v0.20.2
k8s.io/apimachinery v0.20.2
k8s.io/client-go v0.20.2
)
439 fdbkubernetesmonitor/go.sum Normal file
@ -0,0 +1,439 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc=
cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0=
cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To=
cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4=
cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M=
cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw=
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24=
github.com/Azure/go-autorest/autorest v0.11.1/go.mod h1:JFgpikqFJ/MleTTxwepExTKnFUKKszPS8UavbQYUMuw=
github.com/Azure/go-autorest/autorest/adal v0.9.0/go.mod h1:/c022QCutn2P7uY+/oQWWNcK9YU+MH96NgK+jErpbcg=
github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A=
github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74=
github.com/Azure/go-autorest/autorest/mocks v0.4.0/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k=
github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ=
github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM=
github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE=
github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc=
github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.0 h1:NO5hkcB+srp1x6QmwvNZLeaOgbM8cmBTN32THzjvu2k=
github.com/fsnotify/fsnotify v1.5.0/go.mod h1:BX0DCEr5pT4jm2CnQdVP1lFV521fcCNcyEeNp4DQQDk=
github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.4.0 h1:uc1uML3hRYL9/ZZPdgHS/n8Nzo+eaYL/Efxkkamf7OM=
github.com/go-logr/zapr v0.4.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
github.com/go-openapi/spec v0.19.3/go.mod h1:FpwSN1ksY1eteniUU7X0N/BgJ7a4WvBFVA8Lj9mJglo=
github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
|
||||
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
|
||||
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
|
||||
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
|
||||
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
|
||||
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
|
||||
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
|
||||
go.uber.org/goleak v1.1.10 h1:z+mqJhf6ss6BSfSM671tgKyZBFPTTJM+HLxnhPC3wu0=
|
||||
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
|
||||
go.uber.org/multierr v1.6.0 h1:y6IPFStTAIT5Ytl7/XYmHvzXQ7S3g/IeZW9hyZ5thw4=
|
||||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek=
|
||||
golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY=
|
||||
golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
|
||||
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b h1:Wh+f8QHJXR411sJR8/vRBTZ7YapZaRvUcLFFJhusH0k=
|
||||
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
|
||||
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
|
||||
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
|
||||
golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc=
|
||||
golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY=
|
||||
golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
|
||||
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw=
|
||||
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
|
||||
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
|
||||
golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
|
||||
golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb h1:iKlO7ROJc6SttHKlxzwGytRtBUqX4VARrNTgP2YLX5M=
|
||||
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
|
||||
google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI=
|
||||
google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
|
||||
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
|
||||
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
|
||||
google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8=
|
||||
google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc=
|
||||
google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA=
|
||||
google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
|
||||
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
|
||||
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
|
||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
|
||||
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
|
||||
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
|
||||
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
|
||||
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
|
||||
k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=
|
||||
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
|
||||
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
|
||||
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
188 fdbkubernetesmonitor/kubernetes.go Normal file
@ -0,0 +1,188 @@
// kubernetes.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
    "context"
    "encoding/json"
    "fmt"
    "os"
    "strconv"

    "github.com/go-logr/logr"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apimachinery/pkg/watch"
    "k8s.io/client-go/kubernetes"
    typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
    "k8s.io/client-go/rest"
)

const (
    // CurrentConfigurationAnnotation is the annotation we use to store the
    // latest configuration.
    CurrentConfigurationAnnotation = "foundationdb.org/launcher-current-configuration"

    // EnvironmentAnnotation is the annotation we use to store the environment
    // variables.
    EnvironmentAnnotation = "foundationdb.org/launcher-environment"

    // OutdatedConfigMapAnnotation is the annotation we read to get notified of
    // outdated configuration.
    OutdatedConfigMapAnnotation = "foundationdb.org/outdated-config-map-seen"
)

// PodClient is a wrapper around the pod API.
type PodClient struct {
    // podApi is the raw API
    podApi typedv1.PodInterface

    // pod is the latest pod configuration
    pod *corev1.Pod

    // TimestampFeed is a channel where the pod client will send updates with
    // the values from OutdatedConfigMapAnnotation.
    TimestampFeed chan int64

    // Logger is the logger we use for this client.
    Logger logr.Logger
}

// CreatePodClient creates a new client for working with the pod object.
func CreatePodClient() (*PodClient, error) {
    config, err := rest.InClusterConfig()
    if err != nil {
        return nil, err
    }
    client, err := kubernetes.NewForConfig(config)
    if err != nil {
        return nil, err
    }

    podApi := client.CoreV1().Pods(os.Getenv("FDB_POD_NAMESPACE"))
    pod, err := podApi.Get(context.Background(), os.Getenv("FDB_POD_NAME"), metav1.GetOptions{ResourceVersion: "0"})
    if err != nil {
        return nil, err
    }

    podClient := &PodClient{podApi: podApi, pod: pod, TimestampFeed: make(chan int64, 10)}
    err = podClient.watchPod()
    if err != nil {
        return nil, err
    }

    return podClient, nil
}

// retrieveEnvironmentVariables extracts the environment variables we have for
// an argument into a map.
func retrieveEnvironmentVariables(argument Argument, target map[string]string) {
    if argument.Source != "" {
        target[argument.Source] = os.Getenv(argument.Source)
    }
    if argument.Values != nil {
        for _, childArgument := range argument.Values {
            retrieveEnvironmentVariables(childArgument, target)
        }
    }
}
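
A minimal sketch (not part of the commit) of how retrieveEnvironmentVariables flattens a nested argument tree into a single map. The Argument literal below assumes only the Source and Values fields that this file actually touches; the full type lives in config.go, outside this diff hunk, and the environment variable names are hypothetical.

    // exampleFlattenArguments builds a hypothetical argument tree and
    // collects every referenced environment variable into one map.
    func exampleFlattenArguments() map[string]string {
        argument := Argument{
            Values: []Argument{
                {Source: "FDB_PUBLIC_IP"},
                {Source: "FDB_ZONE_ID"},
            },
        }
        target := make(map[string]string)
        // Recurses into Values and records os.Getenv for each Source.
        retrieveEnvironmentVariables(argument, target)
        return target // e.g. {"FDB_PUBLIC_IP": "10.0.0.12", "FDB_ZONE_ID": "zone-a"}
    }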

// UpdateAnnotations updates annotations on the pod after loading new
// configuration.
func (client *PodClient) UpdateAnnotations(monitor *Monitor) error {
    environment := make(map[string]string)
    for _, argument := range monitor.ActiveConfiguration.Arguments {
        retrieveEnvironmentVariables(argument, environment)
    }
    jsonEnvironment, err := json.Marshal(environment)
    if err != nil {
        return err
    }

    patch := map[string]interface{}{
        "metadata": map[string]interface{}{
            "annotations": map[string]string{
                CurrentConfigurationAnnotation: string(monitor.ActiveConfigurationBytes),
                EnvironmentAnnotation:          string(jsonEnvironment),
            },
        },
    }

    patchJson, err := json.Marshal(patch)
    if err != nil {
        return err
    }

    pod, err := client.podApi.Patch(context.Background(), client.pod.Name, types.MergePatchType, patchJson, metav1.PatchOptions{})
    if err != nil {
        return err
    }
    client.pod = pod
    return nil
}

// watchPod starts a watch on the pod.
func (client *PodClient) watchPod() error {
    podWatch, err := client.podApi.Watch(
        context.Background(),
        metav1.ListOptions{
            Watch:           true,
            ResourceVersion: "0",
            FieldSelector:   fmt.Sprintf("metadata.name=%s", os.Getenv("FDB_POD_NAME")),
        },
    )
    if err != nil {
        return err
    }
    results := podWatch.ResultChan()
    go func() {
        for event := range results {
            if event.Type == watch.Modified {
                pod, valid := event.Object.(*corev1.Pod)
                if !valid {
                    client.Logger.Error(nil, "Error getting pod information from watch", "event", event)
                    // Skip this event rather than processing a nil pod.
                    continue
                }
                client.processPodUpdate(pod)
            }
        }
    }()

    return nil
}

// processPodUpdate handles an update for a pod.
func (client *PodClient) processPodUpdate(pod *corev1.Pod) {
    client.pod = pod
    if pod.Annotations == nil {
        return
    }
    annotation := client.pod.Annotations[OutdatedConfigMapAnnotation]
    if annotation == "" {
        return
    }
    timestamp, err := strconv.ParseInt(annotation, 10, 64)
    if err != nil {
        client.Logger.Error(err, "Error parsing annotation", "key", OutdatedConfigMapAnnotation, "rawAnnotation", annotation)
        return
    }

    client.TimestampFeed <- timestamp
}
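
A rough usage sketch, not part of this file, of how a caller might consume the client: create it, then react to the timestamps the watch goroutine publishes on TimestampFeed. It assumes the process runs inside a pod with FDB_POD_NAMESPACE and FDB_POD_NAME set, which is what CreatePodClient reads.

    // examplePodClientLoop creates a PodClient and reacts to outdated-config
    // notifications as they arrive from the watch goroutine.
    func examplePodClientLoop(logger logr.Logger) error {
        client, err := CreatePodClient()
        if err != nil {
            return err
        }
        // The watch goroutine logs through this logger on bad events.
        client.Logger = logger
        for timestamp := range client.TimestampFeed {
            // In the launcher this is where a configuration reload would be
            // triggered; here we only log the annotation value.
            logger.Info("config map marked outdated", "timestamp", timestamp)
        }
        return nil
    }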
189 fdbkubernetesmonitor/main.go Normal file
@ -0,0 +1,189 @@
// main.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
    "bufio"
    "fmt"
    "os"
    "path"
    "regexp"
    "strings"

    "github.com/go-logr/logr"
    "github.com/go-logr/zapr"
    "github.com/spf13/pflag"
    "go.uber.org/zap"
)

var (
    inputDir                string
    fdbserverPath           string
    versionFilePath         string
    sharedBinaryDir         string
    monitorConfFile         string
    logPath                 string
    executionModeString     string
    outputDir               string
    copyFiles               []string
    copyBinaries            []string
    binaryOutputDirectory   string
    copyLibraries           []string
    copyPrimaryLibrary      string
    requiredCopyFiles       []string
    mainContainerVersion    string
    currentContainerVersion string
    additionalEnvFile       string
)

type executionMode string

const (
    executionModeLauncher executionMode = "launcher"
    executionModeInit     executionMode = "init"
    executionModeSidecar  executionMode = "sidecar"
)

func main() {
    pflag.StringVar(&executionModeString, "mode", "launcher", "Execution mode. Valid options are launcher, sidecar, and init")
    pflag.StringVar(&fdbserverPath, "fdbserver-path", "/usr/bin/fdbserver", "Path to the fdbserver binary")
    pflag.StringVar(&inputDir, "input-dir", ".", "Directory containing input files")
    pflag.StringVar(&monitorConfFile, "input-monitor-conf", "config.json", "Name of the file in the input directory that contains the monitor configuration")
    pflag.StringVar(&logPath, "log-path", "", "Name of a file to send logs to. Logs will be sent to stdout in addition to the file you pass in this argument. If this is blank, logs will only be sent to stdout")
    pflag.StringVar(&outputDir, "output-dir", ".", "Directory to copy files into")
    pflag.StringArrayVar(&copyFiles, "copy-file", nil, "A list of files to copy")
    pflag.StringArrayVar(&copyBinaries, "copy-binary", nil, "A list of binaries to copy from /usr/bin")
    pflag.StringVar(&versionFilePath, "version-file", "/var/fdb/version", "Path to a file containing the current FDB version")
    pflag.StringVar(&sharedBinaryDir, "shared-binary-dir", "/var/fdb/shared-binaries/bin", "A directory containing binaries that are copied from a sidecar process")
    pflag.StringVar(&binaryOutputDirectory, "binary-output-dir", "", "A subdirectory within $(output-dir)/bin to store binaries in. This defaults to the value in /var/fdb/version")
    pflag.StringArrayVar(&copyLibraries, "copy-library", nil, "A list of libraries to copy from /usr/lib/fdb/multiversion to $(output-dir)/lib/multiversion")
    pflag.StringVar(&copyPrimaryLibrary, "copy-primary-library", "", "A library to copy from /usr/lib/fdb/multiversion to $(output-dir)/lib. This file will be renamed to libfdb_c.so")
    pflag.StringArrayVar(&requiredCopyFiles, "require-not-empty", nil, "When copying this file, exit with an error if the file is empty")
    pflag.StringVar(&mainContainerVersion, "main-container-version", "", "For sidecar mode, this specifies the version of the main container. If this is equal to the current container version, no files will be copied")
    pflag.StringVar(&additionalEnvFile, "additional-env-file", "", "A file with additional environment variables to use when interpreting the monitor configuration")
    pflag.Parse()

    zapConfig := zap.NewProductionConfig()
    if logPath != "" {
        zapConfig.OutputPaths = append(zapConfig.OutputPaths, logPath)
    }
    zapLogger, err := zapConfig.Build()
    if err != nil {
        panic(err)
    }

    versionBytes, err := os.ReadFile(versionFilePath)
    if err != nil {
        panic(err)
    }
    currentContainerVersion = strings.TrimSpace(string(versionBytes))

    logger := zapr.NewLogger(zapLogger)
    copyDetails, requiredCopies, err := getCopyDetails()
    if err != nil {
        logger.Error(err, "Error getting list of files to copy")
        os.Exit(1)
    }

    mode := executionMode(executionModeString)
    switch mode {
    case executionModeLauncher:
        customEnvironment, err := loadAdditionalEnvironment(logger)
        if err != nil {
            logger.Error(err, "Error loading additional environment")
            os.Exit(1)
        }
        StartMonitor(logger, fmt.Sprintf("%s/%s", inputDir, monitorConfFile), customEnvironment)
    case executionModeInit:
        err = CopyFiles(logger, outputDir, copyDetails, requiredCopies)
        if err != nil {
            logger.Error(err, "Error copying files")
            os.Exit(1)
        }
    case executionModeSidecar:
        if mainContainerVersion != currentContainerVersion {
            err = CopyFiles(logger, outputDir, copyDetails, requiredCopies)
            if err != nil {
                logger.Error(err, "Error copying files")
                os.Exit(1)
            }
            done := make(chan bool)
            <-done
        }
    default:
        logger.Error(nil, "Unknown execution mode", "mode", mode)
        os.Exit(1)
    }
}

func getCopyDetails() (map[string]string, map[string]bool, error) {
    copyDetails := make(map[string]string, len(copyFiles)+len(copyBinaries))

    for _, filePath := range copyFiles {
        copyDetails[path.Join(inputDir, filePath)] = ""
    }
    if copyBinaries != nil {
        if binaryOutputDirectory == "" {
            binaryOutputDirectory = currentContainerVersion
        }
        for _, copyBinary := range copyBinaries {
            copyDetails[fmt.Sprintf("/usr/bin/%s", copyBinary)] = path.Join("bin", binaryOutputDirectory, copyBinary)
        }
    }
    for _, library := range copyLibraries {
        copyDetails[fmt.Sprintf("/usr/lib/fdb/multiversion/libfdb_c_%s.so", library)] = path.Join("lib", "multiversion", fmt.Sprintf("libfdb_c_%s.so", library))
    }
    if copyPrimaryLibrary != "" {
        copyDetails[fmt.Sprintf("/usr/lib/fdb/multiversion/libfdb_c_%s.so", copyPrimaryLibrary)] = path.Join("lib", "libfdb_c.so")
    }
    requiredCopyMap := make(map[string]bool, len(requiredCopyFiles))
    for _, filePath := range requiredCopyFiles {
        fullFilePath := path.Join(inputDir, filePath)
        _, present := copyDetails[fullFilePath]
        if !present {
            return nil, nil, fmt.Errorf("file %s is required, but is not in the --copy-file list", filePath)
        }
        requiredCopyMap[fullFilePath] = true
    }
    return copyDetails, requiredCopyMap, nil
}
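
A hypothetical walk-through of what getCopyDetails produces for one plausible flag combination. The flag values here are illustrative, not defaults from the commit, and the interpretation of an empty destination (presumably "copy to the same relative path") belongs to CopyFiles, which is not part of this excerpt.

    // exampleCopyDetails sets the package-level flag variables by hand and
    // inspects the resulting copy plan.
    func exampleCopyDetails() {
        inputDir = "/var/input-files"
        copyFiles = []string{"fdbmonitor.conf"}
        copyBinaries = []string{"fdbserver", "fdbcli"}
        binaryOutputDirectory = "" // falls back to currentContainerVersion
        currentContainerVersion = "6.3.15"
        copyPrimaryLibrary = "6.3"
        requiredCopyFiles = []string{"fdbmonitor.conf"}

        details, required, err := getCopyDetails()
        fmt.Println(details, required, err)
        // details maps sources to destinations under --output-dir:
        //   /var/input-files/fdbmonitor.conf          -> "" (same relative path, presumably)
        //   /usr/bin/fdbserver                        -> bin/6.3.15/fdbserver
        //   /usr/bin/fdbcli                           -> bin/6.3.15/fdbcli
        //   /usr/lib/fdb/multiversion/libfdb_c_6.3.so -> lib/libfdb_c.so
        // required marks /var/input-files/fdbmonitor.conf as must-not-be-empty.
    }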

func loadAdditionalEnvironment(logger logr.Logger) (map[string]string, error) {
    var customEnvironment = make(map[string]string)
    environmentPattern := regexp.MustCompile(`export ([A-Za-z0-9_]+)=([^\n]*)`)
    if additionalEnvFile != "" {
        file, err := os.Open(additionalEnvFile)
        if err != nil {
            return nil, err
        }
        // Close the file once we are done scanning it.
        defer file.Close()

        envScanner := bufio.NewScanner(file)
        for envScanner.Scan() {
            envLine := envScanner.Text()
            matches := environmentPattern.FindStringSubmatch(envLine)
            if matches == nil || envLine == "" {
                logger.Error(nil, "Environment file contains line that we cannot parse", "line", envLine, "environmentPattern", environmentPattern)
                continue
            }
            customEnvironment[matches[1]] = matches[2]
        }
    }
    return customEnvironment, nil
}
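
A small self-contained illustration of the line format loadAdditionalEnvironment accepts: one `export KEY=value` per line, matched with the same pattern used above. The variable names are made up for the example.

    // exampleParseEnvLines runs the launcher's env-file pattern over a few
    // sample lines and shows which ones it would accept.
    func exampleParseEnvLines() {
        pattern := regexp.MustCompile(`export ([A-Za-z0-9_]+)=([^\n]*)`)
        lines := []string{
            "export FDB_PUBLIC_IP=10.0.0.12", // accepted
            "export FDB_ZONE_ID=zone-a",      // accepted
            "FDB_MACHINE_ID=machine-1",       // rejected: no export prefix
        }
        for _, line := range lines {
            if matches := pattern.FindStringSubmatch(line); matches != nil {
                fmt.Printf("%s => %s\n", matches[1], matches[2])
            } else {
                fmt.Printf("unparseable line: %q\n", line)
            }
        }
    }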
379 fdbkubernetesmonitor/monitor.go Normal file
@ -0,0 +1,379 @@
// monitor.go
//
// This source file is part of the FoundationDB open source project
//
// Copyright 2021 Apple Inc. and the FoundationDB project authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"os/exec"
	"os/signal"
	"path"
	"sync"
	"syscall"
	"time"

	"github.com/fsnotify/fsnotify"
	"github.com/go-logr/logr"
)

// errorBackoffSeconds is the time to wait after a process fails before starting
// another process.
// This delay will only be applied when there has been more than one failure
// within this time window.
const errorBackoffSeconds = 60
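// Illustrative aside (not part of the commit): with errorBackoffSeconds = 60,
// a subprocess that exits after only 10 seconds of runtime triggers a 60-second
// sleep before the next start attempt, while one that ran longer than 60 seconds
// is restarted immediately; see the duration check at the end of RunProcess.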

// Monitor provides the main monitor loop
type Monitor struct {
	// ConfigFile defines the path to the config file to load.
	ConfigFile string

	// CustomEnvironment defines the custom environment variables to use when
	// interpreting the monitor configuration.
	CustomEnvironment map[string]string

	// ActiveConfiguration defines the active process configuration.
	ActiveConfiguration *ProcessConfiguration

	// ActiveConfigurationBytes defines the source data for the active process
	// configuration.
	ActiveConfigurationBytes []byte

	// LastConfigurationTime is the last time we successfully reloaded the
	// configuration file.
	LastConfigurationTime time.Time

	// ProcessIDs stores the PIDs of the processes that are running. A PID of
	// zero will indicate that a process does not have a run loop. A PID of -1
	// will indicate that a process has a run loop but is not currently running
	// the subprocess.
	ProcessIDs []int

	// Mutex defines a mutex around working with configuration.
	// This is used to synchronize access to local state like the active
	// configuration and the process IDs from multiple goroutines.
	Mutex sync.Mutex

	// PodClient is a client for posting updates about this pod to
	// Kubernetes.
	PodClient *PodClient

	// Logger is the logger instance for this monitor.
	Logger logr.Logger
}

// StartMonitor starts the monitor loop.
func StartMonitor(logger logr.Logger, configFile string, customEnvironment map[string]string) {
	podClient, err := CreatePodClient()
	if err != nil {
		panic(err)
	}

	monitor := &Monitor{
		ConfigFile:        configFile,
		PodClient:         podClient,
		Logger:            logger,
		CustomEnvironment: customEnvironment,
	}

	go func() { monitor.WatchPodTimestamps() }()
	monitor.Run()
}

// LoadConfiguration loads the latest configuration from the config file.
func (monitor *Monitor) LoadConfiguration() {
	file, err := os.Open(monitor.ConfigFile)
	if err != nil {
		monitor.Logger.Error(err, "Error reading monitor config file", "monitorConfigPath", monitor.ConfigFile)
		return
	}
	defer file.Close()
	configuration := &ProcessConfiguration{}
	configurationBytes, err := io.ReadAll(file)
	if err != nil {
		monitor.Logger.Error(err, "Error reading monitor configuration", "monitorConfigPath", monitor.ConfigFile)
	}
	err = json.Unmarshal(configurationBytes, configuration)
	if err != nil {
		monitor.Logger.Error(err, "Error parsing monitor configuration", "rawConfiguration", string(configurationBytes))
		return
	}

	if currentContainerVersion == configuration.Version {
		configuration.BinaryPath = fdbserverPath
	} else {
		configuration.BinaryPath = path.Join(sharedBinaryDir, configuration.Version, "fdbserver")
	}

	err = checkOwnerExecutable(configuration.BinaryPath)
	if err != nil {
		monitor.Logger.Error(err, "Error with binary path for latest configuration", "configuration", configuration, "binaryPath", configuration.BinaryPath)
		return
	}

	_, err = configuration.GenerateArguments(1, monitor.CustomEnvironment)
	if err != nil {
		monitor.Logger.Error(err, "Error generating arguments for latest configuration", "configuration", configuration, "binaryPath", configuration.BinaryPath)
		return
	}

	monitor.acceptConfiguration(configuration, configurationBytes)
}

// checkOwnerExecutable validates that a path is a file that exists and is
// executable by its owner.
func checkOwnerExecutable(path string) error {
	binaryStat, err := os.Stat(path)
	if err != nil {
		return err
	}
	if binaryStat.Mode()&0o100 == 0 {
		return fmt.Errorf("Binary is not executable")
	}
	return nil
}
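// Illustrative aside (not part of the commit): 0o100 is the owner-execute
// permission bit, so a binary installed with mode 0644 fails this check while
// one installed with mode 0755 (or 0700) passes.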

// acceptConfiguration is called when the monitor process parses and accepts
// a configuration from the local config file.
func (monitor *Monitor) acceptConfiguration(configuration *ProcessConfiguration, configurationBytes []byte) {
	monitor.Mutex.Lock()
	defer monitor.Mutex.Unlock()
	monitor.Logger.Info("Received new configuration file", "configuration", configuration)

	if monitor.ProcessIDs == nil {
		monitor.ProcessIDs = make([]int, configuration.ServerCount+1)
	} else {
		for len(monitor.ProcessIDs) <= configuration.ServerCount {
			monitor.ProcessIDs = append(monitor.ProcessIDs, 0)
		}
	}

	monitor.ActiveConfiguration = configuration
	monitor.ActiveConfigurationBytes = configurationBytes
	monitor.LastConfigurationTime = time.Now()

	for processNumber := 1; processNumber <= configuration.ServerCount; processNumber++ {
		if monitor.ProcessIDs[processNumber] == 0 {
			monitor.ProcessIDs[processNumber] = -1
			tempNumber := processNumber
			go func() { monitor.RunProcess(tempNumber) }()
		}
	}

	err := monitor.PodClient.UpdateAnnotations(monitor)
	if err != nil {
		monitor.Logger.Error(err, "Error updating pod annotations")
	}
}

// RunProcess runs a loop to continually start and watch a process.
func (monitor *Monitor) RunProcess(processNumber int) {
	pid := 0
	logger := monitor.Logger.WithValues("processNumber", processNumber, "area", "RunProcess")
	logger.Info("Starting run loop")
	for {
		if !monitor.checkProcessRequired(processNumber) {
			return
		}

		arguments, err := monitor.ActiveConfiguration.GenerateArguments(processNumber, monitor.CustomEnvironment)
		if err != nil {
			logger.Error(err, "Error generating arguments for subprocess", "configuration", monitor.ActiveConfiguration)
			time.Sleep(errorBackoffSeconds * time.Second)
		}
		cmd := exec.Cmd{
			Path: arguments[0],
			Args: arguments,
		}

		logger.Info("Starting subprocess", "arguments", arguments)

		stdout, err := cmd.StdoutPipe()
		if err != nil {
			logger.Error(err, "Error getting stdout from subprocess")
		}

		stderr, err := cmd.StderrPipe()
		if err != nil {
			logger.Error(err, "Error getting stderr from subprocess")
		}

		err = cmd.Start()
		if err != nil {
			logger.Error(err, "Error starting subprocess")
			time.Sleep(errorBackoffSeconds * time.Second)
			continue
		}

		if cmd.Process != nil {
			pid = cmd.Process.Pid
		} else {
			logger.Error(nil, "No Process information available for subprocess")
		}

		startTime := time.Now()
		logger.Info("Subprocess started", "PID", pid)

		monitor.updateProcessID(processNumber, pid)

		if stdout != nil {
			stdoutScanner := bufio.NewScanner(stdout)
			go func() {
				for stdoutScanner.Scan() {
					logger.Info("Subprocess output", "msg", stdoutScanner.Text(), "PID", pid)
				}
			}()
		}

		if stderr != nil {
			stderrScanner := bufio.NewScanner(stderr)
			go func() {
				for stderrScanner.Scan() {
					logger.Error(nil, "Subprocess error log", "msg", stderrScanner.Text(), "PID", pid)
				}
			}()
		}

		err = cmd.Wait()
		if err != nil {
			logger.Error(err, "Error from subprocess", "PID", pid)
		}
		exitCode := -1
		if cmd.ProcessState != nil {
			exitCode = cmd.ProcessState.ExitCode()
		}

		logger.Info("Subprocess terminated", "exitCode", exitCode, "PID", pid)

		endTime := time.Now()
		monitor.updateProcessID(processNumber, -1)

		processDuration := endTime.Sub(startTime)
		if processDuration.Seconds() < errorBackoffSeconds {
			logger.Info("Backing off from restarting subprocess", "backOffTimeSeconds", errorBackoffSeconds, "lastExecutionDurationSeconds", processDuration)
			time.Sleep(errorBackoffSeconds * time.Second)
		}
	}
}

// checkProcessRequired determines if the latest configuration requires that a
// process stay running.
// If the process is no longer desired, this will remove it from the process ID
// list and return false. If the process is still desired, this will return
// true.
func (monitor *Monitor) checkProcessRequired(processNumber int) bool {
	monitor.Mutex.Lock()
	defer monitor.Mutex.Unlock()
	logger := monitor.Logger.WithValues("processNumber", processNumber, "area", "checkProcessRequired")
	if monitor.ActiveConfiguration.ServerCount < processNumber {
		logger.Info("Terminating run loop")
		monitor.ProcessIDs[processNumber] = 0
		return false
	}
	return true
}

// updateProcessID records a new Process ID from a newly launched process.
func (monitor *Monitor) updateProcessID(processNumber int, pid int) {
	monitor.Mutex.Lock()
	defer monitor.Mutex.Unlock()
	monitor.ProcessIDs[processNumber] = pid
}

// WatchConfiguration detects changes to the monitor configuration file.
func (monitor *Monitor) WatchConfiguration(watcher *fsnotify.Watcher) {
	for {
		select {
		case event, ok := <-watcher.Events:
			if !ok {
				return
			}
			monitor.Logger.Info("Detected event on monitor conf file", "event", event)
			if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create {
				monitor.LoadConfiguration()
			} else if event.Op&fsnotify.Remove == fsnotify.Remove {
				err := watcher.Add(monitor.ConfigFile)
				if err != nil {
					panic(err)
				}
				monitor.LoadConfiguration()
			}
		case err, ok := <-watcher.Errors:
			if !ok {
				return
			}
			monitor.Logger.Error(err, "Error watching for file system events")
		}
	}
}
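// Illustrative aside (not part of the commit): the Remove branch above is what
// keeps the watch alive on Kubernetes, where an updated ConfigMap is typically
// published by atomically swapping a symlink; fsnotify reports that swap as a
// Remove of the watched path, so the watch must be re-added before reloading.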

// Run runs the monitor loop.
func (monitor *Monitor) Run() {
	done := make(chan bool, 1)
	signals := make(chan os.Signal, 1)
	signal.Notify(signals, syscall.SIGINT, syscall.SIGTERM)

	go func() {
		latestSignal := <-signals
		monitor.Logger.Info("Received system signal", "signal", latestSignal)
		for processNumber, processID := range monitor.ProcessIDs {
			if processID > 0 {
				subprocessLogger := monitor.Logger.WithValues("processNumber", processNumber, "PID", processID)
				process, err := os.FindProcess(processID)
				if err != nil {
					subprocessLogger.Error(err, "Error finding subprocess")
					continue
				}
				subprocessLogger.Info("Sending signal to subprocess", "signal", latestSignal)
				err = process.Signal(latestSignal)
				if err != nil {
					subprocessLogger.Error(err, "Error signaling subprocess")
					continue
				}
			}
		}
		done <- true
	}()

	monitor.LoadConfiguration()
	watcher, err := fsnotify.NewWatcher()
	if err != nil {
		panic(err)
	}
	err = watcher.Add(monitor.ConfigFile)
	if err != nil {
		panic(err)
	}

	defer watcher.Close()
	go func() { monitor.WatchConfiguration(watcher) }()

	<-done
}

func (monitor *Monitor) WatchPodTimestamps() {
	for timestamp := range monitor.PodClient.TimestampFeed {
		if timestamp > monitor.LastConfigurationTime.Unix() {
			monitor.LoadConfiguration()
		}
	}
}
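To see how these pieces fit together, a hedged sketch of the wiring from the command-line entry point; monitorConfFile is a hypothetical stand-in for the flag variable defined in main.go, and logr.Discard() stands in for the real configured logger:

    // Minimal sketch, assuming the flag parsing in main.go has already run.
    logger := logr.Discard() // placeholder; the monitor uses a real logr.Logger
    customEnvironment, err := loadAdditionalEnvironment(logger)
    if err != nil {
        logger.Error(err, "Error loading additional environment")
        os.Exit(1)
    }
    StartMonitor(logger, monitorConfFile, customEnvironment) // monitorConfFile: hypothetical flag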
@ -809,7 +809,7 @@ TEST_CASE("/flow/flow/chooseTwoActor") {
return Void();
}

TEST_CASE("/flow/flow/perf/actor patterns") {
TEST_CASE("#flow/flow/perf/actor patterns") {
double start;
int N = 1000000;

@ -1287,8 +1287,6 @@ TEST_CASE("/fdbrpc/flow/wait_expression_after_cancel") {
template <class>
struct ShouldNotGoIntoClassContextStack;

ACTOR static Future<Void> shouldNotHaveFriends();

class Foo1 {
public:
explicit Foo1(int x) : x(x) {}
@ -1363,8 +1361,6 @@ ACTOR Future<int> Outer::Foo5::fooActor(Outer::Foo5* self) {
return self->x;
}

ACTOR static Future<Void> shouldNotHaveFriends2();

// Meant to be run with -fsanitize=undefined
TEST_CASE("/flow/DeterministicRandom/SignedOverflow") {
deterministicRandom()->randomInt(std::numeric_limits<int>::min(), 0);
@ -27,7 +27,6 @@
#include <memcheck.h>
#endif

#include "flow/crc32c.h"
#include "fdbrpc/fdbrpc.h"
#include "fdbrpc/FailureMonitor.h"
#include "fdbrpc/HealthMonitor.h"
@ -41,6 +40,8 @@
#include "flow/ObjectSerializer.h"
#include "flow/ProtocolVersion.h"
#include "flow/UnitTest.h"
#define XXH_INLINE_ALL
#include "flow/xxhash.h"
#include "flow/actorcompiler.h" // This must be the last #include.

static NetworkAddressList g_currentDeliveryPeerAddress = NetworkAddressList();
@ -984,21 +985,22 @@ static void scanPackets(TransportData* transport,

const bool checksumEnabled = !peerAddress.isTLS();
loop {
uint32_t packetLen, packetChecksum;
uint32_t packetLen;
XXH64_hash_t packetChecksum;

// Retrieve packet length and checksum
// Read packet length if size is sufficient or stop
if (e - p < PACKET_LEN_WIDTH)
break;
packetLen = *(uint32_t*)p;
p += PACKET_LEN_WIDTH;

// Read checksum if present
if (checksumEnabled) {
if (e - p < sizeof(uint32_t) * 2)
// Read checksum if size is sufficient or stop
if (e - p < sizeof(packetChecksum))
break;
packetLen = *(uint32_t*)p;
p += PACKET_LEN_WIDTH;
packetChecksum = *(uint32_t*)p;
p += sizeof(uint32_t);
} else {
if (e - p < sizeof(uint32_t))
break;
packetLen = *(uint32_t*)p;
p += PACKET_LEN_WIDTH;
packetChecksum = *(XXH64_hash_t*)p;
p += sizeof(packetChecksum);
}

if (packetLen > FLOW_KNOBS->PACKET_LIMIT) {
@ -1036,23 +1038,23 @@ static void scanPackets(TransportData* transport,
}
}

uint32_t calculatedChecksum = crc32c_append(0, p, packetLen);
XXH64_hash_t calculatedChecksum = XXH3_64bits(p, packetLen);
if (calculatedChecksum != packetChecksum) {
if (isBuggifyEnabled) {
TraceEvent(SevInfo, "ChecksumMismatchExp")
.detail("PacketChecksum", (int)packetChecksum)
.detail("CalculatedChecksum", (int)calculatedChecksum);
.detail("PacketChecksum", packetChecksum)
.detail("CalculatedChecksum", calculatedChecksum);
} else {
TraceEvent(SevWarnAlways, "ChecksumMismatchUnexp")
.detail("PacketChecksum", (int)packetChecksum)
.detail("CalculatedChecksum", (int)calculatedChecksum);
.detail("PacketChecksum", packetChecksum)
.detail("CalculatedChecksum", calculatedChecksum);
}
throw checksum_failed();
} else {
if (isBuggifyEnabled) {
TraceEvent(SevError, "ChecksumMatchUnexp")
.detail("PacketChecksum", (int)packetChecksum)
.detail("CalculatedChecksum", (int)calculatedChecksum);
.detail("PacketChecksum", packetChecksum)
.detail("CalculatedChecksum", calculatedChecksum);
}
}
}
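This hunk switches the wire checksum from 32-bit CRC32C to 64-bit XXH3. As a minimal standalone illustration of the receive-side check (a sketch against the xxHash library, not the FlowTransport code itself):

    #define XXH_INLINE_ALL
    #include "xxhash.h"
    #include <cstdint>

    // Sketch: verify a received frame of `len` bytes at `p` against the
    // 64-bit XXH3 checksum that preceded it on the wire. One-shot hashing
    // suffices when the payload is contiguous in memory.
    bool verifyFrame(const uint8_t* p, uint32_t len, XXH64_hash_t wireChecksum) {
        return XXH3_64bits(p, len) == wireChecksum;
    }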
@ -1261,8 +1263,11 @@ ACTOR static Future<Void> connectionReader(TransportData* transport,
} else {
peerProtocolVersion = protocolVersion;
if (pkt.canonicalRemotePort) {
peerAddress = NetworkAddress(
pkt.canonicalRemoteIp(), pkt.canonicalRemotePort, true, peerAddress.isTLS());
peerAddress = NetworkAddress(pkt.canonicalRemoteIp(),
pkt.canonicalRemotePort,
true,
peerAddress.isTLS(),
peerAddress.fromHostname);
}
peer = transport->getOrOpenPeer(peerAddress, false);
peer->compatible = compatible;
@ -1584,7 +1589,15 @@ static ReliablePacket* sendPacket(TransportData* self,

// Reserve some space for packet length and checksum, write them after serializing data
SplitBuffer packetInfoBuffer;
uint32_t len, checksum = 0;
uint32_t len;

// This is technically abstraction breaking but avoids XXH3_createState() and XXH3_freeState() which are just
// malloc/free
XXH3_state_t checksumState;
// Checksum will be calculated with buffer API if contiguous, else using stream API. Mode is tracked here.
bool checksumStream = false;
XXH64_hash_t checksum;

int packetInfoSize = PACKET_LEN_WIDTH;
if (checksumEnabled) {
packetInfoSize += sizeof(checksum);
@ -1609,11 +1622,38 @@ static ReliablePacket* sendPacket(TransportData* self,
while (checksumUnprocessedLength > 0) {
uint32_t processLength =
std::min(checksumUnprocessedLength, (uint32_t)(checksumPb->bytes_written - prevBytesWritten));
checksum = crc32c_append(checksum, checksumPb->data() + prevBytesWritten, processLength);

// If not in checksum stream mode yet
if (!checksumStream) {
// If there is nothing left to process then calculate checksum directly
if (processLength == checksumUnprocessedLength) {
checksum = XXH3_64bits(checksumPb->data() + prevBytesWritten, processLength);
} else {
// Otherwise, initialize checksum state and switch to stream mode
if (XXH3_64bits_reset(&checksumState) != XXH_OK) {
throw internal_error();
}
checksumStream = true;
}
}

// If in checksum stream mode, update the checksum state
if (checksumStream) {
if (XXH3_64bits_update(&checksumState, checksumPb->data() + prevBytesWritten, processLength) !=
XXH_OK) {
throw internal_error();
}
}

checksumUnprocessedLength -= processLength;
checksumPb = checksumPb->nextPacketBuffer();
prevBytesWritten = 0;
}

// If in checksum stream mode, get the final checksum
if (checksumStream) {
checksum = XXH3_64bits_digest(&checksumState);
}
}

// Write packet length and checksum into packet buffer
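The send path above picks between the one-shot XXH3 call and the streaming state machine depending on whether the serialized packet spans multiple buffers. A condensed standalone sketch of the same pattern over a list of buffer fragments, using only the xxHash calls that appear in the hunk (error handling simplified):

    #define XXH_INLINE_ALL
    #include "xxhash.h"
    #include <cstdint>
    #include <utility>
    #include <vector>

    // Hash a packet split across fragments: one-shot if there is a single
    // fragment, otherwise reset/update/digest on a stack-allocated state,
    // mirroring the buffer-vs-stream mode tracking in sendPacket.
    XXH64_hash_t hashFragments(const std::vector<std::pair<const uint8_t*, size_t>>& frags) {
        if (frags.size() == 1) {
            return XXH3_64bits(frags[0].first, frags[0].second);
        }
        XXH3_state_t state;
        if (XXH3_64bits_reset(&state) != XXH_OK) {
            return 0; // sketch only; FlowTransport throws internal_error() here
        }
        for (const auto& [data, size] : frags) {
            if (XXH3_64bits_update(&state, data, size) != XXH_OK) {
                return 0;
            }
        }
        return XXH3_64bits_digest(&state);
    }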
@ -39,9 +39,9 @@ public:
// Endpoint represents a particular service (e.g. a serialized Promise<T> or PromiseStream<T>)
// An endpoint is either "local" (used for receiving data) or "remote" (used for sending data)
constexpr static FileIdentifier file_identifier = 10618805;
typedef UID Token;
using Token = UID;
NetworkAddressList addresses;
Token token;
Token token{};

Endpoint() {}
Endpoint(const NetworkAddressList& addresses, Token token) : addresses(addresses), token(token) {
@ -134,7 +134,7 @@ public:
}
};

struct TransportData;
class TransportData;

struct Peer : public ReferenceCounted<Peer> {
TransportData* transport;
@ -67,6 +67,47 @@ public:
}
};

bool MockDNS::findMockTCPEndpoint(const std::string& host, const std::string& service) {
std::string hostname = host + ":" + service;
return hostnameToAddresses.find(hostname) != hostnameToAddresses.end();
}

void MockDNS::addMockTCPEndpoint(const std::string& host,
const std::string& service,
const std::vector<NetworkAddress>& addresses) {
if (findMockTCPEndpoint(host, service)) {
throw operation_failed();
}
hostnameToAddresses[host + ":" + service] = addresses;
}

void MockDNS::updateMockTCPEndpoint(const std::string& host,
const std::string& service,
const std::vector<NetworkAddress>& addresses) {
if (!findMockTCPEndpoint(host, service)) {
throw operation_failed();
}
hostnameToAddresses[host + ":" + service] = addresses;
}

void MockDNS::removeMockTCPEndpoint(const std::string& host, const std::string& service) {
if (!findMockTCPEndpoint(host, service)) {
throw operation_failed();
}
hostnameToAddresses.erase(host + ":" + service);
}

std::vector<NetworkAddress> MockDNS::getTCPEndpoint(const std::string& host, const std::string& service) {
if (!findMockTCPEndpoint(host, service)) {
throw operation_failed();
}
return hostnameToAddresses[host + ":" + service];
}

void MockDNS::clearMockTCPEndpoints() {
hostnameToAddresses.clear();
}

void SimExternalConnection::close() {
socket.close();
}
@ -212,4 +253,49 @@ TEST_CASE("fdbrpc/SimExternalClient") {
return Void();
}

TEST_CASE("fdbrpc/MockTCPEndpoints") {
state MockDNS mockDNS;
state std::vector<NetworkAddress> networkAddresses;
state NetworkAddress address1(IPAddress(0x13131313), 1);
state NetworkAddress address2(IPAddress(0x14141414), 2);
networkAddresses.push_back(address1);
networkAddresses.push_back(address2);
mockDNS.addMockTCPEndpoint("testhost1", "testport1", networkAddresses);
ASSERT(mockDNS.findMockTCPEndpoint("testhost1", "testport1"));
ASSERT(mockDNS.findMockTCPEndpoint("testhost1", "testport2") == false);
std::vector<NetworkAddress> resolvedNetworkAddresses = mockDNS.getTCPEndpoint("testhost1", "testport1");
ASSERT(resolvedNetworkAddresses.size() == 2);
ASSERT(std::find(resolvedNetworkAddresses.begin(), resolvedNetworkAddresses.end(), address1) !=
resolvedNetworkAddresses.end());
ASSERT(std::find(resolvedNetworkAddresses.begin(), resolvedNetworkAddresses.end(), address2) !=
resolvedNetworkAddresses.end());
// Adding a hostname twice should fail.
try {
mockDNS.addMockTCPEndpoint("testhost1", "testport1", networkAddresses);
} catch (Error& e) {
ASSERT(e.code() == error_code_operation_failed);
}
// Updating a nonexistent hostname should fail.
try {
mockDNS.updateMockTCPEndpoint("testhost2", "testport2", networkAddresses);
} catch (Error& e) {
ASSERT(e.code() == error_code_operation_failed);
}
// Removing a nonexistent hostname should fail.
try {
mockDNS.removeMockTCPEndpoint("testhost2", "testport2");
} catch (Error& e) {
ASSERT(e.code() == error_code_operation_failed);
}
mockDNS.clearMockTCPEndpoints();
// Updating any hostname right after clearing endpoints should fail.
try {
mockDNS.updateMockTCPEndpoint("testhost1", "testport1", networkAddresses);
} catch (Error& e) {
ASSERT(e.code() == error_code_operation_failed);
}

return Void();
}

void forceLinkSimExternalConnectionTests() {}
@ -28,6 +28,24 @@

#include <boost/asio.hpp>

// MockDNS is a class maintaining a <hostname, vector<NetworkAddress>> mapping, mocking a DNS in simulation.
class MockDNS {
public:
bool findMockTCPEndpoint(const std::string& host, const std::string& service);
void addMockTCPEndpoint(const std::string& host,
const std::string& service,
const std::vector<NetworkAddress>& addresses);
void updateMockTCPEndpoint(const std::string& host,
const std::string& service,
const std::vector<NetworkAddress>& addresses);
void removeMockTCPEndpoint(const std::string& host, const std::string& service);
void clearMockTCPEndpoints();
std::vector<NetworkAddress> getTCPEndpoint(const std::string& host, const std::string& service);

private:
std::map<std::string, std::vector<NetworkAddress>> hostnameToAddresses;
};

class SimExternalConnection final : public IConnection, public ReferenceCounted<SimExternalConnection> {
boost::asio::ip::tcp::socket socket;
SimExternalConnection(boost::asio::ip::tcp::socket&& socket);
@ -51,10 +51,12 @@ struct TSSMetrics : ReferenceCounted<TSSMetrics>, NonCopyable {
ContinuousSample<double> SSgetValueLatency;
ContinuousSample<double> SSgetKeyLatency;
ContinuousSample<double> SSgetKeyValuesLatency;
ContinuousSample<double> SSgetKeyValuesAndFlatMapLatency;

ContinuousSample<double> TSSgetValueLatency;
ContinuousSample<double> TSSgetKeyLatency;
ContinuousSample<double> TSSgetKeyValuesLatency;
ContinuousSample<double> TSSgetKeyValuesAndFlatMapLatency;

std::unordered_map<int, uint64_t> ssErrorsByCode;
std::unordered_map<int, uint64_t> tssErrorsByCode;
@ -103,7 +105,8 @@ struct TSSMetrics : ReferenceCounted<TSSMetrics>, NonCopyable {
: cc("TSSClientMetrics"), requests("Requests", cc), streamComparisons("StreamComparisons", cc),
ssErrors("SSErrors", cc), tssErrors("TSSErrors", cc), tssTimeouts("TSSTimeouts", cc),
mismatches("Mismatches", cc), SSgetValueLatency(1000), SSgetKeyLatency(1000), SSgetKeyValuesLatency(1000),
TSSgetValueLatency(1000), TSSgetKeyLatency(1000), TSSgetKeyValuesLatency(1000) {}
SSgetKeyValuesAndFlatMapLatency(1000), TSSgetValueLatency(1000), TSSgetKeyLatency(1000),
TSSgetKeyValuesLatency(1000), TSSgetKeyValuesAndFlatMapLatency(1000) {}
};

template <class Rep>
@ -662,8 +662,8 @@ public:
}

// stream.tryGetReply( request )
// Unreliable at most once delivery: Either delivers request and returns a reply, or returns failure
// (Optional<T>()) eventually. If a reply is returned, request was delivered exactly once. If cancelled or returns
// Unreliable at most once delivery: Either delivers request and returns a reply, or returns an error eventually.
// If a reply is returned, request was delivered exactly once. If cancelled or returns
// failure, request was or will be delivered zero or one times. The caller must be capable of retrying if this
// request returns failure
template <class X>
@ -25,9 +25,8 @@
#include "flow/actorcompiler.h"

ACTOR Future<Void> disableConnectionFailuresAfter(double time, std::string context) {
wait(delay(time));

if (g_network->isSimulated()) {
wait(delayUntil(time));
g_simulator.connectionFailuresDisableDuration = 1e6;
g_simulator.speedUpSimulation = true;
TraceEvent(SevWarnAlways, ("DisableConnectionFailures_" + context).c_str());
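The change above is subtle: wait(delay(t)) resumes after a relative duration, while wait(delayUntil(t)) resumes once the absolute simulation clock reaches t, and the body is now gated on g_network->isSimulated(). A schematic contrast (flow actor fragments, valid only inside an ACTOR function):

    wait(delay(5.0));      // resumes 5 seconds from now
    wait(delayUntil(5.0)); // resumes when now() reaches t = 5 seconds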
@ -946,8 +946,18 @@ public:
Future<Reference<IUDPSocket>> createUDPSocket(NetworkAddress toAddr) override;
Future<Reference<IUDPSocket>> createUDPSocket(bool isV6 = false) override;

// Add a <hostname, vector<NetworkAddress>> pair to mock DNS in simulation.
void addMockTCPEndpoint(const std::string& host,
const std::string& service,
const std::vector<NetworkAddress>& addresses) override {
mockDNS.addMockTCPEndpoint(host, service, addresses);
}
Future<std::vector<NetworkAddress>> resolveTCPEndpoint(const std::string& host,
const std::string& service) override {
// If a <hostname, vector<NetworkAddress>> pair was injected to mock DNS, use it.
if (mockDNS.findMockTCPEndpoint(host, service)) {
return mockDNS.getTCPEndpoint(host, service);
}
return SimExternalConnection::resolveTCPEndpoint(host, service);
}
ACTOR static Future<Reference<IConnection>> onConnect(Future<Void> ready, Reference<Sim2Conn> conn) {
@ -2136,6 +2146,9 @@ public:
bool yielded;
int yield_limit; // how many more times yield may return false before next returning true

private:
MockDNS mockDNS;

#ifdef ENABLE_SAMPLING
ActorLineageSet actorLineageSet;
#endif
|
||||
m.param1.startsWith(applyMutationsAddPrefixRange.begin) ||
|
||||
m.param1.startsWith(applyMutationsRemovePrefixRange.begin) || m.param1.startsWith(tagLocalityListPrefix) ||
|
||||
m.param1.startsWith(serverTagHistoryPrefix) ||
|
||||
m.param1.startsWith(testOnlyTxnStateStorePrefixRange.begin)) {
|
||||
m.param1.startsWith(testOnlyTxnStateStorePrefixRange.begin) || m.param1 == clusterIdKey) {
|
||||
|
||||
txnStateStore->set(KeyValueRef(m.param1, m.param2));
|
||||
}
|
||||
|
@ -40,7 +40,7 @@
|
||||
|
||||
// TODO add comments + documentation
|
||||
void handleClientBlobRange(KeyRangeMap<bool>* knownBlobRanges,
|
||||
Arena ar,
|
||||
Arena& ar,
|
||||
VectorRef<KeyRangeRef>* rangesToAdd,
|
||||
VectorRef<KeyRangeRef>* rangesToRemove,
|
||||
KeyRef rangeStart,
|
||||
@ -81,7 +81,7 @@ void handleClientBlobRange(KeyRangeMap<bool>* knownBlobRanges,
|
||||
|
||||
void updateClientBlobRanges(KeyRangeMap<bool>* knownBlobRanges,
|
||||
RangeResult dbBlobRanges,
|
||||
Arena ar,
|
||||
Arena& ar,
|
||||
VectorRef<KeyRangeRef>* rangesToAdd,
|
||||
VectorRef<KeyRangeRef>* rangesToRemove) {
|
||||
if (BM_DEBUG) {
|
||||
@ -1169,7 +1169,7 @@ ACTOR Future<Void> blobManager(BlobManagerInterface bmInterf,
|
||||
// DB has [A - B) and [C - D). They should show up in knownBlobRanges, and [B - C) should be in removed.
|
||||
// DB has [B - C). It should show up in knownBlobRanges, [B - C) should be in added, and [A - B) and [C - D) should
|
||||
// be in removed.
|
||||
TEST_CASE("/blobmanager/updateranges") {
|
||||
TEST_CASE(":/blobmanager/updateranges") {
|
||||
KeyRangeMap<bool> knownBlobRanges(false, normalKeys.end);
|
||||
Arena ar;
|
||||
|
||||
|
@ -1054,8 +1054,8 @@ static Version doGranuleRollback(Reference<GranuleMetadata> metadata,
ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
Reference<GranuleMetadata> metadata,
Future<GranuleStartState> assignFuture) {
state PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> oldChangeFeedStream;
state PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>> changeFeedStream;
state Reference<ChangeFeedData> oldChangeFeedStream = makeReference<ChangeFeedData>();
state Reference<ChangeFeedData> changeFeedStream = makeReference<ChangeFeedData>();
state Future<BlobFileIndex> inFlightBlobSnapshot;
state std::deque<InFlightDeltaFile> inFlightDeltaFiles;
state Future<Void> oldChangeFeedFuture;
@ -1220,7 +1220,8 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,

state Standalone<VectorRef<MutationsAndVersionRef>> mutations;
if (readOldChangeFeed) {
Standalone<VectorRef<MutationsAndVersionRef>> oldMutations = waitNext(oldChangeFeedStream.getFuture());
Standalone<VectorRef<MutationsAndVersionRef>> oldMutations =
waitNext(oldChangeFeedStream->mutations.getFuture());
// TODO filter old mutations won't be necessary, SS does it already
if (filterOldMutations(
metadata->keyRange, &oldMutations, &mutations, startState.changeFeedStartVersion)) {
@ -1235,10 +1236,11 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,

// now that old change feed is cancelled, clear out any mutations still in buffer by replacing
// promise stream
oldChangeFeedStream = PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>();
oldChangeFeedStream = makeReference<ChangeFeedData>();
}
} else {
Standalone<VectorRef<MutationsAndVersionRef>> newMutations = waitNext(changeFeedStream.getFuture());
Standalone<VectorRef<MutationsAndVersionRef>> newMutations =
waitNext(changeFeedStream->mutations.getFuture());
mutations = newMutations;
}

@ -1504,8 +1506,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,

// reset change feeds to cfRollbackVersion
if (readOldChangeFeed) {
oldChangeFeedStream =
PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>();
oldChangeFeedStream = makeReference<ChangeFeedData>();
oldChangeFeedFuture = bwData->db->getChangeFeedStream(
oldChangeFeedStream,
oldCFKey.get(),
@ -1513,7 +1514,7 @@ ACTOR Future<Void> blobGranuleUpdateFiles(Reference<BlobWorkerData> bwData,
MAX_VERSION,
startState.parentGranule.get().first /*metadata->keyRange*/);
} else {
changeFeedStream = PromiseStream<Standalone<VectorRef<MutationsAndVersionRef>>>();
changeFeedStream = makeReference<ChangeFeedData>();
changeFeedFuture = bwData->db->getChangeFeedStream(changeFeedStream,
cfKey,
cfRollbackVersion + 1,
@ -211,6 +211,7 @@ set(FDBSERVER_SRCS
workloads/MemoryLifetime.actor.cpp
workloads/MetricLogging.actor.cpp
workloads/MutationLogReaderCorrectness.actor.cpp
workloads/IndexPrefetchDemo.actor.cpp
workloads/ParallelRestore.actor.cpp
workloads/Performance.actor.cpp
workloads/Ping.actor.cpp
@ -904,6 +904,10 @@ public:
}
}

if (resultSet.size() < required) {
throw no_more_servers();
}

// Continue adding workers to the result set until we reach the desired number of workers
for (auto workerIter = fitness_workers.begin();
workerIter != fitness_workers.end() && resultSet.size() < desired;
@ -922,7 +926,7 @@ public:
}
}

ASSERT(resultSet.size() <= desired);
ASSERT(resultSet.size() >= required && resultSet.size() <= desired);

for (auto& result : resultSet) {
id_used[result.interf.locality.processId()]++;
@ -1210,40 +1214,44 @@ public:
exclusionWorkerIds);

if (g_network->isSimulated()) {
auto testWorkers = getWorkersForTlogsBackup(
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
RoleFitness fitness(workers, ProcessClass::TLog, id_used);
try {
auto testWorkers = getWorkersForTlogsBackup(
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
RoleFitness fitness(workers, ProcessClass::TLog, id_used);

std::map<Optional<Standalone<StringRef>>, int> field_count;
std::set<Optional<Standalone<StringRef>>> zones;
for (auto& worker : testWorkers) {
if (!zones.count(worker.interf.locality.zoneId())) {
field_count[worker.interf.locality.get(pa1->attributeKey())]++;
zones.insert(worker.interf.locality.zoneId());
std::map<Optional<Standalone<StringRef>>, int> field_count;
std::set<Optional<Standalone<StringRef>>> zones;
for (auto& worker : testWorkers) {
if (!zones.count(worker.interf.locality.zoneId())) {
field_count[worker.interf.locality.get(pa1->attributeKey())]++;
zones.insert(worker.interf.locality.zoneId());
}
}
}
// backup recruitment is not required to use degraded processes that have better fitness
// so we cannot compare degraded between the two methods
testFitness.degraded = fitness.degraded;
// backup recruitment is not required to use degraded processes that have better fitness
// so we cannot compare degraded between the two methods
testFitness.degraded = fitness.degraded;

int minField = 100;
int minField = 100;

for (auto& f : field_count) {
minField = std::min(minField, f.second);
}

if (fitness > testFitness && minField > 1) {
for (auto& w : testWorkers) {
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
for (auto& f : field_count) {
minField = std::min(minField, f.second);
}
for (auto& w : workers) {
TraceEvent("RealTLogs").detail("Interf", w.interf.address());

if (fitness > testFitness && minField > 1) {
for (auto& w : testWorkers) {
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
}
for (auto& w : workers) {
TraceEvent("RealTLogs").detail("Interf", w.interf.address());
}
TraceEvent("FitnessCompare")
.detail("TestF", testFitness.toString())
.detail("RealF", fitness.toString());
ASSERT(false);
}
TraceEvent("FitnessCompare")
.detail("TestF", testFitness.toString())
.detail("RealF", fitness.toString());
ASSERT(false);
} catch (Error& e) {
ASSERT(false); // Simulation only validation should not throw errors
}
}

@ -1263,25 +1271,29 @@ public:
getWorkersForTlogsSimple(conf, required, desired, id_used, checkStable, dcIds, exclusionWorkerIds);

if (g_network->isSimulated()) {
auto testWorkers = getWorkersForTlogsBackup(
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
RoleFitness fitness(workers, ProcessClass::TLog, id_used);
// backup recruitment is not required to use degraded processes that have better fitness
// so we cannot compare degraded between the two methods
testFitness.degraded = fitness.degraded;
try {
auto testWorkers = getWorkersForTlogsBackup(
conf, required, desired, policy, testUsed, checkStable, dcIds, exclusionWorkerIds);
RoleFitness testFitness(testWorkers, ProcessClass::TLog, testUsed);
RoleFitness fitness(workers, ProcessClass::TLog, id_used);
// backup recruitment is not required to use degraded processes that have better fitness
// so we cannot compare degraded between the two methods
testFitness.degraded = fitness.degraded;

if (fitness > testFitness) {
for (auto& w : testWorkers) {
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
if (fitness > testFitness) {
for (auto& w : testWorkers) {
TraceEvent("TestTLogs").detail("Interf", w.interf.address());
}
for (auto& w : workers) {
TraceEvent("RealTLogs").detail("Interf", w.interf.address());
}
TraceEvent("FitnessCompare")
.detail("TestF", testFitness.toString())
.detail("RealF", fitness.toString());
ASSERT(false);
}
for (auto& w : workers) {
TraceEvent("RealTLogs").detail("Interf", w.interf.address());
}
TraceEvent("FitnessCompare")
.detail("TestF", testFitness.toString())
.detail("RealF", fitness.toString());
ASSERT(false);
} catch (Error& e) {
ASSERT(false); // Simulation only validation should not throw errors
}
}
return workers;
@ -1659,7 +1671,8 @@ public:
}

ErrorOr<RecruitFromConfigurationReply> findWorkersForConfigurationFromDC(RecruitFromConfigurationRequest const& req,
Optional<Key> dcId) {
Optional<Key> dcId,
bool checkGoodRecruitment) {
RecruitFromConfigurationReply result;
std::map<Optional<Standalone<StringRef>>, int> id_used;
updateKnownIds(&id_used);
@ -1770,7 +1783,7 @@ public:
[](const WorkerDetails& w) { return w.interf; });
}

if (!goodRecruitmentTime.isReady() &&
if (!goodRecruitmentTime.isReady() && checkGoodRecruitment &&
(RoleFitness(SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog)
.betterCount(RoleFitness(tlogs, ProcessClass::TLog, id_used)) ||
(region.satelliteTLogReplicationFactor > 0 && req.configuration.usableRegions > 1 &&
@ -1796,7 +1809,8 @@ public:
return result;
}

RecruitFromConfigurationReply findWorkersForConfigurationDispatch(RecruitFromConfigurationRequest const& req) {
RecruitFromConfigurationReply findWorkersForConfigurationDispatch(RecruitFromConfigurationRequest const& req,
bool checkGoodRecruitment) {
if (req.configuration.regions.size() > 1) {
std::vector<RegionInfo> regions = req.configuration.regions;
if (regions[0].priority == regions[1].priority && regions[1].dcId == clusterControllerDcId.get()) {
@ -1833,7 +1847,7 @@ public:

bool setPrimaryDesired = false;
try {
auto reply = findWorkersForConfigurationFromDC(req, regions[0].dcId);
auto reply = findWorkersForConfigurationFromDC(req, regions[0].dcId, checkGoodRecruitment);
setPrimaryDesired = true;
std::vector<Optional<Key>> dcPriority;
dcPriority.push_back(regions[0].dcId);
@ -1850,7 +1864,8 @@ public:
.detail("RecruitedTxnSystemDcId", regions[0].dcId);
throw no_more_servers();
} catch (Error& e) {
if (!goodRemoteRecruitmentTime.isReady() && regions[1].dcId != clusterControllerDcId.get()) {
if (!goodRemoteRecruitmentTime.isReady() && regions[1].dcId != clusterControllerDcId.get() &&
checkGoodRecruitment) {
throw operation_failed();
}

@ -1860,7 +1875,7 @@ public:
TraceEvent(SevWarn, "AttemptingRecruitmentInRemoteDc", id)
.detail("SetPrimaryDesired", setPrimaryDesired)
.error(e);
auto reply = findWorkersForConfigurationFromDC(req, regions[1].dcId);
auto reply = findWorkersForConfigurationFromDC(req, regions[1].dcId, checkGoodRecruitment);
if (!setPrimaryDesired) {
std::vector<Optional<Key>> dcPriority;
dcPriority.push_back(regions[1].dcId);
@ -1878,7 +1893,8 @@ public:
std::vector<Optional<Key>> dcPriority;
dcPriority.push_back(req.configuration.regions[0].dcId);
desiredDcIds.set(dcPriority);
auto reply = findWorkersForConfigurationFromDC(req, req.configuration.regions[0].dcId);
auto reply =
findWorkersForConfigurationFromDC(req, req.configuration.regions[0].dcId, checkGoodRecruitment);
if (reply.isError()) {
throw reply.getError();
} else if (req.configuration.regions[0].dcId == clusterControllerDcId.get()) {
@ -2047,7 +2063,7 @@ public:
.detail("DesiredResolvers", req.configuration.getDesiredResolvers())
.detail("ActualResolvers", result.resolvers.size());

if (!goodRecruitmentTime.isReady() &&
if (!goodRecruitmentTime.isReady() && checkGoodRecruitment &&
(RoleFitness(
SERVER_KNOBS->EXPECTED_TLOG_FITNESS, req.configuration.getDesiredLogs(), ProcessClass::TLog)
.betterCount(RoleFitness(tlogs, ProcessClass::TLog, id_used)) ||
@ -2113,84 +2129,89 @@ public:
}

RecruitFromConfigurationReply findWorkersForConfiguration(RecruitFromConfigurationRequest const& req) {
RecruitFromConfigurationReply rep = findWorkersForConfigurationDispatch(req);
RecruitFromConfigurationReply rep = findWorkersForConfigurationDispatch(req, true);
if (g_network->isSimulated()) {
// FIXME: The logic to pick a satellite in a remote region is not
// deterministic and can therefore break this nondeterminism check.
// Since satellites will generally be in the primary region,
// disable the determinism check for remote region satellites.
bool remoteDCUsedAsSatellite = false;
if (req.configuration.regions.size() > 1) {
auto [region, remoteRegion] =
getPrimaryAndRemoteRegion(req.configuration.regions, req.configuration.regions[0].dcId);
for (const auto& satellite : region.satellites) {
if (satellite.dcId == remoteRegion.dcId) {
remoteDCUsedAsSatellite = true;
try {
// FIXME: The logic to pick a satellite in a remote region is not
// deterministic and can therefore break this nondeterminism check.
// Since satellites will generally be in the primary region,
// disable the determinism check for remote region satellites.
bool remoteDCUsedAsSatellite = false;
if (req.configuration.regions.size() > 1) {
auto [region, remoteRegion] =
getPrimaryAndRemoteRegion(req.configuration.regions, req.configuration.regions[0].dcId);
for (const auto& satellite : region.satellites) {
if (satellite.dcId == remoteRegion.dcId) {
remoteDCUsedAsSatellite = true;
}
}
}
}
}
if (!remoteDCUsedAsSatellite) {
RecruitFromConfigurationReply compare = findWorkersForConfigurationDispatch(req);
if (!remoteDCUsedAsSatellite) {
RecruitFromConfigurationReply compare = findWorkersForConfigurationDispatch(req, false);

std::map<Optional<Standalone<StringRef>>, int> firstUsed;
std::map<Optional<Standalone<StringRef>>, int> secondUsed;
updateKnownIds(&firstUsed);
updateKnownIds(&secondUsed);
std::map<Optional<Standalone<StringRef>>, int> firstUsed;
std::map<Optional<Standalone<StringRef>>, int> secondUsed;
updateKnownIds(&firstUsed);
updateKnownIds(&secondUsed);

// auto mworker = id_worker.find(masterProcessId);
//TraceEvent("CompareAddressesMaster")
//     .detail("Master",
//     mworker != id_worker.end() ? mworker->second.details.interf.address() : NetworkAddress());
// auto mworker = id_worker.find(masterProcessId);
//TraceEvent("CompareAddressesMaster")
//     .detail("Master",
//     mworker != id_worker.end() ? mworker->second.details.interf.address() :
//     NetworkAddress());

updateIdUsed(rep.tLogs, firstUsed);
updateIdUsed(compare.tLogs, secondUsed);
compareWorkers(
req.configuration, rep.tLogs, firstUsed, compare.tLogs, secondUsed, ProcessClass::TLog, "TLog");
updateIdUsed(rep.satelliteTLogs, firstUsed);
updateIdUsed(compare.satelliteTLogs, secondUsed);
compareWorkers(req.configuration,
rep.satelliteTLogs,
firstUsed,
compare.satelliteTLogs,
secondUsed,
ProcessClass::TLog,
"Satellite");
updateIdUsed(rep.commitProxies, firstUsed);
updateIdUsed(compare.commitProxies, secondUsed);
updateIdUsed(rep.grvProxies, firstUsed);
updateIdUsed(compare.grvProxies, secondUsed);
updateIdUsed(rep.resolvers, firstUsed);
updateIdUsed(compare.resolvers, secondUsed);
compareWorkers(req.configuration,
rep.commitProxies,
firstUsed,
compare.commitProxies,
secondUsed,
ProcessClass::CommitProxy,
"CommitProxy");
compareWorkers(req.configuration,
rep.grvProxies,
firstUsed,
compare.grvProxies,
secondUsed,
ProcessClass::GrvProxy,
"GrvProxy");
compareWorkers(req.configuration,
rep.resolvers,
firstUsed,
compare.resolvers,
secondUsed,
ProcessClass::Resolver,
"Resolver");
updateIdUsed(rep.backupWorkers, firstUsed);
updateIdUsed(compare.backupWorkers, secondUsed);
compareWorkers(req.configuration,
rep.backupWorkers,
firstUsed,
compare.backupWorkers,
secondUsed,
ProcessClass::Backup,
"Backup");
updateIdUsed(rep.tLogs, firstUsed);
updateIdUsed(compare.tLogs, secondUsed);
compareWorkers(
req.configuration, rep.tLogs, firstUsed, compare.tLogs, secondUsed, ProcessClass::TLog, "TLog");
updateIdUsed(rep.satelliteTLogs, firstUsed);
updateIdUsed(compare.satelliteTLogs, secondUsed);
compareWorkers(req.configuration,
rep.satelliteTLogs,
firstUsed,
compare.satelliteTLogs,
secondUsed,
ProcessClass::TLog,
"Satellite");
updateIdUsed(rep.commitProxies, firstUsed);
updateIdUsed(compare.commitProxies, secondUsed);
updateIdUsed(rep.grvProxies, firstUsed);
updateIdUsed(compare.grvProxies, secondUsed);
updateIdUsed(rep.resolvers, firstUsed);
updateIdUsed(compare.resolvers, secondUsed);
compareWorkers(req.configuration,
rep.commitProxies,
firstUsed,
compare.commitProxies,
secondUsed,
ProcessClass::CommitProxy,
"CommitProxy");
compareWorkers(req.configuration,
rep.grvProxies,
firstUsed,
compare.grvProxies,
secondUsed,
ProcessClass::GrvProxy,
"GrvProxy");
compareWorkers(req.configuration,
rep.resolvers,
firstUsed,
compare.resolvers,
secondUsed,
ProcessClass::Resolver,
"Resolver");
updateIdUsed(rep.backupWorkers, firstUsed);
updateIdUsed(compare.backupWorkers, secondUsed);
compareWorkers(req.configuration,
rep.backupWorkers,
firstUsed,
compare.backupWorkers,
secondUsed,
ProcessClass::Backup,
"Backup");
}
} catch (Error& e) {
ASSERT(false); // Simulation only validation should not throw errors
}
}
return rep;
@ -3417,7 +3438,7 @@ ACTOR Future<Void> clusterWatchDatabase(ClusterControllerData* cluster, ClusterC
TEST(true); // clusterWatchDatabase() master failed
TraceEvent(SevWarn, "DetectedFailedMaster", cluster->id).detail("OldMaster", iMaster.id());
} else {
TEST(true); // clusterWatchDatabas() !newMaster.present()
TEST(true); // clusterWatchDatabase() !newMaster.present()
wait(delay(SERVER_KNOBS->MASTER_SPIN_DELAY));
}
} catch (Error& e) {
@ -4044,7 +4065,8 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co
.detail("GrvProxies", req.grvProxies.size())
.detail("RecoveryCount", req.recoveryCount)
.detail("Stalled", req.recoveryStalled)
.detail("OldestBackupEpoch", req.logSystemConfig.oldestBackupEpoch);
.detail("OldestBackupEpoch", req.logSystemConfig.oldestBackupEpoch)
.detail("ClusterId", req.clusterId);

// make sure the request comes from an active database
auto db = &self->db;
@ -4128,6 +4150,11 @@ void clusterRegisterMaster(ClusterControllerData* self, RegisterMasterRequest co
dbInfo.recoveryCount = req.recoveryCount;
}

if (dbInfo.clusterId != req.clusterId) {
isChanged = true;
dbInfo.clusterId = req.clusterId;
}

if (isChanged) {
dbInfo.id = deterministicRandom()->randomUniqueID();
dbInfo.infoGeneration = ++self->db.dbInfoCount;
@ -4719,6 +4746,48 @@ ACTOR Future<Void> monitorGlobalConfig(ClusterControllerData::DBInfo* db) {
}
}

ACTOR Future<Void> monitorClientLibChangeCounter(ClusterControllerData::DBInfo* db) {
state ClientDBInfo clientInfo;
state ReadYourWritesTransaction tr;
state Future<Void> clientLibChangeFuture;

loop {
tr = ReadYourWritesTransaction(db->db);
loop {
try {
tr.setOption(FDBTransactionOptions::READ_SYSTEM_KEYS);
tr.setOption(FDBTransactionOptions::READ_LOCK_AWARE);

Optional<Value> counterVal = wait(tr.get(clientLibChangeCounterKey));
if (counterVal.present() && counterVal.get().size() == sizeof(uint64_t)) {
uint64_t changeCounter = *reinterpret_cast<const uint64_t*>(counterVal.get().begin());

clientInfo = db->serverInfo->get().client;
if (changeCounter != clientInfo.clientLibChangeCounter) {
TraceEvent("ClientLibChangeCounterChanged").detail("Value", changeCounter);
clientInfo.id = deterministicRandom()->randomUniqueID();
clientInfo.clientLibChangeCounter = changeCounter;
db->clientInfo->set(clientInfo);

ServerDBInfo serverInfo = db->serverInfo->get();
serverInfo.id = deterministicRandom()->randomUniqueID();
serverInfo.infoGeneration = ++db->dbInfoCount;
serverInfo.client = clientInfo;
db->serverInfo->set(serverInfo);
}
}

clientLibChangeFuture = tr.watch(clientLibChangeCounterKey);
wait(tr.commit());
wait(clientLibChangeFuture);
break;
} catch (Error& e) {
wait(tr.onError(e));
}
}
}
}
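One detail worth calling out in this new actor (an aside, not part of the diff): tr.watch() only arms the watch once the transaction commits successfully, which is why the commit is awaited before waiting on clientLibChangeFuture; after the watch fires or an error is retried via tr.onError(), the outer loop re-reads the counter with a fresh transaction.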

ACTOR Future<Void> updatedChangingDatacenters(ClusterControllerData* self) {
// do not change the cluster controller until all the processes have had a chance to register
wait(delay(SERVER_KNOBS->WAIT_FOR_GOOD_RECRUITMENT_DELAY));
@ -5416,6 +5485,7 @@ ACTOR Future<Void> clusterControllerCore(ClusterControllerFullInterface interf,
self.addActor.send(monitorProcessClasses(&self));
self.addActor.send(monitorServerInfoConfig(&self.db));
self.addActor.send(monitorGlobalConfig(&self.db));
self.addActor.send(monitorClientLibChangeCounter(&self.db));
self.addActor.send(updatedChangingDatacenters(&self));
self.addActor.send(updatedChangedDatacenters(&self));
self.addActor.send(updateDatacenterVersionDifference(&self));

@ -1729,7 +1729,7 @@ ACTOR Future<Void> proxySnapCreate(ProxySnapRequest snapReq, ProxyCommitData* co
// FIXME: logAntiQuorum not supported, remove it later,
// In version 2, we probably don't need this limitation, but this needs to be tested.
if (logAntiQuorum > 0) {
TraceEvent("SnapCommitProxy_LogAnitQuorumNotSupported")
TraceEvent("SnapCommitProxy_LogAntiQuorumNotSupported")
.detail("SnapPayload", snapReq.snapPayload)
.detail("SnapUID", snapReq.snapUID);
throw snap_log_anti_quorum_unsupported();
Some files were not shown because too many files have changed in this diff.