Return 529 for overloaded search requests.

Kishore Nallan 2023-01-25 17:29:08 +05:30
parent f038e99809
commit c4355ed123
5 changed files with 29 additions and 2 deletions
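
The server-side change rejects a search that was shed due to overload with HTTP 529 and counts it in the app metrics; the in-code comment below notes that such a request "can be retried by client". As a rough illustration of that client behaviour, here is a minimal libcurl sketch that retries on 529 with a growing back-off. The URL, collection, API key and back-off values are illustrative assumptions and are not part of this commit.

// Hypothetical client-side sketch (not part of this commit): retry a Typesense
// search when the server answers 529 ("Site is overloaded"). The URL, collection,
// API key and back-off values are illustrative assumptions.
#include <curl/curl.h>
#include <chrono>
#include <iostream>
#include <string>
#include <thread>

// libcurl write callback: append the response body into a std::string
static size_t collect(char* data, size_t size, size_t nmemb, void* userp) {
    static_cast<std::string*>(userp)->append(data, size * nmemb);
    return size * nmemb;
}

int main() {
    curl_global_init(CURL_GLOBAL_DEFAULT);

    const std::string url = "http://localhost:8108/collections/books/documents/search"
                            "?q=harry&query_by=title";
    const int max_attempts = 5;

    for(int attempt = 1; attempt <= max_attempts; attempt++) {
        CURL* curl = curl_easy_init();
        if(curl == nullptr) {
            break;
        }

        std::string body;
        struct curl_slist* headers = curl_slist_append(nullptr, "X-TYPESENSE-API-KEY: xyz");

        curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
        curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, collect);
        curl_easy_setopt(curl, CURLOPT_WRITEDATA, &body);

        CURLcode rc = curl_easy_perform(curl);
        long status = 0;
        curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &status);
        curl_slist_free_all(headers);
        curl_easy_cleanup(curl);

        // anything other than 529 (on a clean transfer) is treated as final
        if(rc == CURLE_OK && status != 529) {
            std::cout << "status " << status << ": " << body << std::endl;
            break;
        }

        // 529 means the search was shed because the node is overloaded:
        // back off a little longer on each attempt, then retry
        std::this_thread::sleep_for(std::chrono::milliseconds(200 * attempt));
    }

    curl_global_cleanup();
    return 0;
}

A production client would cap total retry time and add jitter; the point is only that 529, unlike 4xx validation errors, signals a transient condition worth retrying.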

View File

@@ -50,6 +50,7 @@ public:
     static inline const std::string DOC_WRITE_LABEL = "write";
     static inline const std::string IMPORT_LABEL = "import";
     static inline const std::string DOC_DELETE_LABEL = "delete";
+    static inline const std::string OVERLOADED_LABEL = "overloaded";
     static const uint64_t METRICS_REFRESH_INTERVAL_MS = 10 * 1000;

View File

@@ -235,6 +235,9 @@ struct http_req {
     // timestamp from the underlying http library
     uint64_t conn_ts;
+    // was the request aborted *without a result* because of wait time exceeding search cutoff threshold?
+    bool overloaded = false;
     std::mutex mcv;
     std::condition_variable cv;
     bool ready;
@@ -301,7 +304,9 @@ struct http_req {
         bool log_slow_requests = config.get_log_slow_requests_time_ms() >= 0 &&
                                  int(ms_since_start) >= config.get_log_slow_requests_time_ms();
-        if(log_slow_searches || log_slow_requests) {
+        if(overloaded) {
+            AppMetrics::get_instance().increment_count(AppMetrics::OVERLOADED_LABEL, 1);
+        } else if(log_slow_searches || log_slow_requests) {
             // log slow request if logging is enabled
             std::string query_string = "?";
             bool is_multi_search_query = (path_without_query == "/multi_search");

View File

@@ -35,6 +35,8 @@ void AppMetrics::get(const std::string& rps_key, const std::string& latency_key,
     auto DOC_DELETE_RPS_KEY = DOC_DELETE_LABEL + "_" + rps_key;
     auto DOC_DELETE_LATENCY_KEY = DOC_DELETE_LABEL + "_" + latency_key;
+    auto OVERLOADED_RPS_KEY = OVERLOADED_LABEL + "_" + rps_key;
     result[rps_key] = nlohmann::json::object();
     for(const auto& kv: *counts) {
         if(kv.first == SEARCH_LABEL) {
@@ -53,6 +55,10 @@ void AppMetrics::get(const std::string& rps_key, const std::string& latency_key,
             result[DOC_DELETE_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000);
         }
+        else if(kv.first == OVERLOADED_LABEL) {
+            result[OVERLOADED_RPS_KEY] = double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000);
+        }
         else {
             result[rps_key][kv.first] = (double(kv.second) / (METRICS_REFRESH_INTERVAL_MS / 1000));
             total_counts += kv.second;
@@ -90,7 +96,8 @@ void AppMetrics::get(const std::string& rps_key, const std::string& latency_key,
     std::vector<std::string> keys_to_check = {
         SEARCH_RPS_KEY, IMPORT_RPS_KEY, DOC_WRITE_RPS_KEY, DOC_DELETE_RPS_KEY,
-        SEARCH_LATENCY_KEY, IMPORT_LATENCY_KEY, DOC_WRITE_LATENCY_KEY, DOC_DELETE_LATENCY_KEY
+        SEARCH_LATENCY_KEY, IMPORT_LATENCY_KEY, DOC_WRITE_LATENCY_KEY, DOC_DELETE_LATENCY_KEY,
+        OVERLOADED_RPS_KEY
     };
     for(auto& key: keys_to_check) {

View File

@@ -1299,6 +1299,12 @@ Option<nlohmann::json> Collection::search(const std::string & raw_query,
         total_found = search_params->all_result_ids_len;
     }
+    if(search_cutoff && total_found == 0) {
+        // this can happen if other requests stopped this request from being processed
+        // we should return an error so that request can be retried by client
+        return Option<nlohmann::json>(529, "Site is overloaded");
+    }
     if(match_score_index >= 0 && sort_fields_std[match_score_index].text_match_buckets > 1) {
         size_t num_buckets = sort_fields_std[match_score_index].text_match_buckets;
         const size_t max_kvs_bucketed = std::min<size_t>(DEFAULT_TOPSTER_SIZE, raw_result_kvs.size());

View File

@@ -392,6 +392,9 @@ bool get_search(const std::shared_ptr<http_req>& req, const std::shared_ptr<http
     if(!search_op.ok()) {
         res->set(search_op.code(), search_op.error());
+        if(search_op.code() == 529) {
+            req->overloaded = true;
+        }
         return false;
     }
@@ -562,6 +565,11 @@ bool post_multi_search(const std::shared_ptr<http_req>& req, const std::shared_p
         if(search_op.ok()) {
             response["results"].push_back(nlohmann::json::parse(results_json_str));
         } else {
+            if(search_op.code() == 529) {
+                res->set(search_op.code(), search_op.error());
+                req->overloaded = true;
+                return false;
+            }
             nlohmann::json err_res;
             err_res["error"] = search_op.error();
             err_res["code"] = search_op.code();