Add debug logs.

kishorenc 2020-08-27 07:02:51 +05:30
parent 5b55b071b2
commit 1e5dd2bdbc
2 changed files with 26 additions and 6 deletions

View File

@@ -211,6 +211,9 @@ nlohmann::json Collection::add_many(std::vector<std::string>& json_lines) {
             index_res["error"] = doc_seq_id_op.error();
             index_res["success"] = false;
+            // FIXME:
+            LOG(INFO) << "Document parsing error, bad json_line is: " << json_line;
             // NOTE: we overwrite the input json_lines with result to avoid memory pressure
             json_lines[i] = index_res.dump();
             continue;
@@ -733,6 +736,9 @@ Option<nlohmann::json> Collection::search(const std::string & query, const std::
     size_t total_found = 0;
     spp::sparse_hash_set<uint64_t> groups_processed; // used to calculate total_found for grouped query
+    // FIXME:
+    LOG(INFO) << "Num indices used for querying: " << indices.size();
     // send data to individual index threads
     size_t index_id = 0;
     for(Index* index: indices) {

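The NOTE line in the first hunk carries the one design decision here: add_many writes each per-document result back into the json_lines slot it came from, so a large import batch never needs a separate results vector. A minimal sketch of that overwrite-in-place pattern, with a hypothetical process_line standing in for the real indexing step:

#include <string>
#include <vector>
#include <nlohmann/json.hpp>

// Hypothetical stand-in for the real per-document indexing step.
nlohmann::json process_line(const std::string& json_line) {
    nlohmann::json index_res;
    index_res["success"] = nlohmann::json::accept(json_line); // true iff the line parses as JSON
    return index_res;
}

// Overwrite in place: each input line is replaced by its own result, so peak
// memory stays at roughly one copy of the batch instead of input + results.
void add_many_sketch(std::vector<std::string>& json_lines) {
    for(size_t i = 0; i < json_lines.size(); i++) {
        json_lines[i] = process_line(json_lines[i]).dump();
    }
}

The trade-off is that the caller loses the original input after the call, which is fine here because the handler only forwards the per-line results to the response stream.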
View File

@@ -589,6 +589,9 @@ bool post_import_documents(http_req& req, http_res& res) {
     if(req.body_index == 0) {
         // will log for every major chunk of request body
         LOG(INFO) << "Import, req.body.size=" << req.body.size() << ", batch_size=" << IMPORT_BATCH_SIZE;
+        // FIXME:
+        int nminusten_pos = std::max(0, int(req.body.size())-10);
+        LOG(INFO) << "Last 10 chars: " << req.body.substr(nminusten_pos);
     }
     CollectionManager & collectionManager = CollectionManager::get_instance();
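A note on the new nminusten_pos line above: std::string::substr throws std::out_of_range when its start position is past the end of the string, so the std::max(0, ...) clamp keeps the debug log safe for bodies shorter than 10 bytes. An equivalent clamp without the int cast (my naming, not the commit's):

#include <string>

// Safe "last 10 chars" for logging: clamp the start offset so substr never
// receives a position past the end of the buffer.
std::string tail10(const std::string& body) {
    size_t start = body.size() > 10 ? body.size() - 10 : 0;
    return body.substr(start);
}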
@@ -601,12 +604,16 @@ bool post_import_documents(http_req& req, http_res& res) {
         return false;
     }
-    //LOG(INFO) << "Import, " << "req.body_index=" << req.body_index << ", req.body.size: " << req.body.size();
+    // FIXME:
+    LOG(INFO) << "Import, " << "req.body_index=" << req.body_index << ", req.body.size: " << req.body.size();
     //LOG(INFO) << "req body %: " << (float(req.body_index)/req.body.size())*100;
     std::vector<std::string> json_lines;
     req.body_index = StringUtils::split(req.body, json_lines, "\n", false, req.body_index, IMPORT_BATCH_SIZE);
+    // FIXME:
+    LOG(INFO) << "json_lines.size before: " << json_lines.size();
     bool stream_proceed = false; // default state
     if(req.body_index == req.body.size()) {
@@ -615,6 +622,8 @@ bool post_import_documents(http_req& req, http_res& res) {
         stream_proceed = true;
         if(req.last_chunk_aggregate) {
+            // FIXME:
+            LOG(INFO) << "req.last_chunk_aggregate is true";
            req.body = "";
         } else {
             if(!json_lines.empty()) {
@@ -639,30 +648,35 @@ bool post_import_documents(http_req& req, http_res& res) {
         }
     }
+    // FIXME:
+    LOG(INFO) << "json_lines.size after: " << json_lines.size();
     //LOG(INFO) << "json_lines.size: " << json_lines.size() << ", req.stream_state: " << req.stream_state;
     // When only one partial record arrives as a chunk, an empty body is pushed to response stream
     bool single_partial_record_body = (json_lines.empty() && !req.body.empty());
-    std::stringstream ss;
+    std::stringstream response_stream;
+    // FIXME:
+    LOG(INFO) << "single_partial_record_body: " << single_partial_record_body;
     if(!single_partial_record_body) {
         nlohmann::json json_res = collection->add_many(json_lines);
         //const std::string& import_summary_json = json_res.dump();
-        //ss << import_summary_json << "\n";
+        //response_stream << import_summary_json << "\n";
         for (size_t i = 0; i < json_lines.size(); i++) {
             if(i == json_lines.size()-1 && req.body_index == req.body.size() && req.last_chunk_aggregate) {
                 // indicates last record of last batch
-                ss << json_lines[i];
+                response_stream << json_lines[i];
             } else {
-                ss << json_lines[i] << "\n";
+                response_stream << json_lines[i] << "\n";
             }
         }
     }
     res.content_type_header = "text/plain; charset=utf8";
     res.status_code = 200;
-    res.body += ss.str();
+    res.body += response_stream.str();
     if(stream_proceed) {
         HttpServer::stream_response(req, res);
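For context on what the "before"/"after" size logs are tracking: the import handler consumes req.body in batches, and StringUtils::split returns the offset at which it stopped, so the next call resumes from there; req.body_index == req.body.size() then simply means "this chunk is fully consumed", which is what flips stream_proceed. A rough, self-contained sketch of such a resumable split, under a simplified signature of my own (the real StringUtils::split takes more parameters, as the diff shows) that also does not hold back a trailing partial record the way the importer does:

#include <cstddef>
#include <string>
#include <vector>

// Pull at most batch_size newline-delimited records from body, starting at
// offset `start`; return the offset where splitting stopped so that the
// caller can resume from it on the next chunk.
size_t split_resumable(const std::string& body, std::vector<std::string>& out,
                       size_t start, size_t batch_size) {
    size_t pos = start;
    while(out.size() < batch_size && pos < body.size()) {
        size_t nl = body.find('\n', pos);
        if(nl == std::string::npos) {
            out.push_back(body.substr(pos)); // last (possibly partial) record
            pos = body.size();
        } else {
            out.push_back(body.substr(pos, nl - pos));
            pos = nl + 1;
        }
    }
    return pos;
}

Calling this in a loop, feeding the returned offset back in as start, reproduces the batching that the json_lines.size logs above are instrumenting.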