remove query hits aggregation & store (#1496)

Krunal Gandhi 2024-01-19 05:27:25 +00:00 committed by GitHub
parent 964774c1a3
commit 9565efefb3
9 changed files with 5 additions and 602 deletions

View File

@ -56,47 +56,6 @@ struct counter_event_t {
std::map<std::string, uint16_t> event_weight_map;
};
struct query_hits_count_t {
std::string query;
uint64_t timestamp;
std::string user_id;
uint64_t hits_count;
query_hits_count_t() = delete;
~query_hits_count_t() = default;
query_hits_count_t(std::string q, uint64_t ts, std::string uid, uint64_t count) {
query = q;
timestamp = ts;
user_id = uid;
hits_count = count;
}
query_hits_count_t &operator=(const query_hits_count_t &other) {
if (this != &other) {
query = other.query;
timestamp = other.timestamp;
user_id = other.user_id;
hits_count = other.hits_count;
}
return *this;
}
void to_json(nlohmann::json &obj) const {
obj["query"] = query;
obj["timestamp"] = timestamp;
obj["user_id"] = user_id;
obj["hits_count"] = hits_count;
}
};
struct query_hits_count_comp {
bool operator()(const query_hits_count_t& a, const query_hits_count_t& b) const {
return a.query < b.query;
}
};
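Because query_hits_count_comp compares entries by query text alone, each per-collection std::set holds at most one record per query, and re-adding a query replaces its previous count (this is what add_query_hits_count relies on further down). A minimal standalone sketch of that upsert behaviour, with the Store, locking and collection plumbing omitted and a plain aggregate standing in for the struct above:
// Standalone sketch: hits_record_t is a stand-in for query_hits_count_t above
// (constructors omitted), paired with the same query-only comparator.
#include <cstdint>
#include <iostream>
#include <set>
#include <string>
struct hits_record_t {
    std::string query;
    uint64_t timestamp;
    std::string user_id;
    uint64_t hits_count;
};
struct hits_record_comp {
    bool operator()(const hits_record_t& a, const hits_record_t& b) const {
        return a.query < b.query;   // identity of an entry is its query text only
    }
};
int main() {
    std::set<hits_record_t, hits_record_comp> hits;
    auto upsert = [&](hits_record_t rec) {
        auto it = hits.find(rec);          // lookup compares only rec.query
        if (it != hits.end()) {
            hits.erase(it);                // drop the stale count for this query
        }
        hits.emplace(std::move(rec));      // keep just the newest count
    };
    upsert({"cool", 1625365612, "1", 2});
    upsert({"cool", 1625365999, "1", 5}); // replaces the earlier "cool" entry
    for (const auto& rec : hits) {
        std::cout << rec.query << " -> " << rec.hits_count << "\n";  // cool -> 5
    }
}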
struct event_cache_t {
uint64_t last_update_time;
uint64_t count;
@ -159,11 +118,7 @@ private:
//query collection => events
std::unordered_map<std::string, std::vector<event_t>> query_collection_events;
//query collection => query hits count
std::unordered_map<std::string, std::set<query_hits_count_t, query_hits_count_comp>> query_collection_hits_count;
Store* store = nullptr;
Store* analytics_store = nullptr;
std::ofstream analytics_logs;
bool isRateLimitEnabled = false;
@ -180,9 +135,6 @@ private:
public:
static constexpr const char* ANALYTICS_RULE_PREFIX = "$AR";
static constexpr const char* CLICK_EVENT = "$CE";
static constexpr const char* QUERY_HITS_COUNT = "$QH";
static constexpr const char* PURCHASE_EVENT = "$PE";
static constexpr const char* POPULAR_QUERIES_TYPE = "popular_queries";
static constexpr const char* NOHITS_QUERIES_TYPE = "nohits_queries";
static constexpr const char* COUNTER_TYPE = "counter";
@ -215,8 +167,6 @@ public:
void dispose();
Store* get_analytics_store();
void persist_query_events(ReplicationState *raft_server, uint64_t prev_persistence_s);
std::unordered_map<std::string, QueryAnalytics*> get_popular_queries();
@ -232,23 +182,10 @@ public:
std::unordered_map<std::string, counter_event_t> get_popular_clicks();
Option<bool> write_events_to_store(nlohmann::json& event_jsons);
void add_nohits_query(const std::string& query_collection,
const std::string& query, bool live_query, const std::string& user_id);
std::unordered_map<std::string, QueryAnalytics*> get_nohits_queries();
void resetToggleRateLimit(bool toggle);
void add_query_hits_count(const std::string& query_collection, const std::string& query, const std::string& user_id,
uint64_t hits_count);
nlohmann::json get_query_hits_counts();
void checkEventsExpiry();
uint64_t get_current_time_us();
void resetAnalyticsStore();
};

View File

@ -169,10 +169,6 @@ bool put_upsert_analytics_rules(const std::shared_ptr<http_req>& req, const std:
bool del_analytics_rules(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res);
bool post_replicate_events(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res);
bool get_query_hits_counts(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res);
// Misc helpers
void get_collections_for_auth(std::map<std::string, std::string>& req_params, const std::string& body,

View File

@ -299,7 +299,7 @@ void AnalyticsManager::add_suggestion(const std::string &query_collection,
Option<bool> AnalyticsManager::add_event(const std::string& event_type, const std::string &query_collection, const std::string &query, const std::string &user_id,
std::string doc_id, uint64_t position, const std::string& client_ip) {
std::unique_lock lock(mutex);
if(analytics_store) {
if(analytics_logs.is_open()) {
auto &events_vec= query_collection_events[query_collection];
#ifdef TEST_BUILD
@ -369,25 +369,6 @@ void AnalyticsManager::add_nohits_query(const std::string &query_collection, con
}
}
void AnalyticsManager::add_query_hits_count(const std::string &query_collection, const std::string &query,
const std::string &user_id, uint64_t hits_count) {
std::unique_lock lock(mutex);
if(analytics_store) {
auto now_ts_useconds = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
auto &query_hits_count_set = query_collection_hits_count[query_collection];
query_hits_count_t queryHitsCount(query, now_ts_useconds, user_id, hits_count);
auto query_hits_count_set_it = query_hits_count_set.find(queryHitsCount);
if(query_hits_count_set_it != query_hits_count_set.end()) {
query_hits_count_set.erase(query_hits_count_set_it);
}
query_hits_count_set.emplace(queryHitsCount);
}
}
void AnalyticsManager::run(ReplicationState* raft_server) {
uint64_t prev_persistence_s = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
@ -412,7 +393,6 @@ void AnalyticsManager::run(ReplicationState* raft_server) {
continue;
}
checkEventsExpiry();
persist_query_events(raft_server, prev_persistence_s);
persist_events(raft_server, prev_persistence_s);
persist_popular_events(raft_server, prev_persistence_s);
@ -512,36 +492,7 @@ void AnalyticsManager::persist_query_events(ReplicationState *raft_server, uint6
void AnalyticsManager::persist_events(ReplicationState *raft_server, uint64_t prev_persistence_s) {
// lock is held by caller
nlohmann::json payload_json = nlohmann::json::array();
auto send_http_response = [&]()->bool {
if(payload_json.empty()) {
return false;
}
const std::string import_payload = payload_json.dump();
std::string leader_url = raft_server->get_leader_url();
if (!leader_url.empty()) {
const std::string &base_url = leader_url + "analytics";
std::string res;
const std::string &update_url = base_url + "/events/replicate";
std::map<std::string, std::string> res_headers;
long status_code = HttpClient::post_response(update_url, import_payload,
res, res_headers, {}, 10 * 1000, true);
if (status_code != 200) {
LOG(ERROR) << "Error while sending events to leader. "
<< "Status code: " << status_code << ", response: " << res;
return false;
}
return true;
}
return false;
};
for (const auto &events_collection_it: query_collection_events) {
for (const auto &events_collection_it: query_collection_events) {
for (const auto &event: events_collection_it.second) {
if(analytics_logs.is_open()) {
//store events to log file
@ -552,26 +503,6 @@ void AnalyticsManager::persist_events(ReplicationState *raft_server, uint64_t pr
}
}
}
query_collection_events.clear();
for (const auto &query_collection_hits_count_it: query_collection_hits_count) {
auto collection_id = CollectionManager::get_instance().get_collection(
query_collection_hits_count_it.first)->get_collection_id();
for (const auto &query_hits_count: query_collection_hits_count_it.second) {
// send http request
nlohmann::json query_hits_count_json;
query_hits_count.to_json(query_hits_count_json);
query_hits_count_json["collection_id"] = std::to_string(collection_id);
query_hits_count_json["event_type"] = "query_hits_counts";
payload_json.push_back(query_hits_count_json);
}
}
if(send_http_response()) {
query_collection_hits_count.clear();
}
payload_json.clear();
}
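For reference, the removed branch above flattened each collection's query-hit entries into a JSON array and POSTed it to the leader's analytics/events/replicate endpoint. Based on to_json() plus the two fields appended in the loop, one payload element looked roughly like the following; the field values here are illustrative, not taken from a real request.
// Illustrative reconstruction of the replication payload shape; values are made up.
#include <nlohmann/json.hpp>
#include <iostream>
int main() {
    nlohmann::json payload = nlohmann::json::array();
    nlohmann::json entry;
    entry["query"] = "cool";
    entry["timestamp"] = 1701851341000000;     // microseconds since epoch in the real path
    entry["user_id"] = "1";
    entry["hits_count"] = 2;
    entry["collection_id"] = "0";              // std::to_string(collection_id)
    entry["event_type"] = "query_hits_counts";
    payload.push_back(entry);
    // payload.dump() is the body that send_http_response() POSTed to
    // <leader_url>analytics/events/replicate with a 10s timeout.
    std::cout << payload.dump(2) << std::endl;
}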
void AnalyticsManager::persist_popular_events(ReplicationState *raft_server, uint64_t prev_persistence_s) {
@ -608,9 +539,7 @@ void AnalyticsManager::persist_popular_events(ReplicationState *raft_server, uin
void AnalyticsManager::stop() {
quit = true;
cv.notify_all();
if(analytics_store) {
delete analytics_store;
}
analytics_logs.close();
}
void AnalyticsManager::dispose() {
@ -633,17 +562,12 @@ void AnalyticsManager::init(Store* store, const std::string& analytics_dir) {
this->store = store;
if(!analytics_dir.empty()) {
this->analytics_store = new Store(analytics_dir, 24 * 60 * 60, 1024, true);
const auto analytics_log_path = analytics_dir + "/analytics_events.tsv";
analytics_logs.open(analytics_log_path, std::ofstream::out | std::ofstream::app);
}
}
Store* AnalyticsManager::get_analytics_store() {
return this->analytics_store;
}
std::unordered_map<std::string, QueryAnalytics*> AnalyticsManager::get_popular_queries() {
std::unique_lock lk(mutex);
return popular_queries;
@ -678,94 +602,7 @@ nlohmann::json AnalyticsManager::get_events(const std::string& coll, const std::
return result_json;
}
nlohmann::json AnalyticsManager::get_query_hits_counts() {
std::unique_lock lk(mutex);
std::vector<std::string> query_hits_counts_jsons;
nlohmann::json result_json = nlohmann::json::array();
if (analytics_store) {
analytics_store->scan_fill(std::string(QUERY_HITS_COUNT) + "_", std::string(QUERY_HITS_COUNT) + "`",
query_hits_counts_jsons);
for (const auto &query_hits_count_json: query_hits_counts_jsons) {
nlohmann::json query_hits_count = nlohmann::json::parse(query_hits_count_json);
result_json.push_back(query_hits_count);
}
}
return result_json;
}
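The scan above leans on lexicographic key ordering: every query-hits key starts with "$QH_", and the upper bound swaps the trailing '_' (0x5F) for '`' (0x60), the next ASCII character, so the range covers exactly the "$QH_..." keys. A small sketch of the bound computation, assuming scan_fill() walks a half-open range [begin, end):
// Sketch of the prefix-range bounds; assumes scan_fill() treats [begin, end) as half-open.
#include <iostream>
#include <string>
int main() {
    const std::string QUERY_HITS_COUNT = "$QH";
    const std::string begin = QUERY_HITS_COUNT + "_";   // "$QH_"  ('_' == 0x5F)
    const std::string end   = QUERY_HITS_COUNT + "`";   // "$QH`"  ('`' == 0x60)
    // Any key of the form "$QH_<timestamp>_<collection_id>" shares the "$QH_" prefix,
    // so it sorts at or after begin and strictly before end.
    const std::string sample_key = "$QH_some_serialized_ts_0";
    std::cout << std::boolalpha
              << (sample_key >= begin && sample_key < end)    // true: inside the scanned range
              << std::endl;
}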
Option<bool> AnalyticsManager::write_events_to_store(nlohmann::json &event_jsons) {
//LOG(INFO) << "writing events to analytics db";
for(const auto& event_json : event_jsons) {
auto collection_id = event_json["collection_id"].get<std::string>();
auto timestamp = event_json["timestamp"].get<uint64_t>();
std::string key = std::string(QUERY_HITS_COUNT) + "_" + StringUtils::serialize_uint64_t(timestamp) + "_" + collection_id;
if(analytics_store) {
bool inserted = analytics_store->insert(key, event_json.dump());
if (!inserted) {
std::string error = "Unable to insert " + std::string(event_json["event_type"]) + " to store";
return Option<bool>(500, error);
}
} else {
return Option<bool>(500, "Analytics DB not initialized.");
}
}
return Option<bool>(true);
}
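The key written above is QUERY_HITS_COUNT + "_" + serialize_uint64_t(timestamp) + "_" + collection_id. Assuming StringUtils::serialize_uint64_t emits a fixed-width, big-endian-style encoding (its implementation is not shown in this diff), keys sort chronologically, which is what lets checkEventsExpiry and resetAnalyticsStore below expire entries with a single range delete. A sketch with a hypothetical stand-in serializer:
// serialize_big_endian() is a hypothetical stand-in for StringUtils::serialize_uint64_t.
#include <cstdint>
#include <iostream>
#include <string>
static std::string serialize_big_endian(uint64_t n) {
    std::string out(8, '\0');
    for (int i = 0; i < 8; ++i) {
        out[i] = static_cast<char>((n >> ((7 - i) * 8)) & 0xFF);   // most significant byte first
    }
    return out;
}
int main() {
    const std::string QUERY_HITS_COUNT = "$QH";
    const uint64_t ts = 1701851341000000ULL;   // microseconds
    std::string newer = QUERY_HITS_COUNT + "_" + serialize_big_endian(ts) + "_" + "0";
    std::string older = QUERY_HITS_COUNT + "_" + serialize_big_endian(ts - 1) + "_" + "0";
    // Fixed-width big-endian timestamps mean older keys always sort first, so a TTL
    // cutoff maps to one delete_range(prefix, prefix + serialize(cutoff)) call.
    std::cout << std::boolalpha << (older < newer) << std::endl;   // true
}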
void AnalyticsManager::resetToggleRateLimit(bool toggle) {
events_cache.clear();
isRateLimitEnabled = toggle;
}
void AnalyticsManager::resetAnalyticsStore() {
const std::string query_hits_prefix = std::string(QUERY_HITS_COUNT) + "_";
//delete query hits counts
auto delete_prefix_begin = query_hits_prefix;
auto delete_prefix_end = query_hits_prefix + "`";
analytics_store->delete_range(delete_prefix_begin, delete_prefix_end);
}
#ifdef TEST_BUILD
uint64_t AnalyticsManager::get_current_time_us() {
uint64_t now_ts_useconds = 1701851345000000 + EVENTS_TTL_INTERVAL_US;
return now_ts_useconds;
}
#else
uint64_t AnalyticsManager::get_current_time_us() {
uint64_t now_ts_useconds = std::chrono::duration_cast<std::chrono::microseconds>(
std::chrono::system_clock::now().time_since_epoch()).count();
return now_ts_useconds;
}
#endif
void AnalyticsManager::checkEventsExpiry() {
if (analytics_store) {
//LOG(INFO) << "checking for events expiry";
//we check for 30-day event validity; events older than 30 days will be removed from the db
auto ts_ttl_useconds = get_current_time_us() - EVENTS_TTL_INTERVAL_US;
const std::string query_hits_prefix = std::string(QUERY_HITS_COUNT) + "_";
auto iter = analytics_store->get_iterator();
//now remove query hits counts
auto delete_prefix_begin = query_hits_prefix;
auto delete_prefix_end = std::string(QUERY_HITS_COUNT) + "_" +
StringUtils::serialize_uint64_t(ts_ttl_useconds);
iter->SeekToFirst();
iter->Seek(delete_prefix_end);
analytics_store->delete_range(delete_prefix_begin, delete_prefix_end);
}
}
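Under the TEST_BUILD clock above, the cutoff arithmetic works out as follows and matches the EventsExpiry* tests further down; the exact EVENTS_TTL_INTERVAL_US value (30 days in production, per the comment above) cancels out of the calculation, so the constant used here is only illustrative.
// Worked example of the expiry cutoff; the TTL constant here is assumed, not from the source.
#include <cstdint>
#include <iostream>
int main() {
    const uint64_t EVENTS_TTL_INTERVAL_US = 30ULL * 24 * 60 * 60 * 1000000;  // assumed 30 days
    const uint64_t now    = 1701851345000000ULL + EVENTS_TTL_INTERVAL_US;    // TEST_BUILD clock
    const uint64_t cutoff = now - EVENTS_TTL_INTERVAL_US;                    // 1701851345000000
    const uint64_t ts = 1701851341000000ULL;          // base timestamp used by the expiry tests
    const uint64_t events[] = {ts, ts + 2000000, ts + 8000000};
    for (uint64_t event_ts : events) {
        // Keys with timestamps below the cutoff fall inside delete_range() and are expired.
        std::cout << event_ts << (event_ts < cutoff ? "  expired" : "  kept") << "\n";
    }
    // ts and ts+2s are expired, ts+8s survives -- exactly what EventsExpiryPartial asserts.
}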

View File

@ -1821,11 +1821,6 @@ Option<bool> CollectionManager::do_search(std::map<std::string, std::string>& re
AnalyticsManager::get_instance().add_suggestion(orig_coll_name, analytics_query, expanded_query,
true, req_params["x-typesense-user-id"]);
#ifdef ENABLE_QUERY_HITS
AnalyticsManager::get_instance().add_query_hits_count(orig_coll_name, analytics_query,
req_params["x-typesense-user-id"],
result["found"].get<size_t>());
#endif
} else if(result.contains("found") == 0 && result["found"].get<size_t>() == 0) {
std::string analytics_query = Tokenizer::normalize_ascii_no_spaces(raw_query);
AnalyticsManager::get_instance().add_nohits_query(orig_coll_name, analytics_query,

View File

@ -2795,32 +2795,4 @@ bool put_conversation_model(const std::shared_ptr<http_req>& req, const std::sha
res->set_200(model.dump());
return true;
}
bool post_replicate_events(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
nlohmann::json req_json;
try {
req_json = nlohmann::json::parse(req->body);
} catch(const std::exception& e) {
LOG(ERROR) << "JSON error: " << e.what();
res->set_400("Bad JSON.");
return false;
}
auto op = AnalyticsManager::get_instance().write_events_to_store(req_json);
if(!op.ok()) {
res->set_body(op.code(), op.error());
return false;
}
res->set_200("event wrote to DB.");
return true;
}
bool get_query_hits_counts(const std::shared_ptr<http_req>& req, const std::shared_ptr<http_res>& res) {
auto query_hits_counts = AnalyticsManager::get_instance().get_query_hits_counts();
res->set_200(query_hits_counts.dump());
return true;
}

View File

@ -79,11 +79,7 @@ void master_server_routes() {
server->post("/analytics/rules", post_create_analytics_rules);
server->put("/analytics/rules/:name", put_upsert_analytics_rules);
server->del("/analytics/rules/:name", del_analytics_rules);
//analytics events
server->post("/analytics/events", post_create_event);
server->post("/analytics/events/replicate", post_replicate_events);
server->get("/analytics/query_hits_counts", get_query_hits_counts);
// meta
server->get("/metrics.json", get_metrics_json);

View File

@ -19,7 +19,6 @@ protected:
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/analytics_manager_test";
std::string analytics_db_path = "/tmp/typesense_test/analytics_db";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
system("mkdir -p /tmp/typesense_test/models");
@ -28,7 +27,7 @@ protected:
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
analyticsManager.init(store, analytics_db_path);
analyticsManager.init(store, state_dir_path);
}
virtual void SetUp() {
@ -257,114 +256,6 @@ TEST_F(AnalyticsManagerTest, GetAndDeleteSuggestions) {
ASSERT_FALSE(missing_rule_op.ok());
}
TEST_F(AnalyticsManagerTest, ClickEventsStoreRetrieveal) {
//stores events in memory and reads them back as events are persisted to a file on disk
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection* titles_coll = collectionManager.create_collection(titles_schema).get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json event1 = R"({
"type": "query_click",
"data": {
"q": "technology",
"collection": "titles",
"doc_id": "21",
"position": 2,
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
nlohmann::json event2 = R"({
"type": "query_click",
"data": {
"q": "technology",
"collection": "titles",
"doc_id": "21",
"position": 4,
"user_id": "11"
}
})"_json;
req->body = event2.dump();
ASSERT_TRUE(post_create_event(req, res));
auto result = analyticsManager.get_events("titles", "query_click");
ASSERT_EQ("13", result[0]["user_id"]);
ASSERT_EQ("21", result[0]["doc_id"]);
ASSERT_EQ(2, result[0]["position"]);
ASSERT_EQ("technology", result[0]["query"]);
ASSERT_EQ("11", result[1]["user_id"]);
ASSERT_EQ("21", result[1]["doc_id"]);
ASSERT_EQ(4, result[1]["position"]);
ASSERT_EQ("technology", result[1]["query"]);
}
TEST_F(AnalyticsManagerTest, PurchaseEventsStoreRetrieval) {
//stores events in memory and reads them back as events are persisted to a file on disk
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
std::shared_ptr<http_req> req = std::make_shared<http_req>();
std::shared_ptr<http_res> res = std::make_shared<http_res>(nullptr);
nlohmann::json event1 = R"({
"type": "query_purchase",
"data": {
"q": "technology",
"collection": "titles",
"doc_id": "21",
"position": 2,
"user_id": "13"
}
})"_json;
req->body = event1.dump();
ASSERT_TRUE(post_create_event(req, res));
nlohmann::json event2 = R"({
"type": "query_purchase",
"data": {
"q": "technology",
"collection": "titles",
"doc_id": "21",
"position": 4,
"user_id": "11"
}
})"_json;
req->body = event2.dump();
ASSERT_TRUE(post_create_event(req, res));
auto result = analyticsManager.get_events("titles", "query_purchase");
ASSERT_EQ("13", result[0]["user_id"]);
ASSERT_EQ("21", result[0]["doc_id"]);
ASSERT_EQ(2, result[0]["position"]);
ASSERT_EQ("technology", result[0]["query"]);
ASSERT_EQ("11", result[1]["user_id"]);
ASSERT_EQ("21", result[1]["doc_id"]);
ASSERT_EQ(4, result[1]["position"]);
ASSERT_EQ("technology", result[1]["query"]);
}
TEST_F(AnalyticsManagerTest, EventsValidation) {
nlohmann::json titles_schema = R"({
"name": "titles",
@ -639,220 +530,6 @@ TEST_F(AnalyticsManagerTest, SuggestionConfigRule) {
ASSERT_EQ(0, rules.size());
}
TEST_F(AnalyticsManagerTest, QueryHitsCount) {
//flush all events from analytics store
analyticsManager.resetAnalyticsStore();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json doc;
doc["title"] = "Cool trousers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
doc["title"] = "Cool pants";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
doc["title"] = "Trendy sneakers";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
doc["title"] = "Funky shorts";
ASSERT_TRUE(titles_coll->add(doc.dump()).ok());
nlohmann::json query_hits_array = nlohmann::json::array();
nlohmann::json obj;
obj["collection_id"] = "0";
obj["event_type"] = "query_hits_counts";
obj["query"] = "cool";
obj["timestamp"] = 1625365612;
obj["user_id"] = "1";
obj["hits_count"] = 2;
query_hits_array.push_back(obj);
obj["query"] = "funky";
obj["timestamp"] = 1625365616;
obj["user_id"] = "1";
obj["hits_count"] = 1;
query_hits_array.push_back(obj);
auto op = analyticsManager.write_events_to_store(query_hits_array);
ASSERT_TRUE(op.ok());
auto result = analyticsManager.get_query_hits_counts();
ASSERT_EQ(2, result.size());
ASSERT_EQ("cool", result[0]["query"]);
ASSERT_EQ(2, result[0]["hits_count"]);
ASSERT_EQ(1625365612, result[0]["timestamp"]);
ASSERT_EQ("funky", result[1]["query"]);
ASSERT_EQ(1, result[1]["hits_count"]);
ASSERT_EQ(1625365616, result[1]["timestamp"]);
}
TEST_F(AnalyticsManagerTest, EventsExpiryBasic) {
//flush all events from analytics store
analyticsManager.resetAnalyticsStore();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json events = nlohmann::json::array();
auto ts = 1701851341000000;
//add query hits events with click events
nlohmann::json event2;
event2["event_type"] = "query_hits_counts";
event2["q"] = "technology";
event2["collection_id"] = "0";
event2["user_id"] = 13;
event2["hits_count"] = 124;
event2["timestamp"] = ts + 10000000; //after 10s
events.push_back(event2);
ASSERT_TRUE(analyticsManager.write_events_to_store(events).ok());
auto resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(1, resp.size());
ASSERT_EQ("technology", resp[0]["q"]);
ASSERT_EQ(13, resp[0]["user_id"]);
ASSERT_EQ(124, resp[0]["hits_count"]);
// assuming ttl is ts + 5 sec
analyticsManager.checkEventsExpiry();
resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(1, resp.size());
ASSERT_EQ("technology", resp[0]["q"]);
ASSERT_EQ(13, resp[0]["user_id"]);
ASSERT_EQ(124, resp[0]["hits_count"]);
}
TEST_F(AnalyticsManagerTest, EventsExpiryAll) {
//flush all events from analytics store
analyticsManager.resetAnalyticsStore();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json events = nlohmann::json::array();
auto ts = 1701851341000000;
nlohmann::json event2;
event2["event_type"] = "query_hits_counts";
event2["q"] = "technology";
event2["collection_id"] = "0";
event2["user_id"] = 13;
event2["hits_count"] = 124;
event2["timestamp"] = ts + 2000000; //after 2s
events.push_back(event2);
ASSERT_TRUE(analyticsManager.write_events_to_store(events).ok());
auto resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(1, resp.size());
ASSERT_EQ("technology", resp[0]["q"]);
ASSERT_EQ(13, resp[0]["user_id"]);
ASSERT_EQ(124, resp[0]["hits_count"]);
//check for events expiry
// assuming ttl is ts + 5 sec
analyticsManager.checkEventsExpiry();
resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(0, resp.size());
}
TEST_F(AnalyticsManagerTest, EventsExpiryPartial) {
//flush all events from analytics store
analyticsManager.resetAnalyticsStore();
nlohmann::json titles_schema = R"({
"name": "titles",
"fields": [
{"name": "title", "type": "string"}
]
})"_json;
Collection *titles_coll = collectionManager.create_collection(titles_schema).get();
nlohmann::json events = nlohmann::json::array();
auto ts = 1701851341000000;
nlohmann::json event2;
event2["event_type"] = "query_hits_counts";
event2["q"] = "technology";
event2["collection_id"] = "0";
event2["user_id"] = 13;
event2["hits_count"] = 124;
event2["timestamp"] = ts;
events.push_back(event2);
event2["q"] = "industry";
event2["user_id"] = 13;
event2["hits_count"] = 214;
event2["timestamp"] = ts + 2000000; //after 2s
events.push_back(event2);
event2["q"] = "management";
event2["user_id"] = 13;
event2["hits_count"] = 834;
event2["timestamp"] = ts + 8000000; //after 8s
events.push_back(event2);
ASSERT_TRUE(analyticsManager.write_events_to_store(events).ok());
auto resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(3, resp.size());
ASSERT_EQ("technology", resp[0]["q"]);
ASSERT_EQ(13, resp[0]["user_id"]);
ASSERT_EQ(124, resp[0]["hits_count"]);
ASSERT_EQ("industry", resp[1]["q"]);
ASSERT_EQ(13, resp[1]["user_id"]);
ASSERT_EQ(214, resp[1]["hits_count"]);
ASSERT_EQ("management", resp[2]["q"]);
ASSERT_EQ(13, resp[2]["user_id"]);
ASSERT_EQ(834, resp[2]["hits_count"]);
//check for events expiry
// assuming ttl is ts + 5 sec
//only events older than the ttl cutoff will be removed
analyticsManager.checkEventsExpiry();
resp = analyticsManager.get_query_hits_counts();
ASSERT_EQ(1, resp.size());
ASSERT_EQ("management", resp[0]["q"]);
ASSERT_EQ(13, resp[0]["user_id"]);
ASSERT_EQ(834, resp[0]["hits_count"]);
}
TEST_F(AnalyticsManagerTest, PopularityScore) {
nlohmann::json products_schema = R"({

View File

@ -18,7 +18,6 @@ protected:
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/coll_manager_test_db";
std::string analytics_db_path = "/tmp/typesense_test/analytics_db";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
@ -26,7 +25,7 @@ protected:
collectionManager.init(store, 1.0, "auth_key", quit);
collectionManager.load(8, 1000);
AnalyticsManager::get_instance().init(store, analytics_db_path);
AnalyticsManager::get_instance().init(store);
schema = R"({
"name": "collection1",
@ -63,7 +62,6 @@ protected:
collectionManager.drop_collection("collection1");
collectionManager.dispose();
delete store;
AnalyticsManager::get_instance().stop();
}
}
};

View File

@ -7,7 +7,6 @@
#include "raft_server.h"
#include "conversation_model_manager.h"
#include "conversation_manager.h"
#include <analytics_manager.h>
class CoreAPIUtilsTest : public ::testing::Test {
protected:
@ -18,11 +17,9 @@ protected:
std::vector<std::string> query_fields;
std::vector<sort_by> sort_fields;
AnalyticsManager& analyticsManager = AnalyticsManager::get_instance();
void setupCollection() {
std::string state_dir_path = "/tmp/typesense_test/core_api_utils";
std::string analytics_db_path = "/tmp/typesense_test/analytics_db2";
LOG(INFO) << "Truncating and creating: " << state_dir_path;
system(("rm -rf "+state_dir_path+" && mkdir -p "+state_dir_path).c_str());
@ -32,7 +29,6 @@ protected:
ConversationModelManager::init(store);
ConversationManager::get_instance().init(store);
analyticsManager.init(store, analytics_db_path);
}
virtual void SetUp() {
@ -42,7 +38,6 @@ protected:
virtual void TearDown() {
collectionManager.dispose();
delete store;
analyticsManager.stop();
}
};