diff --git a/src/collection.cpp b/src/collection.cpp
index bac865ad..c55bee7d 100644
--- a/src/collection.cpp
+++ b/src/collection.cpp
@@ -419,7 +419,7 @@ Option Collection::update_matching_filter(const std::string& fil
     }
 
     const auto& dirty_values = parse_dirty_values_option(req_dirty_values);
-    size_t docs_updated_count;
+    size_t docs_updated_count = 0;
     nlohmann::json update_document, dummy;
 
     try {
diff --git a/src/string_utils.cpp b/src/string_utils.cpp
index b2caf445..04711402 100644
--- a/src/string_utils.cpp
+++ b/src/string_utils.cpp
@@ -464,7 +464,11 @@ Option StringUtils::split_include_fields(const std::string& include_fields
 
         if (range_pos == std::string::npos && comma_pos == std::string::npos) {
             if (start < size - 1) {
-                tokens.push_back(include_fields.substr(start, size - start));
+                include_field = include_fields.substr(start, size - start);
+                include_field = trim(include_field);
+                if (!include_field.empty()) {
+                    tokens.push_back(include_field);
+                }
             }
             break;
         } else if (range_pos < comma_pos) {
diff --git a/test/string_utils_test.cpp b/test/string_utils_test.cpp
index 8fa60a92..75700c8a 100644
--- a/test/string_utils_test.cpp
+++ b/test/string_utils_test.cpp
@@ -391,7 +391,32 @@ TEST(StringUtilsTest, TokenizeFilterQuery) {
     tokenList = {"(", "(", "age:<5", "||", "age:>10", ")", "&&", "location:(48.906,2.343,5mi)", ")", "||", "tags:AT&T"};
     tokenizeTestHelper(filter_query, tokenList);
 
-    filter_query = "((age: <5 || age: >10) && category:= [shoes]) && $Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
-    tokenList = {"(", "(", "age: <5", "||", "age: >10", ")", "&&", "category:= [shoes]", ")", "&&", "$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))"};
+    filter_query = "((age: <5 || age: >10) && category:= [shoes]) &&"
+                   " $Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))";
+    tokenList = {"(", "(", "age: <5", "||", "age: >10", ")", "&&", "category:= [shoes]", ")", "&&",
+                 "$Customers(customer_id:=customer_a && (product_price:>100 && product_price:<200))"};
     tokenizeTestHelper(filter_query, tokenList);
 }
+
+void splitIncludeTestHelper(const std::string& include_fields, const std::vector<std::string>& expected) {
+    std::vector<std::string> output;
+    auto tokenize_op = StringUtils::split_include_fields(include_fields, output);
+    ASSERT_TRUE(tokenize_op.ok());
+    ASSERT_EQ(expected.size(), output.size());
+    for (auto i = 0; i < output.size(); i++) {
+        ASSERT_EQ(expected[i], output[i]);
+    }
+}
+
+TEST(StringUtilsTest, SplitIncludeFields) {
+    std::string include_fields;
+    std::vector<std::string> tokens;
+
+    include_fields = "id, title, count";
+    tokens = {"id", "title", "count"};
+    splitIncludeTestHelper(include_fields, tokens);
+
+    include_fields = "id, $Collection(title, pref*), count";
+    tokens = {"id", "$Collection(title, pref*)", "count"};
+    splitIncludeTestHelper(include_fields, tokens);
+}
\ No newline at end of file