Match record validation behavior during alter with other indexing paths.

This ensures that alter does not apply stricter validation than what was in effect when a bad record was originally indexed.
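
For illustration, here is a minimal standalone sketch of the hazard this change removes. The `dirty_values_t` enum and `validate_int_field` helper are hypothetical stand-ins, not the project's actual validation code: a value that was accepted at write time only because the lenient COERCE_OR_DROP policy could coerce it will fail a later re-validation pass that uses REJECT.

    // Minimal sketch (not Typesense code): a stricter policy during alter can
    // reject a record that a lenient policy accepted at write time.
    #include <nlohmann/json.hpp>
    #include <iostream>
    #include <string>

    enum class dirty_values_t { REJECT, COERCE_OR_DROP };   // hypothetical stand-in

    // Accept, coerce, or drop a field that is expected to hold an integer.
    bool validate_int_field(nlohmann::json& doc, const std::string& field, dirty_values_t policy) {
        auto& value = doc[field];
        if(value.is_number_integer()) {
            return true;
        }
        if(policy == dirty_values_t::REJECT) {
            return false;                                   // bad value rejects the whole record
        }
        // COERCE_OR_DROP: coerce when possible, otherwise drop just this field
        if(value.is_string()) {
            try {
                value = std::stoll(value.get<std::string>());
                return true;
            } catch(...) {}
        }
        doc.erase(field);
        return true;
    }

    int main() {
        // Accepted at write time only because "2001" could be coerced to an integer.
        nlohmann::json written = {{"year", "2001"}};
        nlohmann::json re_validated = written;

        std::cout << std::boolalpha
                  << validate_int_field(written, "year", dirty_values_t::COERCE_OR_DROP) << "\n"  // true
                  << validate_int_field(re_validated, "year", dirty_values_t::REJECT) << "\n";    // false
    }

Before this change, the alter path re-validated existing documents with REJECT even though they may have been accepted under a more lenient policy when first added; using COERCE_OR_DROP during alter keeps the two paths consistent.
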
Kishore Nallan 2024-01-10 17:21:27 +05:30
parent cfeab44614
commit a79356e055
2 changed files with 46 additions and 1 deletion


@@ -4774,7 +4774,7 @@ Option<bool> Collection::batch_alter_data(const std::vector<field>& alter_fields
field::flatten_doc(document, nested_fields, {}, true, flattened_fields);
}
-index_record record(num_found_docs, seq_id, document, index_operation_t::CREATE, DIRTY_VALUES::REJECT);
+index_record record(num_found_docs, seq_id, document, index_operation_t::CREATE, DIRTY_VALUES::COERCE_OR_DROP);
iter_batch.emplace_back(std::move(record));
// Peek and check for last record right here so that we handle batched indexing correctly
@@ -4891,10 +4891,12 @@ Option<bool> Collection::alter(nlohmann::json& alter_payload) {
auto validate_op = validate_alter_payload(alter_payload, addition_fields, reindex_fields,
del_fields, this_fallback_field_type);
if(!validate_op.ok()) {
LOG(INFO) << "Alter failed validation: " << validate_op.error();
return validate_op;
}
if(!this_fallback_field_type.empty() && !fallback_field_type.empty()) {
LOG(INFO) << "Alter failed: schema already contains a `.*` field.";
return Option<bool>(400, "The schema already contains a `.*` field.");
}
@@ -4912,6 +4914,7 @@ Option<bool> Collection::alter(nlohmann::json& alter_payload) {
auto batch_alter_op = batch_alter_data(addition_fields, del_fields, fallback_field_type);
if(!batch_alter_op.ok()) {
LOG(INFO) << "Alter failed during alter data: " << batch_alter_op.error();
return batch_alter_op;
}
@@ -4919,6 +4922,7 @@ Option<bool> Collection::alter(nlohmann::json& alter_payload) {
LOG(INFO) << "Processing field modifications now...";
batch_alter_op = batch_alter_data(reindex_fields, {}, fallback_field_type);
if(!batch_alter_op.ok()) {
LOG(INFO) << "Alter failed during alter data: " << batch_alter_op.error();
return batch_alter_op;
}
}


@@ -1539,6 +1539,47 @@ TEST_F(CollectionSchemaChangeTest, AlterShouldBeAbleToHandleFieldValueCoercion)
ASSERT_TRUE(alter_op.ok());
}
TEST_F(CollectionSchemaChangeTest, AlterValidationShouldNotRejectBadValues) {
nlohmann::json schema = R"({
"name": "coll1",
"enable_nested_fields": true,
"fields": [
{"name": "info", "type": "object"}
]
})"_json;
Collection* coll1 = collectionManager.create_collection(schema).get();
nlohmann::json doc = R"(
{"info": {"year": 1999}}
)"_json;
auto add_op = coll1->add(doc.dump(), CREATE, "0", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
doc = R"(
{"info": {"year": "2001"}, "description": "test"}
)"_json;
add_op = coll1->add(doc.dump(), CREATE, "1", DIRTY_VALUES::COERCE_OR_DROP);
ASSERT_TRUE(add_op.ok());
// add a new field
auto schema_changes = R"({
"fields": [
{"name": "description", "type": "string", "optional": true}
]
})"_json;
auto alter_op = coll1->alter(schema_changes);
ASSERT_TRUE(alter_op.ok());
auto res_op = coll1->search("test", {"description"}, "", {}, {}, {0}, 3, 1, FREQUENCY, {true});
ASSERT_TRUE(res_op.ok());
ASSERT_EQ(1, res_op.get()["found"].get<size_t>());
}
TEST_F(CollectionSchemaChangeTest, GeoFieldSchemaAddition) {
nlohmann::json schema = R"({
"name": "coll1",