Have Cache use Status::MemoryLimit (#10262)
Summary:
I noticed it would clean up some things to have Cache::Insert()
return our MemoryLimit Status instead of Incomplete for the case in
which the capacity limit is reached. I suspect this fixes some existing but
unknown bugs where this Incomplete could be confused with other uses
of Incomplete, especially no_io cases. This is the most suspicious case I
noticed, but I was not able to reproduce a bug, in part because the existing
code is not covered by unit tests (FIXME added): 57adbf0e91/table/get_context.cc (L397)
I audited all the existing uses of IsIncomplete and updated those that
seemed relevant.
HISTORY updated with a clear warning to users of strict_capacity_limit=true
to update uses of `IsIncomplete()`.
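
For illustration (not part of this patch), one source of the ambiguity is that read-only-from-cache lookups (no_io) also report Incomplete. A minimal sketch, assuming an already-open `DB* db`:

  ReadOptions ro;
  ro.read_tier = kBlockCacheTier;  // no_io: serve only from the block cache
  std::string value;
  Status s = db->Get(ro, "key", &value);
  if (s.IsIncomplete()) {
    // Before this change, this branch could mean either "data not in block
    // cache" or "insert failed because the cache is full" under
    // strict_capacity_limit; afterwards the full-cache failure surfaces as
    // s.IsMemoryLimit() instead.
  }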
Pull Request resolved: https://github.com/facebook/rocksdb/pull/10262
Test Plan: updated unit tests
Reviewed By: hx235
Differential Revision: D37473155
Pulled By: pdillinger
fbshipit-source-id: 4bd9d9353ccddfe286b03ebd0652df8ce20f99cb
This commit is contained in:
parent 071fe39c05
commit e6c5e0ab9a
@@ -12,6 +12,7 @@
 * `rocksdb_level_metadata_t` and its get functions & destroy function.
 * `rocksdb_file_metadata_t` and its get functions & destroy functions.
 * Add suggest_compact_range() and suggest_compact_range_cf() to C API.
+* When using block cache strict capacity limit (`LRUCache` with `strict_capacity_limit=true`), DB operations now fail with Status code `kAborted` subcode `kMemoryLimit` (`IsMemoryLimit()`) instead of `kIncomplete` (`IsIncomplete()`) when the capacity limit is reached, because Incomplete can mean other specific things for some operations. In more detail, `Cache::Insert()` now returns the updated Status code and this usually propagates through RocksDB to the user on failure.

 ### Bug Fixes
 * Fix a bug in which backup/checkpoint can include a WAL deleted by RocksDB.
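As an editorial illustration (not from the patch) of the new failure mode described above, using only public `Status` accessors:

  #include <cassert>
  #include "rocksdb/status.h"
  using ROCKSDB_NAMESPACE::Status;

  int main() {
    Status s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
    assert(s.IsAborted());      // Code is kAborted
    assert(s.IsMemoryLimit());  // SubCode is kMemoryLimit
    assert(!s.IsIncomplete());  // old strict-capacity-limit checks no longer match
    // s.ToString() reads roughly:
    // "Operation aborted: Memory limit reached: Insert failed due to ..."
    return 0;
  }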
cache/cache_reservation_manager_test.cc (4 changes)
@@ -147,7 +147,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,

   std::size_t new_mem_used = kSmallCacheCapacity + 1;
   Status s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
-  EXPECT_EQ(s, Status::Incomplete())
+  EXPECT_EQ(s, Status::MemoryLimit())
       << "Failed to return status to indicate failure of dummy entry insertion "
          "during cache reservation on full cache";
   EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
@@ -192,7 +192,7 @@ TEST(CacheReservationManagerIncreaseReservcationOnFullCacheTest,
   // Create cache full again for subsequent tests
   new_mem_used = kSmallCacheCapacity + 1;
   s = test_cache_rev_mng->UpdateCacheReservation(new_mem_used);
-  EXPECT_EQ(s, Status::Incomplete())
+  EXPECT_EQ(s, Status::MemoryLimit())
       << "Failed to return status to indicate failure of dummy entry insertion "
          "during cache reservation on full cache";
   EXPECT_GE(test_cache_rev_mng->GetTotalReservedCacheSize(),
cache/cache_test.cc (4 changes)
@@ -741,7 +741,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
   cache->SetStrictCapacityLimit(true);
   Cache::Handle* handle;
   s = cache->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
+  ASSERT_TRUE(s.IsMemoryLimit());
   ASSERT_EQ(nullptr, handle);
   ASSERT_EQ(10, cache->GetUsage());

@@ -758,7 +758,7 @@ TEST_P(LRUCacheTest, SetStrictCapacityLimit) {
     ASSERT_NE(nullptr, handles[i]);
   }
   s = cache2->Insert(extra_key, extra_value, 1, &deleter, &handle);
-  ASSERT_TRUE(s.IsIncomplete());
+  ASSERT_TRUE(s.IsMemoryLimit());
   ASSERT_EQ(nullptr, handle);
   // test insert without handle
   s = cache2->Insert(extra_key, extra_value, 1, &deleter);
cache/clock_cache.cc (7 changes)
@@ -351,11 +351,12 @@ Status ClockCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
       last_reference_list.push_back(tmp);
     } else {
       if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
-        s = Status::Incomplete(
+        // TODO: Consider using a distinct status for this case, but usually
+        // it will be handled the same way as reaching charge capacity limit
+        s = Status::MemoryLimit(
             "Insert failed because all slots in the hash table are full.");
-        // TODO(Guido) Use the correct statuses.
       } else {
-        s = Status::Incomplete(
+        s = Status::MemoryLimit(
             "Insert failed because the total charge has exceeded the "
             "capacity.");
       }
cache/fast_lru_cache.cc (7 changes)
@@ -368,11 +368,12 @@ Status LRUCacheShard::Insert(const Slice& key, uint32_t hash, void* value,
       last_reference_list.push_back(tmp);
     } else {
       if (table_.GetOccupancy() == table_.GetOccupancyLimit()) {
-        s = Status::Incomplete(
+        // TODO: Consider using a distinct status for this case, but usually
+        // it will be handled the same way as reaching charge capacity limit
+        s = Status::MemoryLimit(
             "Insert failed because all slots in the hash table are full.");
-        // TODO(Guido) Use the correct statuses.
       } else {
-        s = Status::Incomplete(
+        s = Status::MemoryLimit(
             "Insert failed because the total charge has exceeded the "
             "capacity.");
       }
cache/lru_cache.cc (2 changes)
@@ -332,7 +332,7 @@ Status LRUCacheShard::InsertItem(LRUHandle* e, Cache::Handle** handle,
         delete[] reinterpret_cast<char*>(e);
         *handle = nullptr;
       }
-      s = Status::Incomplete("Insert failed due to LRU cache being full.");
+      s = Status::MemoryLimit("Insert failed due to LRU cache being full.");
     }
   } else {
     // Insert into the cache. Note that the cache might get larger than its
@@ -254,7 +254,7 @@ TEST_F(BlobFileCacheTest, GetBlobFileReader_CacheFull) {
   CacheHandleGuard<BlobFileReader> reader;

   ASSERT_TRUE(blob_file_cache.GetBlobFileReader(blob_file_number, &reader)
-                  .IsIncomplete());
+                  .IsMemoryLimit());
   ASSERT_EQ(reader.GetValue(), nullptr);
   ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_OPENS), 1);
   ASSERT_EQ(options.statistics->getTickerCount(NO_FILE_ERRORS), 1);
@@ -250,7 +250,7 @@ TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) {
   cache->SetStrictCapacityLimit(true);
   iter = db_->NewIterator(read_options);
   iter->Seek(std::to_string(kNumBlocks - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
+  ASSERT_TRUE(iter->status().IsMemoryLimit());
   CheckCacheCounters(options, 1, 0, 0, 1);
   delete iter;
   iter = nullptr;
@@ -333,8 +333,10 @@ TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) {
   ASSERT_EQ(usage, cache->GetPinnedUsage());

   // Load last key block.
-  ASSERT_EQ("Result incomplete: Insert failed due to LRU cache being full.",
-            Get(std::to_string(kNumBlocks - 1)));
+  ASSERT_EQ(
+      "Operation aborted: Memory limit reached: Insert failed due to LRU cache "
+      "being full.",
+      Get(std::to_string(kNumBlocks - 1)));
   // Failure will also record the miss counter.
   CheckCacheCounters(options, 1, 0, 0, 1);
   CheckCompressedCacheCounters(options, 1, 0, 1, 0);
@@ -117,7 +117,7 @@ struct CompressionOptions {
   //
   // The amount of data buffered can be limited by `max_dict_buffer_bytes`. This
   // buffered memory is charged to the block cache when there is a block cache.
-  // If block cache insertion fails with `Status::Incomplete` (i.e., it is
+  // If block cache insertion fails with `Status::MemoryLimit` (i.e., it is
   // full), we finalize the dictionary with whatever data we have and then stop
   // buffering.
   //
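As a usage sketch (values arbitrary, helper name illustrative, not from the patch): the buffering described in the comment above is bounded by `max_dict_buffer_bytes` and charged to the block cache when one is configured, and a MemoryLimit result from that charge simply ends buffering early rather than failing the table build:

  #include <memory>
  #include "rocksdb/cache.h"
  #include "rocksdb/options.h"
  #include "rocksdb/table.h"
  using namespace ROCKSDB_NAMESPACE;

  Options MakeDictCompressionOptions() {
    Options options;
    options.compression = kZSTD;
    options.compression_opts.max_dict_bytes = 16 * 1024;           // target dictionary size
    options.compression_opts.max_dict_buffer_bytes = 1024 * 1024;  // cap sampling buffer

    BlockBasedTableOptions table_options;
    table_options.block_cache = NewLRUCache(
        64 << 20 /* capacity */, -1 /* num_shard_bits */,
        true /* strict_capacity_limit */);
    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
    return options;
  }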
@@ -288,7 +288,7 @@ class Cache {
   // Insert a mapping from key->value into the volatile cache only
   // and assign it with the specified charge against the total cache capacity.
   // If strict_capacity_limit is true and cache reaches its full capacity,
-  // return Status::Incomplete.
+  // return Status::MemoryLimit.
   //
   // If handle is not nullptr, returns a handle that corresponds to the
   // mapping. The caller must call this->Release(handle) when the returned
@@ -446,7 +446,7 @@ class Cache {
   // Insert a mapping from key->value into the cache and assign it
   // the specified charge against the total cache capacity.
   // If strict_capacity_limit is true and cache reaches its full capacity,
-  // return Status::Incomplete.
+  // return Status::MemoryLimit.
   //
   // The helper argument is saved by the cache and will be used when the
   // inserted object is evicted or promoted to the secondary cache. It,
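A minimal caller-side sketch of the documented contract (illustrative, not from the patch): with strict_capacity_limit=true an over-capacity insert now fails with MemoryLimit and the returned handle is null; callers migrating from older releases may want to accept either status for a while:

  #include <memory>
  #include "rocksdb/cache.h"
  using namespace ROCKSDB_NAMESPACE;

  int main() {
    std::shared_ptr<Cache> cache = NewLRUCache(
        10 /* capacity */, 0 /* num_shard_bits */,
        true /* strict_capacity_limit */);
    static int dummy_value = 42;                     // nothing to free
    auto noop_deleter = [](const Slice&, void*) {};  // ownership is moot here
    Cache::Handle* handle = nullptr;
    // Charge exceeds capacity, so the insert fails under the strict limit.
    Status s = cache->Insert("key", &dummy_value, 100 /* charge */,
                             noop_deleter, &handle);
    bool cache_full = s.IsMemoryLimit() || s.IsIncomplete();  // new || pre-change
    if (s.ok() && handle != nullptr) {
      cache->Release(handle);  // a handle from a successful insert must be released
    }
    (void)cache_full;
    return 0;
  }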
@@ -944,7 +944,7 @@ void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) {
       Status s =
           r->compression_dict_buffer_cache_res_mgr->UpdateCacheReservation(
               r->data_begin_offset);
-      exceeds_global_block_cache_limit = s.IsIncomplete();
+      exceeds_global_block_cache_limit = s.IsMemoryLimit();
     }

     if (exceeds_buffer_limit || exceeds_global_block_cache_limit) {
@@ -743,7 +743,7 @@ Status BlockBasedTable::Open(
     std::size_t mem_usage = new_table->ApproximateMemoryUsage();
     s = table_reader_cache_res_mgr->MakeCacheReservation(
         mem_usage, &(rep->table_reader_cache_res_handle));
-    if (s.IsIncomplete()) {
+    if (s.IsMemoryLimit()) {
       s = Status::MemoryLimit(
           "Can't allocate " +
           kCacheEntryRoleToCamelString[static_cast<std::uint32_t>(
@@ -662,7 +662,7 @@ class Standard128RibbonBitsBuilder : public XXPH3FilterBitsBuilder {
           bytes_banding, &banding_res_handle);
     }

-    if (status_banding_cache_res.IsIncomplete()) {
+    if (status_banding_cache_res.IsMemoryLimit()) {
      ROCKS_LOG_WARN(info_log_,
                     "Cache charging for Ribbon filter banding failed due "
                     "to cache full");
@@ -415,6 +415,7 @@ bool GetContext::GetBlobValue(const Slice& blob_index,
       user_key_, blob_index, prefetch_buffer, blob_value, bytes_read);
   if (!status.ok()) {
     if (status.IsIncomplete()) {
+      // FIXME: this code is not covered by unit tests
       MarkKeyMayExist();
       return false;
     }
@@ -116,7 +116,7 @@ TEST_F(SimCacheTest, SimCache) {
   simCache->SetStrictCapacityLimit(true);
   iter = db_->NewIterator(read_options);
   iter->Seek(std::to_string(kNumBlocks * 2 - 1));
-  ASSERT_TRUE(iter->status().IsIncomplete());
+  ASSERT_TRUE(iter->status().IsMemoryLimit());
   CheckCacheCounters(options, 1, 0, 0, 1);
   delete iter;
   iter = nullptr;