Fix the segfault bug in CompressedSecondaryCache and its tests (#10507)
Summary:

This fix replaces `AllocateBlock()` with `new`. Once I figure out why `AllocateBlock()` might cause the segfault, I will update the implementation.

Fixes the bug that causes ./compressed_secondary_cache_test to output the following test failures:

```
Note: Google Test filter = CompressedSecondaryCacheTest.MergeChunksIntoValueTest
[==========] Running 1 test from 1 test case.
[----------] Global test environment set-up.
[----------] 1 test from CompressedSecondaryCacheTest
[ RUN ] CompressedSecondaryCacheTest.MergeChunksIntoValueTest
[ OK ] CompressedSecondaryCacheTest.MergeChunksIntoValueTest (1 ms)
[----------] 1 test from CompressedSecondaryCacheTest (1 ms total)
[----------] Global test environment tear-down
[==========] 1 test from 1 test case ran. (9 ms total)
[ PASSED ] 1 test.
t/run-compressed_secondary_cache_test-CompressedSecondaryCacheTest.MergeChunksIntoValueTest: line 4: 1091086 Segmentation fault (core dumped) TEST_TMPDIR=$d ./compressed_secondary_cache_test --gtest_filter=CompressedSecondaryCacheTest.MergeChunksIntoValueTest

Note: Google Test filter = CompressedSecondaryCacheTest.BasicTestWithMemoryAllocatorAndCompression
[==========] Running 1 test from 1 test case.
[----------] Global test environment set-up.
[----------] 1 test from CompressedSecondaryCacheTest
[ RUN ] CompressedSecondaryCacheTest.BasicTestWithMemoryAllocatorAndCompression
[ OK ] CompressedSecondaryCacheTest.BasicTestWithMemoryAllocatorAndCompression (1 ms)
[----------] 1 test from CompressedSecondaryCacheTest (1 ms total)
[----------] Global test environment tear-down
[==========] 1 test from 1 test case ran. (2 ms total)
[ PASSED ] 1 test.
t/run-compressed_secondary_cache_test-CompressedSecondaryCacheTest.BasicTestWithMemoryAllocatorAndCompression: line 4: 1090883 Segmentation fault (core dumped) TEST_TMPDIR=$d ./compressed_secondary_cache_test --gtest_filter=CompressedSecondaryCacheTest.BasicTestWithMemoryAllocatorAndCompression
```

Pull Request resolved: https://github.com/facebook/rocksdb/pull/10507

Test Plan:

Test 1:
```
$make -j 24
$./compressed_secondary_cache_test
```

Test 2:
```
$COMPILE_WITH_ASAN=1 make -j 24
$./compressed_secondary_cache_test
```

Test 3:
```
$COMPILE_WITH_TSAN=1 make -j 24
$./compressed_secondary_cache_test
```

Reviewed By: anand1976

Differential Revision: D38529885

Pulled By: gitbw95

fbshipit-source-id: d903fa3fadbd4d29f9528728c63a4f61c4396890
This commit is contained in:
parent 0b02960d8c
commit f060b47ee8
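For readers unfamiliar with the layout involved, here is a minimal, self-contained sketch (not RocksDB's actual code; `Chunk`, the payload string, and the sizes are made up for illustration) of the allocation pattern the fix switches to: a header struct with a trailing payload array carved out of a single `new char[]` buffer, and freed with a matching `delete[]`:

```cpp
#include <cstddef>
#include <cstring>
#include <string>

// Simplified stand-in for CompressedSecondaryCache::CacheValueChunk:
// a next pointer, the chunk size, and a trailing payload array.
struct Chunk {
  Chunk* next;
  std::size_t size;
  char data[1];  // payload actually extends past the end of the struct
};

int main() {
  std::string payload = "some compressed bytes";
  // One raw buffer holds the header plus the payload; this is what the fix
  // allocates with plain new[] instead of AllocateBlock()/CacheAllocationPtr.
  std::size_t total = sizeof(Chunk) - 1 + payload.size();
  Chunk* chunk = reinterpret_cast<Chunk*>(new char[total]);
  chunk->next = nullptr;
  chunk->size = payload.size();
  std::memcpy(chunk->data, payload.data(), payload.size());

  // A buffer obtained from new char[] must be released with delete[] on the
  // same char* type it was allocated as.
  delete[] reinterpret_cast<char*>(chunk);
  return 0;
}
```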
HISTORY.md

@@ -23,6 +23,7 @@
 * Fixed a bug where blobs read during compaction would pollute the cache.
 * Fixed a data race in LRUCache when used with a secondary_cache.
 * Fixed a bug where blobs read by iterators would be inserted into the cache even with the `fill_cache` read option set to false.
+* Fixed the segfault caused by `AllocateData()` in `CompressedSecondaryCache::SplitValueIntoChunks()` and `MergeChunksIntoValueTest`.
 
 ### Behavior Change
 * Added checksum handshake during the copying of decompressed WAL fragment. This together with #9875, #10037, #10212, #10114 and #10319 provides end-to-end integrity protection for write batch during recovery.
@@ -32,7 +33,7 @@
 * Improve universal tiered storage compaction picker to avoid extra major compaction triggered by size amplification. If `preclude_last_level_data_seconds` is enabled, the size amplification is calculated within non last_level data only which skip the last level and use the penultimate level as the size base.
 
 ### Performance Improvements
 * Instead of constructing `FragmentedRangeTombstoneList` during every read operation, it is now constructed once and stored in immutable memtables. This improves speed of querying range tombstones from immutable memtables.
 * Improve read performance by avoiding dynamic memory allocation.
 * When using iterators with the integrated BlobDB implementation, blob cache handles are now released immediately when the iterator's position changes.
 
cache/compressed_secondary_cache.cc (6 lines changed)
@@ -149,7 +149,6 @@ CompressedSecondaryCache::SplitValueIntoChunks(
 
   CacheValueChunk dummy_head = CacheValueChunk();
   CacheValueChunk* current_chunk = &dummy_head;
-  CacheAllocationPtr ptr;
   // Do not split when value size is large or there is no compression.
   size_t predicted_chunk_size{0};
   size_t actual_chunk_size{0};
@@ -170,8 +169,9 @@ CompressedSecondaryCache::SplitValueIntoChunks(
       tmp_size = *(--upper);
     }
 
-    ptr = AllocateBlock(tmp_size, cache_options_.memory_allocator.get());
-    current_chunk->next = reinterpret_cast<CacheValueChunk*>(ptr.release());
+    CacheValueChunk* new_chunk =
+        reinterpret_cast<CacheValueChunk*>(new char[tmp_size]);
+    current_chunk->next = new_chunk;
     current_chunk = current_chunk->next;
     actual_chunk_size = tmp_size - sizeof(CacheValueChunk) + 1;
     memcpy(current_chunk->data, src_ptr, actual_chunk_size);
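As a reading aid for the hunk above, the following is a hedged, standalone sketch of the dummy-head / linked-chunk splitting pattern with the `new char[]` allocation; it uses a fixed `max_payload` instead of RocksDB's compression-aware size-class logic, and `Chunk` again stands in for `CacheValueChunk`:

```cpp
#include <cstddef>
#include <cstring>
#include <string>

struct Chunk {
  Chunk* next;
  std::size_t size;
  char data[1];
};

// Split `value` into a singly linked list of chunks holding at most
// `max_payload` bytes each. The caller owns the chain and must delete[]
// every node (as raw char[] buffers), mirroring the pattern in the diff.
Chunk* SplitIntoChunks(const std::string& value, std::size_t max_payload) {
  Chunk dummy_head = Chunk();
  Chunk* current_chunk = &dummy_head;
  const char* src_ptr = value.data();
  std::size_t remaining = value.size();
  while (remaining > 0) {
    std::size_t payload = remaining < max_payload ? remaining : max_payload;
    std::size_t total = sizeof(Chunk) - 1 + payload;
    // Allocate header + payload as one raw buffer, as in the fixed code.
    Chunk* new_chunk = reinterpret_cast<Chunk*>(new char[total]);
    new_chunk->next = nullptr;
    new_chunk->size = payload;
    std::memcpy(new_chunk->data, src_ptr, payload);
    current_chunk->next = new_chunk;
    current_chunk = new_chunk;
    src_ptr += payload;
    remaining -= payload;
  }
  return dummy_head.next;
}
```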
cache/compressed_secondary_cache_test.cc (29 lines changed)
@@ -570,42 +570,28 @@ class CompressedSecondaryCacheTest : public testing::Test {
   }
 
   void MergeChunksIntoValueTest() {
-    JemallocAllocatorOptions jopts;
-    std::shared_ptr<MemoryAllocator> allocator;
-    std::string msg;
-    if (JemallocNodumpAllocator::IsSupported(&msg)) {
-      Status s = NewJemallocNodumpAllocator(jopts, &allocator);
-      if (!s.ok()) {
-        ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
-      }
-    } else {
-      ROCKSDB_GTEST_BYPASS("JEMALLOC not supported");
-    }
-
     using CacheValueChunk = CompressedSecondaryCache::CacheValueChunk;
     Random rnd(301);
     size_t size1{2048};
     std::string str1 = rnd.RandomString(static_cast<int>(size1));
-    CacheAllocationPtr ptr =
-        AllocateBlock(sizeof(CacheValueChunk) - 1 + size1, allocator.get());
-    CacheValueChunk* current_chunk =
-        reinterpret_cast<CacheValueChunk*>(ptr.release());
+    CacheValueChunk* current_chunk = reinterpret_cast<CacheValueChunk*>(
+        new char[sizeof(CacheValueChunk) - 1 + size1]);
     CacheValueChunk* chunks_head = current_chunk;
     memcpy(current_chunk->data, str1.data(), size1);
     current_chunk->size = size1;
 
     size_t size2{256};
     std::string str2 = rnd.RandomString(static_cast<int>(size2));
-    ptr = AllocateBlock(sizeof(CacheValueChunk) - 1 + size2, allocator.get());
-    current_chunk->next = reinterpret_cast<CacheValueChunk*>(ptr.release());
+    current_chunk->next = reinterpret_cast<CacheValueChunk*>(
+        new char[sizeof(CacheValueChunk) - 1 + size2]);
     current_chunk = current_chunk->next;
     memcpy(current_chunk->data, str2.data(), size2);
     current_chunk->size = size2;
 
     size_t size3{31};
     std::string str3 = rnd.RandomString(static_cast<int>(size3));
-    ptr = AllocateBlock(sizeof(CacheValueChunk) - 1 + size3, allocator.get());
-    current_chunk->next = reinterpret_cast<CacheValueChunk*>(ptr.release());
+    current_chunk->next = reinterpret_cast<CacheValueChunk*>(
+        new char[sizeof(CacheValueChunk) - 1 + size3]);
     current_chunk = current_chunk->next;
     memcpy(current_chunk->data, str3.data(), size3);
     current_chunk->size = size3;
@@ -614,8 +600,7 @@ class CompressedSecondaryCacheTest : public testing::Test {
     std::string str = str1 + str2 + str3;
 
     std::unique_ptr<CompressedSecondaryCache> sec_cache =
-        std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5,
-                                                   allocator);
+        std::make_unique<CompressedSecondaryCache>(1000, 0, true, 0.5);
     size_t charge{0};
     CacheAllocationPtr value =
         sec_cache->MergeChunksIntoValue(chunks_head, charge);
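Finally, a small sketch of what the updated test exercises end to end; `MergeChunks()` is a hypothetical stand-in for `CompressedSecondaryCache::MergeChunksIntoValue()` (whose real signature also reports a charge), and the chunk sizes echo the 2048/256/31 values used in the test:

```cpp
#include <cassert>
#include <cstddef>
#include <cstring>
#include <string>

struct Chunk {
  Chunk* next;
  std::size_t size;
  char data[1];
};

// Hypothetical stand-in for MergeChunksIntoValue(): walk the chain and copy
// every chunk's payload into one contiguous value.
std::string MergeChunks(const Chunk* head) {
  std::string merged;
  for (const Chunk* c = head; c != nullptr; c = c->next) {
    merged.append(c->data, c->size);
  }
  return merged;
}

int main() {
  std::string str1(2048, 'a'), str2(256, 'b'), str3(31, 'c');
  Chunk* head = nullptr;
  Chunk** tail = &head;
  for (const std::string* s : {&str1, &str2, &str3}) {
    // Each node is a raw char[] buffer, as in the updated test.
    Chunk* c =
        reinterpret_cast<Chunk*>(new char[sizeof(Chunk) - 1 + s->size()]);
    c->next = nullptr;
    c->size = s->size();
    std::memcpy(c->data, s->data(), s->size());
    *tail = c;
    tail = &c->next;
  }
  // The merged value must equal the original concatenation.
  assert(MergeChunks(head) == str1 + str2 + str3);
  // Free each node the same way it was allocated.
  for (Chunk* c = head; c != nullptr;) {
    Chunk* next = c->next;
    delete[] reinterpret_cast<char*>(c);
    c = next;
  }
  return 0;
}
```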