From 7ffdd0716cab462596a4a400f3a8845e3940a592 Mon Sep 17 00:00:00 2001
From: Ante Kresic
Date: Wed, 31 Jan 2024 10:46:07 +0100
Subject: [PATCH] Reduce locking on decompress_chunk to allow reads

With a recent change, we updated the lock on decompress_chunk to take
an AccessExclusiveLock on the uncompressed chunk at the start of this
potentially long running operation. Reducing this lock to ExclusiveLock
would enable reads to execute while we are decompressing the chunk.
AccessExclusive lock will be taken on the compressed chunk at the end
of the operation, during its removal.
---
 tsl/src/compression/api.c                     |   4 +-
 tsl/src/compression/compression.c             |   4 +-
 .../expected/compression_conflicts_iso.out    | 107 +++++++++++++++++-
 .../specs/compression_conflicts_iso.spec      |  25 ++++
 4 files changed, 136 insertions(+), 4 deletions(-)

diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c
index 280f76eed..1255301d1 100644
--- a/tsl/src/compression/api.c
+++ b/tsl/src/compression/api.c
@@ -607,12 +607,14 @@ decompress_chunk_impl(Chunk *uncompressed_chunk, bool if_compressed)
 	/*
 	 * Lock the compressed chunk that is going to be deleted. At this point,
 	 * the reference to the compressed chunk is already removed from the
-	 * catalog. So, new readers do not include it in their operations.
+	 * catalog but we need to block readers from accessing this chunk
+	 * until the catalog changes are visible to them.
 	 *
 	 * Note: Calling performMultipleDeletions in chunk_index_tuple_delete
 	 * also requests an AccessExclusiveLock on the compressed_chunk. However,
 	 * this call makes the lock on the chunk explicit.
 	 */
+	LockRelationOid(uncompressed_chunk->table_id, AccessExclusiveLock);
 	LockRelationOid(compressed_chunk->table_id, AccessExclusiveLock);
 	ts_chunk_drop(compressed_chunk, DROP_RESTRICT, -1);
 	ts_cache_release(hcache);
diff --git a/tsl/src/compression/compression.c b/tsl/src/compression/compression.c
index 86e09ac00..bf6bb65cd 100644
--- a/tsl/src/compression/compression.c
+++ b/tsl/src/compression/compression.c
@@ -1498,9 +1498,9 @@ decompress_chunk(Oid in_table, Oid out_table)
 	 * We want to prevent other decompressors from decompressing this table,
 	 * and we want to prevent INSERTs or UPDATEs which could mess up our decompression.
 	 * We may as well allow readers to keep reading the compressed data while
-	 * we are compressing, so we only take an ExclusiveLock instead of AccessExclusive.
+	 * we are decompressing, so we only take an ExclusiveLock instead of AccessExclusive.
 	 */
-	Relation out_rel = table_open(out_table, AccessExclusiveLock);
+	Relation out_rel = table_open(out_table, ExclusiveLock);
 	Relation in_rel = table_open(in_table, ExclusiveLock);
 	int64 nrows_processed = 0;
 
diff --git a/tsl/test/isolation/expected/compression_conflicts_iso.out b/tsl/test/isolation/expected/compression_conflicts_iso.out
index d4e776a65..4b9462e64 100644
--- a/tsl/test/isolation/expected/compression_conflicts_iso.out
+++ b/tsl/test/isolation/expected/compression_conflicts_iso.out
@@ -1,4 +1,4 @@
-Parsed test spec with 9 sessions
+Parsed test spec with 10 sessions
 
 starting permutation: LockChunk1 IB I1 C1 UnlockChunk Ic Cc SC1 S1 SChunkStat
 step LockChunk1: 
@@ -2954,3 +2954,108 @@ time|device|location|value
     1|     1|     100|   99
 (1 row)
 
+
+starting permutation: CA1 CAc LockChunkTuple DA1 SA SF UnlockChunkTuple DAc
+step CA1: 
+  BEGIN;
+  SELECT
+    CASE WHEN compress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
+  FROM show_chunks('ts_device_table') AS ch
+  ORDER BY ch::text;
+
+compress
+--------
+t
+(1 row)
+
+step CAc: COMMIT;
+step LockChunkTuple: 
+  BEGIN;
+  SELECT status as chunk_status from _timescaledb_catalog.chunk
+  WHERE id = ( select min(ch.id) FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch WHERE ch.hypertable_id = ht.id AND ht.table_name like 'ts_device_table') FOR UPDATE;
+
+chunk_status
+------------
+           1
+(1 row)
+
+step DA1: 
+  BEGIN;
+  SELECT
+    CASE WHEN decompress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
+  FROM show_chunks('ts_device_table') AS ch
+  ORDER BY ch::text;
+
+step SA: SELECT * FROM ts_device_table;
+time|device|location|value
+----+------+--------+-----
+   0|     1|     100|   20
+   1|     1|     100|   20
+   2|     1|     100|   20
+   3|     1|     100|   20
+   4|     1|     100|   20
+   5|     1|     100|   20
+   6|     1|     100|   20
+   7|     1|     100|   20
+   8|     1|     100|   20
+   9|     1|     100|   20
+(10 rows)
+
+step SF: 
+step UnlockChunkTuple: ROLLBACK;
+step DA1: <... completed>
+compress
+--------
+t
+(1 row)
+
+step DAc: COMMIT;
+
+starting permutation: SB SA CA1 SA SF SR CAc
+step SB: BEGIN;
+step SA: SELECT * FROM ts_device_table;
+time|device|location|value
+----+------+--------+-----
+   0|     1|     100|   20
+   1|     1|     100|   20
+   2|     1|     100|   20
+   3|     1|     100|   20
+   4|     1|     100|   20
+   5|     1|     100|   20
+   6|     1|     100|   20
+   7|     1|     100|   20
+   8|     1|     100|   20
+   9|     1|     100|   20
+(10 rows)
+
+step CA1: 
+  BEGIN;
+  SELECT
+    CASE WHEN compress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
+  FROM show_chunks('ts_device_table') AS ch
+  ORDER BY ch::text;
+
+step SA: SELECT * FROM ts_device_table;
+time|device|location|value
+----+------+--------+-----
+   0|     1|     100|   20
+   1|     1|     100|   20
+   2|     1|     100|   20
+   3|     1|     100|   20
+   4|     1|     100|   20
+   5|     1|     100|   20
+   6|     1|     100|   20
+   7|     1|     100|   20
+   8|     1|     100|   20
+   9|     1|     100|   20
+(10 rows)
+
+step SF: 
+step SR: ROLLBACK;
+step CA1: <... completed>
+compress
+--------
+t
+(1 row)
+
+step CAc: COMMIT;
diff --git a/tsl/test/isolation/specs/compression_conflicts_iso.spec b/tsl/test/isolation/specs/compression_conflicts_iso.spec
index 65b67ea63..3d1c844f9 100644
--- a/tsl/test/isolation/specs/compression_conflicts_iso.spec
+++ b/tsl/test/isolation/specs/compression_conflicts_iso.spec
@@ -49,11 +49,15 @@ step "SChunkStat" { SELECT status from _timescaledb_catalog.chunk
 WHERE id = ( select min(ch.id) FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch WHERE ch.hypertable_id = ht.id AND ht.table_name like 'ts_device_table'); }
 
 session "S"
+step "SB" { BEGIN; }
+step "SR" { ROLLBACK; }
 step "S1" { SELECT count(*) from ts_device_table; }
 step "SC1" { SELECT (count_chunktable(ch)).* FROM show_chunks('ts_device_table') AS ch LIMIT 1; }
 step "SH" { SELECT total_chunks, number_compressed_chunks from hypertable_compression_stats('ts_device_table'); }
 step "SA" { SELECT * FROM ts_device_table; }
 step "SU" { SELECT * FROM ts_device_table WHERE value IN (98,99); }
+# Step to confirm previous step finished
+step "SF" {}
 
 session "LCT"
 
@@ -98,6 +102,16 @@ step "CA1" {
 }
 step "CAc" { COMMIT; }
 
+session "DecompressAll"
+step "DA1" {
+  BEGIN;
+  SELECT
+    CASE WHEN decompress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
+  FROM show_chunks('ts_device_table') AS ch
+  ORDER BY ch::text;
+}
+step "DAc" { COMMIT; }
+
 session "RecompressChunk"
 step "RC" {
 	DO $$
@@ -167,3 +181,14 @@ permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IBRR" "Iu1" "RC" "Unloc
 permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IBS" "Iu1" "RC" "UnlockChunk" "Ic" "SH" "SA" "SChunkStat" "SU"
 permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IN1" "RC" "UnlockChunk" "INc" "SH" "SA" "SChunkStat"
 permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "INu1" "RC" "UnlockChunk" "INc" "SH" "SA" "SChunkStat" "SU"
+
+# Decompressing a chunk should not stop any reads
+# until the end when we drop the compressed chunk
+# which happens after updates to the chunk catalog tuple
+permutation "CA1" "CAc" "LockChunkTuple" "DA1" "SA" "SF" "UnlockChunkTuple" "DAc"
+
+# Compressing a chunk should not stop any reads
+# until it comes to truncating the uncompressed chunk
+# which happens at the end of the operations along with
+# catalog updates.
+permutation "SB" "SA" "CA1" "SA" "SF" "SR" "CAc"
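
Reviewer note, not part of the patch: a minimal two-session psql sketch of the behavior the new permutations exercise, assuming a hypertable named ts_device_table with a single compressed chunk (the table name and session split are illustrative, not taken from the test setup). The idea is that the ExclusiveLock now held during decompression conflicts with writes and with other (de)compression jobs, but not with the AccessShareLock taken by plain SELECTs, so reads should only block once the AccessExclusiveLock is taken near the end of the operation.

-- Session 1: start decompressing inside an explicit transaction.
-- While rows are copied back, only ExclusiveLock is held on the
-- uncompressed chunk, so concurrent SELECTs are not blocked.
BEGIN;
SELECT decompress_chunk(ch) FROM show_chunks('ts_device_table') AS ch;

-- Session 2 (run concurrently): a plain read only needs AccessShareLock
-- and is expected to return while session 1 is still copying rows.
SELECT count(*) FROM ts_device_table;

-- Session 1: near the end of decompress_chunk an AccessExclusiveLock is
-- taken on the uncompressed chunk before the compressed chunk is dropped;
-- readers arriving after that point wait until this transaction commits.
COMMIT;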