1
0
mirror of https://github.com/timescale/timescaledb.git synced 2025-05-20 12:47:40 +08:00

Reduce locking on decompress_chunk to allow reads

With a recent change, we updated the lock on decompress_chunk
to take an AccessExclusiveLock on the uncompressed chunk at
the start of this potentially long running operation. Reducing
this lock to ExclusiveLock would enable reads to execute while
we are decompressing the chunk. An AccessExclusiveLock will be
taken on the compressed chunk at the end of the operation,
during its removal.
This commit is contained in:
Ante Kresic 2024-01-31 10:46:07 +01:00 committed by Ante Kresic
parent 626975f09f
commit 7ffdd0716c
4 changed files with 136 additions and 4 deletions

@ -607,12 +607,14 @@ decompress_chunk_impl(Chunk *uncompressed_chunk, bool if_compressed)
/*
* Lock the compressed chunk that is going to be deleted. At this point,
* the reference to the compressed chunk is already removed from the
* catalog. So, new readers do not include it in their operations.
* catalog but we need to block readers from accessing this chunk
* until the catalog changes are visible to them.
*
* Note: Calling performMultipleDeletions in chunk_index_tuple_delete
* also requests an AccessExclusiveLock on the compressed_chunk. However,
* this call makes the lock on the chunk explicit.
*/
LockRelationOid(uncompressed_chunk->table_id, AccessExclusiveLock);
LockRelationOid(compressed_chunk->table_id, AccessExclusiveLock);
ts_chunk_drop(compressed_chunk, DROP_RESTRICT, -1);
ts_cache_release(hcache);

@ -1498,9 +1498,9 @@ decompress_chunk(Oid in_table, Oid out_table)
* We want to prevent other decompressors from decompressing this table,
* and we want to prevent INSERTs or UPDATEs which could mess up our decompression.
* We may as well allow readers to keep reading the compressed data while
* we are compressing, so we only take an ExclusiveLock instead of AccessExclusive.
* we are decompressing, so we only take an ExclusiveLock instead of AccessExclusive.
*/
Relation out_rel = table_open(out_table, AccessExclusiveLock);
Relation out_rel = table_open(out_table, ExclusiveLock);
Relation in_rel = table_open(in_table, ExclusiveLock);
int64 nrows_processed = 0;

@ -1,4 +1,4 @@
Parsed test spec with 9 sessions
Parsed test spec with 10 sessions
starting permutation: LockChunk1 IB I1 C1 UnlockChunk Ic Cc SC1 S1 SChunkStat
step LockChunk1:
@ -2954,3 +2954,108 @@ time|device|location|value
1| 1| 100| 99
(1 row)
starting permutation: CA1 CAc LockChunkTuple DA1 SA SF UnlockChunkTuple DAc
step CA1:
BEGIN;
SELECT
CASE WHEN compress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
FROM show_chunks('ts_device_table') AS ch
ORDER BY ch::text;
compress
--------
t
(1 row)
step CAc: COMMIT;
step LockChunkTuple:
BEGIN;
SELECT status as chunk_status from _timescaledb_catalog.chunk
WHERE id = ( select min(ch.id) FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch WHERE ch.hypertable_id = ht.id AND ht.table_name like 'ts_device_table') FOR UPDATE;
chunk_status
------------
1
(1 row)
step DA1:
BEGIN;
SELECT
CASE WHEN decompress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
FROM show_chunks('ts_device_table') AS ch
ORDER BY ch::text;
<waiting ...>
step SA: SELECT * FROM ts_device_table;
time|device|location|value
----+------+--------+-----
0| 1| 100| 20
1| 1| 100| 20
2| 1| 100| 20
3| 1| 100| 20
4| 1| 100| 20
5| 1| 100| 20
6| 1| 100| 20
7| 1| 100| 20
8| 1| 100| 20
9| 1| 100| 20
(10 rows)
step SF:
step UnlockChunkTuple: ROLLBACK;
step DA1: <... completed>
compress
--------
t
(1 row)
step DAc: COMMIT;
starting permutation: SB SA CA1 SA SF SR CAc
step SB: BEGIN;
step SA: SELECT * FROM ts_device_table;
time|device|location|value
----+------+--------+-----
0| 1| 100| 20
1| 1| 100| 20
2| 1| 100| 20
3| 1| 100| 20
4| 1| 100| 20
5| 1| 100| 20
6| 1| 100| 20
7| 1| 100| 20
8| 1| 100| 20
9| 1| 100| 20
(10 rows)
step CA1:
BEGIN;
SELECT
CASE WHEN compress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
FROM show_chunks('ts_device_table') AS ch
ORDER BY ch::text;
<waiting ...>
step SA: SELECT * FROM ts_device_table;
time|device|location|value
----+------+--------+-----
0| 1| 100| 20
1| 1| 100| 20
2| 1| 100| 20
3| 1| 100| 20
4| 1| 100| 20
5| 1| 100| 20
6| 1| 100| 20
7| 1| 100| 20
8| 1| 100| 20
9| 1| 100| 20
(10 rows)
step SF:
step SR: ROLLBACK;
step CA1: <... completed>
compress
--------
t
(1 row)
step CAc: COMMIT;

@ -49,11 +49,15 @@ step "SChunkStat" { SELECT status from _timescaledb_catalog.chunk
WHERE id = ( select min(ch.id) FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch WHERE ch.hypertable_id = ht.id AND ht.table_name like 'ts_device_table'); }
session "S"
step "SB" { BEGIN; }
step "SR" { ROLLBACK; }
step "S1" { SELECT count(*) from ts_device_table; }
step "SC1" { SELECT (count_chunktable(ch)).* FROM show_chunks('ts_device_table') AS ch LIMIT 1; }
step "SH" { SELECT total_chunks, number_compressed_chunks from hypertable_compression_stats('ts_device_table'); }
step "SA" { SELECT * FROM ts_device_table; }
step "SU" { SELECT * FROM ts_device_table WHERE value IN (98,99); }
# Step to confirm previous step finished
step "SF" {}
session "LCT"
@ -98,6 +102,16 @@ step "CA1" {
}
step "CAc" { COMMIT; }
session "DecompressAll"
step "DA1" {
BEGIN;
SELECT
CASE WHEN decompress_chunk(ch) IS NOT NULL THEN true ELSE false END AS compress
FROM show_chunks('ts_device_table') AS ch
ORDER BY ch::text;
}
step "DAc" { COMMIT; }
session "RecompressChunk"
step "RC" {
DO $$
@ -167,3 +181,14 @@ permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IBRR" "Iu1" "RC" "Unloc
permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IBS" "Iu1" "RC" "UnlockChunk" "Ic" "SH" "SA" "SChunkStat" "SU"
permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "IN1" "RC" "UnlockChunk" "INc" "SH" "SA" "SChunkStat"
permutation "CA1" "CAc" "I1" "SChunkStat" "LockChunk1" "INu1" "RC" "UnlockChunk" "INc" "SH" "SA" "SChunkStat" "SU"
# Decompressing a chunk should not stop any reads
# until the end when we drop the compressed chunk
# which happens after updates to the chunk catalog tuple
permutation "CA1" "CAc" "LockChunkTuple" "DA1" "SA" "SF" "UnlockChunkTuple" "DAc"
# Compressing a chunk should not stop any reads
# until it comes to truncating the uncompressed chunk,
# which happens at the end of the operation along with
# catalog updates.
permutation "SB" "SA" "CA1" "SA" "SF" "SR" "CAc"