timescaledb/tsl/test/isolation/specs/decompression_chunk_and_parallel_query.in
Jan Nidzwetzki de30d190e4 Fix a deadlock in chunk decompression and SELECTs
This patch fixes a deadlock between chunk decompression and SELECT
queries executed in parallel. The change in
a608d7db614c930213dee8d6a5e9d26a0259da61 requests an AccessExclusiveLock
for the decompressed chunk instead of the compressed chunk, resulting in
deadlocks.

In addition, an isolation test has been added to test that SELECT
queries on a chunk that is currently decompressed can be executed.

Fixes #4605
2022-09-22 14:37:14 +02:00

113 lines
5.1 KiB
Plaintext

# This file and its contents are licensed under the Timescale License.
# Please see the included NOTICE for copyright information and
# LICENSE-TIMESCALE for a copy of the license.
###
# This isolation test checks that SELECT queries can be performed in parallel to
# chunk decompression operations. This version of the isolation tests creates the
# default index on the time column. See the decompression_chunk_and_parallel_query_wo_idx
# test for a version without any index.
###
setup {
CREATE OR REPLACE FUNCTION debug_waitpoint_enable(TEXT) RETURNS VOID LANGUAGE C VOLATILE STRICT
AS '@TS_MODULE_PATHNAME@', 'ts_debug_point_enable';
CREATE OR REPLACE FUNCTION debug_waitpoint_release(TEXT) RETURNS VOID LANGUAGE C VOLATILE STRICT
AS '@TS_MODULE_PATHNAME@', 'ts_debug_point_release';
CREATE TABLE sensor_data (
time timestamptz not null,
sensor_id integer not null,
cpu double precision null,
temperature double precision null);
-- Create the hypertable. This version of the test keeps the default index on the
-- time column; see decompression_chunk_and_parallel_query_wo_idx for the variant
-- created with create_default_indexes => FALSE.
SELECT FROM create_hypertable('sensor_data','time', chunk_time_interval => INTERVAL '60 days');
-- All generated data is part of one chunk. Only one chunk is used because 'compress_chunk' is
-- used in this isolation test. In contrast to 'policy_compression_execute' all decompression
-- operations are executed in one transaction. So, processing more than one chunk with 'compress_chunk'
-- could lead to deadlocks that do not occur in real-world scenarios (due to locks held on a completely
-- decompressed chunk).
INSERT INTO sensor_data
SELECT time + (INTERVAL '1 minute' * random()) AS time,
sensor_id,
random() AS cpu,
random()* 100 AS temperature
FROM generate_series('2022-01-01', '2022-01-15', INTERVAL '1 minute') AS g1(time),
generate_series(1, 50, 1) AS g2(sensor_id)
ORDER BY time;
SELECT count(*) FROM sensor_data;
ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_segmentby = 'sensor_id, cpu');
SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i;
SELECT compression_status FROM chunk_compression_stats('sensor_data');
}
teardown {
-- Dropping the hypertable also removes its chunks and the compressed data.
DROP TABLE sensor_data;
}
# Session 1 performs the chunk decompression that the other sessions race against.
session "s1"
# Decompress every chunk of the hypertable (a single chunk in this test, see the
# setup comment) and report the resulting compression status. While s3 holds the
# debug waitpoints, this step blocks inside decompress_chunk.
step "s1_decompress" {
SELECT count(*) FROM (SELECT decompress_chunk(i, if_compressed => true) FROM show_chunks('sensor_data') i) i;
SELECT compression_status FROM chunk_compression_stats('sensor_data');
}
# Session 2 issues plain SELECTs against the hypertable. They must not block
# while the decompression in s1 is paused before the reindex phase.
session "s2"
# Note: the empty SELECT target list is intentional — only the locking behavior
# matters here, not the returned data (and it keeps the expected output small).
step "s2_read_sensor_data" {
SELECT FROM sensor_data;
}
# Session 3 controls the debug waitpoints that pause s1's decompression at
# well-defined points, so the test can probe which phases allow parallel reads.
session "s3"
# Enable both waitpoints before the decompression starts.
step "s3_lock_decompression_locks" {
-- This waitpoint is defined before the decompressed chunk is re-indexed. Up to this
-- point parallel SELECTs should be possible.
SELECT debug_waitpoint_enable('decompress_chunk_impl_before_reindex');
-- This waitpoint is defined after all locks for the decompression and the deletion
-- of the compressed chunk are requested.
SELECT debug_waitpoint_enable('decompress_chunk_impl_after_reindex');
}
# Verify via pg_locks that s1 is parked on the first waitpoint, then release it.
step "s3_unlock_decompression_before_reindex_lock" {
-- Ensure that we are waiting on our debug waitpoint
-- Note: The OIDs of the advisory locks are based on the hash value of the lock name (see debug_point_init())
-- decompress_chunk_impl_before_reindex = 3966149665.
SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND locktype = 'advisory' ORDER BY relation, locktype, mode, granted;
SELECT debug_waitpoint_release('decompress_chunk_impl_before_reindex');
}
# Verify via pg_locks that s1 is parked on the second waitpoint, then release it.
step "s3_unlock_decompression_after_reindex_lock" {
-- Ensure that we are waiting on our debug waitpoint
-- Note: The OIDs of the advisory locks are based on the hash value of the lock name (see debug_point_init())
-- decompress_chunk_impl_after_reindex = 1858017383.
SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND locktype = 'advisory' ORDER BY relation, locktype, mode, granted;
SELECT debug_waitpoint_release('decompress_chunk_impl_after_reindex');
}
# Desired execution:
# s3_lock_decompression_locks - Locks the decompression waitpoints.
# s2_read_sensor_data - Read the compressed chunk. This should be executed without blocking.
# s1_decompress - Start the decompression and block on the first waitpoint.
# s2_read_sensor_data - Read the compressed chunk again. This should be still possible without blocking.
# s3_unlock_decompression_before_reindex_lock - Releases the decompress_chunk_impl_before_reindex waitpoint.
# s1_decompress continues - The chunk is reindexed and the index is locked.
# s2_read_sensor_data - Read the chunk. This blocks due to the locked index.
# s3_unlock_decompression_after_reindex_lock - Releases the decompress_chunk_impl_after_reindex waitpoint.
# s1_decompress continues - Finishes the decompression operation and releases the locks.
# s2_read_sensor_data continues.
permutation "s3_lock_decompression_locks" "s2_read_sensor_data" "s1_decompress" "s2_read_sensor_data" "s3_unlock_decompression_before_reindex_lock" "s2_read_sensor_data" "s3_unlock_decompression_after_reindex_lock"