An INSERT into a compressed hypertable with more open chunks than ts_guc_max_open_chunks_per_insert causes a segmentation fault. A new row that belongs to a compressed chunk has to be compressed first, and the memory required for compressing the row is allocated from the RowCompressor::per_row_ctx memory context. Once the row is compressed, ExecInsert() is called, and it allocates and frees memory from that same context instead of the executor state's context. This corrupts memory.

Fixes #4778
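For illustration, here is a minimal sketch of the memory-context discipline the fix implies, written against PostgreSQL's MemoryContext API. The RowCompressor struct is reduced to the one field that matters here, and row_compressor_compress() and executor_insert_compressed() are hypothetical stand-ins for the TimescaleDB internals, not the actual functions:

/*
 * Sketch only: shows where allocations should live, not TimescaleDB's
 * actual code. Assumes PostgreSQL's MemoryContext API.
 */
#include "postgres.h"
#include "executor/executor.h"
#include "utils/memutils.h"

/* Simplified stand-in for TimescaleDB's RowCompressor. */
typedef struct RowCompressor
{
    MemoryContext per_row_ctx;  /* scratch memory for compressing one row */
} RowCompressor;

/* Hypothetical helpers standing in for the real compression/insert paths. */
extern void row_compressor_compress(RowCompressor *compressor, TupleTableSlot *slot);
extern void executor_insert_compressed(EState *estate, TupleTableSlot *slot);

static void
compress_and_insert_row(RowCompressor *compressor, EState *estate,
                        TupleTableSlot *slot)
{
    /* Per-row scratch allocations belong in the compressor's own context. */
    MemoryContext oldctx = MemoryContextSwitchTo(compressor->per_row_ctx);

    row_compressor_compress(compressor, slot);
    MemoryContextSwitchTo(oldctx);

    /*
     * The insert must run in the caller's (executor) memory context. If it
     * ran inside per_row_ctx instead, memory that the insert path allocates
     * and frees would live in per_row_ctx, and resetting that context per
     * row would free live executor memory -- the corruption described above.
     */
    executor_insert_compressed(estate, slot);

    /* Only now is it safe to throw away the per-row scratch memory. */
    MemoryContextReset(compressor->per_row_ctx);
}

The invariant: per_row_ctx may only hold allocations whose lifetime ends with the current row, so anything the executor allocates or frees during ExecInsert() has to come from the executor's own context.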
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

-- test constraint exclusion with prepared statements and generic plans
CREATE TABLE i3719 (time timestamptz NOT NULL, data text);
SELECT table_name FROM create_hypertable('i3719', 'time');
ALTER TABLE i3719 SET (timescaledb.compress);

INSERT INTO i3719 VALUES ('2021-01-01 00:00:00', 'chunk 1');
SELECT count(compress_chunk(c)) FROM show_chunks('i3719') c;
INSERT INTO i3719 VALUES ('2021-02-22 08:00:00', 'chunk 2');

SET plan_cache_mode TO force_generic_plan;
PREPARE p1(timestamptz) AS UPDATE i3719 SET data = 'x' WHERE time = $1;
PREPARE p2(timestamptz) AS DELETE FROM i3719 WHERE time = $1;
EXECUTE p1('2021-02-22T08:00:00+00');
EXECUTE p2('2021-02-22T08:00:00+00');

DEALLOCATE p1;
DEALLOCATE p2;

DROP TABLE i3719;

-- github issue 4778
CREATE TABLE metric_5m (
    time TIMESTAMPTZ NOT NULL,
    value DOUBLE PRECISION NOT NULL,
    series_id BIGINT NOT NULL
);
SELECT table_name FROM create_hypertable(
    'metric_5m'::regclass,
    'time'::name, chunk_time_interval => interval '5m',
    create_default_indexes => false);
-- enable compression
ALTER TABLE metric_5m SET (
    timescaledb.compress,
    timescaledb.compress_segmentby = 'series_id',
    timescaledb.compress_orderby = 'time, value'
);
SET work_mem TO '64kB';
SELECT '2022-10-10 14:33:44.1234+05:30' AS start_date \gset
-- populate the hypertable
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s, 1 FROM generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t CROSS JOIN generate_series(1, 10, 1) s;
-- manually compress all chunks
SELECT count(compress_chunk(c.schema_name || '.' || c.table_name))
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht
WHERE c.hypertable_id = ht.id AND ht.table_name = 'metric_5m' AND c.compressed_chunk_id IS NULL;

-- insert into the compressed hypertable; this should not crash
INSERT INTO metric_5m (time, series_id, value)
SELECT t, s, 1 FROM generate_series(:'start_date'::timestamptz, :'start_date'::timestamptz + interval '1 day', '10s') t CROSS JOIN generate_series(1, 10, 1) s;
-- clean up
RESET work_mem;
DROP TABLE metric_5m;