Fix segfault after column drop on compressed table
Decompression produces records that have all the decompressed data set, but it also retains fields that are used internally during decompression. These caused no problems unless an operation was performed on the whole row, in which case any field that ended up non-null became a potential segfault source.

Fixes #5458 #5411
This commit is contained in:
parent
feef9206fa
commit
975e9ca166
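The substance of the change is a one-line swap in decompress_chunk_create_tuple() (see the diff below): the decompressed slot is initialized with ExecStoreAllNullTuple() instead of being merely cleared. A minimal sketch of the pattern, assuming PostgreSQL's executor slot API; the helper function name is hypothetical:

/*
 * Minimal sketch of the slot-initialization pattern behind the fix,
 * assuming PostgreSQL's executor slot API (executor/tuptable.h). The
 * helper name prepare_decompressed_slot is hypothetical; the real
 * change lives in decompress_chunk_create_tuple().
 */
#include "postgres.h"
#include "executor/tuptable.h"

static void
prepare_decompressed_slot(TupleTableSlot *decompressed_slot)
{
	/*
	 * ExecClearTuple() only marks the slot empty: the per-attribute
	 * values[]/isnull[] arrays keep whatever a previous batch left in
	 * them. Decompression then overwrites just the columns it actually
	 * produces, so a whole-row expression can read stale non-null
	 * entries, e.g. a dangling pointer for a column dropped since.
	 */
	/* ExecClearTuple(decompressed_slot);  -- the old, unsafe setup */

	/*
	 * ExecStoreAllNullTuple() clears the slot and marks every attribute
	 * NULL, so any column that decompression does not fill in reads back
	 * as a valid NULL instead of garbage.
	 */
	ExecStoreAllNullTuple(decompressed_slot);
}

The test additions below exercise exactly this path: a whole-row reference (SELECT readings FROM readings) on a compressed chunk after a column has been dropped.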
@@ -31,6 +31,7 @@ accidentally triggering the load of a previous DB version.**
 * #5499 Do not segfault on large histogram() parameters
 * #5497 Allow named time_bucket arguments in Cagg definition
 * #5500 Fix when no FROM clause in continuous aggregate definition
+* #5462 Fix segfault after column drop on compressed table
 
 **Thanks**
 * @nikolaps for reporting an issue with the COPY fetcher
@@ -473,7 +473,7 @@ decompress_chunk_create_tuple(DecompressChunkState *state)
 
 	if (!state->initialized)
 	{
-		ExecClearTuple(decompressed_slot);
+		ExecStoreAllNullTuple(decompressed_slot);
 
 		/*
		 * Reset expression memory context to clean out any cruft from
@@ -630,3 +630,33 @@ INSERT INTO ts_table SELECT * FROM data_table;
 --cleanup tables
 DROP TABLE data_table cascade;
 DROP TABLE ts_table cascade;
+--invalid reads for row expressions after column dropped on compressed tables #5458
+CREATE TABLE readings(
+	"time" TIMESTAMPTZ NOT NULL,
+	battery_status TEXT,
+	battery_temperature DOUBLE PRECISION
+);
+INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00');
+SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true);
+NOTICE:  migrating data to chunks
+   create_hypertable
+------------------------
+ (35,public,readings,t)
+(1 row)
+
+ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature');
+SELECT compress_chunk(show_chunks('readings'));
+              compress_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_35_22_chunk
+(1 row)
+
+ALTER TABLE readings DROP COLUMN battery_status;
+INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2);
+SELECT readings FROM readings;
+               readings
+--------------------------------------
+ ("Fri Nov 11 03:11:11 2022 PST",)
+ ("Fri Nov 11 11:11:11 2022 PST",0.2)
+(2 rows)
+
@@ -364,3 +364,21 @@ INSERT INTO ts_table SELECT * FROM data_table;
 --cleanup tables
 DROP TABLE data_table cascade;
 DROP TABLE ts_table cascade;
+
+--invalid reads for row expressions after column dropped on compressed tables #5458
+CREATE TABLE readings(
+	"time" TIMESTAMPTZ NOT NULL,
+	battery_status TEXT,
+	battery_temperature DOUBLE PRECISION
+);
+
+INSERT INTO readings ("time") VALUES ('2022-11-11 11:11:11-00');
+
+SELECT create_hypertable('readings', 'time', chunk_time_interval => interval '12 hour', migrate_data=>true);
+
+ALTER TABLE readings SET (timescaledb.compress,timescaledb.compress_segmentby = 'battery_temperature');
+SELECT compress_chunk(show_chunks('readings'));
+
+ALTER TABLE readings DROP COLUMN battery_status;
+INSERT INTO readings ("time", battery_temperature) VALUES ('2022-11-11 11:11:11', 0.2);
+SELECT readings FROM readings;