Always copy into non-compressed slot of arrow slot

When copying from a non-arrow slot to an arrow slot, we should always
copy the data into the non-compressed slot and never to the compressed
slot.

The previous check for matching number of attributes fails when you drop
one column from the hyperstore.
This commit is contained in:
Mats Kindahl 2024-09-10 08:45:30 +02:00 committed by Mats Kindahl
parent 86fb747202
commit e73d0ceb04
3 changed files with 68 additions and 30 deletions

View File

@@ -638,39 +638,14 @@ tts_arrow_copyslot(TupleTableSlot *dstslot, TupleTableSlot *srcslot)
ExecClearTuple(dstslot);
/* Check if copying from another slot implementation */
/* Check if copying from another slot implementation. */
if (dstslot->tts_ops != srcslot->tts_ops)
{
/* If we are copying from another slot implementation to arrow slot,
we always copy the data into the non-compressed slot. */
child_srcslot = srcslot;
/*
* The source slot is not an Arrow slot so it is necessary to identify
* which destination child slot to copy the source slot into. It
* should normally be the non-compressed slot, but double check the
* number of attributes to sure. If the source and the target tuple
* descriptor has the same number of attributes, then it should a
* non-compressed slot since the compressed slot has extra metadata
* attributes. If the compressed table is changed in the future to not
* have extra metadata attributes, this check needs to be updated.
*
* Note that it is not possible to use equalTupleDescs() because it
* compares the tuple's composite ID. If the source slot is, e.g.,
* virtual, with no connection to a physical relation, the composite
* ID is often RECORDID while the arrow slot has the ID of the
* relation.
*/
if (dstslot->tts_tupleDescriptor->natts == srcslot->tts_tupleDescriptor->natts)
{
/* non-compressed tuple slot */
child_dstslot = arrow_slot_get_noncompressed_slot(dstslot);
adstslot->tuple_index = InvalidTupleIndex;
}
else
{
child_dstslot = arrow_slot_get_compressed_slot(dstslot, srcslot->tts_tupleDescriptor);
/* compressed tuple slot */
adstslot->tuple_index = 1;
}
child_dstslot = arrow_slot_get_noncompressed_slot(dstslot);
adstslot->tuple_index = InvalidTupleIndex;
}
else
{

View File

@@ -301,3 +301,37 @@ order by location_id;
(3 rows)
drop table :hypertable;
-- Check that we can write to a hyperstore table from another kind of
-- slot even if we have dropped and added attributes.
create table test2 (itime integer, b bigint, t text);
select create_hypertable('test2', by_range('itime', 10));
NOTICE: adding not-null constraint to column "itime"
create_hypertable
-------------------
(3,t)
(1 row)
create table test2_source(itime integer, d int, t text);
insert into test2_source values (9, '9', 90), (17, '17', 1700);
-- this will create a single chunk.
insert into test2 select t, 10, 'first'::text from generate_series(1, 7) t;
alter table test2 drop column b;
alter table test2 add column c int default -15;
alter table test2 add column d int;
-- Since we have chunk sizes of 10, this will create a second chunk
-- with a second set of attributes where one is dropped.
insert into test2 select t, 'second'::text, 120, 1 from generate_series(11, 15) t;
alter table test2
set access method hyperstore,
set (timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'c, itime desc');
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "test2" is set to ""
select compress_chunk(show_chunks('test2'));
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_3_13_chunk
_timescaledb_internal._hyper_3_14_chunk
(2 rows)
-- Insert into both chunks using a select.
insert into test2(itime ,t , d) select itime, t, d from test2_source;

View File

@@ -173,3 +173,32 @@ select * from :hypertable where location_id between 11 and 22
order by location_id;
drop table :hypertable;
-- Check that we can write to a hyperstore table from another kind of
-- slot even if we have dropped and added attributes.
create table test2 (itime integer, b bigint, t text);
select create_hypertable('test2', by_range('itime', 10));
create table test2_source(itime integer, d int, t text);
insert into test2_source values (9, '9', 90), (17, '17', 1700);
-- this will create a single chunk.
insert into test2 select t, 10, 'first'::text from generate_series(1, 7) t;
alter table test2 drop column b;
alter table test2 add column c int default -15;
alter table test2 add column d int;
-- Since we have chunk sizes of 10, this will create a second chunk
-- with a second set of attributes where one is dropped.
insert into test2 select t, 'second'::text, 120, 1 from generate_series(11, 15) t;
alter table test2
set access method hyperstore,
set (timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'c, itime desc');
select compress_chunk(show_chunks('test2'));
-- Insert into both chunks using a select.
insert into test2(itime ,t , d) select itime, t, d from test2_source;