Optimize segmentwise recompression

Instead of recompressing all the segments, try to find segments
which have uncompressed tuples and only recompress those segments.
Ante Kresic, 2023-11-23 17:57:26 +01:00 (committed by Ante Kresic)
parent 7443c476ea
commit 645727bfe1
4 changed files with 123 additions and 136 deletions
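In outline, the reworked loop in tsl_recompress_chunk_segmentwise (first diff below) checks each segmentby group for matching tuples in the uncompressed chunk (fetch_matching_uncompressed_chunk_into_tuplesort now returns whether any were found) and only decompresses and recompresses a segment when there are some; segments without new tuples are left untouched and merely folded into the compression statistics. The following toy, self-contained C sketch illustrates that skip pattern; the data model and helper names are invented for illustration and are not TimescaleDB code.

/*
 * Toy illustration (not TimescaleDB code) of the idea in this commit:
 * walk the compressed segments and only "recompress" the ones that have
 * matching uncompressed tuples; the rest are skipped and only counted
 * toward the compression statistics.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct
{
	int segmentby_key; /* stands in for the segmentby column values */
	int row_count;     /* uncompressed rows covered by this compressed segment */
} CompressedSegment;

/* stand-in for fetch_matching_uncompressed_chunk_into_tuplesort(): returns
 * true when the uncompressed chunk holds tuples for the given segment */
static bool
uncompressed_tuples_exist(const int *uncompressed_keys, int nuncompressed, int key)
{
	for (int i = 0; i < nuncompressed; i++)
		if (uncompressed_keys[i] == key)
			return true;
	return false;
}

int
main(void)
{
	CompressedSegment segments[] = { { 1, 1000 }, { 2, 1000 }, { 3, 881 } };
	int uncompressed_keys[] = { 2 }; /* new rows were inserted only for key 2 */

	long skipped_uncompressed_rows = 0;
	long skipped_compressed_rows = 0;

	for (size_t i = 0; i < sizeof(segments) / sizeof(segments[0]); i++)
	{
		CompressedSegment *seg = &segments[i];

		if (!uncompressed_tuples_exist(uncompressed_keys, 1, seg->segmentby_key))
		{
			/* nothing new for this segment: leave it compressed, track stats */
			skipped_uncompressed_rows += seg->row_count;
			skipped_compressed_rows++;
			continue;
		}

		/* this is where the real code decompresses the segment into a
		 * tuplesort, merges the new tuples and recompresses the result */
		printf("recompressing segment %d\n", seg->segmentby_key);
	}

	printf("skipped %ld compressed rows covering %ld uncompressed rows\n",
	       skipped_compressed_rows, skipped_uncompressed_rows);
	return 0;
}

Built with any C99 compiler, the sketch recompresses only the segment that has matching uncompressed rows (key 2) and reports the others as skipped, which mirrors the accounting the real code adds back into rowcnt_pre_compression and num_compressed_rows.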


@@ -947,14 +947,18 @@ tsl_recompress_chunk_wrapper(Chunk *uncompressed_chunk)
return true;
}
/* This is a wrapper around row_compressor_append_sorted_rows. */
/* Sort the tuples and recompress them */
static void
recompress_segment(Tuplesortstate *tuplesortstate, Relation compressed_chunk_rel,
RowCompressor *row_compressor)
{
tuplesort_performsort(tuplesortstate);
row_compressor_reset(row_compressor);
row_compressor_append_sorted_rows(row_compressor,
tuplesortstate,
RelationGetDescr(compressed_chunk_rel));
tuplesort_end(tuplesortstate);
CommandCounterIncrement();
}
static bool
@@ -975,25 +979,19 @@ decompress_segment_update_current_segment(CompressedSegmentInfo **current_segmen
for (int i = 0; i < nsegmentby_cols; i++)
{
int16 col_offset = segby_col_offsets_compressed[i];
if (!compressed_chunk_column_is_segmentby(per_col[col_offset]))
continue;
else
if (compressed_chunk_column_is_segmentby(per_col[col_offset]))
{
val = slot_getattr(slot, AttrOffsetGetAttrNumber(col_offset), &is_null);
if (!segment_info_datum_is_in_group(current_segment[seg_idx++]->segment_info,
val,
is_null))
{
/* new segment, need to do per-segment processing */
pfree(
current_segment[seg_idx - 1]->segment_info); /* because increased previously */
SegmentInfo *segment_info =
segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col_offset));
segment_info_update(segment_info, val, is_null);
current_segment[seg_idx - 1]->segment_info = segment_info;
current_segment[seg_idx - 1]->decompressed_chunk_offset =
per_col[col_offset].decompressed_column_offset;
}
/* new segment, need to do per-segment processing */
if (current_segment[seg_idx]->segment_info)
pfree(current_segment[seg_idx]->segment_info);
SegmentInfo *segment_info =
segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col_offset));
segment_info_update(segment_info, val, is_null);
current_segment[seg_idx]->segment_info = segment_info;
current_segment[seg_idx]->decompressed_chunk_offset =
per_col[col_offset].decompressed_column_offset;
seg_idx++;
}
}
}
@@ -1010,9 +1008,7 @@ decompress_segment_changed_group(CompressedSegmentInfo **current_segment, TupleT
for (int i = 0; i < nsegmentby_cols; i++)
{
int16 col_offset = segby_col_offsets_compressed[i];
if (!compressed_chunk_column_is_segmentby(per_col[col_offset]))
continue;
else
if (compressed_chunk_column_is_segmentby(per_col[col_offset]))
{
val = slot_getattr(slot, AttrOffsetGetAttrNumber(col_offset), &is_null);
if (!segment_info_datum_is_in_group(current_segment[seg_idx++]->segment_info,
@@ -1157,7 +1153,7 @@ fetch_unmatched_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tuples
table_endscan(heapScan);
}
static void
static bool
fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tuplesortstate,
int nsegmentby_cols,
Relation uncompressed_chunk_rel,
@@ -1169,6 +1165,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
int index = 0;
int nsegbycols_nonnull = 0;
Bitmapset *null_segbycols = NULL;
bool matching_exist = false;
for (int seg_col = 0; seg_col < nsegmentby_cols; seg_col++)
{
@@ -1227,6 +1224,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
}
if (valid)
{
matching_exist = true;
ExecStoreHeapTuple(uncompressed_tuple, heap_tuple_slot, false);
slot_getallattrs(heap_tuple_slot);
tuplesort_puttupleslot(segment_tuplesortstate, heap_tuple_slot);
@@ -1243,6 +1241,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
if (scankey != NULL)
pfree(scankey);
return matching_exist;
}
/*
@@ -1353,6 +1352,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
/****** compression statistics ******/
RelationSize after_size;
int64 skipped_uncompressed_rows = 0;
int64 skipped_compressed_rows = 0;
Tuplesortstate *segment_tuplesortstate;
@@ -1417,7 +1418,6 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
HeapTuple compressed_tuple;
IndexScanDesc index_scan;
SegmentInfo *segment_info = NULL;
bool changed_segment = false;
/************ current segment **************/
CompressedSegmentInfo **current_segment =
@@ -1426,8 +1426,10 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
for (int i = 0; i < nsegmentby_cols; i++)
{
current_segment[i] = palloc(sizeof(CompressedSegmentInfo));
current_segment[i]->segment_info = NULL;
}
bool current_segment_init = false;
bool skip_current_segment = false;
/************** snapshot ****************************/
Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
@@ -1439,32 +1441,28 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
TupleTableSlot *slot = table_slot_create(compressed_chunk_rel, NULL);
index_rescan(index_scan, NULL, 0, NULL, 0);
Datum val;
bool is_null;
while (index_getnext_slot(index_scan, ForwardScanDirection, slot))
{
i = 0;
int col = 0;
slot_getallattrs(slot);
if (!current_segment_init)
{
current_segment_init = true;
Datum val;
bool is_null;
/* initialize current segment */
for (col = 0; col < slot->tts_tupleDescriptor->natts; col++)
{
val = slot_getattr(slot, AttrOffsetGetAttrNumber(col), &is_null);
if (compressed_chunk_column_is_segmentby(decompressor.per_compressed_cols[col]))
{
segment_info = segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col));
current_segment[i]->decompressed_chunk_offset =
decompressor.per_compressed_cols[col].decompressed_column_offset;
/* also need to call segment_info_update here to update the val part */
segment_info_update(segment_info, val, is_null);
current_segment[i]->segment_info = segment_info;
i++;
}
}
decompress_segment_update_current_segment(current_segment,
slot, /*slot from compressed chunk*/
decompressor.per_compressed_cols,
segmentby_column_offsets_compressed,
nsegmentby_cols);
skip_current_segment =
!fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
nsegmentby_cols,
uncompressed_chunk_rel,
current_segment);
}
/* we have a segment already, so compare those */
changed_segment = decompress_segment_changed_group(current_segment,
@@ -1472,75 +1470,68 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
decompressor.per_compressed_cols,
segmentby_column_offsets_compressed,
nsegmentby_cols);
if (!changed_segment)
if (changed_segment)
{
i = 0;
bool should_free;
if (!skip_current_segment)
{
recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
heap_deform_tuple(compressed_tuple,
compressed_rel_tupdesc,
decompressor.compressed_datums,
decompressor.compressed_is_nulls);
row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
if (should_free)
heap_freetuple(compressed_tuple);
}
else if (changed_segment)
{
fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
nsegmentby_cols,
uncompressed_chunk_rel,
current_segment);
tuplesort_performsort(segment_tuplesortstate);
row_compressor_reset(&row_compressor);
recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
/* now any pointers returned will be garbage */
tuplesort_end(segment_tuplesortstate);
/* reinit tuplesort */
segment_tuplesortstate = tuplesort_begin_heap(uncompressed_rel_tupdesc,
n_keys,
sort_keys,
sort_operators,
sort_collations,
nulls_first,
maintenance_work_mem,
NULL,
false);
}
decompress_segment_update_current_segment(current_segment,
slot, /*slot from compressed chunk*/
decompressor.per_compressed_cols,
segmentby_column_offsets_compressed,
nsegmentby_cols);
/* reinit tuplesort and add the first tuple of the new segment to it */
segment_tuplesortstate = tuplesort_begin_heap(uncompressed_rel_tupdesc,
n_keys,
sort_keys,
sort_operators,
sort_collations,
nulls_first,
maintenance_work_mem,
NULL,
false);
bool should_free;
compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
heap_deform_tuple(compressed_tuple,
compressed_rel_tupdesc,
decompressor.compressed_datums,
decompressor.compressed_is_nulls);
row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
/* because this is the first tuple of the new segment */
changed_segment = false;
/* make changes visible */
CommandCounterIncrement();
if (should_free)
heap_freetuple(compressed_tuple);
skip_current_segment =
!fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
nsegmentby_cols,
uncompressed_chunk_rel,
current_segment);
}
if (skip_current_segment)
{
val = slot_getattr(slot,
AttrOffsetGetAttrNumber(row_compressor.count_metadata_column_offset),
&is_null);
Assert(!is_null);
skipped_uncompressed_rows += DatumGetInt32(val);
skipped_compressed_rows++;
continue;
}
/* Didn't change group and we are not skipping the current segment
* add it to the tuplesort
*/
bool should_free;
compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
heap_deform_tuple(compressed_tuple,
compressed_rel_tupdesc,
decompressor.compressed_datums,
decompressor.compressed_is_nulls);
row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
CommandCounterIncrement();
if (should_free)
heap_freetuple(compressed_tuple);
}
ExecClearTuple(slot);
@@ -1550,18 +1541,9 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
* the current segment could not be initialized in the case where two recompress operations
* execute concurrently: one blocks on the Exclusive lock but has already read the chunk
* status and determined that there is data in the uncompressed chunk */
if (!changed_segment && current_segment_init)
if (!changed_segment && !skip_current_segment && current_segment_init)
{
fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
nsegmentby_cols,
uncompressed_chunk_rel,
current_segment);
tuplesort_performsort(segment_tuplesortstate);
row_compressor_reset(&row_compressor);
recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
tuplesort_end(segment_tuplesortstate);
CommandCounterIncrement();
}
/* done with the compressed chunk segments that had new entries in the uncompressed
but there could be rows inserted into the uncompressed that don't already have a corresponding
@@ -1598,6 +1580,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
/* the compression size statistics we are able to update and accurately report are:
* rowcount pre/post compression,
* compressed chunk sizes */
row_compressor.rowcnt_pre_compression += skipped_uncompressed_rows;
row_compressor.num_compressed_rows += skipped_compressed_rows;
compression_chunk_size_catalog_update_recompressed(uncompressed_chunk->fd.id,
compressed_chunk->fd.id,
&after_size,


@@ -118,7 +118,7 @@ select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
(1 row)
insert into mytab_twoseg values ('2023-01-01 19:56:20.048355+02'::timestamptz, 2, NULL, 2);
select * from :chunk_to_compress_2;
select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
time | a | b | c
-------------------------------------+---+---+---
Sun Jan 01 11:56:20.048355 2023 PST | 2 | | 2
@@ -146,12 +146,12 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
select ctid, * from :compressed_chunk_name_2;
ctid | time | a | b | c | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_1 | _ts_meta_max_1
-------+----------------------------------------------------------------------+---+---+---+----------------+-----------------------+-------------------------------------+-------------------------------------
(0,2) | BAAAApQ3/0H94//////8bHkAAAAAAgAAAAIAAAAAAAAA7gAFKHAFqwnGAAUocAzSF8U= | 3 | | 3 | 2 | 10 | Sun Jan 01 11:56:20.048355 2023 PST | Sun Jan 01 11:57:20.048355 2023 PST
(0,3) | BAAAApQ2Uhq14/////5S2LgAAAAAAgAAAAIAAAAAAAAA7gAFKG/+g/vGAAUoc1jSi8U= | 2 | | 2 | 2 | 10 | Sun Jan 01 09:56:20.048355 2023 PST | Sun Jan 01 11:56:20.048355 2023 PST
(0,4) | BAAAApQ3/0H94//////8bHkAAAAAAgAAAAIAAAAAAAAA7gAFKHAFqwnGAAUocAzSF8U= | 3 | | 3 | 2 | 10 | Sun Jan 01 11:56:20.048355 2023 PST | Sun Jan 01 11:57:20.048355 2023 PST
(2 rows)
-- verify that initial data is returned as expected
select * from :chunk_to_compress_2;
select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
time | a | b | c
-------------------------------------+---+---+---
Sun Jan 01 11:56:20.048355 2023 PST | 2 | | 2
@@ -220,15 +220,15 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
select ctid, * from :compressed_chunk_name_2;
ctid | time | a | b | c | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_1 | _ts_meta_max_1
--------+------------------------------------------------------------------------------------------+---+---+---+----------------+-----------------------+------------------------------+------------------------------
(0,4) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 1 | | 1 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
(0,5) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 1 | | 1 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
(0,6) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 1 | | 1 | 881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
(0,7) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 2 | | 2 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
(0,8) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 2 | | 2 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
(0,9) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 2 | | 2 | 881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
(0,10) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 0 | | 0 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
(0,11) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 0 | | 0 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
(0,12) | BAAAApQnSNVgAP//////4XuAAAADcgAAAAQAAAAAAADf7gAFKFrcytAAAAUoWuBeVv8AADbgAAAAAAMZdQAAPQkA | 0 | | 0 | 882 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
(0,13) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 1 | | 1 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
(0,14) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 1 | | 1 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
(0,15) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 1 | | 1 | 881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
(0,16) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 2 | | 2 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
(0,17) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 2 | | 2 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
(0,18) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 2 | | 2 | 881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
(9 rows)
-- after recompression
@@ -313,7 +313,7 @@ INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, NULL, 2),
('2023-01-01'::timestamptz, 2, NULL, 2);
alter table mytab_prep set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c');
PREPARE p1 AS
SELECT * FROM mytab_prep;
SELECT * FROM mytab_prep ORDER BY a, c, time DESC;
select show_chunks as chunk_to_compress_prep from show_chunks('mytab_prep') limit 1 \gset
SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared plan would change before and after compress
compress_chunk
@@ -324,13 +324,16 @@ SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared
INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, 3, 2);
-- plan should be invalidated to return results from the uncompressed chunk also
EXPLAIN (COSTS OFF) EXECUTE p1;
QUERY PLAN
----------------------------------------------------------
Append
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------
Merge Append
Sort Key: _hyper_9_10_chunk.a, _hyper_9_10_chunk.c, _hyper_9_10_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_9_10_chunk
-> Seq Scan on compress_hyper_10_11_chunk
-> Seq Scan on _hyper_9_10_chunk
(4 rows)
-> Index Scan using compress_hyper_10_11_chunk__compressed_hypertable_10_a_c__ts_me on compress_hyper_10_11_chunk
-> Sort
Sort Key: _hyper_9_10_chunk.a, _hyper_9_10_chunk.c, _hyper_9_10_chunk."time" DESC
-> Seq Scan on _hyper_9_10_chunk
(7 rows)
EXECUTE p1;
time | a | b | c
@@ -343,18 +346,18 @@ EXECUTE p1;
-- check plan again after recompression
CALL recompress_chunk(:'chunk_to_compress_prep');
EXPLAIN (COSTS OFF) EXECUTE p1;
QUERY PLAN
----------------------------------------------------
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------
Custom Scan (DecompressChunk) on _hyper_9_10_chunk
-> Seq Scan on compress_hyper_10_11_chunk
-> Index Scan using compress_hyper_10_11_chunk__compressed_hypertable_10_a_c__ts_me on compress_hyper_10_11_chunk
(2 rows)
EXECUTE p1;
time | a | b | c
------------------------------+---+---+---
Sun Jan 01 00:00:00 2023 PST | 2 | | 2
Sun Jan 01 00:00:00 2023 PST | 2 | | 2
Sun Jan 01 00:00:00 2023 PST | 2 | 3 | 2
Sun Jan 01 00:00:00 2023 PST | 2 | | 2
Sun Jan 01 00:00:00 2023 PST | 2 | | 2
(3 rows)
-- verify segmentwise recompression when index exists, decompress + compress otherwise
@@ -498,8 +501,8 @@ select * from :compressed_chunk_name;
----------------------------------------------------------------------+---+------------------------------------------------------------------------------------------+---+----------------+-----------------------+------------------------------+------------------------------
BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 1 | BAAAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAEAAAAAAAAAAgAAAAAAAAAC | 1 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 1 | BAAAAAAAAAAAAgAAAAAAAAACAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAE | 2 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 1 | BAEAAAAAAAAABAAAAAAAAAAEAAAAAQAAAAEAAAAAAAAABAAAAAAAAAAIAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAC | | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 2 | BAAAAAAAAAAAAgAAAAAAAAACAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAE | 2 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 2 | BAEAAAAAAAAAAwAAAAAAAAADAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAGAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAC | 3 | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 1 | BAEAAAAAAAAABAAAAAAAAAAEAAAAAQAAAAEAAAAAAAAABAAAAAAAAAAIAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAB | | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
(5 rows)


@@ -544,8 +544,8 @@ step SA: SELECT * FROM ts_device_table;
time|device|location|value
----+------+--------+-----
0| 1| 100| 20
1| 1| 100| 20
1| 1| 100| 100
1| 1| 100| 20
2| 1| 100| 20
3| 1| 100| 20
4| 1| 100| 20
@@ -665,8 +665,8 @@ step SA: SELECT * FROM ts_device_table;
time|device|location|value
----+------+--------+-----
0| 1| 100| 20
1| 1| 100| 20
1| 1| 100| 100
1| 1| 100| 20
2| 1| 100| 20
3| 1| 100| 20
4| 1| 100| 20


@@ -88,7 +88,7 @@ select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
insert into mytab_twoseg values ('2023-01-01 19:56:20.048355+02'::timestamptz, 2, NULL, 2);
select * from :chunk_to_compress_2;
select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
SELECT compressed_chunk_schema || '.' || compressed_chunk_name as compressed_chunk_name_2
from compressed_chunk_info_view where hypertable_name = 'mytab_twoseg' \gset
@@ -101,7 +101,7 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
select ctid, * from :compressed_chunk_name_2;
-- verify that initial data is returned as expected
select * from :chunk_to_compress_2;
select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
-- should still have 2 compressed rows
select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
@@ -176,7 +176,7 @@ INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, NULL, 2),
alter table mytab_prep set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c');
PREPARE p1 AS
SELECT * FROM mytab_prep;
SELECT * FROM mytab_prep ORDER BY a, c, time DESC;
select show_chunks as chunk_to_compress_prep from show_chunks('mytab_prep') limit 1 \gset
SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared plan would change before and after compress