Optimize segmentwise recompression

Instead of recompressing all segments, find the segments that have
uncompressed tuples and recompress only those.
Authored and committed by Ante Kresic on 2023-11-23 17:57:26 +01:00
parent 7443c476ea
commit 645727bfe1
4 changed files with 123 additions and 136 deletions
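
To illustrate what the optimization targets, here is a minimal SQL sketch in the spirit of the mytab_twoseg test further below (table name, values, and timestamps are illustrative; assumes a TimescaleDB build that includes this commit):

-- Two segmentby groups get compressed; new rows then arrive for only one
-- of them, so only that segment needs to be decompressed and recompressed.
create table mytab_twoseg (time timestamptz not null, a int, b int, c int);
select create_hypertable('mytab_twoseg', 'time');
alter table mytab_twoseg set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c');
insert into mytab_twoseg values
    ('2023-01-01 11:56:20.048355+02'::timestamptz, 2, NULL, 2),
    ('2023-01-01 13:56:20.048355+02'::timestamptz, 3, NULL, 3);
select show_chunks as chunk_to_compress_2 from show_chunks('mytab_twoseg') limit 1 \gset
select compress_chunk(:'chunk_to_compress_2');
-- this tuple belongs to segment (a, c) = (2, 2) only
insert into mytab_twoseg values ('2023-01-01 19:56:20.048355+02'::timestamptz, 2, NULL, 2);
-- segment (2, 2) is fetched, sorted, and recompressed; the compressed row for
-- segment (3, 3) has no matching uncompressed tuples and is skipped entirely
select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2');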

File 1 of 4

@@ -947,14 +947,18 @@ tsl_recompress_chunk_wrapper(Chunk *uncompressed_chunk)
 	return true;
 }
 
-/* This is a wrapper around row_compressor_append_sorted_rows. */
+/* Sort the tuples and recompress them */
 static void
 recompress_segment(Tuplesortstate *tuplesortstate, Relation compressed_chunk_rel,
 				   RowCompressor *row_compressor)
 {
+	tuplesort_performsort(tuplesortstate);
+	row_compressor_reset(row_compressor);
 	row_compressor_append_sorted_rows(row_compressor,
 									  tuplesortstate,
 									  RelationGetDescr(compressed_chunk_rel));
+	tuplesort_end(tuplesortstate);
+	CommandCounterIncrement();
 }
 
 static bool
@@ -975,25 +979,19 @@ decompress_segment_update_current_segment(CompressedSegmentInfo **current_segment
 	for (int i = 0; i < nsegmentby_cols; i++)
 	{
 		int16 col_offset = segby_col_offsets_compressed[i];
-		if (!compressed_chunk_column_is_segmentby(per_col[col_offset]))
-			continue;
-		else
+		if (compressed_chunk_column_is_segmentby(per_col[col_offset]))
 		{
 			val = slot_getattr(slot, AttrOffsetGetAttrNumber(col_offset), &is_null);
-			if (!segment_info_datum_is_in_group(current_segment[seg_idx++]->segment_info,
-												val,
-												is_null))
-			{
-				/* new segment, need to do per-segment processing */
-				pfree(
-					current_segment[seg_idx - 1]->segment_info); /* because increased previously */
-				SegmentInfo *segment_info =
-					segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col_offset));
-				segment_info_update(segment_info, val, is_null);
-				current_segment[seg_idx - 1]->segment_info = segment_info;
-				current_segment[seg_idx - 1]->decompressed_chunk_offset =
-					per_col[col_offset].decompressed_column_offset;
-			}
+			/* new segment, need to do per-segment processing */
+			if (current_segment[seg_idx]->segment_info)
+				pfree(current_segment[seg_idx]->segment_info);
+			SegmentInfo *segment_info =
+				segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col_offset));
+			segment_info_update(segment_info, val, is_null);
+			current_segment[seg_idx]->segment_info = segment_info;
+			current_segment[seg_idx]->decompressed_chunk_offset =
+				per_col[col_offset].decompressed_column_offset;
+			seg_idx++;
 		}
 	}
 }
@@ -1010,9 +1008,7 @@ decompress_segment_changed_group(CompressedSegmentInfo **current_segment, TupleT
 	for (int i = 0; i < nsegmentby_cols; i++)
 	{
 		int16 col_offset = segby_col_offsets_compressed[i];
-		if (!compressed_chunk_column_is_segmentby(per_col[col_offset]))
-			continue;
-		else
+		if (compressed_chunk_column_is_segmentby(per_col[col_offset]))
 		{
 			val = slot_getattr(slot, AttrOffsetGetAttrNumber(col_offset), &is_null);
 			if (!segment_info_datum_is_in_group(current_segment[seg_idx++]->segment_info,
@@ -1157,7 +1153,7 @@ fetch_unmatched_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tuples
 	table_endscan(heapScan);
 }
 
-static void
+static bool
 fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tuplesortstate,
 												 int nsegmentby_cols,
 												 Relation uncompressed_chunk_rel,
@@ -1169,6 +1165,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
 	int index = 0;
 	int nsegbycols_nonnull = 0;
 	Bitmapset *null_segbycols = NULL;
+	bool matching_exist = false;
 
 	for (int seg_col = 0; seg_col < nsegmentby_cols; seg_col++)
 	{
@@ -1227,6 +1224,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
 		}
 		if (valid)
 		{
+			matching_exist = true;
 			ExecStoreHeapTuple(uncompressed_tuple, heap_tuple_slot, false);
 			slot_getallattrs(heap_tuple_slot);
 			tuplesort_puttupleslot(segment_tuplesortstate, heap_tuple_slot);
@@ -1243,6 +1241,7 @@ fetch_matching_uncompressed_chunk_into_tuplesort(Tuplesortstate *segment_tupleso
 	if (scankey != NULL)
 		pfree(scankey);
+	return matching_exist;
 }
 
 /*
@@ -1353,6 +1352,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	/****** compression statistics ******/
 	RelationSize after_size;
+	int64 skipped_uncompressed_rows = 0;
+	int64 skipped_compressed_rows = 0;
 
 	Tuplesortstate *segment_tuplesortstate;
@@ -1417,7 +1418,6 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	HeapTuple compressed_tuple;
 	IndexScanDesc index_scan;
-	SegmentInfo *segment_info = NULL;
 	bool changed_segment = false;
 	/************ current segment **************/
 	CompressedSegmentInfo **current_segment =
@@ -1426,8 +1426,10 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	for (int i = 0; i < nsegmentby_cols; i++)
 	{
 		current_segment[i] = palloc(sizeof(CompressedSegmentInfo));
+		current_segment[i]->segment_info = NULL;
 	}
 	bool current_segment_init = false;
+	bool skip_current_segment = false;
 
 	/************** snapshot ****************************/
 	Snapshot snapshot = RegisterSnapshot(GetTransactionSnapshot());
@@ -1439,32 +1441,28 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	TupleTableSlot *slot = table_slot_create(compressed_chunk_rel, NULL);
 	index_rescan(index_scan, NULL, 0, NULL, 0);
+
+	Datum val;
+	bool is_null;
 
 	while (index_getnext_slot(index_scan, ForwardScanDirection, slot))
 	{
 		i = 0;
-		int col = 0;
 		slot_getallattrs(slot);
 
 		if (!current_segment_init)
 		{
 			current_segment_init = true;
-			Datum val;
-			bool is_null;
-			/* initialize current segment */
-			for (col = 0; col < slot->tts_tupleDescriptor->natts; col++)
-			{
-				val = slot_getattr(slot, AttrOffsetGetAttrNumber(col), &is_null);
-				if (compressed_chunk_column_is_segmentby(decompressor.per_compressed_cols[col]))
-				{
-					segment_info = segment_info_new(TupleDescAttr(slot->tts_tupleDescriptor, col));
-					current_segment[i]->decompressed_chunk_offset =
-						decompressor.per_compressed_cols[col].decompressed_column_offset;
-					/* also need to call segment_info_update here to update the val part */
-					segment_info_update(segment_info, val, is_null);
-					current_segment[i]->segment_info = segment_info;
-					i++;
-				}
-			}
+			decompress_segment_update_current_segment(current_segment,
+													  slot, /*slot from compressed chunk*/
+													  decompressor.per_compressed_cols,
+													  segmentby_column_offsets_compressed,
+													  nsegmentby_cols);
+
+			skip_current_segment =
+				!fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
+																  nsegmentby_cols,
+																  uncompressed_chunk_rel,
+																  current_segment);
 		}
 		/* we have a segment already, so compare those */
 		changed_segment = decompress_segment_changed_group(current_segment,
@@ -1472,75 +1470,68 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 														   decompressor.per_compressed_cols,
 														   segmentby_column_offsets_compressed,
 														   nsegmentby_cols);
-		if (!changed_segment)
-		{
-			i = 0;
-			bool should_free;
-
-			compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
-
-			heap_deform_tuple(compressed_tuple,
-							  compressed_rel_tupdesc,
-							  decompressor.compressed_datums,
-							  decompressor.compressed_is_nulls);
-
-			row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
-
-			simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
-
-			if (should_free)
-				heap_freetuple(compressed_tuple);
-		}
-		else if (changed_segment)
+		if (changed_segment)
 		{
-			fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
-															 nsegmentby_cols,
-															 uncompressed_chunk_rel,
-															 current_segment);
-
-			tuplesort_performsort(segment_tuplesortstate);
-			row_compressor_reset(&row_compressor);
-			recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
-			/* now any pointers returned will be garbage */
-			tuplesort_end(segment_tuplesortstate);
+			if (!skip_current_segment)
+			{
+				recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
+
+				/* reinit tuplesort */
+				segment_tuplesortstate = tuplesort_begin_heap(uncompressed_rel_tupdesc,
+															  n_keys,
+															  sort_keys,
+															  sort_operators,
+															  sort_collations,
+															  nulls_first,
+															  maintenance_work_mem,
+															  NULL,
+															  false);
+			}
 
 			decompress_segment_update_current_segment(current_segment,
 													  slot, /*slot from compressed chunk*/
 													  decompressor.per_compressed_cols,
 													  segmentby_column_offsets_compressed,
 													  nsegmentby_cols);
-			/* reinit tuplesort and add the first tuple of the new segment to it */
-			segment_tuplesortstate = tuplesort_begin_heap(uncompressed_rel_tupdesc,
-														  n_keys,
-														  sort_keys,
-														  sort_operators,
-														  sort_collations,
-														  nulls_first,
-														  maintenance_work_mem,
-														  NULL,
-														  false);
-
-			bool should_free;
-			compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
-			heap_deform_tuple(compressed_tuple,
-							  compressed_rel_tupdesc,
-							  decompressor.compressed_datums,
-							  decompressor.compressed_is_nulls);
-
-			row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
-			simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
-			/* because this is the first tuple of the new segment */
+
 			changed_segment = false;
-			/* make changes visible */
-			CommandCounterIncrement();
-
-			if (should_free)
-				heap_freetuple(compressed_tuple);
+			skip_current_segment =
+				!fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
+																  nsegmentby_cols,
+																  uncompressed_chunk_rel,
+																  current_segment);
 		}
+
+		if (skip_current_segment)
+		{
+			val = slot_getattr(slot,
+							   AttrOffsetGetAttrNumber(row_compressor.count_metadata_column_offset),
+							   &is_null);
+			Assert(!is_null);
+			skipped_uncompressed_rows += DatumGetInt32(val);
+			skipped_compressed_rows++;
+			continue;
+		}
+
+		/* Didn't change group and we are not skipping the current segment,
+		 * add it to the tuplesort
+		 */
+		bool should_free;
+		compressed_tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
+
+		heap_deform_tuple(compressed_tuple,
+						  compressed_rel_tupdesc,
+						  decompressor.compressed_datums,
+						  decompressor.compressed_is_nulls);
+
+		row_decompressor_decompress_row_to_tuplesort(&decompressor, segment_tuplesortstate);
+		simple_table_tuple_delete(compressed_chunk_rel, &(slot->tts_tid), snapshot);
+		CommandCounterIncrement();
+
+		if (should_free)
+			heap_freetuple(compressed_tuple);
 	}
 
 	ExecClearTuple(slot);
@@ -1550,18 +1541,9 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	 * the current segment could not be initialized in the case where two recompress operations
 	 * execute concurrently: one blocks on the Exclusive lock but has already read the chunk
 	 * status and determined that there is data in the uncompressed chunk */
-	if (!changed_segment && current_segment_init)
+	if (!changed_segment && !skip_current_segment && current_segment_init)
 	{
-		fetch_matching_uncompressed_chunk_into_tuplesort(segment_tuplesortstate,
-														 nsegmentby_cols,
-														 uncompressed_chunk_rel,
-														 current_segment);
-		tuplesort_performsort(segment_tuplesortstate);
-		row_compressor_reset(&row_compressor);
 		recompress_segment(segment_tuplesortstate, uncompressed_chunk_rel, &row_compressor);
-		tuplesort_end(segment_tuplesortstate);
-		CommandCounterIncrement();
 	}
 
 	/* done with the compressed chunk segments that had new entries in the uncompressed
 	   but there could be rows inserted into the uncompressed that don't already have a corresponding
@@ -1598,6 +1580,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
 	/* the compression size statistics we are able to update and accurately report are:
 	 * rowcount pre/post compression,
 	 * compressed chunk sizes */
+	row_compressor.rowcnt_pre_compression += skipped_uncompressed_rows;
+	row_compressor.num_compressed_rows += skipped_compressed_rows;
 	compression_chunk_size_catalog_update_recompressed(uncompressed_chunk->fd.id,
 													   compressed_chunk->fd.id,
 													   &after_size,

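For segments with no new uncompressed tuples, the loop above only tallies statistics (the segment's _ts_meta_count into skipped_uncompressed_rows, plus one skipped compressed row) and leaves the compressed tuple in place. One way to observe this from SQL, as a sketch reusing the psql variables the tests below set up:

-- A compressed row whose segment was skipped keeps its ctid across the
-- recompression; rewritten segments are deleted and re-inserted, so they
-- reappear at new positions in the compressed chunk's heap.
select ctid, a, c, _ts_meta_count from :compressed_chunk_name_2;
select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2');
select ctid, a, c, _ts_meta_count from :compressed_chunk_name_2;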
File 2 of 4

@@ -118,7 +118,7 @@ select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
 (1 row)
 
 insert into mytab_twoseg values ('2023-01-01 19:56:20.048355+02'::timestamptz, 2, NULL, 2);
-select * from :chunk_to_compress_2;
+select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
                 time                 | a | b | c 
 -------------------------------------+---+---+---
  Sun Jan 01 11:56:20.048355 2023 PST | 2 |   | 2
@@ -146,12 +146,12 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
 select ctid, * from :compressed_chunk_name_2;
  ctid  | time | a | b | c | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_1 | _ts_meta_max_1
 -------+----------------------------------------------------------------------+---+---+---+----------------+-----------------------+-------------------------------------+-------------------------------------
- (0,2) | BAAAApQ3/0H94//////8bHkAAAAAAgAAAAIAAAAAAAAA7gAFKHAFqwnGAAUocAzSF8U= | 3 |   | 3 | 2 | 10 | Sun Jan 01 11:56:20.048355 2023 PST | Sun Jan 01 11:57:20.048355 2023 PST
 (0,3) | BAAAApQ2Uhq14/////5S2LgAAAAAAgAAAAIAAAAAAAAA7gAFKG/+g/vGAAUoc1jSi8U= | 2 |   | 2 | 2 | 10 | Sun Jan 01 09:56:20.048355 2023 PST | Sun Jan 01 11:56:20.048355 2023 PST
+ (0,4) | BAAAApQ3/0H94//////8bHkAAAAAAgAAAAIAAAAAAAAA7gAFKHAFqwnGAAUocAzSF8U= | 3 |   | 3 | 2 | 10 | Sun Jan 01 11:56:20.048355 2023 PST | Sun Jan 01 11:57:20.048355 2023 PST
 (2 rows)
 
 -- verify that initial data is returned as expected
-select * from :chunk_to_compress_2;
+select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
                 time                 | a | b | c 
 -------------------------------------+---+---+---
  Sun Jan 01 11:56:20.048355 2023 PST | 2 |   | 2
@@ -220,15 +220,15 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
 select ctid, * from :compressed_chunk_name_2;
  ctid   | time | a | b | c | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_1 | _ts_meta_max_1
 --------+------------------------------------------------------------------------------------------+---+---+---+----------------+-----------------------+------------------------------+------------------------------
- (0,4)  | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 1 |   | 1 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
- (0,5)  | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 1 |   | 1 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
- (0,6)  | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 1 |   | 1 |  881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
- (0,7)  | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 2 |   | 2 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
- (0,8)  | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 2 |   | 2 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
- (0,9)  | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 2 |   | 2 |  881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
 (0,10) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 0 |   | 0 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
 (0,11) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 0 |   | 0 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
 (0,12) | BAAAApQnSNVgAP//////4XuAAAADcgAAAAQAAAAAAADf7gAFKFrcytAAAAUoWuBeVv8AADbgAAAAAAMZdQAAPQkA | 0 |   | 0 |  882 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
+ (0,13) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 1 |   | 1 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
+ (0,14) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 1 |   | 1 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
+ (0,15) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 1 |   | 1 |  881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
+ (0,16) | BAAAApQ0bFLXgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKHbNWYAAAAUodtDtBv8AAD5gAAAAAA== | 2 |   | 2 | 1000 | 10 | Sun Jan 01 07:40:30 2023 PST | Sun Jan 01 16:00:00 2023 PST
+ (0,17) | BAAAApQtcC8rgP/////+NjyAAAAD6AAAAAMAAAAAAAAP7gAFKGjVEigAAAUoaNilrv8AAD5gAAAAAA== | 2 |   | 2 | 1000 | 20 | Sat Dec 31 23:20:30 2022 PST | Sun Jan 01 07:40:00 2023 PST
+ (0,18) | BAAAApQnSNVgAP/////+NjyAAAADcQAAAAMAAAAAAAAP7gAFKFrcytAAAAUoWuBeVv8AADbwAAAAAA== | 2 |   | 2 |  881 | 30 | Sat Dec 31 16:00:00 2022 PST | Sat Dec 31 23:20:00 2022 PST
 (9 rows)
 
 -- after recompression
@@ -313,7 +313,7 @@ INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, NULL, 2),
 ('2023-01-01'::timestamptz, 2, NULL, 2);
 alter table mytab_prep set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c');
 PREPARE p1 AS
-SELECT * FROM mytab_prep;
+SELECT * FROM mytab_prep ORDER BY a, c, time DESC;
 select show_chunks as chunk_to_compress_prep from show_chunks('mytab_prep') limit 1 \gset
 SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared plan would change before and after compress
  compress_chunk 
@@ -324,13 +324,16 @@ SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared
 INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, 3, 2);
 -- plan should be invalidated to return results from the uncompressed chunk also
 EXPLAIN (COSTS OFF) EXECUTE p1;
                           QUERY PLAN                          
-----------------------------------------------------------
- Append
+----------------------------------------------------------------------------------------------------------------------------
+ Merge Append
+   Sort Key: _hyper_9_10_chunk.a, _hyper_9_10_chunk.c, _hyper_9_10_chunk."time" DESC
    -> Custom Scan (DecompressChunk) on _hyper_9_10_chunk
-         -> Seq Scan on compress_hyper_10_11_chunk
-   -> Seq Scan on _hyper_9_10_chunk
-(4 rows)
+         -> Index Scan using compress_hyper_10_11_chunk__compressed_hypertable_10_a_c__ts_me on compress_hyper_10_11_chunk
+   -> Sort
+         Sort Key: _hyper_9_10_chunk.a, _hyper_9_10_chunk.c, _hyper_9_10_chunk."time" DESC
+         -> Seq Scan on _hyper_9_10_chunk
+(7 rows)
 
 EXECUTE p1;
  time | a | b | c 
@@ -343,18 +346,18 @@ EXECUTE p1;
 -- check plan again after recompression
 CALL recompress_chunk(:'chunk_to_compress_prep');
 EXPLAIN (COSTS OFF) EXECUTE p1;
                      QUERY PLAN                     
-----------------------------------------------------
+----------------------------------------------------------------------------------------------------------------------
  Custom Scan (DecompressChunk) on _hyper_9_10_chunk
-   -> Seq Scan on compress_hyper_10_11_chunk
+   -> Index Scan using compress_hyper_10_11_chunk__compressed_hypertable_10_a_c__ts_me on compress_hyper_10_11_chunk
 (2 rows)
 
 EXECUTE p1;
              time             | a | b | c 
 ------------------------------+---+---+---
- Sun Jan 01 00:00:00 2023 PST | 2 |   | 2
- Sun Jan 01 00:00:00 2023 PST | 2 |   | 2
  Sun Jan 01 00:00:00 2023 PST | 2 | 3 | 2
+ Sun Jan 01 00:00:00 2023 PST | 2 |   | 2
+ Sun Jan 01 00:00:00 2023 PST | 2 |   | 2
 (3 rows)
 
 -- verify segmentwise recompression when index exists, decompress + compress otherwise
@@ -498,8 +501,8 @@ select * from :compressed_chunk_name;
 ----------------------------------------------------------------------+---+------------------------------------------------------------------------------------------+---+----------------+-----------------------+------------------------------+------------------------------
 BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 1 | BAAAAAAAAAAAAQAAAAAAAAABAAAAAQAAAAEAAAAAAAAAAgAAAAAAAAAC | 1 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
 BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 1 | BAAAAAAAAAAAAgAAAAAAAAACAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAE | 2 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
-BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 1 | BAEAAAAAAAAABAAAAAAAAAAEAAAAAQAAAAEAAAAAAAAABAAAAAAAAAAIAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAC |   | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
 BAAAAneAR/JEAAACd4BH8kQAAAAAAQAAAAEAAAAAAAAADgAE7wCP5IgA | 2 | BAAAAAAAAAAAAgAAAAAAAAACAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAE | 2 | 1 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
 BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 2 | BAEAAAAAAAAAAwAAAAAAAAADAAAAAQAAAAEAAAAAAAAAAwAAAAAAAAAGAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAC | 3 | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
+BAAAAneAR/JEAAAAAAAAAAAAAAAAAgAAAAIAAAAAAAAA7gAE7wCP5IgAAATvAI/kh/8= | 1 | BAEAAAAAAAAABAAAAAAAAAAEAAAAAQAAAAEAAAAAAAAABAAAAAAAAAAIAAAAAgAAAAEAAAAAAAAAAQAAAAAAAAAB |   | 2 | 10 | Sat Jan 01 01:00:00 2022 PST | Sat Jan 01 01:00:00 2022 PST
 (5 rows)

File 3 of 4

@@ -544,8 +544,8 @@ step SA: SELECT * FROM ts_device_table;
 time|device|location|value
 ----+------+--------+-----
    0|     1|     100|   20
-   1|     1|     100|   20
    1|     1|     100|  100
+   1|     1|     100|   20
    2|     1|     100|   20
    3|     1|     100|   20
    4|     1|     100|   20
@@ -665,8 +665,8 @@ step SA: SELECT * FROM ts_device_table;
 time|device|location|value
 ----+------+--------+-----
    0|     1|     100|   20
-   1|     1|     100|   20
    1|     1|     100|  100
+   1|     1|     100|   20
    2|     1|     100|   20
    3|     1|     100|   20
    4|     1|     100|   20

File 4 of 4

@@ -88,7 +88,7 @@ select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
 
 insert into mytab_twoseg values ('2023-01-01 19:56:20.048355+02'::timestamptz, 2, NULL, 2);
-select * from :chunk_to_compress_2;
+select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
 
 SELECT compressed_chunk_schema || '.' || compressed_chunk_name as compressed_chunk_name_2
 from compressed_chunk_info_view where hypertable_name = 'mytab_twoseg' \gset
@@ -101,7 +101,7 @@ select _timescaledb_functions.recompress_chunk_segmentwise(:'chunk_to_compress_2
 select ctid, * from :compressed_chunk_name_2;
 
 -- verify that initial data is returned as expected
-select * from :chunk_to_compress_2;
+select * from :chunk_to_compress_2 ORDER BY a, c, time DESC;
 
 -- should still have 2 compressed rows
 select * from compression_rowcnt_view where chunk_name = :'chunk_to_compress_2';
@@ -176,7 +176,7 @@ INSERT INTO mytab_prep VALUES ('2023-01-01'::timestamptz, 2, NULL, 2),
 alter table mytab_prep set (timescaledb.compress, timescaledb.compress_segmentby = 'a, c');
 PREPARE p1 AS
-SELECT * FROM mytab_prep;
+SELECT * FROM mytab_prep ORDER BY a, c, time DESC;
 select show_chunks as chunk_to_compress_prep from show_chunks('mytab_prep') limit 1 \gset
 SELECT compress_chunk(:'chunk_to_compress_prep'); -- the output of the prepared plan would change before and after compress