Reduce decompression during UPDATE/DELETE
When updating or deleting tuples from a compressed chunk, we first need to decompress the matching tuples and then proceed with the operation. This optimization reduces the amount of data decompressed by using compressed metadata to decompress only the affected segments.
parent 3bf58dac02
commit 910663d0be
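
To illustrate the idea, here is a minimal SQL sketch assuming the segmentby/orderby setup used by the new tests in this commit (the table, column, and _ts_meta_* names follow that test setup and are illustrative):

-- Compress with a segmentby and an orderby column, as in the tests added below.
CREATE TABLE sample_table(time timestamptz NOT NULL, c1 int, c4 int);
SELECT create_hypertable('sample_table', 'time');
ALTER TABLE sample_table
    SET (timescaledb.compress,
         timescaledb.compress_segmentby = 'c4',
         timescaledb.compress_orderby = 'c1');
SELECT compress_chunk(show_chunks('sample_table'));

-- A DELETE qualified on the orderby column now builds scan keys against the
-- compressed chunk's min/max metadata instead of decompressing every batch:
-- c1 >= 7 only decompresses batches whose _ts_meta_max_1 >= 7, while batches
-- with _ts_meta_max_1 < 7 stay compressed. A qualifier on the segmentby
-- column c4 filters the compressed rows directly.
DELETE FROM sample_table WHERE c1 >= 7;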
@@ -92,10 +92,10 @@ static void row_compressor_append_row(RowCompressor *row_compressor, TupleTableS
 static void row_compressor_flush(RowCompressor *row_compressor, CommandId mycid,
                                  bool changed_groups);
 
-static int create_segment_metadata_scankey(RowDecompressor *decompressor,
-                                           char *segment_meta_col_name, AttrNumber in_attno,
-                                           StrategyNumber strategy, ScanKeyData *scankeys,
-                                           int num_scankeys, Datum value);
+static int create_segment_filter_scankey(RowDecompressor *decompressor,
+                                         char *segment_filter_col_name, StrategyNumber strategy,
+                                         ScanKeyData *scankeys, int num_scankeys,
+                                         Bitmapset **null_columns, Datum value, bool isnull);
 static void run_analyze_on_chunk(Oid chunk_relid);
 
 /********************
@@ -1819,7 +1819,6 @@ build_scankeys(int32 hypertable_id, RowDecompressor decompressor, Bitmapset *key
 	{
 		AttrNumber attno = i + FirstLowInvalidHeapAttributeNumber;
 		char *attname = get_attname(decompressor.out_rel->rd_id, attno, false);
-		AttrNumber cmp_attno = get_attnum(decompressor.in_rel->rd_id, attname);
 		FormData_hypertable_compression *fd =
 			ts_hypertable_compression_get_by_pkey(hypertable_id, attname);
 
@@ -1844,44 +1843,14 @@ build_scankeys(int32 hypertable_id, RowDecompressor decompressor, Bitmapset *key
 		{
 			bool isnull;
 			Datum value = slot_getattr(slot, attno, &isnull);
-			Oid atttypid = decompressor.out_desc->attrs[attno - 1].atttypid;
-
-			TypeCacheEntry *tce = lookup_type_cache(atttypid, TYPECACHE_EQ_OPR_FINFO);
-
-			/* Segmentby column type should match in compressed and uncompressed chunk */
-			Assert(decompressor.out_desc->attrs[AttrNumberGetAttrOffset(attno)].atttypid ==
-				   decompressor.in_desc->attrs[AttrNumberGetAttrOffset(cmp_attno)].atttypid);
-
-			if (!OidIsValid(tce->eq_opr_finfo.fn_oid))
-				elog(ERROR, "no equality function for type \"%s\"", format_type_be(atttypid));
-
-			/*
-			 * In PG versions <= 14 NULL values are always considered distinct
-			 * from other NULL values and therefore NULLABLE multi-column
-			 * unique constraints might expose unexpected behaviour in the
-			 * presence of NULL values.
-			 * Since SK_SEARCHNULL is not supported by heap scans we cannot
-			 * build a ScanKey for NOT NULL and instead have to do those
-			 * checks manually.
-			 */
-			if (isnull)
-			{
-				*null_columns = bms_add_member(*null_columns, cmp_attno);
-			}
-			else
-			{
-				ScanKeyEntryInitialize(&scankeys[key_index],
-									   0, /* flags */
-									   cmp_attno,
-									   BTEqualStrategyNumber,
-									   InvalidOid, /* No strategy subtype. */
-									   decompressor.out_desc
-										   ->attrs[AttrNumberGetAttrOffset(attno)]
-										   .attcollation,
-									   tce->eq_opr_finfo.fn_oid,
-									   value);
-				key_index++;
-			}
+			key_index = create_segment_filter_scankey(&decompressor,
+													  attname,
+													  BTEqualStrategyNumber,
+													  scankeys,
+													  key_index,
+													  null_columns,
+													  value,
+													  isnull);
 		}
 		if (COMPRESSIONCOL_IS_ORDER_BY(fd))
 		{
@@ -1894,20 +1863,22 @@ build_scankeys(int32 hypertable_id, RowDecompressor decompressor, Bitmapset *key
 			if (isnull)
 				continue;
 
-			key_index = create_segment_metadata_scankey(&decompressor,
-														compression_column_segment_min_name(fd),
-														attno,
-														BTLessEqualStrategyNumber,
-														scankeys,
-														key_index,
-														value);
-			key_index = create_segment_metadata_scankey(&decompressor,
-														compression_column_segment_max_name(fd),
-														attno,
-														BTGreaterEqualStrategyNumber,
-														scankeys,
-														key_index,
-														value);
+			key_index = create_segment_filter_scankey(&decompressor,
+													  compression_column_segment_min_name(fd),
+													  BTLessEqualStrategyNumber,
+													  scankeys,
+													  key_index,
+													  null_columns,
+													  value,
+													  false); /* is_null_check */
+			key_index = create_segment_filter_scankey(&decompressor,
+													  compression_column_segment_max_name(fd),
+													  BTGreaterEqualStrategyNumber,
+													  scankeys,
+													  key_index,
+													  null_columns,
+													  value,
+													  false); /* is_null_check */
 		}
 	}
 }
@@ -1917,26 +1888,33 @@ build_scankeys(int32 hypertable_id, RowDecompressor decompressor, Bitmapset *key
 }
 
 static int
-create_segment_metadata_scankey(RowDecompressor *decompressor, char *segment_meta_col_name,
-								AttrNumber in_attno, StrategyNumber strategy, ScanKeyData *scankeys,
-								int num_scankeys, Datum value)
+create_segment_filter_scankey(RowDecompressor *decompressor, char *segment_filter_col_name,
+							  StrategyNumber strategy, ScanKeyData *scankeys, int num_scankeys,
+							  Bitmapset **null_columns, Datum value, bool is_null_check)
 {
-	AttrNumber segment_meta_attr_number =
-		get_attnum(decompressor->in_rel->rd_id, segment_meta_col_name);
-	Assert(segment_meta_attr_number != InvalidAttrNumber);
+	AttrNumber cmp_attno = get_attnum(decompressor->in_rel->rd_id, segment_filter_col_name);
+	Assert(cmp_attno != InvalidAttrNumber);
 
 	/* This should never happen but if it does happen, we can't generate a scan key for
-	 * the orderby column so just skip it */
-	if (segment_meta_attr_number == InvalidAttrNumber)
+	 * the filter column so just skip it */
+	if (cmp_attno == InvalidAttrNumber)
 		return num_scankeys;
 
-	Oid atttypid = decompressor->out_desc->attrs[AttrNumberGetAttrOffset(in_attno)].atttypid;
+	/*
+	 * In PG versions <= 14 NULL values are always considered distinct
+	 * from other NULL values and therefore NULLABLE multi-column
+	 * unique constraints might expose unexpected behaviour in the
+	 * presence of NULL values.
+	 * Since SK_SEARCHNULL is not supported by heap scans we cannot
+	 * build a ScanKey for NOT NULL and instead have to do those
+	 * checks manually.
+	 */
+	if (is_null_check)
+	{
+		*null_columns = bms_add_member(*null_columns, cmp_attno);
+		return num_scankeys;
+	}
 
-	/* Orderby column type should match in compressed metadata columns and uncompressed
-	 * chunk attribute */
-	Assert(
-		atttypid ==
-		decompressor->in_desc->attrs[AttrNumberGetAttrOffset(segment_meta_attr_number)].atttypid);
+	Oid atttypid = decompressor->in_desc->attrs[AttrNumberGetAttrOffset(cmp_attno)].atttypid;
 
 	TypeCacheEntry *tce = lookup_type_cache(atttypid, TYPECACHE_BTREE_OPFAMILY);
 	if (!OidIsValid(tce->btree_opf))
@@ -1956,10 +1934,10 @@ create_segment_metadata_scankey(RowDecompressor *decompressor, char *segment_met
 
 	ScanKeyEntryInitialize(&scankeys[num_scankeys++],
 						   0, /* flags */
-						   segment_meta_attr_number,
+						   cmp_attno,
 						   strategy,
 						   InvalidOid, /* No strategy subtype. */
-						   decompressor->out_desc->attrs[AttrNumberGetAttrOffset(in_attno)]
+						   decompressor->in_desc->attrs[AttrNumberGetAttrOffset(cmp_attno)]
 							   .attcollation,
 						   opr,
 						   value);
@@ -2062,27 +2040,21 @@ decompress_batches_for_insert(ChunkInsertState *cis, Chunk *chunk, TupleTableSlo
 }
 
 #if PG14_GE
-/*
- * Helper method which returns true if given column_name
- * is configured as SEGMENT BY column in a compressed hypertable
- */
-static bool
-is_segmentby_col(List *ht_compression_info, char *column_name)
+static SegmentFilter *
+add_filter_column_strategy(char *column_name, StrategyNumber strategy, Const *value,
+						   bool is_null_check)
 {
-	ListCell *lc;
-	foreach (lc, ht_compression_info)
-	{
-		FormData_hypertable_compression *fd = lfirst(lc);
-		if (namestrcmp(&fd->attname, column_name) == 0)
-		{
-			if (fd->segmentby_column_index > 0)
-				return true;
-			break;
-		}
-	}
-	return false;
-}
+	SegmentFilter *segment_filter = palloc0(sizeof(*segment_filter));
 
+	*segment_filter = (SegmentFilter){
+		.strategy = strategy,
+		.value = value,
+		.is_null_check = is_null_check,
+	};
+	namestrcpy(&segment_filter->column_name, column_name);
+
+	return segment_filter;
+}
 /*
  * This method will evaluate the predicates, extract
  * left and right operands, check if one of the operands is
@@ -2093,10 +2065,8 @@ is_segmentby_col(List *ht_compression_info, char *column_name)
  * be used to build scan keys later.
  */
 static void
-fill_predicate_context(Chunk *ch, List *predicates, List **segmentby_columns,
-					   List **segmentby_columns_value, List **is_null_check, List **is_null)
+fill_predicate_context(Chunk *ch, List *predicates, List **filters, List **is_null)
 {
-	List *ht_compression_info = ts_hypertable_compression_get(ch->fd.hypertable_id);
 	ListCell *lc;
 	foreach (lc, predicates)
 	{
@@ -2136,18 +2106,75 @@ fill_predicate_context(Chunk *ch, List *predicates, List **segmentby_columns,
 					continue;
 
 				column_name = get_attname(ch->table_id, var->varattno, false);
-				if (is_segmentby_col(ht_compression_info, column_name))
+				FormData_hypertable_compression *fd =
+					ts_hypertable_compression_get_by_pkey(ch->fd.hypertable_id, column_name);
+				TypeCacheEntry *tce = lookup_type_cache(var->vartype, TYPECACHE_BTREE_OPFAMILY);
+				int op_strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf);
+				if (COMPRESSIONCOL_IS_SEGMENT_BY(fd))
 				{
-					TypeCacheEntry *tce = lookup_type_cache(var->vartype, TYPECACHE_BTREE_OPFAMILY);
-					int op_strategy = get_op_opfamily_strategy(opexpr->opno, tce->btree_opf);
-					if (op_strategy == BTEqualStrategyNumber)
+					switch (op_strategy)
 					{
-						/* save segment by column name and its corresponding value specified in
-						 * WHERE */
-						*segmentby_columns = lappend(*segmentby_columns, column_name);
-						*segmentby_columns_value = lappend(*segmentby_columns_value, arg_value);
-						/* this constraint is not IS [NOT] NULL, so mark is_null_check as 0 */
-						*is_null_check = lappend_int(*is_null_check, 0);
+						case BTEqualStrategyNumber:
+						case BTLessStrategyNumber:
+						case BTLessEqualStrategyNumber:
+						case BTGreaterStrategyNumber:
+						case BTGreaterEqualStrategyNumber:
+						{
+							/* save segment by column name and its corresponding value specified in
+							 * WHERE */
+							*filters =
+								lappend(*filters,
+										add_filter_column_strategy(column_name,
+																   op_strategy,
+																   arg_value,
+																   false)); /* is_null_check */
+						}
+					}
+				}
+				else if (COMPRESSIONCOL_IS_ORDER_BY(fd))
+				{
+					switch (op_strategy)
+					{
+						case BTEqualStrategyNumber:
+						{
+							/* orderby col = value implies min <= value and max >= value */
+							*filters = lappend(
+								*filters,
+								add_filter_column_strategy(compression_column_segment_min_name(fd),
+														   BTLessEqualStrategyNumber,
+														   arg_value,
+														   false)); /* is_null_check */
+							*filters = lappend(
+								*filters,
+								add_filter_column_strategy(compression_column_segment_max_name(fd),
+														   BTGreaterEqualStrategyNumber,
+														   arg_value,
+														   false)); /* is_null_check */
+						}
+						break;
+						case BTLessStrategyNumber:
+						case BTLessEqualStrategyNumber:
+						{
+							/* orderby col <[=] value implies min <[=] value */
+							*filters = lappend(
+								*filters,
+								add_filter_column_strategy(compression_column_segment_min_name(fd),
+														   op_strategy,
+														   arg_value,
+														   false)); /* is_null_check */
+						}
+						break;
+						case BTGreaterStrategyNumber:
+						case BTGreaterEqualStrategyNumber:
+						{
+							/* orderby col >[=] value implies max >[=] value */
+							*filters = lappend(
+								*filters,
+								add_filter_column_strategy(compression_column_segment_max_name(fd),
+														   op_strategy,
+														   arg_value,
+														   false)); /* is_null_check */
+						}
+					}
 				}
 			}
 		}
@@ -2159,15 +2186,26 @@ fill_predicate_context(Chunk *ch, List *predicates, List **segmentby_columns,
 				{
 					var = (Var *) ntest->arg;
 					column_name = get_attname(ch->table_id, var->varattno, false);
-					if (is_segmentby_col(ht_compression_info, column_name))
+					FormData_hypertable_compression *fd =
+						ts_hypertable_compression_get_by_pkey(ch->fd.hypertable_id, column_name);
+					if (COMPRESSIONCOL_IS_SEGMENT_BY(fd))
 					{
-						*segmentby_columns = lappend(*segmentby_columns, column_name);
-						*is_null_check = lappend_int(*is_null_check, 1);
+						*filters = lappend(*filters,
+										   add_filter_column_strategy(column_name,
+																	  InvalidStrategy,
+																	  NULL,
+																	  true)); /* is_null_check */
+
 						if (ntest->nulltesttype == IS_NULL)
 							*is_null = lappend_int(*is_null, 1);
 						else
 							*is_null = lappend_int(*is_null, 0);
 					}
+					/* We cannot optimize filtering decompression using ORDERBY
+					 * metadata and null check qualifiers. We could possibly do that by checking the
+					 * compressed data in combination with the ORDERBY nulls first setting and
+					 * verifying that the first or last tuple of a segment contains a NULL value.
+					 * This is left for future optimization */
 				}
 			}
 			break;
@@ -2184,57 +2222,36 @@ fill_predicate_context(Chunk *ch, List *predicates, List **segmentby_columns,
  * OUT param null_columns is saved with column attribute number.
  */
 static ScanKeyData *
-build_update_delete_scankeys(Chunk *chunk, List *segmentby_columns, List *segmentby_column_values,
-							 List *is_null_check, int *num_scankeys, Bitmapset **null_columns)
+build_update_delete_scankeys(RowDecompressor *decompressor, List *filters, int *num_scankeys,
+							 Bitmapset **null_columns)
 {
-	Chunk *comp_chunk = NULL;
-	Relation comp_chunk_rel;
-	TupleDesc tupleDesc;
-	ListCell *col;
-	ListCell *is_null_or_not_null;
+	ListCell *lc;
+	SegmentFilter *filter;
 	int key_index = 0;
-	int pos = 0;
 
-	ScanKeyData *scankeys = palloc0(segmentby_columns->length * sizeof(ScanKeyData));
-	comp_chunk = ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, true);
-	comp_chunk_rel = table_open(comp_chunk->table_id, AccessShareLock);
-	tupleDesc = RelationGetDescr(comp_chunk_rel);
+	ScanKeyData *scankeys = palloc0(filters->length * sizeof(ScanKeyData));
 
-	forboth (col, segmentby_columns, is_null_or_not_null, is_null_check)
+	foreach (lc, filters)
 	{
-		char *column_name = lfirst(col);
-		AttrNumber attnum = get_attnum(RelationGetRelid(comp_chunk_rel), column_name);
-		if (attnum == InvalidAttrNumber)
+		filter = lfirst(lc);
+		AttrNumber attno = get_attnum(decompressor->in_rel->rd_id, NameStr(filter->column_name));
+		if (attno == InvalidAttrNumber)
 			ereport(ERROR,
 					(errcode(ERRCODE_UNDEFINED_COLUMN),
 					 errmsg("column \"%s\" of relation \"%s\" does not exist",
-							column_name,
-							RelationGetRelationName(comp_chunk_rel))));
-		Form_pg_attribute attr = TupleDescAttr(tupleDesc, AttrNumberGetAttrOffset(attnum));
-		TypeCacheEntry *tce = lookup_type_cache(attr->atttypid, TYPECACHE_EQ_OPR_FINFO);
-		if (!OidIsValid(tce->eq_opr_finfo.fn_oid))
-			elog(ERROR, "no equality function for column \"%s\"", column_name);
+							NameStr(filter->column_name),
+							RelationGetRelationName(decompressor->in_rel))));
 
-		if (lfirst_int(is_null_or_not_null) == 0)
-		{
-			Const *const_value = list_nth(segmentby_column_values, pos++);
-			Datum value = (const_value ? const_value->constvalue : 0);
-			ScanKeyEntryInitialize(&scankeys[key_index++],
-								   0, /* flags */
-								   attnum,
-								   BTEqualStrategyNumber,
-								   InvalidOid, /* No strategy subtype. */
-								   attr->attcollation,
-								   tce->eq_opr_finfo.fn_oid,
-								   value);
-		}
-		else
-		{
-			*null_columns = bms_add_member(*null_columns, attnum);
-		}
+		key_index = create_segment_filter_scankey(decompressor,
+												  NameStr(filter->column_name),
+												  filter->strategy,
+												  scankeys,
+												  key_index,
+												  null_columns,
+												  filter->value ? filter->value->constvalue : 0,
+												  filter->is_null_check);
 	}
 	*num_scankeys = key_index;
-	table_close(comp_chunk_rel, AccessShareLock);
 	return scankeys;
 }
 
@@ -2246,23 +2263,14 @@ build_update_delete_scankeys(Chunk *chunk, List *segmentby_columns, List *segmen
  * 4.delete this row from compressed chunk
  */
 static bool
-decompress_batches(Chunk *ch, ScanKeyData *scankeys, int num_scankeys, Bitmapset *null_columns,
-				   List *is_nulls, bool *chunk_status_changed)
+decompress_batches(RowDecompressor *decompressor, ScanKeyData *scankeys, int num_scankeys,
+				   Bitmapset *null_columns, List *is_nulls, bool *chunk_status_changed)
 {
-	Relation chunk_rel;
-	Relation comp_chunk_rel;
-	Chunk *comp_chunk;
 	HeapTuple compressed_tuple;
-	RowDecompressor decompressor;
-	Snapshot snapshot;
+	Snapshot snapshot = GetTransactionSnapshot();
 
-	snapshot = GetTransactionSnapshot();
-	chunk_rel = table_open(ch->table_id, RowExclusiveLock);
-	comp_chunk = ts_chunk_get_by_id(ch->fd.compressed_chunk_id, true);
-	comp_chunk_rel = table_open(comp_chunk->table_id, RowExclusiveLock);
-	decompressor = build_decompressor(comp_chunk_rel, chunk_rel);
-
-	TableScanDesc heapScan = table_beginscan(comp_chunk_rel, snapshot, num_scankeys, scankeys);
+	TableScanDesc heapScan =
+		table_beginscan(decompressor->in_rel, snapshot, num_scankeys, scankeys);
 	while ((compressed_tuple = heap_getnext(heapScan, ForwardScanDirection)) != NULL)
 	{
 		bool skip_tuple = false;
@@ -2273,7 +2281,7 @@ decompress_batches(Chunk *ch, ScanKeyData *scankeys, int num_scankeys, Bitmapset
 		for (; attrno >= 0; attrno = bms_next_member(null_columns, attrno))
 		{
 			is_null_condition = list_nth_int(is_nulls, pos);
-			seg_col_is_null = heap_attisnull(compressed_tuple, attrno, decompressor.in_desc);
+			seg_col_is_null = heap_attisnull(compressed_tuple, attrno, decompressor->in_desc);
 			if ((seg_col_is_null && !is_null_condition) || (!seg_col_is_null && is_null_condition))
 			{
 				/*
@@ -2289,16 +2297,16 @@ decompress_batches(Chunk *ch, ScanKeyData *scankeys, int num_scankeys, Bitmapset
 		if (skip_tuple)
 			continue;
 		heap_deform_tuple(compressed_tuple,
-						  decompressor.in_desc,
-						  decompressor.compressed_datums,
-						  decompressor.compressed_is_nulls);
+						  decompressor->in_desc,
+						  decompressor->compressed_datums,
+						  decompressor->compressed_is_nulls);
 
-		row_decompressor_decompress_row(&decompressor, NULL);
+		row_decompressor_decompress_row(decompressor, NULL);
 		TM_FailureData tmfd;
 		TM_Result result;
-		result = table_tuple_delete(comp_chunk_rel,
+		result = table_tuple_delete(decompressor->in_rel,
 									&compressed_tuple->t_self,
-									decompressor.mycid,
+									decompressor->mycid,
 									snapshot,
 									InvalidSnapshot,
 									true,
@@ -2323,11 +2331,6 @@ decompress_batches(Chunk *ch, ScanKeyData *scankeys, int num_scankeys, Bitmapset
 	if (scankeys)
 		pfree(scankeys);
 	table_endscan(heapScan);
-	ts_catalog_close_indexes(decompressor.indexstate);
-	FreeBulkInsertState(decompressor.bistate);
-
-	table_close(chunk_rel, NoLock);
-	table_close(comp_chunk_rel, NoLock);
 	return true;
 }
 
@@ -2342,35 +2345,38 @@ decompress_batches(Chunk *ch, ScanKeyData *scankeys, int num_scankeys, Bitmapset
 void
 decompress_batches_for_update_delete(List *chunks, List *predicates)
 {
-	List *segmentby_columns = NIL;
-	List *segmentby_columns_value = NIL;
-	List *is_null_check = NIL;
+	List *filters = NIL;
 	List *is_null = NIL;
 	ListCell *ch = NULL;
+	ListCell *lc = NULL;
+
+	Relation chunk_rel;
+	Relation comp_chunk_rel;
+	Chunk *chunk, *comp_chunk;
+	RowDecompressor decompressor;
+	SegmentFilter *filter;
 
 	if (predicates)
-		fill_predicate_context(linitial(chunks),
-							   predicates,
-							   &segmentby_columns,
-							   &segmentby_columns_value,
-							   &is_null_check,
-							   &is_null);
+		fill_predicate_context(linitial(chunks), predicates, &filters, &is_null);
 	foreach (ch, chunks)
 	{
+		chunk = (Chunk *) lfirst(ch);
 		bool chunk_status_changed = false;
 		ScanKeyData *scankeys = NULL;
 		Bitmapset *null_columns = NULL;
 		int num_scankeys = 0;
 
-		if (segmentby_columns)
+		chunk_rel = table_open(chunk->table_id, RowExclusiveLock);
+		comp_chunk = ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, true);
+		comp_chunk_rel = table_open(comp_chunk->table_id, RowExclusiveLock);
+		decompressor = build_decompressor(comp_chunk_rel, chunk_rel);
+
+		if (filters)
 		{
-			scankeys = build_update_delete_scankeys(lfirst(ch),
-													segmentby_columns,
-													segmentby_columns_value,
-													is_null_check,
-													&num_scankeys,
-													&null_columns);
+			scankeys =
+				build_update_delete_scankeys(&decompressor, filters, &num_scankeys, &null_columns);
 		}
-		if (decompress_batches(lfirst(ch),
+		if (decompress_batches(&decompressor,
 							   scankeys,
 							   num_scankeys,
 							   null_columns,
@@ -2384,6 +2390,18 @@ decompress_batches_for_update_delete(List *chunks, List *predicates)
 			if (chunk_status_changed == true)
 				ts_chunk_set_partial(lfirst(ch));
 		}
+
+		ts_catalog_close_indexes(decompressor.indexstate);
+		FreeBulkInsertState(decompressor.bistate);
+
+		table_close(chunk_rel, NoLock);
+		table_close(comp_chunk_rel, NoLock);
+	}
+
+	foreach (lc, filters)
+	{
+		filter = lfirst(lc);
+		pfree(filter);
 	}
 }
 #endif
@@ -252,6 +252,19 @@ typedef struct RowCompressor
 	bool first_iteration;
 } RowCompressor;
 
+/* SegmentFilter is used for filtering segments based on qualifiers */
+typedef struct SegmentFilter
+{
+	/* Column which we use for filtering */
+	NameData column_name;
+	/* Filter operation used */
+	StrategyNumber strategy;
+	/* Value to compare with */
+	Const *value;
+	/* IS NULL or IS NOT NULL */
+	bool is_null_check;
+} SegmentFilter;
+
 extern Datum tsl_compressed_data_decompress_forward(PG_FUNCTION_ARGS);
 extern Datum tsl_compressed_data_decompress_reverse(PG_FUNCTION_ARGS);
 extern Datum tsl_compressed_data_send(PG_FUNCTION_ARGS);
@@ -124,7 +124,7 @@ WHERE hypertable_name = 'sample_table' ORDER BY chunk_name;
  9 | _hyper_1_2_chunk
 (2 rows)
 
--- recompress the paritial chunks
+-- recompress the partial chunks
 CALL recompress_chunk('_timescaledb_internal._hyper_1_1_chunk');
 CALL recompress_chunk('_timescaledb_internal._hyper_1_2_chunk');
 -- check chunk compression status
@@ -914,13 +914,13 @@ DELETE FROM sample_table WHERE device_id >= 4 AND val <= 1;
 SELECT COUNT(*) FROM :COMPRESS_CHUNK_1;
  count
 -------
-     0
+     4
 (1 row)
 
 SELECT COUNT(*) FROM :COMPRESS_CHUNK_2;
  count
 -------
-     0
+     5
 (1 row)
 
 -- get rowcount from compressed chunks where device_id IS NULL
@@ -945,7 +945,7 @@ WHERE hypertable_name = 'sample_table' ORDER BY chunk_name;
  chunk_status | CHUNK_NAME
 --------------+--------------------
             9 | _hyper_13_27_chunk
-            9 | _hyper_13_28_chunk
+            1 | _hyper_13_28_chunk
 (2 rows)
 
 -- added tests for code coverage
@@ -1272,3 +1272,453 @@ SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL;
 (1 row)
 
 DROP TABLE sample_table;
+-- test filtering with ORDER BY columns
+CREATE TABLE sample_table(time timestamptz, c1 int, c2 int, c3 int, c4 int);
+SELECT create_hypertable('sample_table','time');
+NOTICE:  adding not-null constraint to column "time"
+     create_hypertable
+----------------------------
+ (19,public,sample_table,t)
+(1 row)
+
+ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4', timescaledb.compress_orderby='c1,c2,time');
+INSERT INTO sample_table
+SELECT t, c1, c2, c3, c4
+FROM generate_series(:'start_date'::timestamptz - INTERVAL '9 hours',
+                     :'start_date'::timestamptz,
+                     INTERVAL '1 hour') t,
+     generate_series(0,9,1) c1,
+     generate_series(0,9,1) c2,
+     generate_series(0,9,1) c3,
+     generate_series(0,9,1) c4;
+SELECT compress_chunk(show_chunks('sample_table'));
+              compress_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_19_37_chunk
+(1 row)
+
+-- get FIRST chunk
+SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1"
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
+WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%'
+ORDER BY ch1.id \gset
+-- get FIRST compressed chunk
+SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1"
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
+WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%'
+ORDER BY ch1.id \gset
+-- check that you uncompress and delete only for exact SEGMENTBY value
+BEGIN;
+-- report 10 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5;
+ count
+-------
+    10
+(1 row)
+
+-- report 10k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 = 5;
+ count
+-------
+ 10000
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 \gset
+-- delete 10k rows
+DELETE FROM sample_table WHERE c4 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 = 5;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only for less than SEGMENTBY value
+BEGIN;
+-- report 50 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5;
+ count
+-------
+    50
+(1 row)
+
+-- report 50k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 < 5;
+ count
+-------
+ 50000
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 < 5 \gset
+-- delete 50k rows
+DELETE FROM sample_table WHERE c4 < 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 < 5;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only for greater than or equal to SEGMENTBY value
+BEGIN;
+-- report 50 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5;
+ count
+-------
+    50
+(1 row)
+
+-- report 50k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 >= 5;
+ count
+-------
+ 50000
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 >= 5 \gset
+-- delete 50k rows
+DELETE FROM sample_table WHERE c4 >= 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 >= 5;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only for exact ORDERBY value
+-- this will uncompress segments which have min <= value and max >= value
+BEGIN;
+-- report 10k rows
+SELECT COUNT(*) FROM sample_table WHERE c2 = 3;
+ count
+-------
+ 10000
+(1 row)
+
+-- report 100 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3;
+ count
+-------
+   100
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c2 = 3 \gset
+-- delete 10k rows
+DELETE FROM sample_table WHERE c2 = 3;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c2 = 3;
+ count
+-------
+     0
+(1 row)
+
+-- report 90k rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+ 90000
+(1 row)
+
+-- report 0 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only for less than ORDERBY value
+-- this will uncompress segments which have min < value
+BEGIN;
+-- report 20k rows
+SELECT COUNT(*) FROM sample_table WHERE c1 < 2;
+ count
+-------
+ 20000
+(1 row)
+
+-- report 20 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2;
+ count
+-------
+    20
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 < 2 \gset
+-- delete 20k rows
+DELETE FROM sample_table WHERE c1 < 2;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c1 < 2;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only for greater than or equal to ORDERBY value
+-- this will uncompress segments which have max >= value
+BEGIN;
+-- report 30k rows
+SELECT COUNT(*) FROM sample_table WHERE c1 >= 7;
+ count
+-------
+ 30000
+(1 row)
+
+-- report 30 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7;
+ count
+-------
+    30
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 >= 7 \gset
+-- delete 30k rows
+DELETE FROM sample_table WHERE c1 >= 7;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c1 >= 7;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only tuples which satisfy SEGMENTBY
+-- and ORDERBY qualifiers, segments only contain one distinct value for
+-- these qualifiers, everything should be deleted that was decompressed
+BEGIN;
+-- report 1k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 = 5 and c1 = 5;
+ count
+-------
+  1000
+(1 row)
+
+-- report 1 row in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5;
+ count
+-------
+     1
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 and c1 = 5 \gset
+-- delete 1k rows
+DELETE FROM sample_table WHERE c4 = 5 and c1 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 = 5 and c1 = 5;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+     0
+(1 row)
+
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
+-- check that you uncompress and delete only tuples which satisfy SEGMENTBY
+-- and ORDERBY qualifiers, segments contain more than one distinct value for
+-- these qualifiers, not everything should be deleted that was decompressed
+BEGIN;
+-- report 4k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 > 5 and c2 = 5;
+ count
+-------
+  4000
+(1 row)
+
+-- report 40 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5;
+ count
+-------
+    40
+(1 row)
+
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 > 5 and c2 = 5 \gset
+-- delete 4k rows
+DELETE FROM sample_table WHERE c4 > 5 and c2 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 > 5 and c2 = 5;
+ count
+-------
+     0
+(1 row)
+
+-- report 36k rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+ count
+-------
+ 36000
+(1 row)
+
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5;
+ count
+-------
+     0
+(1 row)
+
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ ?column?
+----------
+ t
+(1 row)
+
+ROLLBACK;
@@ -92,7 +92,7 @@ SELECT chunk_status,
 FROM compressed_chunk_info_view
 WHERE hypertable_name = 'sample_table' ORDER BY chunk_name;
 
--- recompress the paritial chunks
+-- recompress the partial chunks
 CALL recompress_chunk('_timescaledb_internal._hyper_1_1_chunk');
 CALL recompress_chunk('_timescaledb_internal._hyper_1_2_chunk');
 
@@ -731,3 +731,213 @@ SELECT COUNT(*) FROM :COMPRESS_CHUNK_1;
 SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 IS NULL;
 
 DROP TABLE sample_table;
+
+-- test filtering with ORDER BY columns
+CREATE TABLE sample_table(time timestamptz, c1 int, c2 int, c3 int, c4 int);
+SELECT create_hypertable('sample_table','time');
+ALTER TABLE sample_table SET (timescaledb.compress,timescaledb.compress_segmentby='c4', timescaledb.compress_orderby='c1,c2,time');
+INSERT INTO sample_table
+SELECT t, c1, c2, c3, c4
+FROM generate_series(:'start_date'::timestamptz - INTERVAL '9 hours',
+                     :'start_date'::timestamptz,
+                     INTERVAL '1 hour') t,
+     generate_series(0,9,1) c1,
+     generate_series(0,9,1) c2,
+     generate_series(0,9,1) c3,
+     generate_series(0,9,1) c4;
+SELECT compress_chunk(show_chunks('sample_table'));
+
+-- get FIRST chunk
+SELECT ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_1"
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
+WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE '_hyper_%'
+ORDER BY ch1.id \gset
+
+-- get FIRST compressed chunk
+SELECT ch1.schema_name|| '.' || ch1.table_name AS "COMPRESS_CHUNK_1"
+FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
+WHERE ch1.hypertable_id = ht.id AND ch1.table_name LIKE 'compress_%'
+ORDER BY ch1.id \gset
+
+-- check that you uncompress and delete only for exact SEGMENTBY value
+BEGIN;
+-- report 10 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5;
+-- report 10k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 = 5;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 \gset
+-- delete 10k rows
+DELETE FROM sample_table WHERE c4 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 = 5;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 = 5;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only for less than SEGMENTBY value
+BEGIN;
+-- report 50 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5;
+-- report 50k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 < 5;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 < 5 \gset
+-- delete 50k rows
+DELETE FROM sample_table WHERE c4 < 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 < 5;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 < 5;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only for greater than or equal to SEGMENTBY value
+BEGIN;
+-- report 50 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5;
+-- report 50k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 >= 5;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 >= 5 \gset
+-- delete 50k rows
+DELETE FROM sample_table WHERE c4 >= 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 >= 5;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 where c4 >= 5;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only for exact ORDERBY value
+-- this will uncompress segments which have min <= value and max >= value
+BEGIN;
+-- report 10k rows
+SELECT COUNT(*) FROM sample_table WHERE c2 = 3;
+-- report 100 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c2 = 3 \gset
+-- delete 10k rows
+DELETE FROM sample_table WHERE c2 = 3;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c2 = 3;
+-- report 90k rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_2 <= 3 and _ts_meta_max_2 >= 3;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only for less than ORDERBY value
+-- this will uncompress segments which have min < value
+BEGIN;
+-- report 20k rows
+SELECT COUNT(*) FROM sample_table WHERE c1 < 2;
+-- report 20 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 < 2 \gset
+-- delete 20k rows
+DELETE FROM sample_table WHERE c1 < 2;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c1 < 2;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunk
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_max_1 < 2;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only for greater than or equal to ORDERBY value
+-- this will uncompress segments which have max >= value
+BEGIN;
+-- report 30k rows
+SELECT COUNT(*) FROM sample_table WHERE c1 >= 7;
+-- report 30 rows
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c1 >= 7 \gset
+-- delete 30k rows
+DELETE FROM sample_table WHERE c1 >= 7;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c1 >= 7;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE _ts_meta_min_1 >= 7;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only tuples which satisfy SEGMENTBY
+-- and ORDERBY qualifiers, segments only contain one distinct value for
+-- these qualifiers, everything should be deleted that was decompressed
+BEGIN;
+-- report 1k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 = 5 and c1 = 5;
+-- report 1 row in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 = 5 and c1 = 5 \gset
+-- delete 1k rows
+DELETE FROM sample_table WHERE c4 = 5 and c1 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 = 5 and c1 = 5;
+-- report 0 rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 = 5 AND _ts_meta_min_1 <= 5 and _ts_meta_max_1 >= 5;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;
+
+-- check that you uncompress and delete only tuples which satisfy SEGMENTBY
+-- and ORDERBY qualifiers, segments contain more than one distinct value for
+-- these qualifiers, not everything should be deleted that was decompressed
+BEGIN;
+-- report 4k rows
+SELECT COUNT(*) FROM sample_table WHERE c4 > 5 and c2 = 5;
+-- report 40 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5;
+-- fetch total and number of affected rows
+SELECT COUNT(*) AS "total_rows" FROM sample_table \gset
+SELECT COUNT(*) AS "total_affected_rows" FROM sample_table WHERE c4 > 5 and c2 = 5 \gset
+-- delete 4k rows
+DELETE FROM sample_table WHERE c4 > 5 and c2 = 5;
+-- report 0 rows
+SELECT count(*) FROM sample_table WHERE c4 > 5 and c2 = 5;
+-- report 36k rows in uncompressed chunk
+SELECT COUNT(*) FROM ONLY :CHUNK_1;
+-- report 0 rows in compressed chunks
+SELECT COUNT(*) FROM :COMPRESS_CHUNK_1 WHERE c4 > 5 AND _ts_meta_min_2 <= 5 and _ts_meta_max_2 >= 5;
+-- validate correct number of rows was deleted
+-- report true
+SELECT COUNT(*) = :total_rows - :total_affected_rows FROM sample_table;
+ROLLBACK;