1
0
mirror of https://github.com/timescale/timescaledb.git synced 2025-05-19 20:24:46 +08:00

Split segment meta min_max into two columns

This simplifies the code and the access to the min/max
metadata. Before, we used a custom type; now the min/max
values are simply the same type as the underlying column and are
stored as two separate columns.

This also removes the custom type that was used before.
This commit is contained in:
Matvey Arye 2019-09-30 14:25:47 -04:00 committed by Matvey Arye
parent 4d12f5b8f3
commit 0f3e74215a
32 changed files with 914 additions and 1340 deletions

@ -21,7 +21,6 @@ set(IMMUTABLE_API_SOURCE_FILES
set(SOURCE_FILES
hypertable.sql
chunk.sql
compression.sql
ddl_internal.sql
edition.sql
util_time.sql

@ -1,18 +0,0 @@
-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
-- Accessor functions for the custom segment_meta_min_max type (removed by
-- this commit). Each delegates to a C entry point in the loaded module.

-- Extract the stored minimum; the ANYELEMENT argument supplies the
-- expected underlying type at call time.
CREATE FUNCTION _timescaledb_internal.segment_meta_get_min(_timescaledb_internal.segment_meta_min_max, ANYELEMENT)
RETURNS ANYELEMENT
AS '@MODULE_PATHNAME@', 'ts_segment_meta_get_min'
LANGUAGE C IMMUTABLE;

-- Extract the stored maximum; same ANYELEMENT type-dispatch convention.
CREATE FUNCTION _timescaledb_internal.segment_meta_get_max(_timescaledb_internal.segment_meta_min_max, ANYELEMENT)
RETURNS ANYELEMENT
AS '@MODULE_PATHNAME@', 'ts_segment_meta_get_max'
LANGUAGE C IMMUTABLE;

-- Report whether the compressed segment contained any NULLs.
CREATE FUNCTION _timescaledb_internal.segment_meta_has_null(_timescaledb_internal.segment_meta_min_max)
RETURNS boolean
AS '@MODULE_PATHNAME@', 'ts_segment_meta_has_null'
LANGUAGE C IMMUTABLE;

@ -57,40 +57,3 @@ CREATE TYPE _timescaledb_internal.compressed_data (
RECEIVE = _timescaledb_internal.compressed_data_recv,
SEND = _timescaledb_internal.compressed_data_send
);
--
-- _timescaledb_internal.segment_meta_min_max keeps the min/max range of compressed data
--
CREATE TYPE _timescaledb_internal.segment_meta_min_max;
--the textual input/output is simply base64 encoding of the binary representation
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_in(CSTRING)
RETURNS _timescaledb_internal.segment_meta_min_max
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_in'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_out(_timescaledb_internal.segment_meta_min_max)
RETURNS CSTRING
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_out'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_send(_timescaledb_internal.segment_meta_min_max)
RETURNS BYTEA
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_send'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_recv(internal)
RETURNS _timescaledb_internal.segment_meta_min_max
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_recv'
LANGUAGE C IMMUTABLE STRICT;
CREATE TYPE _timescaledb_internal.segment_meta_min_max (
INTERNALLENGTH = VARIABLE,
STORAGE = EXTERNAL, --move to toast, don't compress
ALIGNMENT = DOUBLE, --needed for alignment to work with arbitrary datums
INPUT = _timescaledb_internal.segment_meta_min_max_in,
OUTPUT = _timescaledb_internal.segment_meta_min_max_out,
RECEIVE = _timescaledb_internal.segment_meta_min_max_recv,
SEND = _timescaledb_internal.segment_meta_min_max_send
);

@ -214,40 +214,3 @@ insert into _timescaledb_catalog.compression_algorithm values
( 4, 1, 'COMPRESSION_ALGORITHM_DELTADELTA', 'deltadelta')
on conflict(id) do update set (version, name, description)
= (excluded.version, excluded.name, excluded.description);
--
-- _timescaledb_internal.segment_meta_min_max keeps the min/max range of compressed data
--
CREATE TYPE _timescaledb_internal.segment_meta_min_max;
--the textual input/output is simply base64 encoding of the binary representation
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_in(CSTRING)
RETURNS _timescaledb_internal.segment_meta_min_max
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_in'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_out(_timescaledb_internal.segment_meta_min_max)
RETURNS CSTRING
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_out'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_send(_timescaledb_internal.segment_meta_min_max)
RETURNS BYTEA
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_send'
LANGUAGE C IMMUTABLE STRICT;
CREATE FUNCTION _timescaledb_internal.segment_meta_min_max_recv(internal)
RETURNS _timescaledb_internal.segment_meta_min_max
AS '@MODULE_PATHNAME@', 'ts_segment_meta_min_max_recv'
LANGUAGE C IMMUTABLE STRICT;
CREATE TYPE _timescaledb_internal.segment_meta_min_max (
INTERNALLENGTH = VARIABLE,
STORAGE = EXTERNAL, --move to toast, don't compress
ALIGNMENT = DOUBLE, --needed for alignment to work with arbitrary datums
INPUT = _timescaledb_internal.segment_meta_min_max_in,
OUTPUT = _timescaledb_internal.segment_meta_min_max_out,
RECEIVE = _timescaledb_internal.segment_meta_min_max_recv,
SEND = _timescaledb_internal.segment_meta_min_max_send
);

@ -19,7 +19,6 @@ set(SOURCES
copy.c
compression_chunk_size.c
compression_with_clause.c
compression_segment_meta_min_max.c
dimension.c
dimension_slice.c
dimension_vector.c

@ -1,107 +0,0 @@
/*
* This file and its contents are licensed under the Apache License 2.0.
* Please see the included NOTICE for copyright information and
* LICENSE-APACHE for a copy of the license.
*/
#include <postgres.h>
#include "cross_module_fn.h"
#include "compat.h"
#include "base64_compat.h"
#include "license_guc.h"
TS_FUNCTION_INFO_V1(ts_segment_meta_min_max_send);
TS_FUNCTION_INFO_V1(ts_segment_meta_min_max_recv);
TS_FUNCTION_INFO_V1(ts_segment_meta_min_max_out);
TS_FUNCTION_INFO_V1(ts_segment_meta_min_max_in);
TS_FUNCTION_INFO_V1(ts_segment_meta_get_min);
TS_FUNCTION_INFO_V1(ts_segment_meta_get_max);
TS_FUNCTION_INFO_V1(ts_segment_meta_has_null);
/*
 * Binary-send function for segment_meta_min_max: serialize the datum to its
 * bytea wire format by delegating through the cross-module function table
 * (the real implementation lives in the TSL submodule).
 */
Datum
ts_segment_meta_min_max_send(PG_FUNCTION_ARGS)
{
Datum meta = PG_GETARG_DATUM(0);

PG_RETURN_DATUM(PointerGetDatum(ts_cm_functions->segment_meta_min_max_send(meta)));
}
/*
 * Binary-receive function for segment_meta_min_max: reconstruct a datum from
 * the wire-format buffer, delegating to the loaded submodule.
 */
Datum
ts_segment_meta_min_max_recv(PG_FUNCTION_ARGS)
{
StringInfo buf = (StringInfo) PG_GETARG_POINTER(0);

PG_RETURN_DATUM(ts_cm_functions->segment_meta_min_max_recv(buf));
}
/*
 * Textual output function: the text form is simply the base64 encoding of
 * the binary (send) representation.
 */
Datum
ts_segment_meta_min_max_out(PG_FUNCTION_ARGS)
{
Datum meta = PG_GETARG_DATUM(0);
/* reuse the binary serialization as the payload to encode */
bytea *bytes = ts_cm_functions->segment_meta_min_max_send(meta);
int raw_len = VARSIZE_ANY_EXHDR(bytes);
const char *raw_data = VARDATA(bytes);
/* pg_b64_enc_len gives the worst-case encoded size; +1 for NUL */
int encoded_len = pg_b64_enc_len(raw_len);
char *encoded = palloc(encoded_len + 1);

/* encoded_len is re-assigned to the actual number of bytes written */
encoded_len = pg_b64_encode(raw_data, raw_len, encoded);

encoded[encoded_len] = '\0';

PG_RETURN_CSTRING(encoded);
}
/*
 * Textual input function: decode the base64 text form back into the binary
 * representation, then hand it to the submodule's recv via a StringInfo.
 */
Datum
ts_segment_meta_min_max_in(PG_FUNCTION_ARGS)
{
const char *input = PG_GETARG_CSTRING(0);
size_t input_len = strlen(input);
int decoded_len;
char *decoded;
StringInfoData data;

/* Load TSL explicitly in case this is called during parsing */
ts_license_enable_module_loading();

/* guard the size_t -> int narrowing required by pg_b64_decode */
if (input_len > PG_INT32_MAX)
elog(ERROR, "input too long");

decoded_len = pg_b64_dec_len(input_len);
decoded = palloc(decoded_len + 1);
/* decoded_len becomes the actual number of decoded bytes */
decoded_len = pg_b64_decode(input, input_len, decoded);
decoded[decoded_len] = '\0';

/* wrap the decoded bytes in a StringInfo without copying */
data = (StringInfoData){
.data = decoded,
.len = decoded_len,
.maxlen = decoded_len,
};

PG_RETURN_DATUM(ts_cm_functions->segment_meta_min_max_recv(&data));
}
/*
 * Return the minimum value stored in the metadata datum; a NULL metadata
 * argument yields SQL NULL. The second (ANYELEMENT) argument's resolved type
 * tells the submodule which underlying type to produce.
 *
 * NOTE(review): segment_meta_get_min already returns a Datum, so the
 * PointerGetDatum wrapper looks like a redundant cast — confirm it is a
 * no-op on all supported platforms.
 */
Datum
ts_segment_meta_get_min(PG_FUNCTION_ARGS)
{
if (PG_ARGISNULL(0))
PG_RETURN_NULL();

PG_RETURN_DATUM(PointerGetDatum(
ts_cm_functions->segment_meta_get_min(PG_GETARG_DATUM(0),
get_fn_expr_argtype(fcinfo->flinfo, 1))));
}
/*
 * Return the maximum value stored in the metadata datum; NULL in, NULL out.
 * Mirrors ts_segment_meta_get_min, using the resolved type of argument 1.
 */
Datum
ts_segment_meta_get_max(PG_FUNCTION_ARGS)
{
if (PG_ARGISNULL(0))
PG_RETURN_NULL();

PG_RETURN_DATUM(PointerGetDatum(
ts_cm_functions->segment_meta_get_max(PG_GETARG_DATUM(0),
get_fn_expr_argtype(fcinfo->flinfo, 1))));
}
/*
 * Report whether the segment contained any NULLs. A NULL metadata datum is
 * treated as "has nulls" (true) — metadata is only NULL when every value in
 * the segment was NULL.
 */
Datum
ts_segment_meta_has_null(PG_FUNCTION_ARGS)
{
if (PG_ARGISNULL(0))
PG_RETURN_BOOL(true);

PG_RETURN_BOOL(ts_cm_functions->segment_meta_has_null(PG_GETARG_DATUM(0)));
}

@ -350,41 +350,6 @@ continuous_agg_drop_chunks_by_chunk_id_default(int32 raw_hypertable_id, Chunk **
error_no_default_fn_community();
}
/*
 * Community-edition default stubs for the segment-meta cross-module hooks
 * (removed by this commit). Each raises the standard "no default function"
 * error; pg_unreachable() documents that control never returns.
 */

static bytea *
segment_meta_min_max_send_default(Datum arg1)
{
error_no_default_fn_community();
pg_unreachable();
}

static Datum
segment_meta_min_max_recv_default(StringInfo buf)
{
error_no_default_fn_community();
pg_unreachable();
}

static Datum
segment_meta_get_min_default(Datum meta, Oid type)
{
error_no_default_fn_community();
pg_unreachable();
}

static Datum
segment_meta_get_max_default(Datum meta, Oid type)
{
error_no_default_fn_community();
pg_unreachable();
}

static bool
segment_meta_has_null_default(Datum meta)
{
error_no_default_fn_community();
pg_unreachable();
}
/*
* Define cross-module functions' default values:
* If the submodule isn't activated, using one of the cm functions will throw an
@ -437,12 +402,6 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.process_compress_table = process_compress_table_default,
.compress_chunk = error_no_default_fn_pg_community,
.decompress_chunk = error_no_default_fn_pg_community,
.segment_meta_min_max_send = segment_meta_min_max_send_default,
.segment_meta_min_max_recv = segment_meta_min_max_recv_default,
.segment_meta_get_min = segment_meta_get_min_default,
.segment_meta_get_max = segment_meta_get_max_default,
.segment_meta_has_null = segment_meta_has_null_default,
.compressed_data_decompress_forward = error_no_default_fn_pg_community,
.compressed_data_decompress_reverse = error_no_default_fn_pg_community,
.deltadelta_compressor_append = error_no_default_fn_pg_community,

@ -80,12 +80,6 @@ typedef struct CrossModuleFunctions
WithClauseResult *with_clause_options);
PGFunction compress_chunk;
PGFunction decompress_chunk;
bytea *(*segment_meta_min_max_send)(Datum);
Datum (*segment_meta_min_max_recv)(StringInfo buf);
Datum (*segment_meta_get_min)(Datum, Oid type);
Datum (*segment_meta_get_max)(Datum, Oid type);
bool (*segment_meta_has_null)(Datum);
/* The compression functions below are not installed in SQL as part of create extension;
* They are installed and tested during testing scripts. They are exposed in cross-module
* functions because they may be very useful for debugging customer problems if the sql

@ -93,7 +93,8 @@ typedef struct PerColumn
* Information on the metadata we'll store for this column (currently only min/max).
* Only used for order-by columns right now, will be {-1, NULL} for others.
*/
int16 min_max_metadata_attr_offset;
int16 min_metadata_attr_offset;
int16 max_metadata_attr_offset;
SegmentMetaMinMaxBuilder *min_max_metadata_builder;
/* segment info; only used if compressor is NULL */
@ -479,7 +480,8 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
Assert(AttrNumberGetAttrOffset(compressed_colnum) < num_columns_in_compressed_table);
if (!COMPRESSIONCOL_IS_SEGMENT_BY(compression_info))
{
int16 segment_min_max_attr_offset = -1;
int16 segment_min_attr_offset = -1;
int16 segment_max_attr_offset = -1;
SegmentMetaMinMaxBuilder *segment_min_max_builder = NULL;
if (compressed_column_attr->atttypid != compressed_data_type_oid)
elog(ERROR,
@ -488,12 +490,18 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
if (compression_info->orderby_column_index > 0)
{
char *segment_col_name = compression_column_segment_min_max_name(compression_info);
AttrNumber segment_min_max_attr_number =
get_attnum(compressed_table->rd_id, segment_col_name);
if (segment_min_max_attr_number == InvalidAttrNumber)
elog(ERROR, "couldn't find metadata column %s", segment_col_name);
segment_min_max_attr_offset = AttrNumberGetAttrOffset(segment_min_max_attr_number);
char *segment_min_col_name = compression_column_segment_min_name(compression_info);
char *segment_max_col_name = compression_column_segment_max_name(compression_info);
AttrNumber segment_min_attr_number =
get_attnum(compressed_table->rd_id, segment_min_col_name);
AttrNumber segment_max_attr_number =
get_attnum(compressed_table->rd_id, segment_max_col_name);
if (segment_min_attr_number == InvalidAttrNumber)
elog(ERROR, "couldn't find metadata column %s", segment_min_col_name);
if (segment_max_attr_number == InvalidAttrNumber)
elog(ERROR, "couldn't find metadata column %s", segment_max_col_name);
segment_min_attr_offset = AttrNumberGetAttrOffset(segment_min_attr_number);
segment_max_attr_offset = AttrNumberGetAttrOffset(segment_max_attr_number);
segment_min_max_builder =
segment_meta_min_max_builder_create(column_attr->atttypid,
column_attr->attcollation);
@ -501,7 +509,8 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
*column = (PerColumn){
.compressor = compressor_for_algorithm_and_type(compression_info->algo_id,
column_attr->atttypid),
.min_max_metadata_attr_offset = segment_min_max_attr_offset,
.min_metadata_attr_offset = segment_min_attr_offset,
.max_metadata_attr_offset = segment_max_attr_offset,
.min_max_metadata_builder = segment_min_max_builder,
};
}
@ -513,7 +522,8 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
compression_info->attname.data);
*column = (PerColumn){
.segment_info = segment_info_new(column_attr),
.min_max_metadata_attr_offset = -1,
.min_metadata_attr_offset = -1,
.max_metadata_attr_offset = -1,
};
}
}
@ -700,21 +710,26 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change
if (column->min_max_metadata_builder != NULL)
{
SegmentMetaMinMax *segment_meta_min_max =
segment_meta_min_max_builder_finish_and_reset(column->min_max_metadata_builder);
Assert(column->min_metadata_attr_offset >= 0);
Assert(column->max_metadata_attr_offset >= 0);
Assert(column->min_max_metadata_attr_offset >= 0);
if (!segment_meta_min_max_builder_empty(column->min_max_metadata_builder))
{
Assert(compressed_data != NULL);
row_compressor->compressed_is_null[column->min_metadata_attr_offset] = false;
row_compressor->compressed_is_null[column->max_metadata_attr_offset] = false;
/* the data and metadata are NULL only when all the data is NULL, so
* either both are NULL or neither is */
Assert((compressed_data == NULL && segment_meta_min_max == NULL) ||
(compressed_data != NULL && segment_meta_min_max != NULL));
row_compressor->compressed_is_null[column->min_max_metadata_attr_offset] =
segment_meta_min_max == NULL;
if (segment_meta_min_max != NULL)
row_compressor->compressed_values[column->min_max_metadata_attr_offset] =
PointerGetDatum(segment_meta_min_max);
row_compressor->compressed_values[column->min_metadata_attr_offset] =
segment_meta_min_max_builder_min(column->min_max_metadata_builder);
row_compressor->compressed_values[column->max_metadata_attr_offset] =
segment_meta_min_max_builder_max(column->min_max_metadata_builder);
}
else
{
Assert(compressed_data == NULL);
row_compressor->compressed_is_null[column->min_metadata_attr_offset] = true;
row_compressor->compressed_is_null[column->max_metadata_attr_offset] = true;
}
}
}
else if (column->segment_info != NULL)
@ -768,13 +783,20 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change
if (column->compressor != NULL || !column->segment_info->typ_by_val)
pfree(DatumGetPointer(row_compressor->compressed_values[compressed_col]));
if (column->min_max_metadata_builder != NULL &&
row_compressor->compressed_is_null[column->min_max_metadata_attr_offset])
if (column->min_max_metadata_builder != NULL)
{
pfree(DatumGetPointer(
row_compressor->compressed_values[column->min_max_metadata_attr_offset]));
row_compressor->compressed_values[column->min_max_metadata_attr_offset] = 0;
row_compressor->compressed_is_null[column->min_max_metadata_attr_offset] = true;
/* segment_meta_min_max_builder_reset will free the values, so clear here */
if (!row_compressor->compressed_is_null[column->min_metadata_attr_offset])
{
row_compressor->compressed_values[column->min_metadata_attr_offset] = 0;
row_compressor->compressed_is_null[column->min_metadata_attr_offset] = true;
}
if (!row_compressor->compressed_is_null[column->max_metadata_attr_offset])
{
row_compressor->compressed_values[column->max_metadata_attr_offset] = 0;
row_compressor->compressed_is_null[column->max_metadata_attr_offset] = true;
}
segment_meta_min_max_builder_reset(column->min_max_metadata_builder);
}
row_compressor->compressed_values[compressed_col] = 0;

@ -20,6 +20,7 @@
#include <utils/builtins.h>
#include <utils/rel.h>
#include <utils/syscache.h>
#include <utils/typcache.h>
#include "catalog.h"
#include "create.h"
@ -91,12 +92,20 @@ get_default_algorithm_id(Oid typeoid)
return COMPRESSION_ALGORITHM_DICTIONARY;
}
default:
{
/* use dictionary if possible, otherwise use array */
TypeCacheEntry *tentry =
lookup_type_cache(typeoid, TYPECACHE_EQ_OPR_FINFO | TYPECACHE_HASH_PROC_FINFO);
if (tentry->hash_proc_finfo.fn_addr == NULL || tentry->eq_opr_finfo.fn_addr == NULL)
return COMPRESSION_ALGORITHM_ARRAY;
return COMPRESSION_ALGORITHM_DICTIONARY;
}
}
}
char *
compression_column_segment_min_max_name(const FormData_hypertable_compression *fd)
static char *
compression_column_segment_metadata_name(const FormData_hypertable_compression *fd,
const char *type)
{
char *buf = palloc(sizeof(char) * NAMEDATALEN);
int ret;
@ -104,26 +113,36 @@ compression_column_segment_min_max_name(const FormData_hypertable_compression *f
Assert(fd->orderby_column_index > 0);
ret = snprintf(buf,
NAMEDATALEN,
COMPRESSION_COLUMN_METADATA_PREFIX "min_max_%d",
COMPRESSION_COLUMN_METADATA_PREFIX "%s_%d",
type,
fd->orderby_column_index);
if (ret < 0 || ret > NAMEDATALEN)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("bad segment metadata min max column name")));
(errcode(ERRCODE_INTERNAL_ERROR), errmsg("bad segment metadata column name")));
}
return buf;
}
/* Name of the compressed table's per-segment "min" metadata column for an
 * order-by column. */
char *
compression_column_segment_min_name(const FormData_hypertable_compression *fd)
{
return compression_column_segment_metadata_name(fd, "min");
}

/* Name of the compressed table's per-segment "max" metadata column for an
 * order-by column. */
char *
compression_column_segment_max_name(const FormData_hypertable_compression *fd)
{
return compression_column_segment_metadata_name(fd, "max");
}
static void
compresscolinfo_add_metadata_columns(CompressColInfo *cc)
compresscolinfo_add_metadata_columns(CompressColInfo *cc, Relation uncompressed_rel)
{
/* additional metadata columns.
* these are not listed in the hypertable_compression catalog table
* and so only have ColDef entries */
int colno;
Oid segment_meta_min_max_oid =
ts_custom_type_cache_get(CUSTOM_TYPE_SEGMENT_META_MIN_MAX)->type_oid;
/* count column */
cc->coldeflist = lappend(cc->coldeflist,
@ -146,11 +165,30 @@ compresscolinfo_add_metadata_columns(CompressColInfo *cc)
{
if (cc->col_meta[colno].orderby_column_index > 0)
{
/* segment_meta_min_max columns */
FormData_hypertable_compression fd = cc->col_meta[colno];
AttrNumber col_attno = get_attnum(uncompressed_rel->rd_id, NameStr(fd.attname));
Form_pg_attribute attr = TupleDescAttr(RelationGetDescr(uncompressed_rel),
AttrNumberGetAttrOffset(col_attno));
TypeCacheEntry *type = lookup_type_cache(attr->atttypid, TYPECACHE_LT_OPR);
if (!OidIsValid(type->lt_opr))
ereport(ERROR,
(errcode(ERRCODE_UNDEFINED_FUNCTION),
errmsg("invalid order by column type: could not identify an less-than "
"operator for type %s",
format_type_be(attr->atttypid))));
/* segment_meta min and max columns */
cc->coldeflist =
lappend(cc->coldeflist,
makeColumnDef(compression_column_segment_min_max_name(&cc->col_meta[colno]),
segment_meta_min_max_oid,
makeColumnDef(compression_column_segment_min_name(&cc->col_meta[colno]),
attr->atttypid,
-1 /* typemod */,
0 /*collation*/));
cc->coldeflist =
lappend(cc->coldeflist,
makeColumnDef(compression_column_segment_max_name(&cc->col_meta[colno]),
attr->atttypid,
-1 /* typemod */,
0 /*collation*/));
}
@ -271,7 +309,7 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
}
cc->numcols = colno;
compresscolinfo_add_metadata_columns(cc);
compresscolinfo_add_metadata_columns(cc, rel);
relation_close(rel, AccessShareLock);
}

@ -21,6 +21,7 @@ bool tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options);
Chunk *create_compress_chunk_table(Hypertable *compress_ht, Chunk *src_chunk);
char *compression_column_segment_min_max_name(const FormData_hypertable_compression *fd);
char *compression_column_segment_min_name(const FormData_hypertable_compression *fd);
char *compression_column_segment_max_name(const FormData_hypertable_compression *fd);
#endif /* TIMESCALEDB_TSL_COMPRESSION_CREATE_H */

@ -27,40 +27,6 @@ typedef struct SegmentMetaMinMaxBuilder
Datum max;
} SegmentMetaMinMaxBuilder;
typedef enum SegmentMetaMinMaxVersion
{
/* Not a real version, if this does get used, it's a bug in the code */
_INVALID_SEGMENT_MIN_MAX_VERSION = 0,
SEGMENT_SEGMENT_MIN_MAX_V1,
/* end of real values */
_MAX_SEGMENT_MIN_MAX_VERSION = 128,
} SegmentMetaMinMaxVersion;
typedef enum SegmentMetaMinMaxFlags
{
/* Has nulls allows us to optimize IS NULL quals */
HAS_NULLS = (1 << 0),
/* All Nulls should result in a NULL value for entire SegmentMetaMinMax */
_MAX_FLAG = (1 << 8),
} SegmentMetaMinMaxFlags;
/* start must be aligned according to the alignment of the stored type */
typedef struct SegmentMetaMinMax
{
char vl_len_[4];
uint8 version; /* SegmentMetaMinMaxVersion */
uint8 flags; /* SegmentMetaMinMaxFlags */
char padding[2];
Oid type;
/* optional alignment padding for type */
/*char data[FLEXIBLE_ARRAY_MEMBER]; bound values for two datums with alignment padding in
* between. First datum is min, second is max. Size determined by the datum type or the VARLENA
* header */
} SegmentMetaMinMax;
SegmentMetaMinMaxBuilder *
segment_meta_min_max_builder_create(Oid type_oid, Oid collation)
{
@ -105,11 +71,19 @@ segment_meta_min_max_builder_update_val(SegmentMetaMinMaxBuilder *builder, Datum
cmp = ApplySortComparator(builder->min, false, val, false, &builder->ssup);
if (cmp > 0)
{
if (!builder->type_by_val)
pfree(DatumGetPointer(builder->min));
builder->min = datumCopy(val, builder->type_by_val, builder->type_len);
}
cmp = ApplySortComparator(builder->max, false, val, false, &builder->ssup);
if (cmp < 0)
{
if (!builder->type_by_val)
pfree(DatumGetPointer(builder->max));
builder->max = datumCopy(val, builder->type_by_val, builder->type_len);
}
}
void
@ -118,7 +92,7 @@ segment_meta_min_max_builder_update_null(SegmentMetaMinMaxBuilder *builder)
builder->has_null = true;
}
static void
void
segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder)
{
if (!builder->empty)
@ -135,149 +109,38 @@ segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder)
builder->has_null = false;
}
SegmentMetaMinMax *
segment_meta_min_max_builder_finish(SegmentMetaMinMaxBuilder *builder)
Datum
segment_meta_min_max_builder_min(SegmentMetaMinMaxBuilder *builder)
{
SegmentMetaMinMax *res;
uint8 flags = 0;
Size total_size = sizeof(*res);
DatumSerializer *serializer;
char *data;
if (builder->empty)
return NULL;
serializer = create_datum_serializer(builder->type_oid);
if (builder->has_null)
flags |= HAS_NULLS;
elog(ERROR, "trying to get min from an empty builder");
if (builder->type_len == -1)
{
/* detoast if necessary. should never store toast pointers */
builder->min = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->min));
builder->max = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->max));
Datum unpacked = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->min));
if (builder->min != unpacked)
pfree(DatumGetPointer(builder->min));
builder->min = unpacked;
}
total_size = datum_get_bytes_size(serializer, total_size, builder->min);
total_size = datum_get_bytes_size(serializer, total_size, builder->max);
res = palloc0(total_size);
*res = (SegmentMetaMinMax){
.version = SEGMENT_SEGMENT_MIN_MAX_V1,
.flags = flags,
.type = builder->type_oid,
};
SET_VARSIZE(res, total_size);
data = (char *) res + sizeof(*res);
total_size -= sizeof(*res);
data = datum_to_bytes_and_advance(serializer, data, &total_size, builder->min);
data = datum_to_bytes_and_advance(serializer, data, &total_size, builder->max);
Assert(total_size == 0);
return res;
return builder->min;
}
SegmentMetaMinMax *
segment_meta_min_max_builder_finish_and_reset(SegmentMetaMinMaxBuilder *builder)
Datum
segment_meta_min_max_builder_max(SegmentMetaMinMaxBuilder *builder)
{
SegmentMetaMinMax *res = segment_meta_min_max_builder_finish(builder);
segment_meta_min_max_builder_reset(builder);
return res;
}
static void
segment_meta_min_max_get_deconstruct(SegmentMetaMinMax *meta, DatumDeserializer *deser, Datum *min,
Datum *max)
{
const char *data = (char *) meta + sizeof(*meta);
/* skip the min */
*min = bytes_to_datum_and_advance(deser, &data);
*max = bytes_to_datum_and_advance(deser, &data);
}
bytea *
segment_meta_min_max_to_binary_string(SegmentMetaMinMax *meta)
{
StringInfoData buf;
DatumDeserializer *deser = create_datum_deserializer(meta->type);
DatumSerializer *ser = create_datum_serializer(meta->type);
Datum min, max;
segment_meta_min_max_get_deconstruct(meta, deser, &min, &max);
pq_begintypsend(&buf);
pq_sendbyte(&buf, meta->version);
pq_sendbyte(&buf, meta->flags);
type_append_to_binary_string(meta->type, &buf);
datum_append_to_binary_string(ser, MESSAGE_SPECIFIES_ENCODING, &buf, min);
datum_append_to_binary_string(ser, MESSAGE_SPECIFIES_ENCODING, &buf, max);
return pq_endtypsend(&buf);
}
SegmentMetaMinMax *
segment_meta_min_max_from_binary_string(StringInfo buf)
{
uint8 version = pq_getmsgbyte(buf);
if (version == SEGMENT_SEGMENT_MIN_MAX_V1)
if (builder->empty)
elog(ERROR, "trying to get max from an empty builder");
if (builder->type_len == -1)
{
uint8 flags = pq_getmsgbyte(buf);
Oid type_oid = binary_string_get_type(buf);
DatumDeserializer *deser = create_datum_deserializer(type_oid);
TypeCacheEntry *type = lookup_type_cache(type_oid, 0);
SegmentMetaMinMaxBuilder builder = (SegmentMetaMinMaxBuilder){
.type_oid = type_oid,
.empty = false,
.has_null = (flags & HAS_NULLS) != 0,
.type_by_val = type->typbyval,
.type_len = type->typlen,
.min = binary_string_to_datum(deser, MESSAGE_SPECIFIES_ENCODING, buf),
.max = binary_string_to_datum(deser, MESSAGE_SPECIFIES_ENCODING, buf),
};
return segment_meta_min_max_builder_finish(&builder);
Datum unpacked = PointerGetDatum(PG_DETOAST_DATUM_PACKED(builder->max));
if (builder->max != unpacked)
pfree(DatumGetPointer(builder->max));
builder->max = unpacked;
}
else
elog(ERROR, "Unknown version number for segment meta min max: %d", version);
}
Datum
tsl_segment_meta_get_min(Datum meta_datum, Oid type)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
DatumDeserializer *deser;
Datum min, max;
if (type != meta->type)
elog(ERROR, "wrong type requested from segment_meta_min_max");
deser = create_datum_deserializer(meta->type);
segment_meta_min_max_get_deconstruct(meta, deser, &min, &max);
return min;
}
Datum
tsl_segment_meta_get_max(Datum meta_datum, Oid type)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
DatumDeserializer *deser = create_datum_deserializer(meta->type);
Datum min, max;
if (type != meta->type)
elog(ERROR, "wrong type requested from segment_meta_min_max");
segment_meta_min_max_get_deconstruct(meta, deser, &min, &max);
return max;
return builder->max;
}
bool
tsl_segment_meta_has_null(Datum meta_datum)
segment_meta_min_max_builder_empty(SegmentMetaMinMaxBuilder *builder)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
return (meta->flags & HAS_NULLS) != 0;
return builder->empty;
}

@ -10,36 +10,15 @@
#include <fmgr.h>
#include <lib/stringinfo.h>
typedef struct SegmentMetaMinMax SegmentMetaMinMax;
typedef struct SegmentMetaMinMaxBuilder SegmentMetaMinMaxBuilder;
#define SEGMENT_META_ACCESSOR_MAX_SQL_FUNCTION "segment_meta_get_max"
#define SEGMENT_META_ACCESSOR_MIN_SQL_FUNCTION "segment_meta_get_min"
SegmentMetaMinMaxBuilder *segment_meta_min_max_builder_create(Oid type, Oid collation);
void segment_meta_min_max_builder_update_val(SegmentMetaMinMaxBuilder *builder, Datum val);
void segment_meta_min_max_builder_update_null(SegmentMetaMinMaxBuilder *builder);
SegmentMetaMinMax *segment_meta_min_max_builder_finish(SegmentMetaMinMaxBuilder *builder);
SegmentMetaMinMax *segment_meta_min_max_builder_finish_and_reset(SegmentMetaMinMaxBuilder *builder);
Datum tsl_segment_meta_get_min(Datum meta, Oid type);
Datum tsl_segment_meta_get_max(Datum meta, Oid type);
bool tsl_segment_meta_has_null(Datum meta);
Datum segment_meta_min_max_builder_min(SegmentMetaMinMaxBuilder *builder);
Datum segment_meta_min_max_builder_max(SegmentMetaMinMaxBuilder *builder);
bool segment_meta_min_max_builder_empty(SegmentMetaMinMaxBuilder *builder);
bytea *segment_meta_min_max_to_binary_string(SegmentMetaMinMax *meta);
SegmentMetaMinMax *segment_meta_min_max_from_binary_string(StringInfo buf);
static inline bytea *
tsl_segment_meta_min_max_send(Datum arg1)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(arg1);
return segment_meta_min_max_to_binary_string(meta);
}
static inline Datum
tsl_segment_meta_min_max_recv(StringInfo buf)
{
return PointerGetDatum(segment_meta_min_max_from_binary_string(buf));
}
void segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder);
#endif

@ -268,13 +268,17 @@ compressed_rel_setup_reltarget(RelOptInfo *compressed_rel, CompressionInfo *info
compressed_reltarget_add_var_for_column(compressed_rel, compressed_relid, column_name);
/* if the column is an orderby, add it's metadata column too */
/* if the column is an orderby, add it's metadata columns too */
if (column_info->orderby_column_index > 0)
{
column_name = compression_column_segment_min_max_name(column_info);
compressed_reltarget_add_var_for_column(compressed_rel,
compressed_relid,
column_name);
compression_column_segment_min_name(
column_info));
compressed_reltarget_add_var_for_column(compressed_rel,
compressed_relid,
compression_column_segment_max_name(
column_info));
}
}
}

@ -109,75 +109,43 @@ get_compression_info_from_var(QualPushdownContext *context, Var *var)
return get_column_compressioninfo(context->compression_info, column_name);
}
static FuncExpr *
make_segment_meta_accessor_funcexpr(int strategy, Index compressed_rel_index,
AttrNumber meta_column_attno, Oid uncompressed_type_oid,
Oid result_collation)
{
List *func_name;
Oid segment_meta_type = ts_custom_type_cache_get(CUSTOM_TYPE_SEGMENT_META_MIN_MAX)->type_oid;
Oid accessor_function_oid;
Oid argtypes[] = { segment_meta_type, ANYELEMENTOID };
Var *meta_var;
Const *null_const;
switch (strategy)
{
case BTGreaterStrategyNumber:
case BTGreaterEqualStrategyNumber:
/* var > expr implies max > expr */
func_name = list_make2(makeString(INTERNAL_SCHEMA_NAME),
makeString(SEGMENT_META_ACCESSOR_MAX_SQL_FUNCTION));
break;
case BTLessStrategyNumber:
case BTLessEqualStrategyNumber:
/* var < expr implies min < expr */
func_name = list_make2(makeString(INTERNAL_SCHEMA_NAME),
makeString(SEGMENT_META_ACCESSOR_MIN_SQL_FUNCTION));
break;
default:
elog(ERROR, "invalid strategy");
break;
}
accessor_function_oid = LookupFuncName(func_name, lengthof(argtypes), argtypes, false);
meta_var =
makeVar(compressed_rel_index, meta_column_attno, segment_meta_type, -1, InvalidOid, 0);
null_const = makeNullConst(uncompressed_type_oid, -1, InvalidOid);
return makeFuncExpr(accessor_function_oid,
uncompressed_type_oid,
list_make2(meta_var, null_const),
result_collation,
InvalidOid,
0);
}
static OpExpr *
make_segment_meta_opexpr(QualPushdownContext *context, Oid opno, AttrNumber meta_column_attno,
Var *uncompressed_var, Expr *compare_to_expr, StrategyNumber strategy)
{
FuncExpr *func = make_segment_meta_accessor_funcexpr(strategy,
context->compressed_rel->relid,
meta_column_attno,
uncompressed_var->vartype,
uncompressed_var->varcollid);
Var *meta_var = makeVar(context->compressed_rel->relid,
meta_column_attno,
uncompressed_var->vartype,
-1,
InvalidOid,
0);
return (OpExpr *) make_opclause(opno,
BOOLOID,
false,
(Expr *) func,
(Expr *) meta_var,
copyObject(compare_to_expr),
InvalidOid,
uncompressed_var->varcollid);
}
static AttrNumber
get_segment_meta_attr_number(FormData_hypertable_compression *compression_info,
Oid compressed_relid)
get_segment_meta_min_attr_number(FormData_hypertable_compression *compression_info,
Oid compressed_relid)
{
char *meta_col_name = compression_column_segment_min_max_name(compression_info);
char *meta_col_name = compression_column_segment_min_name(compression_info);
if (meta_col_name == NULL)
elog(ERROR, "could not find meta column");
return get_attnum(compressed_relid, meta_col_name);
}
static AttrNumber
get_segment_meta_max_attr_number(FormData_hypertable_compression *compression_info,
Oid compressed_relid)
{
char *meta_col_name = compression_column_segment_max_name(compression_info);
if (meta_col_name == NULL)
elog(ERROR, "could not find meta column");
@ -282,8 +250,6 @@ pushdown_op_to_segment_meta_min_max(QualPushdownContext *context, List *expr_arg
case BTEqualStrategyNumber:
{
/* var = expr implies min < expr and max > expr */
AttrNumber meta_attno =
get_segment_meta_attr_number(compression_info, context->compressed_rte->relid);
Oid opno_le = get_opfamily_member(tce->btree_opf,
tce->type_id,
tce->type_id,
@ -296,39 +262,67 @@ pushdown_op_to_segment_meta_min_max(QualPushdownContext *context, List *expr_arg
if (!OidIsValid(opno_le) || !OidIsValid(opno_ge))
return NULL;
return make_andclause(
list_make2(make_segment_meta_opexpr(context,
opno_le,
meta_attno,
var_with_segment_meta,
expr,
BTLessEqualStrategyNumber),
make_segment_meta_opexpr(context,
opno_ge,
meta_attno,
var_with_segment_meta,
expr,
BTGreaterEqualStrategyNumber)));
return make_andclause(list_make2(
make_segment_meta_opexpr(context,
opno_le,
get_segment_meta_min_attr_number(compression_info,
context->compressed_rte
->relid),
var_with_segment_meta,
expr,
BTLessEqualStrategyNumber),
make_segment_meta_opexpr(context,
opno_ge,
get_segment_meta_max_attr_number(compression_info,
context->compressed_rte
->relid),
var_with_segment_meta,
expr,
BTGreaterEqualStrategyNumber)));
}
case BTLessStrategyNumber:
case BTLessEqualStrategyNumber:
/* var < expr implies min < expr */
{
Oid opno =
get_opfamily_member(tce->btree_opf, tce->type_id, tce->type_id, strategy);
if (!OidIsValid(opno))
return NULL;
return (Expr *)
make_segment_meta_opexpr(context,
opno,
get_segment_meta_min_attr_number(compression_info,
context
->compressed_rte
->relid),
var_with_segment_meta,
expr,
strategy);
}
case BTGreaterStrategyNumber:
case BTGreaterEqualStrategyNumber:
{
AttrNumber meta_attno =
get_segment_meta_attr_number(compression_info, context->compressed_rte->relid);
Oid opno = get_opfamily_member(tce->btree_opf, tce->type_id, tce->type_id, strategy);
/* var > expr implies max > expr */
{
Oid opno =
get_opfamily_member(tce->btree_opf, tce->type_id, tce->type_id, strategy);
if (!OidIsValid(opno))
return NULL;
if (!OidIsValid(opno))
return NULL;
return (Expr *) make_segment_meta_opexpr(context,
opno,
meta_attno,
var_with_segment_meta,
expr,
strategy);
}
return (Expr *)
make_segment_meta_opexpr(context,
opno,
get_segment_meta_max_attr_number(compression_info,
context
->compressed_rte
->relid),
var_with_segment_meta,
expr,
strategy);
}
default:
return NULL;
}

@ -115,11 +115,6 @@ CrossModuleFunctions tsl_cm_functions = {
.process_compress_table = tsl_process_compress_table,
.compress_chunk = tsl_compress_chunk,
.decompress_chunk = tsl_decompress_chunk,
.segment_meta_min_max_send = tsl_segment_meta_min_max_send,
.segment_meta_min_max_recv = tsl_segment_meta_min_max_recv,
.segment_meta_get_min = tsl_segment_meta_get_min,
.segment_meta_get_max = tsl_segment_meta_get_max,
.segment_meta_has_null = tsl_segment_meta_has_null,
};
TS_FUNCTION_INFO_V1(ts_module_init);

@ -51,8 +51,10 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_sequence_num int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_max_2 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_1 int,
_ts_meta_max_1 int,
_ts_meta_min_2 int,
_ts_meta_max_2 int,
time _timescaledb_internal.compressed_data,
device INT,
data _timescaledb_internal.compressed_data,
@ -398,7 +400,8 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_sequence_num int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_1 smallint,
_ts_meta_max_1 smallint,
b _timescaledb_internal.compressed_data,
device _timescaledb_internal.compressed_data,
time _timescaledb_internal.compressed_data);

@ -33,10 +33,11 @@ NOTICE: adding not-null constraint to column "a"
(1 row)
create unique index foo_uniq ON foo (a, b);
insert into foo values( 3 , 16 , 20, 11);
insert into foo values( 10 , 10 , 20, 120);
insert into foo values( 20 , 11 , 20, 13);
insert into foo values( 30 , 12 , 20, 14);
--note that the "d" order by column is all NULL
insert into foo values( 3 , 16 , 20, NULL);
insert into foo values( 10 , 10 , 20, NULL);
insert into foo values( 20 , 11 , 20, NULL);
insert into foo values( 30 , 12 , 20, NULL);
alter table foo set (timescaledb.compress, timescaledb.compress_segmentby = 'a,b', timescaledb.compress_orderby = 'c desc, d asc nulls last');
NOTICE: adding index _compressed_hypertable_2_a__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(a, _ts_meta_sequence_num)
NOTICE: adding index _compressed_hypertable_2_b__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_2 USING BTREE(b, _ts_meta_sequence_num)
@ -65,7 +66,7 @@ select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
(1 row)
select tgname , tgtype, tgenabled , relname
from pg_trigger t, pg_class rel
from pg_trigger t, pg_class rel
where t.tgrelid = rel.oid and rel.relname like '_hyper_1_2_chunk' order by tgname;
tgname | tgtype | tgenabled | relname
---------------------------------+--------+-----------+------------------
@ -144,7 +145,7 @@ where ch1.compressed_chunk_id = ch2.id;
--cannot recompress the chunk the second time around
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
ERROR: chunk is already compressed
--TEST2a try DML on a compressed chunk
--TEST2a try DML on a compressed chunk
insert into foo values( 11 , 10 , 20, 120);
ERROR: insert/update/delete not permitted on chunk "_hyper_1_2_chunk"
update foo set b =20 where a = 10;
@ -196,7 +197,6 @@ ERROR: insert/update/delete not permitted on chunk "_hyper_1_2_chunk"
update _timescaledb_internal._hyper_1_2_chunk
set b = 12;
delete from _timescaledb_internal._hyper_1_2_chunk;
--TEST2d decompress the chunk and try DML
select decompress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
decompress_chunk
@ -209,7 +209,7 @@ update foo set b =20 where a = 10;
select * from _timescaledb_internal._hyper_1_2_chunk order by a;
a | b | c | d
----+----+----+-----
10 | 20 | 20 | 120
10 | 20 | 20 |
11 | 10 | 20 | 120
(2 rows)
@ -414,8 +414,8 @@ SELECT count(*) from :COMPRESSED_CHUNK_NAME;
ERROR: relation "_timescaledb_internal.compress_hyper_6_14_chunk" does not exist at character 22
\set ON_ERROR_STOP 1
--size information is gone too
select count(*)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht,
select count(*)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht,
_timescaledb_catalog.compression_chunk_size map
where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
and map.chunk_id = ch1.id;
@ -595,17 +595,17 @@ ERROR: could not determine which collation to use for string comparison
--segment meta on order bys pushdown
--should work
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a';
QUERY PLAN
----------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------
Append
-> Custom Scan (DecompressChunk) on _hyper_9_19_chunk
Filter: (val_1 < 'a'::text)
-> Seq Scan on compress_hyper_10_29_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_1, NULL::text) < 'a'::text)
Filter: (_ts_meta_min_1 < 'a'::text)
-> Custom Scan (DecompressChunk) on _hyper_9_20_chunk
Filter: (val_1 < 'a'::text)
-> Seq Scan on compress_hyper_10_30_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_1, NULL::text) < 'a'::text)
Filter: (_ts_meta_min_1 < 'a'::text)
-> Seq Scan on _hyper_9_21_chunk
Filter: (val_1 < 'a'::text)
-> Seq Scan on _hyper_9_22_chunk
@ -625,17 +625,17 @@ EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a';
(25 rows)
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a';
QUERY PLAN
----------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------
Append
-> Custom Scan (DecompressChunk) on _hyper_9_19_chunk
Filter: (val_2 < 'a'::text)
-> Seq Scan on compress_hyper_10_29_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_2, NULL::text) < 'a'::text)
Filter: (_ts_meta_min_2 < 'a'::text)
-> Custom Scan (DecompressChunk) on _hyper_9_20_chunk
Filter: (val_2 < 'a'::text)
-> Seq Scan on compress_hyper_10_30_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_2, NULL::text) < 'a'::text)
Filter: (_ts_meta_min_2 < 'a'::text)
-> Seq Scan on _hyper_9_21_chunk
Filter: (val_2 < 'a'::text)
-> Seq Scan on _hyper_9_22_chunk
@ -655,17 +655,17 @@ EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a';
(25 rows)
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a' COLLATE "C";
QUERY PLAN
----------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
----------------------------------------------------------------
Append
-> Custom Scan (DecompressChunk) on _hyper_9_19_chunk
Filter: (val_1 < 'a'::text COLLATE "C")
-> Seq Scan on compress_hyper_10_29_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_1, NULL::text) < 'a'::text COLLATE "C")
Filter: (_ts_meta_min_1 < 'a'::text COLLATE "C")
-> Custom Scan (DecompressChunk) on _hyper_9_20_chunk
Filter: (val_1 < 'a'::text COLLATE "C")
-> Seq Scan on compress_hyper_10_30_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_1, NULL::text) < 'a'::text COLLATE "C")
Filter: (_ts_meta_min_1 < 'a'::text COLLATE "C")
-> Seq Scan on _hyper_9_21_chunk
Filter: (val_1 < 'a'::text COLLATE "C")
-> Seq Scan on _hyper_9_22_chunk
@ -685,17 +685,17 @@ EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_1 < 'a' COLLATE "C";
(25 rows)
EXPLAIN (costs off) SELECT * FROM test_collation WHERE val_2 < 'a' COLLATE "POSIX";
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------
QUERY PLAN
--------------------------------------------------------------------
Append
-> Custom Scan (DecompressChunk) on _hyper_9_19_chunk
Filter: (val_2 < 'a'::text COLLATE "POSIX")
-> Seq Scan on compress_hyper_10_29_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_2, NULL::text) < 'a'::text COLLATE "POSIX")
Filter: (_ts_meta_min_2 < 'a'::text COLLATE "POSIX")
-> Custom Scan (DecompressChunk) on _hyper_9_20_chunk
Filter: (val_2 < 'a'::text COLLATE "POSIX")
-> Seq Scan on compress_hyper_10_30_chunk
Filter: (_timescaledb_internal.segment_meta_get_min(_ts_meta_min_max_2, NULL::text) < 'a'::text COLLATE "POSIX")
Filter: (_ts_meta_min_2 < 'a'::text COLLATE "POSIX")
-> Seq Scan on _hyper_9_21_chunk
Filter: (val_2 < 'a'::text COLLATE "POSIX")
-> Seq Scan on _hyper_9_22_chunk

@ -67,7 +67,7 @@ NOTICE: adding not-null constraint to column "a"
ALTER TABLE reserved_column_prefix set (timescaledb.compress);
ERROR: cannot compress tables with reserved column prefix '_ts_meta_'
--basic test with count
create table foo (a integer, b integer, c integer, t text);
create table foo (a integer, b integer, c integer, t text, p point);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
NOTICE: adding not-null constraint to column "a"
table_name
@ -133,6 +133,8 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c L
ERROR: unable to parse the compress_segmentby option 'c LIMIT 1'
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b');
ERROR: unable to parse the compress_segmentby option 'c + b'
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p');
ERROR: invalid order by column type: could not identify an less-than operator for type point
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b');
--note that the time column "a" should not be added to the end of the order by list again (should appear first)
@ -142,8 +144,9 @@ select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _time
8 | a | 4 | | 1 | t | f
8 | b | 4 | | 2 | t | f
8 | c | 4 | | | |
8 | p | 1 | | | |
8 | t | 2 | | | |
(4 rows)
(5 rows)
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' ORDER BY ch1.id limit 1;
@ -187,8 +190,9 @@ select hc.* from _timescaledb_catalog.hypertable_compression hc inner join _time
8 | a | 4 | | 1 | t | f
8 | b | 0 | 1 | | |
8 | c | 4 | | | |
8 | p | 1 | | | |
8 | t | 2 | | | |
(4 rows)
(5 rows)
SELECT comp_hyper.schema_name|| '.' || comp_hyper.table_name as "COMPRESSED_HYPER_NAME"
FROM _timescaledb_catalog.hypertable comp_hyper

@ -79,7 +79,8 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@ -90,9 +91,9 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
27
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
min_correct | max_correct
-------------+-------------
t | t
(1 row)
TRUNCATE test1;
@ -104,8 +105,8 @@ SELECT * FROM test1;
/* nor compressed table */
SELECT * FROM _timescaledb_internal._compressed_hypertable_2;
Time | i | b | t | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_max_1
------+---+---+---+----------------+-----------------------+--------------------
Time | i | b | t | _ts_meta_count | _ts_meta_sequence_num | _ts_meta_min_1 | _ts_meta_max_1
------+---+---+---+----------------+-----------------------+----------------+----------------
(0 rows)
/* the compressed table should have not chunks */
@ -175,7 +176,8 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
\set TYPE int
\set ORDER_BY_COL_NAME c
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@ -186,14 +188,15 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
5
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
min_correct | max_correct
-------------+-------------
t | t
(1 row)
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_2
\set SEGMENT_META_COL_MIN _ts_meta_min_2
\set SEGMENT_META_COL_MAX _ts_meta_max_2
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@ -204,9 +207,9 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
0
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
min_correct | max_correct
-------------+-------------
t | t
(1 row)
--TEST4 create segments with > 1000 rows.
@ -284,7 +287,8 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
\set TYPE TIMESTAMPTZ
\set ORDER_BY_COL_NAME timec
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@ -295,9 +299,9 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
1
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
min_correct | max_correct
-------------+-------------
t | t
(1 row)
--add hypertable with order by a non by-val type with NULLs
@ -357,7 +361,8 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
\set TYPE TEXT
\set ORDER_BY_COL_NAME device_id
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@ -368,9 +373,9 @@ pg_dump: Consider using a full dump instead of a --data-only dump to avoid this
10
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
min_correct | max_correct
-------------+-------------
t | t
(1 row)
TRUNCATE test5;

@ -6,14 +6,25 @@ CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_append
RETURNS internal
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_append'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish(internal)
RETURNS _timescaledb_internal.segment_meta_min_max
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish'
LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT;
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg(ANYELEMENT) (
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish_max(internal, ANYELEMENT)
RETURNS anyelement
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish_max'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish_min(internal, ANYELEMENT)
RETURNS anyelement
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish_min'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg_min(ANYELEMENT) (
STYPE = internal,
SFUNC = _timescaledb_internal.tsl_segment_meta_min_max_append,
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish_min,
FINALFUNC_EXTRA
);
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg_max(ANYELEMENT) (
STYPE = internal,
SFUNC = _timescaledb_internal.tsl_segment_meta_min_max_append,
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish_max,
FINALFUNC_EXTRA
);
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
@ -35,42 +46,9 @@ $$
$$;
-- seed the random num generator
insert into rand_minstd_state values (321);
--use a custom type without send and recv functions to test
--the input/output fallback path.
CREATE TYPE customtype_no_send_recv;
CREATE OR REPLACE FUNCTION customtype_in(cstring) RETURNS customtype_no_send_recv AS
'timestamptz_in'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: return type customtype_no_send_recv is only a shell
CREATE OR REPLACE FUNCTION customtype_out( customtype_no_send_recv) RETURNS cstring AS
'timestamptz_out'
LANGUAGE internal IMMUTABLE STRICT;
NOTICE: argument type customtype_no_send_recv is only a shell
CREATE TYPE customtype_no_send_recv (
INPUT = customtype_in,
OUTPUT = customtype_out,
LIKE = TIMESTAMPTZ
);
CREATE CAST (customtype_no_send_recv AS bigint)
WITHOUT FUNCTION AS IMPLICIT;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE metric (i int);
insert into metric select i from generate_series(1, 10) i;
SELECT
_timescaledb_internal.segment_meta_get_min(meta, NULL::int),
_timescaledb_internal.segment_meta_get_max(meta, NULL::int),
_timescaledb_internal.segment_meta_has_null(meta)
FROM
(
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i) as meta
FROM metric
) AS meta_gen;
segment_meta_get_min | segment_meta_get_max | segment_meta_has_null
----------------------+----------------------+-----------------------
1 | 10 | f
(1 row)
\set TYPE int
\set TABLE metric
\ir include/compression_test_segment_meta.sql
@ -78,9 +56,9 @@ FROM
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
----NULL tests
@ -92,9 +70,9 @@ insert into metric select NULLIF(i,1) from generate_series(1, 10) i;
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--Last
@ -105,9 +83,9 @@ insert into metric select NULLIF(i,10) from generate_series(1, 10) i;
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--Middle
@ -118,33 +96,23 @@ insert into metric select NULLIF(i,5) from generate_series(1, 10) i;
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--All NULLS should return null object
truncate metric;
insert into metric select NULL from generate_series(1, 10) i;
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i) is NULL,
_timescaledb_internal.segment_meta_min_max_agg(i)::text is NULL
_timescaledb_internal.segment_meta_min_max_agg_min(i) is null,
_timescaledb_internal.segment_meta_min_max_agg_max(i) is null
FROM metric;
?column? | ?column?
----------+----------
t | t
(1 row)
--accessor functions work on NULLs
SELECT
_timescaledb_internal.segment_meta_get_min(NULL, NULL::int) IS NULL,
_timescaledb_internal.segment_meta_get_max(NULL, NULL::int) IS NULL,
_timescaledb_internal.segment_meta_has_null(NULL);
?column? | ?column? | segment_meta_has_null
----------+----------+-----------------------
t | t | t
(1 row)
--
--type tests
--
@ -163,9 +131,9 @@ CREATE TABLE base_texts AS SELECT
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--toasted text
@ -191,9 +159,9 @@ SELECT pg_total_relation_size(reltoastrelid)
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--name is a fixed-length pass by reference type
@ -211,9 +179,9 @@ CREATE TABLE base_name AS SELECT
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--array
@ -231,9 +199,9 @@ CREATE TABLE text_array AS SELECT
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
?column? | ?column?
----------+----------
t | t
(1 row)
--Points doesn't have an ordering so make sure it errors
@ -246,27 +214,11 @@ CREATE TABLE points AS SELECT
) sub;
\set ON_ERROR_STOP 0
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i)
_timescaledb_internal.segment_meta_min_max_agg_max(i)
FROM points;
ERROR: could not identify an less-than operator for type point
SELECT
_timescaledb_internal.segment_meta_min_max_agg_min(i)
FROM points;
ERROR: could not identify an less-than operator for type point
\set ON_ERROR_STOP 1
--test with a custom type with no send/recv
CREATE TABLE customtype_table AS SELECT
item::text::customtype_no_send_recv as i
FROM
(SELECT sub.item from
(SELECT generate_series('2001-01-01 01:01:01', '2001-01-02 01:01:01', INTERVAL '1 hour') item) as sub
ORDER BY gen_rand_minstd()
) sub;
\set TYPE customtype_no_send_recv
\set TABLE customtype_table
\ir include/compression_test_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

@ -50,8 +50,10 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_sequence_num int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_max_2 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_1 int,
_ts_meta_max_1 int,
_ts_meta_min_2 int,
_ts_meta_max_2 int,
time _timescaledb_internal.compressed_data,
device INT,
data _timescaledb_internal.compressed_data,
@ -177,7 +179,8 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_sequence_num int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_1 smallint,
_ts_meta_max_1 smallint,
b _timescaledb_internal.compressed_data,
device _timescaledb_internal.compressed_data,
time _timescaledb_internal.compressed_data);

@ -12,10 +12,11 @@ create table foo (a integer, b integer, c integer, d integer);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
create unique index foo_uniq ON foo (a, b);
insert into foo values( 3 , 16 , 20, 11);
insert into foo values( 10 , 10 , 20, 120);
insert into foo values( 20 , 11 , 20, 13);
insert into foo values( 30 , 12 , 20, 14);
--note that the "d" order by column is all NULL
insert into foo values( 3 , 16 , 20, NULL);
insert into foo values( 10 , 10 , 20, NULL);
insert into foo values( 20 , 11 , 20, NULL);
insert into foo values( 30 , 12 , 20, NULL);
alter table foo set (timescaledb.compress, timescaledb.compress_segmentby = 'a,b', timescaledb.compress_orderby = 'c desc, d asc nulls last');
select id, schema_name, table_name, compressed, compressed_hypertable_id from
@ -25,7 +26,7 @@ select * from _timescaledb_catalog.hypertable_compression order by hypertable_id
-- TEST2 compress-chunk for the chunks created earlier --
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
select tgname , tgtype, tgenabled , relname
from pg_trigger t, pg_class rel
from pg_trigger t, pg_class rel
where t.tgrelid = rel.oid and rel.relname like '_hyper_1_2_chunk' order by tgname;
\x
select * from timescaledb_information.compressed_chunk_stats
@ -45,7 +46,7 @@ where ch1.compressed_chunk_id = ch2.id;
--cannot recompress the chunk the second time around
select compress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
--TEST2a try DML on a compressed chunk
--TEST2a try DML on a compressed chunk
insert into foo values( 11 , 10 , 20, 120);
update foo set b =20 where a = 10;
delete from foo where a = 10;
@ -80,7 +81,7 @@ insert into _timescaledb_internal._hyper_1_2_chunk values(10, 12, 12, 12);
update _timescaledb_internal._hyper_1_2_chunk
set b = 12;
delete from _timescaledb_internal._hyper_1_2_chunk;
--TEST2d decompress the chunk and try DML
select decompress_chunk( '_timescaledb_internal._hyper_1_2_chunk');
insert into foo values( 11 , 10 , 20, 120);
@ -166,8 +167,8 @@ SELECT count(*) from :COMPRESSED_CHUNK_NAME;
\set ON_ERROR_STOP 1
--size information is gone too
select count(*)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht,
select count(*)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht,
_timescaledb_catalog.compression_chunk_size map
where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
and map.chunk_id = ch1.id;

@ -36,7 +36,7 @@ select table_name from create_hypertable('reserved_column_prefix', 'a', chunk_ti
ALTER TABLE reserved_column_prefix set (timescaledb.compress);
--basic test with count
create table foo (a integer, b integer, c integer, t text);
create table foo (a integer, b integer, c integer, t text, p point);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
insert into foo values( 3 , 16 , 20);
@ -73,6 +73,7 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c +
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'random()');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c LIMIT 1');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c + b');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, p');
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a, b');

@ -24,7 +24,8 @@ SELECT 'test1' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
TRUNCATE test1;
@ -63,12 +64,14 @@ SELECT 'test2' AS "HYPERTABLE_NAME" \gset
\set TYPE int
\set ORDER_BY_COL_NAME c
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_2
\set SEGMENT_META_COL_MIN _ts_meta_min_2
\set SEGMENT_META_COL_MAX _ts_meta_max_2
\ir include/compression_test_hypertable_segment_meta.sql
--TEST4 create segments with > 1000 rows.
@ -101,7 +104,8 @@ SELECT 'test4' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE TIMESTAMPTZ
\set ORDER_BY_COL_NAME timec
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
@ -131,7 +135,8 @@ SELECT 'test5' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE TEXT
\set ORDER_BY_COL_NAME device_id
\set SEGMENT_META_COL _ts_meta_min_max_1
\set SEGMENT_META_COL_MIN _ts_meta_min_1
\set SEGMENT_META_COL_MAX _ts_meta_max_1
\ir include/compression_test_hypertable_segment_meta.sql
TRUNCATE test5;

@ -9,56 +9,37 @@ CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_append
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_append'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
-- NOTE(review): this hunk interleaves removed and added lines (the unified-diff
-- +/- markers were stripped in this rendering).  The finish() variant returning
-- the custom segment_meta_min_max type is the PRE-change definition; the
-- finish_min()/finish_max() pair returning ANYELEMENT is its replacement.
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish(internal)
RETURNS _timescaledb_internal.segment_meta_min_max
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish'
LANGUAGE C IMMUTABLE PARALLEL SAFE STRICT;
-- Final function returning the accumulated MAX as the input element type.
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish_max(internal, ANYELEMENT)
RETURNS anyelement
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish_max'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
-- NOTE(review): the next line is the stale header of the removed single
-- segment_meta_min_max_agg aggregate, interleaved with the new finish_min().
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg(ANYELEMENT) (
-- Final function returning the accumulated MIN as the input element type.
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_finish_min(internal, ANYELEMENT)
RETURNS anyelement
AS :TSL_MODULE_PATHNAME, 'tsl_segment_meta_min_max_finish_min'
LANGUAGE C IMMUTABLE PARALLEL SAFE;
-- Aggregate producing the per-segment MIN of its input column.
-- FINALFUNC_EXTRA passes the dummy ANYELEMENT argument so polymorphic type
-- resolution works for the final function.
-- NOTE(review): the two FINALFUNC lines below are old (no comma) and new
-- (with comma) interleaved; only the finish_min line is current.
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg_min(ANYELEMENT) (
STYPE = internal,
SFUNC = _timescaledb_internal.tsl_segment_meta_min_max_append,
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish_min,
FINALFUNC_EXTRA
);
-- Aggregate producing the per-segment MAX of its input column.
CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg_max(ANYELEMENT) (
STYPE = internal,
SFUNC = _timescaledb_internal.tsl_segment_meta_min_max_append,
FINALFUNC = _timescaledb_internal.tsl_segment_meta_min_max_finish_max,
FINALFUNC_EXTRA
);
\ir include/rand_generator.sql
--use a custom type without send and recv functions to test
--the input/output fallback path.
--
--Shell type is declared first; the I/O functions below borrow the
--timestamptz built-ins, so the type stores timestamptz-shaped values
--while deliberately lacking binary send/recv support.
CREATE TYPE customtype_no_send_recv;
CREATE OR REPLACE FUNCTION customtype_in(cstring) RETURNS customtype_no_send_recv AS
'timestamptz_in'
LANGUAGE internal IMMUTABLE STRICT;
CREATE OR REPLACE FUNCTION customtype_out( customtype_no_send_recv) RETURNS cstring AS
'timestamptz_out'
LANGUAGE internal IMMUTABLE STRICT;
--Complete the shell type; LIKE = TIMESTAMPTZ copies its storage properties.
CREATE TYPE customtype_no_send_recv (
INPUT = customtype_in,
OUTPUT = customtype_out,
LIKE = TIMESTAMPTZ
);
--Binary-coercible implicit cast to bigint; presumably lets the tests
--compare customtype values numerically -- confirm against the callers.
CREATE CAST (customtype_no_send_recv AS bigint)
WITHOUT FUNCTION AS IMPLICIT;
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE metric (i int);
insert into metric select i from generate_series(1, 10) i;
SELECT
_timescaledb_internal.segment_meta_get_min(meta, NULL::int),
_timescaledb_internal.segment_meta_get_max(meta, NULL::int),
_timescaledb_internal.segment_meta_has_null(meta)
FROM
(
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i) as meta
FROM metric
) AS meta_gen;
\set TYPE int
\set TABLE metric
\ir include/compression_test_segment_meta.sql
@ -82,20 +63,11 @@ insert into metric select NULLIF(i,5) from generate_series(1, 10) i;
--All NULLS should return null object
truncate metric;
insert into metric select NULL from generate_series(1, 10) i;
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i) is NULL,
_timescaledb_internal.segment_meta_min_max_agg(i)::text is NULL
_timescaledb_internal.segment_meta_min_max_agg_min(i) is null,
_timescaledb_internal.segment_meta_min_max_agg_max(i) is null
FROM metric;
--accessor functions work on NULLs
SELECT
_timescaledb_internal.segment_meta_get_min(NULL, NULL::int) IS NULL,
_timescaledb_internal.segment_meta_get_max(NULL, NULL::int) IS NULL,
_timescaledb_internal.segment_meta_has_null(NULL);
--
--type tests
--
@ -168,20 +140,9 @@ CREATE TABLE points AS SELECT
\set ON_ERROR_STOP 0
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i)
_timescaledb_internal.segment_meta_min_max_agg_max(i)
FROM points;
SELECT
_timescaledb_internal.segment_meta_min_max_agg_min(i)
FROM points;
\set ON_ERROR_STOP 1
--test with a custom type with no send/recv
CREATE TABLE customtype_table AS SELECT
item::text::customtype_no_send_recv as i
FROM
(SELECT sub.item from
(SELECT generate_series('2001-01-01 01:01:01', '2001-01-02 01:01:01', INTERVAL '1 hour') item) as sub
ORDER BY gen_rand_minstd()
) sub;
\set TYPE customtype_no_send_recv
\set TABLE customtype_table
\ir include/compression_test_segment_meta.sql

@ -20,9 +20,8 @@ INNER JOIN _timescaledb_catalog.hypertable comp_hypertable ON (comp_hypertable.i
WHERE uc_hypertable.table_name like :'HYPERTABLE_NAME' \gset
SELECT
bool_and(_timescaledb_internal.segment_meta_get_min(:SEGMENT_META_COL, :NULLTYPE) = true_min) as min_correct,
bool_and(_timescaledb_internal.segment_meta_get_max(:SEGMENT_META_COL, :NULLTYPE) = true_max) as max_correct,
bool_and(_timescaledb_internal.segment_meta_has_null(:SEGMENT_META_COL) = true_has_null) as has_null_correct
bool_and(:SEGMENT_META_COL_MIN = true_min) as min_correct,
bool_and(:SEGMENT_META_COL_MAX = true_max) as max_correct
FROM
:"COMP_SCHEMA_NAME".:"COMP_TABLE_NAME", LATERAL (
SELECT min(decomp) true_min, max(decomp) true_max, ((count(*)-count(decomp)) > 0) true_has_null

@ -4,25 +4,9 @@
\set ECHO errors
SELECT 'NULL::'||:'TYPE' as "NULLTYPE" \gset
SELECT
_timescaledb_internal.segment_meta_min_max_agg(i)::text as "META_TEXT",
min(i) as "TRUE_MIN",
max(i) as "TRUE_MAX",
(count(*)-count(i)) > 0 as "TRUE_HAS_NULL"
FROM :"TABLE" \gset
SELECT
_timescaledb_internal.segment_meta_get_min(meta, :NULLTYPE) = :'TRUE_MIN' as min_correct,
_timescaledb_internal.segment_meta_get_max(meta, :NULLTYPE) = :'TRUE_MAX' as max_correct,
_timescaledb_internal.segment_meta_has_null(meta) = :'TRUE_HAS_NULL' as has_null_correct
FROM
(
SELECT
:'META_TEXT'::_timescaledb_internal.segment_meta_min_max as meta
) AS meta_gen;
_timescaledb_internal.segment_meta_min_max_agg_max(i) = max(i),
_timescaledb_internal.segment_meta_min_max_agg_min(i) = min(i)
FROM :"TABLE";
\set ECHO all

@ -577,20 +577,28 @@ tsl_segment_meta_min_max_append(PG_FUNCTION_ARGS)
PG_RETURN_POINTER(builder);
}
/*
 * NOTE(review): this hunk interleaves removed and added lines (the
 * unified-diff +/- markers were stripped in this rendering).  The
 * finish() name, the SegmentMetaMinMax/res handling and PG_RETURN_POINTER
 * are the PRE-change code; the finish_max() name, the builder_empty()
 * check and PG_RETURN_DATUM(...builder_max(...)) are the replacement.
 */
TS_FUNCTION_INFO_V1(tsl_segment_meta_min_max_finish);
TS_FUNCTION_INFO_V1(tsl_segment_meta_min_max_finish_max);
/* Aggregate final function: return the accumulated maximum Datum,
 * or SQL NULL when no non-NULL input was seen. */
Datum
tsl_segment_meta_min_max_finish(PG_FUNCTION_ARGS)
tsl_segment_meta_min_max_finish_max(PG_FUNCTION_ARGS)
{
SegmentMetaMinMaxBuilder *builder =
(SegmentMetaMinMaxBuilder *) (PG_ARGISNULL(0) ? NULL : PG_GETARG_POINTER(0));
SegmentMetaMinMax *res;
if (builder == NULL)
if (builder == NULL || segment_meta_min_max_builder_empty(builder))
PG_RETURN_NULL();
res = segment_meta_min_max_builder_finish(builder);
if (res == NULL)
PG_RETURN_NULL();
PG_RETURN_POINTER(res);
PG_RETURN_DATUM(segment_meta_min_max_builder_max(builder));
}
TS_FUNCTION_INFO_V1(tsl_segment_meta_min_max_finish_min);

/*
 * Final function for the segment_meta_min_max_agg_min aggregate.
 *
 * Returns the minimum value accumulated in the transition state, or SQL
 * NULL when no transition state exists or the builder saw no non-NULL
 * input (empty / all-NULL segment).
 */
Datum
tsl_segment_meta_min_max_finish_min(PG_FUNCTION_ARGS)
{
	SegmentMetaMinMaxBuilder *state = NULL;

	if (!PG_ARGISNULL(0))
		state = (SegmentMetaMinMaxBuilder *) PG_GETARG_POINTER(0);

	/* No state or empty builder: the aggregate result is NULL. */
	if (state == NULL || segment_meta_min_max_builder_empty(state))
		PG_RETURN_NULL();

	PG_RETURN_DATUM(segment_meta_min_max_builder_min(state));
}