Integrate segment meta into compression

This commit integrates the SegmentMetaMinMax into the
compression logic. It adds metadata columns to the compressed table
and correctly sets them upon compression.

We also fix several errors with datum detoasting in SegmentMetaMinMax.
Matvey Arye 2019-08-26 12:54:21 -04:00 committed by Matvey Arye
parent be199bec70
commit b4a7108492
15 changed files with 389 additions and 29 deletions
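
As orientation for the diffs below, here is a minimal sketch of how the SegmentMetaMinMaxBuilder API is meant to be driven. The signatures come from segment_meta.h as modified in this commit; the standalone helper itself is an illustrative assumption, not code from the tree:

#include <postgres.h>
#include "segment_meta.h"

/* Sketch: fold one segment of an order-by column into min/max metadata.
 * Mirrors the per-row calls in row_compressor_append_row and the
 * finish_and_reset call in row_compressor_flush below. */
static SegmentMetaMinMax *
build_segment_meta(Oid typid, Oid collation, Datum *vals, bool *nulls, int n)
{
	SegmentMetaMinMaxBuilder *builder =
		segment_meta_min_max_builder_create(typid, collation);
	for (int i = 0; i < n; i++)
	{
		if (nulls[i])
			segment_meta_min_max_builder_update_null(builder);
		else
			segment_meta_min_max_builder_update_val(builder, vals[i]);
	}
	/* NULL when every input was NULL; the builder is reset for the next segment */
	return segment_meta_min_max_builder_finish_and_reset(builder);
}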


@@ -86,6 +86,12 @@ typedef struct PerColumn
{
/* the compressor to use for regular columns, NULL for segmenters */
Compressor *compressor;
/*
* Information on the metadata we'll store for this column (currently only min/max).
* Only used for order-by columns right now, will be {-1, NULL} for others.
*/
int16 min_max_metadata_attr_offset;
SegmentMetaMinMaxBuilder *min_max_metadata_builder;
/* segment info; only used if compressor is NULL */
SegmentInfo *segment_info;
@@ -451,13 +457,33 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
Assert(AttrNumberGetAttrOffset(compressed_colnum) < num_columns_in_compressed_table);
if (!COMPRESSIONCOL_IS_SEGMENT_BY(compression_info))
{
int16 segment_min_max_attr_offset = -1;
SegmentMetaMinMaxBuilder *segment_min_max_builder = NULL;
if (compressed_column_attr->atttypid != compressed_data_type_oid)
elog(ERROR,
"expected column '%s' to be a compressed data type",
compression_info->attname.data);
if (compression_info->orderby_column_index > 0)
{
char *segment_col_name = compression_column_segment_min_max_name(compression_info);
AttrNumber segment_min_max_attr_number =
attno_find_by_attname(out_desc,
DatumGetName(DirectFunctionCall1(namein,
CStringGetDatum(
segment_col_name))));
if (segment_min_max_attr_number == InvalidAttrNumber)
elog(ERROR, "couldn't find metadata column %s", segment_col_name);
segment_min_max_attr_offset = AttrNumberGetAttrOffset(segment_min_max_attr_number);
segment_min_max_builder =
segment_meta_min_max_builder_create(column_attr->atttypid,
column_attr->attcollation);
}
*column = (PerColumn){
.compressor = compressor_for_algorithm_and_type(compression_info->algo_id,
column_attr->atttypid),
.min_max_metadata_attr_offset = segment_min_max_attr_offset,
.min_max_metadata_builder = segment_min_max_builder,
};
}
else
@@ -468,6 +494,7 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_
compression_info->attname.data);
*column = (PerColumn){
.segment_info = segment_info_new(column_attr),
.min_max_metadata_attr_offset = -1,
};
}
}
@@ -595,9 +622,20 @@ row_compressor_append_row(RowCompressor *row_compressor, TupleTableSlot *row)
// overhead here, and we should just access the array directly
val = slot_getattr(row, AttrOffsetGetAttrNumber(col), &is_null);
if (is_null)
{
compressor->append_null(compressor);
if (row_compressor->per_column[col].min_max_metadata_builder != NULL)
segment_meta_min_max_builder_update_null(
row_compressor->per_column[col].min_max_metadata_builder);
}
else
{
compressor->append_val(compressor, val);
if (row_compressor->per_column[col].min_max_metadata_builder != NULL)
segment_meta_min_max_builder_update_val(row_compressor->per_column[col]
.min_max_metadata_builder,
val);
}
}
row_compressor->rows_compressed_into_current_value += 1;
@@ -634,6 +672,25 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change
if (compressed_data != NULL)
row_compressor->compressed_values[compressed_col] =
PointerGetDatum(compressed_data);
if (column->min_max_metadata_builder != NULL)
{
SegmentMetaMinMax *segment_meta_min_max =
segment_meta_min_max_builder_finish_and_reset(column->min_max_metadata_builder);
Assert(column->min_max_metadata_attr_offset >= 0);
/* the data and the metadata are NULL only when all the input data is NULL,
* so either both are NULL or neither is */
Assert((compressed_data == NULL && segment_meta_min_max == NULL) ||
(compressed_data != NULL && segment_meta_min_max != NULL));
row_compressor->compressed_is_null[column->min_max_metadata_attr_offset] =
segment_meta_min_max == NULL;
if (segment_meta_min_max != NULL)
row_compressor->compressed_values[column->min_max_metadata_attr_offset] =
PointerGetDatum(segment_meta_min_max);
}
}
else if (column->segment_info != NULL)
{
@@ -676,6 +733,15 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool change
if (column->compressor != NULL || !column->segment_info->typ_by_val)
pfree(DatumGetPointer(row_compressor->compressed_values[compressed_col]));
if (column->min_max_metadata_builder != NULL &&
!row_compressor->compressed_is_null[column->min_max_metadata_attr_offset])
{
pfree(DatumGetPointer(
row_compressor->compressed_values[column->min_max_metadata_attr_offset]));
row_compressor->compressed_values[column->min_max_metadata_attr_offset] = 0;
row_compressor->compressed_is_null[column->min_max_metadata_attr_offset] = true;
}
row_compressor->compressed_values[compressed_col] = 0;
row_compressor->compressed_is_null[compressed_col] = true;
}
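
The assertions in row_compressor_flush encode an invariant worth spelling out: a segment whose inputs are all NULL yields neither compressed data nor a metadata datum, so the metadata column is set to NULL alongside the data column. A hypothetical fragment illustrating this, assuming (as the assertion implies) that the builder returns NULL when it never saw a non-NULL value:

SegmentMetaMinMaxBuilder *builder =
	segment_meta_min_max_builder_create(INT4OID, InvalidOid /* collation */);
segment_meta_min_max_builder_update_null(builder);
segment_meta_min_max_builder_update_null(builder);
/* all inputs were NULL: there is no min/max, so nothing is stored and the
 * corresponding _ts_meta_min_max_* column stays NULL */
Assert(segment_meta_min_max_builder_finish_and_reset(builder) == NULL);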


@@ -92,6 +92,60 @@ get_default_algorithm_id(Oid typeoid)
}
}
char *
compression_column_segment_min_max_name(const FormData_hypertable_compression *fd)
{
char *buf = palloc(sizeof(char) * NAMEDATALEN);
int ret;
Assert(fd->orderby_column_index > 0);
ret = snprintf(buf,
NAMEDATALEN,
COMPRESSION_COLUMN_METADATA_PREFIX "min_max_%d",
fd->orderby_column_index);
if (ret < 0 || ret >= NAMEDATALEN)
{
ereport(ERROR,
(errcode(ERRCODE_INTERNAL_ERROR),
errmsg("bad segment metadata min max column name")));
}
return buf;
}
static void
compresscolinfo_add_metadata_columns(CompressColInfo *cc)
{
/* additional metadata columns.
* these are not listed in the hypertable_compression catalog table
* and so only have ColumnDef entries */
int colno;
Oid segment_meta_min_max_oid =
ts_custom_type_cache_get(CUSTOM_TYPE_SEGMENT_META_MIN_MAX)->type_oid;
/* count column */
cc->coldeflist = lappend(cc->coldeflist,
/* count of the number of uncompressed rows */
makeColumnDef(COMPRESSION_COLUMN_METADATA_COUNT_NAME,
INT4OID,
-1 /* typemod */,
0 /*collation*/));
for (colno = 0; colno < cc->numcols; colno++)
{
if (cc->col_meta[colno].orderby_column_index > 0)
{
/* segment_meta_min_max columns */
cc->coldeflist =
lappend(cc->coldeflist,
makeColumnDef(compression_column_segment_min_max_name(&cc->col_meta[colno]),
segment_meta_min_max_oid,
-1 /* typemod */,
0 /*collation*/));
}
}
}
/*
* return the columndef list for compressed hypertable.
* we do this by getting the source hypertable's attrs,
@@ -205,16 +259,9 @@ compresscolinfo_init(CompressColInfo *cc, Oid srctbl_relid, List *segmentby_cols
colno++;
}
cc->numcols = colno;
/* additional metadata columns.
* these are not listed in hypertable_compression catalog table
* and so only has a ColDef entry */
cc->coldeflist = lappend(cc->coldeflist,
(
/* count of the number of uncompressed rows */
makeColumnDef(COMPRESSION_COLUMN_METADATA_COUNT_NAME,
INT4OID,
-1 /* typemod */,
0 /*collation*/)));
compresscolinfo_add_metadata_columns(cc);
relation_close(rel, AccessShareLock);
}
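
The generated names follow a fixed scheme: COMPRESSION_COLUMN_METADATA_PREFIX, then "min_max_", then the 1-based order-by index. Assuming the prefix is "_ts_meta_" (consistent with the _ts_meta_count and _ts_meta_min_max_1 columns in the test output further down), a hypothetical check:

FormData_hypertable_compression fd = { .orderby_column_index = 1 };
/* yields "_ts_meta_min_max_1" under the assumed "_ts_meta_" prefix */
char *name = compression_column_segment_min_max_name(&fd);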


@@ -18,4 +18,7 @@
bool tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options);
Chunk *create_compress_chunk_table(Hypertable *compress_ht, Chunk *src_chunk);
char *compression_column_segment_min_max_name(const FormData_hypertable_compression *fd);
#endif /* TIMESCALEDB_TSL_COMPRESSION_CREATE_H */


@@ -118,6 +118,23 @@ segment_meta_min_max_builder_update_null(SegmentMetaMinMaxBuilder *builder)
builder->has_null = true;
}
static void
segment_meta_min_max_builder_reset(SegmentMetaMinMaxBuilder *builder)
{
if (!builder->empty)
{
if (!builder->type_by_val)
{
pfree(DatumGetPointer(builder->min));
pfree(DatumGetPointer(builder->max));
}
builder->min = 0;
builder->max = 0;
}
builder->empty = true;
builder->has_null = false;
}
SegmentMetaMinMax *
segment_meta_min_max_builder_finish(SegmentMetaMinMaxBuilder *builder)
{
@@ -165,6 +182,14 @@ segment_meta_min_max_builder_finish(SegmentMetaMinMaxBuilder *builder)
return res;
}
SegmentMetaMinMax *
segment_meta_min_max_builder_finish_and_reset(SegmentMetaMinMaxBuilder *builder)
{
SegmentMetaMinMax *res = segment_meta_min_max_builder_finish(builder);
segment_meta_min_max_builder_reset(builder);
return res;
}
static void
segment_meta_min_max_get_deconstruct(SegmentMetaMinMax *meta, DatumDeserializer *deser, Datum *min,
Datum *max)
@@ -225,7 +250,7 @@ segment_meta_min_max_from_binary_string(StringInfo buf)
Datum
tsl_segment_meta_min_max_get_min(Datum meta_datum, Oid type)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) DatumGetPointer(meta_datum);
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
DatumDeserializer *deser;
Datum min, max;
@@ -240,7 +265,7 @@ tsl_segment_meta_min_max_get_min(Datum meta_datum, Oid type)
Datum
tsl_segment_meta_min_max_get_max(Datum meta_datum, Oid type)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) DatumGetPointer(meta_datum);
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
DatumDeserializer *deser = create_datum_deserializer(meta->type);
Datum min, max;
@@ -253,6 +278,6 @@ tsl_segment_meta_min_max_get_max(Datum meta_datum, Oid type)
bool
tsl_segment_meta_min_max_has_null(Datum meta_datum)
{
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) DatumGetPointer(meta_datum);
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);
return (meta->flags & HAS_NULLS) != 0;
}
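
The three getter fixes above all apply the same PostgreSQL rule: a varlena datum may arrive as a compressed or out-of-line TOAST pointer, so its contents cannot be read until it has been detoasted. The pattern, shown on a hypothetical meta_datum:

/* WRONG: if meta_datum is toasted, this pointer does not address the struct,
 * and field reads like meta->flags return garbage */
SegmentMetaMinMax *bad = (SegmentMetaMinMax *) DatumGetPointer(meta_datum);

/* RIGHT: force the datum into its full in-memory form first */
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(meta_datum);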


@@ -7,6 +7,7 @@
#define TIMESCALEDB_TSL_COMPRESSION_SEGMENT_META_H
#include <postgres.h>
#include <fmgr.h>
#include <lib/stringinfo.h>
typedef struct SegmentMetaMinMax SegmentMetaMinMax;
@@ -16,6 +17,7 @@ SegmentMetaMinMaxBuilder *segment_meta_min_max_builder_create(Oid type, Oid coll
void segment_meta_min_max_builder_update_val(SegmentMetaMinMaxBuilder *builder, Datum val);
void segment_meta_min_max_builder_update_null(SegmentMetaMinMaxBuilder *builder);
SegmentMetaMinMax *segment_meta_min_max_builder_finish(SegmentMetaMinMaxBuilder *builder);
SegmentMetaMinMax *segment_meta_min_max_builder_finish_and_reset(SegmentMetaMinMaxBuilder *builder);
Datum tsl_segment_meta_min_max_get_min(Datum meta, Oid type);
Datum tsl_segment_meta_min_max_get_max(Datum meta, Oid type);
@@ -28,7 +30,8 @@ SegmentMetaMinMax *segment_meta_min_max_from_binary_string(StringInfo buf)
static inline bytea *
tsl_segment_meta_min_max_send(Datum arg1)
{
return segment_meta_min_max_to_binary_string((SegmentMetaMinMax *) DatumGetPointer(arg1));
SegmentMetaMinMax *meta = (SegmentMetaMinMax *) PG_DETOAST_DATUM(arg1);
return segment_meta_min_max_to_binary_string(meta);
}
static inline Datum

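The new <lib/stringinfo.h> include supports this binary path. A hypothetical round trip through the declared helpers (only the two segment_meta function names come from this header; the StringInfo wiring is an assumption):

/* serialize, as tsl_segment_meta_min_max_send does above */
bytea *wire = segment_meta_min_max_to_binary_string(meta);

/* deserialize: wrap the raw bytes in a StringInfo and parse them back */
StringInfoData buf;
initStringInfo(&buf);
appendBinaryStringInfo(&buf, VARDATA_ANY(wire), VARSIZE_ANY_EXHDR(wire));
SegmentMetaMinMax *round_tripped = segment_meta_min_max_from_binary_string(&buf);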

@@ -50,6 +50,8 @@ CREATE TABLE uncompressed(
texts TEXT);
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_max_2 _timescaledb_internal.segment_meta_min_max,
time _timescaledb_internal.compressed_data,
device INT,
data _timescaledb_internal.compressed_data,
@@ -394,6 +396,7 @@ CREATE TABLE uncompressed(
time FLOAT);
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
b _timescaledb_internal.compressed_data,
device _timescaledb_internal.compressed_data,
time _timescaledb_internal.compressed_data);


@@ -1,3 +1,5 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
SET timescaledb.enable_transparent_decompression to OFF;
--TEST1 ---
@@ -134,15 +136,16 @@ where cl.oid = at.attrelid and at.attnum > 0
and cl.relname = '_compressed_hypertable_4'
and atttypid = ty.oid
order by at.attnum;
attname | attstorage | typname
----------------+------------+-----------------
time | x | compressed_data
location | x | text
location2 | x | compressed_data
temperature | e | compressed_data
humidity | e | compressed_data
_ts_meta_count | p | int4
(6 rows)
attname | attstorage | typname
--------------------+------------+----------------------
time | x | compressed_data
location | x | text
location2 | x | compressed_data
temperature | e | compressed_data
humidity | e | compressed_data
_ts_meta_count | p | int4
_ts_meta_min_max_1 | e | segment_meta_min_max
(7 rows)
SELECT ch1.schema_name|| '.' || ch1.table_name as "CHUNK_NAME", ch1.id "CHUNK_ID"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'


@@ -23,7 +23,7 @@ SELECT ts_test_compression();
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------


@@ -4,7 +4,7 @@
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------
@@ -21,6 +21,13 @@ $$
$$;
-- seed the random num generator
insert into rand_minstd_state values (321);
\c :TEST_DBNAME :ROLE_SUPERUSER
\ir include/compression_utils.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE test1 ("Time" timestamptz, i integer, b bigint, t text);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
NOTICE: adding not-null constraint to column "Time"
@@ -72,6 +79,24 @@ psql:include/compression_test_hypertable.sql:41: WARNING: Timescale License exp
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
27
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)
--add test for altered hypertable
CREATE TABLE test2 ("Time" timestamptz, i integer, b bigint, t text);
SELECT table_name from create_hypertable('test2', 'Time', chunk_time_interval=> INTERVAL '1 day');
@@ -130,6 +155,42 @@ psql:include/compression_test_hypertable.sql:41: WARNING: Timescale License exp
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)
\set TYPE int
\set ORDER_BY_COL_NAME c
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
5
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_2
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
0
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)
--TEST4 create segments with > 1000 rows.
CREATE TABLE test4 (
timec TIMESTAMPTZ NOT NULL,
@@ -203,3 +264,95 @@ psql:include/compression_test_hypertable.sql:41: WARNING: Timescale License exp
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)
\set TYPE TIMESTAMPTZ
\set ORDER_BY_COL_NAME timec
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
1
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)
--add hypertable with order by a non-by-val type with NULLs
CREATE TABLE test5 (
time TIMESTAMPTZ NOT NULL,
device_id TEXT NULL,
temperature DOUBLE PRECISION NULL
);
--use a 1 day chunk interval so the data spans several chunks
select create_hypertable( 'test5', 'time', chunk_time_interval=> '1 day'::interval);
create_hypertable
--------------------
(7,public,test5,t)
(1 row)
alter table test5 set (timescaledb.compress, timescaledb.compress_orderby = 'device_id, time');
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_1', gen_rand_minstd();
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_2', gen_rand_minstd();
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), NULL, gen_rand_minstd();
SELECT $$ SELECT * FROM test5 ORDER BY device_id, time $$ AS "QUERY" \gset
SELECT 'test5' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
10
(1 row)
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: hypertable
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: chunk
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
?column? | count
-----------------------------------------------------------------------------------+-------
Number of rows different between original and query on compressed data (expect 0) | 0
(1 row)
psql:include/compression_test_hypertable.sql:41: WARNING: Timescale License expired
count_decompressed
--------------------
10
(1 row)
?column? | count
--------------------------------------------------------------------------------------------------------------+-------
Number of rows different between original and data that has been compressed and then decompressed (expect 0) | 0
(1 row)
\set TYPE TEXT
\set ORDER_BY_COL_NAME device_id
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set ECHO errors
count_compressed
------------------
10
(1 row)
min_correct | max_correct | has_null_correct
-------------+-------------+------------------
t | t | t
(1 row)


@@ -1,6 +1,6 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
CREATE OR REPLACE FUNCTION _timescaledb_internal.tsl_segment_meta_min_max_append(internal, ANYELEMENT)
RETURNS internal
@@ -18,7 +18,7 @@ CREATE AGGREGATE _timescaledb_internal.segment_meta_min_max_agg(ANYELEMENT) (
\ir include/rand_generator.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --
--------------------------


@@ -49,6 +49,8 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
_ts_meta_min_max_2 _timescaledb_internal.segment_meta_min_max,
time _timescaledb_internal.compressed_data,
device INT,
data _timescaledb_internal.compressed_data,
@@ -173,6 +175,7 @@ CREATE TABLE uncompressed(
CREATE TABLE compressed(
_ts_meta_count int,
_ts_meta_min_max_1 _timescaledb_internal.segment_meta_min_max,
b _timescaledb_internal.compressed_data,
device _timescaledb_internal.compressed_data,
time _timescaledb_internal.compressed_data);


@@ -1,3 +1,5 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
SET timescaledb.enable_transparent_decompression to OFF;


@@ -3,6 +3,9 @@
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/rand_generator.sql
\c :TEST_DBNAME :ROLE_SUPERUSER
\ir include/compression_utils.sql
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
CREATE TABLE test1 ("Time" timestamptz, i integer, b bigint, t text);
SELECT table_name from create_hypertable('test1', 'Time', chunk_time_interval=> INTERVAL '1 day');
@@ -19,6 +22,11 @@ SELECT
SELECT 'test1' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
--add test for altered hypertable
CREATE TABLE test2 ("Time" timestamptz, i integer, b bigint, t text);
@@ -46,6 +54,16 @@ SELECT
SELECT 'test2' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE int
\set ORDER_BY_COL_NAME c
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
\set TYPE timestamptz
\set ORDER_BY_COL_NAME Time
\set SEGMENT_META_COL _ts_meta_min_max_2
\ir include/compression_test_hypertable_segment_meta.sql
--TEST4 create segments with > 1000 rows.
CREATE TABLE test4 (
timec TIMESTAMPTZ NOT NULL,
@@ -74,3 +92,37 @@ SELECT $$ SELECT * FROM test4 ORDER BY timec $$ AS "QUERY" \gset
SELECT 'test4' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE TIMESTAMPTZ
\set ORDER_BY_COL_NAME timec
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql
--add hypertable with order by a non-by-val type with NULLs
CREATE TABLE test5 (
time TIMESTAMPTZ NOT NULL,
device_id TEXT NULL,
temperature DOUBLE PRECISION NULL
);
--use a 1 day chunk interval so the data spans several chunks
select create_hypertable( 'test5', 'time', chunk_time_interval=> '1 day'::interval);
alter table test5 set (timescaledb.compress, timescaledb.compress_orderby = 'device_id, time');
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_1', gen_rand_minstd();
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), 'device_2', gen_rand_minstd();
insert into test5
select generate_series('2018-01-01 00:00'::timestamp, '2018-01-10 00:00'::timestamp, '2 hour'), NULL, gen_rand_minstd();
SELECT $$ SELECT * FROM test5 ORDER BY device_id, time $$ AS "QUERY" \gset
SELECT 'test5' AS "HYPERTABLE_NAME" \gset
\ir include/compression_test_hypertable.sql
\set TYPE TEXT
\set ORDER_BY_COL_NAME device_id
\set SEGMENT_META_COL _ts_meta_min_max_1
\ir include/compression_test_hypertable_segment_meta.sql


@@ -1,6 +1,6 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER


@@ -1,6 +1,6 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license
-- LICENSE-TIMESCALE for a copy of the license.
--------------------------
-- cheap rand generator --