Accept all compression options on caggs

Enable proper handling of the 'compress_segmentby' and 'compress_orderby'
compression options on continuous aggregates.

ALTER MATERIALIZED VIEW test_table_cagg SET (
  timescaledb.compress = true,
  timescaledb.compress_segmentby = 'device_id'
);

Fixes #5161
This commit is contained in:
Zoltan Haindrich 2023-02-10 13:22:09 +00:00 committed by Zoltan Haindrich
parent d00c1f3721
commit 9d3866a50e
29 changed files with 284 additions and 30 deletions

View File

@ -12,6 +12,7 @@ accidentally triggering the load of a previous DB version.**
* #5246 Make connection establishment interruptible
* #5253 Make data node command execution interruptible
* #5243 Enable real-time aggregation for continuous aggregates with joins
* #5262 Extend enabling compression on a continuous aggregate with 'compress_segmentby' and 'compress_orderby' parameters
**Bugfixes**
* #4926 Fix corruption when inserting into compressed chunks

View File

@ -19,6 +19,7 @@ typedef enum CompressHypertableOption
CompressSegmentBy,
CompressOrderBy,
CompressChunkTimeInterval,
CompressOptionMax
} CompressHypertableOption;
typedef struct

View File

@ -39,6 +39,7 @@
#include "time_utils.h"
#include "ts_catalog/catalog.h"
#include "errors.h"
#include "compression_with_clause.h"
#define BUCKET_FUNCTION_SERIALIZE_VERSION 1
#define CHECK_NAME_MATCH(name1, name2) (namestrcmp(name1, name2) == 0)
@ -59,7 +60,7 @@ static const WithClauseDefinition continuous_aggregate_with_clause_def[] = {
.type_id = BOOLOID,
.default_val = BoolGetDatum(false),
},
[ContinuousViewOptionCompress] = {
[ContinuousViewOptionCompress] = {
.arg_name = "compress",
.type_id = BOOLOID,
},
@ -68,6 +69,18 @@ static const WithClauseDefinition continuous_aggregate_with_clause_def[] = {
.type_id = BOOLOID,
.default_val = BoolGetDatum(true),
},
[ContinuousViewOptionCompressSegmentBy] = {
.arg_name = "compress_segmentby",
.type_id = TEXTOID,
},
[ContinuousViewOptionCompressOrderBy] = {
.arg_name = "compress_orderby",
.type_id = TEXTOID,
},
[ContinuousViewOptionCompressChunkTimeInterval] = {
.arg_name = "compress_chunk_time_interval",
.type_id = INTERVALOID,
},
};
WithClauseResult *
@ -77,6 +90,51 @@ ts_continuous_agg_with_clause_parse(const List *defelems)
continuous_aggregate_with_clause_def,
TS_ARRAY_LEN(continuous_aggregate_with_clause_def));
}
/*
 * Collect the compression-related settings from a parsed continuous
 * aggregate WITH clause and re-package each explicitly supplied one as a
 * "timescaledb.<option>" DefElem, suitable for forwarding to the
 * hypertable-level compression machinery.
 *
 * Options left at their default (i.e. not mentioned by the user) are
 * skipped, so an empty list means "no compression options were given".
 */
List *
ts_continuous_agg_get_compression_defelems(const WithClauseResult *with_clauses)
{
	List *defelems = NIL;

	for (int option = 0; option < CompressOptionMax; option++)
	{
		int option_index = 0;

		/* Map the hypertable compression option onto the corresponding
		 * continuous aggregate view option. */
		switch (option)
		{
			case CompressEnabled:
				option_index = ContinuousViewOptionCompress;
				break;
			case CompressSegmentBy:
				option_index = ContinuousViewOptionCompressSegmentBy;
				break;
			case CompressOrderBy:
				option_index = ContinuousViewOptionCompressOrderBy;
				break;
			case CompressChunkTimeInterval:
				option_index = ContinuousViewOptionCompressChunkTimeInterval;
				break;
			default:
				elog(ERROR, "Unhandled compression option");
				break;
		}

		const WithClauseResult *input = &with_clauses[option_index];
		WithClauseDefinition def = continuous_aggregate_with_clause_def[option_index];

		/* Only forward options the user explicitly set. */
		if (input->is_default)
			continue;

		Node *value = (Node *) makeString(ts_with_clause_result_deparse_value(input));
		DefElem *def_elem = makeDefElemExtended("timescaledb",
												(char *) def.arg_name,
												value,
												DEFELEM_UNSPEC,
												-1);
		defelems = lappend(defelems, def_elem);
	}
	return defelems;
}
static void
init_scan_by_mat_hypertable_id(ScanIterator *iterator, const int32 mat_hypertable_id)
{

View File

@ -48,6 +48,10 @@ typedef enum ContinuousAggViewOption
ContinuousViewOptionMaterializedOnly,
ContinuousViewOptionCompress,
ContinuousViewOptionFinalized,
ContinuousViewOptionCompressSegmentBy,
ContinuousViewOptionCompressOrderBy,
ContinuousViewOptionCompressChunkTimeInterval,
ContinuousViewOptionMax
} ContinuousAggViewOption;
typedef enum ContinuousAggViewType
@ -60,6 +64,9 @@ typedef enum ContinuousAggViewType
extern TSDLLEXPORT WithClauseResult *ts_continuous_agg_with_clause_parse(const List *defelems);
extern TSDLLEXPORT List *
ts_continuous_agg_get_compression_defelems(const WithClauseResult *with_clauses);
#define BUCKET_WIDTH_VARIABLE (-1)
/*

View File

@ -15,6 +15,7 @@
#include <utils/lsyscache.h>
#include <utils/syscache.h>
#include "debug_assert.h"
#include "with_clause_parser.h"
#define TIMESCALEDB_NAMESPACE "timescaledb"
@ -76,6 +77,7 @@ ts_with_clauses_parse(const List *def_elems, const WithClauseDefinition *args, S
for (i = 0; i < nargs; i++)
{
results[i].definition = &args[i];
results[i].parsed = args[i].default_val;
results[i].is_default = true;
}
@ -113,6 +115,22 @@ ts_with_clauses_parse(const List *def_elems, const WithClauseDefinition *args, S
return results;
}
/*
 * Deparse a parsed WITH-clause value back into its textual form by calling
 * the output function of the option's declared type.
 *
 * Returns a palloc'd string representation of result->parsed.  Errors out
 * if the option's type OID is invalid or has no output function.
 */
extern TSDLLEXPORT char *
ts_with_clause_result_deparse_value(const WithClauseResult *result)
{
	Oid oid = result->definition->type_id;
	Ensure(OidIsValid(oid), "argument \"%d\" has invalid OID", oid);

	/* getTypeOutputInfo returns the type's *output* function, so name the
	 * local accordingly (it was previously misleadingly called "in_fn"). */
	Oid out_fn;
	bool typIsVarlena pg_attribute_unused();

	getTypeOutputInfo(oid, &out_fn, &typIsVarlena);
	Ensure(OidIsValid(out_fn), "no output function for type with OID %d", oid);

	return OidOutputFunctionCall(out_fn, result->parsed);
}
static Datum
parse_arg(WithClauseDefinition arg, DefElem *def)
{

View File

@ -22,6 +22,7 @@ typedef struct WithClauseDefinition
typedef struct WithClauseResult
{
const WithClauseDefinition *definition;
bool is_default;
Datum parsed;
} WithClauseResult;
@ -31,4 +32,6 @@ extern TSDLLEXPORT void ts_with_clause_filter(const List *def_elems, List **with
extern TSDLLEXPORT WithClauseResult *
ts_with_clauses_parse(const List *def_elems, const WithClauseDefinition *args, Size nargs);
extern TSDLLEXPORT char *ts_with_clause_result_deparse_value(const WithClauseResult *result);
#endif /* TIMESCALEDB_WITH_CLAUSE_PARSER_H */

View File

@ -194,28 +194,37 @@ cagg_get_compression_params(ContinuousAgg *agg, Hypertable *mat_ht)
return defelems;
}
/* enable/disable compression on continuous aggregate */
/* forwards compression related changes via an alter statement to the underlying HT */
static void
cagg_alter_compression(ContinuousAgg *agg, Hypertable *mat_ht, bool compress_enable)
cagg_alter_compression(ContinuousAgg *agg, Hypertable *mat_ht, List *compress_defelems)
{
List *defelems = NIL;
Assert(mat_ht != NULL);
if (compress_enable)
defelems = cagg_get_compression_params(agg, mat_ht);
WithClauseResult *with_clause_options =
ts_compress_hypertable_set_clause_parse(compress_defelems);
DefElem *enable = makeDefElemExtended("timescaledb",
"compress",
compress_enable ? (Node *) makeString("true") :
(Node *) makeString("false"),
DEFELEM_UNSPEC,
-1);
defelems = lappend(defelems, enable);
if (with_clause_options[CompressEnabled].parsed)
{
List *default_compress_defelems = cagg_get_compression_params(agg, mat_ht);
WithClauseResult *default_with_clause_options =
ts_compress_hypertable_set_clause_parse(default_compress_defelems);
/* Merge defaults if there's any. */
for (int i = 0; i < CompressOptionMax; i++)
{
if (with_clause_options[i].is_default && !default_with_clause_options[i].is_default)
{
with_clause_options[i] = default_with_clause_options[i];
elog(NOTICE,
"defaulting %s to %s",
with_clause_options[i].definition->arg_name,
ts_with_clause_result_deparse_value(&with_clause_options[i]));
}
}
}
WithClauseResult *with_clause_options = ts_compress_hypertable_set_clause_parse(defelems);
AlterTableCmd alter_cmd = {
.type = T_AlterTableCmd,
.subtype = AT_SetRelOptions,
.def = (Node *) defelems,
.def = (Node *) compress_defelems,
};
tsl_process_compress_table(&alter_cmd, mat_ht, with_clause_options);
@ -249,16 +258,16 @@ continuous_agg_update_options(ContinuousAgg *agg, WithClauseResult *with_clause_
update_materialized_only(agg, materialized_only);
ts_cache_release(hcache);
}
if (!with_clause_options[ContinuousViewOptionCompress].is_default)
List *compression_options = ts_continuous_agg_get_compression_defelems(with_clause_options);
if (list_length(compression_options) > 0)
{
bool compress_enable =
DatumGetBool(with_clause_options[ContinuousViewOptionCompress].parsed);
Cache *hcache = ts_hypertable_cache_pin();
Hypertable *mat_ht =
ts_hypertable_cache_get_entry_by_id(hcache, agg->data.mat_hypertable_id);
Assert(mat_ht != NULL);
cagg_alter_compression(agg, mat_ht, compress_enable);
cagg_alter_compression(agg, mat_ht, compression_options);
ts_cache_release(hcache);
}
if (!with_clause_options[ContinuousViewOptionCreateGroupIndex].is_default)

View File

@ -1646,6 +1646,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1663,6 +1664,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
--now set it back to false --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1729,6 +1731,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1746,6 +1749,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
--now set it back to false --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';

View File

@ -1689,6 +1689,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1141: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1706,6 +1707,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
--now set it back to false --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1149: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1772,6 +1774,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1185: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
@ -1789,6 +1792,7 @@ SELECT count(*) from test_setting_cagg ORDER BY 1;
--now set it back to false --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
psql:include/cagg_ddl_common.sql:1193: NOTICE: defaulting compress_orderby to time_bucket
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';

View File

@ -540,12 +540,13 @@ NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date
--now enable compression with invalid parameters
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress,
timescaledb.compress_segmentby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_segmentby"
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot use column "bucket" for both ordering and segmenting
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress,
timescaledb.compress_orderby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_orderby"
--enable compression and test re-enabling compression
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress);
NOTICE: defaulting compress_orderby to bucket
insert into i2980 select now();
call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL);
SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch;
@ -557,9 +558,11 @@ SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch;
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false');
ERROR: cannot change configuration on already compressed chunks
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true');
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot change configuration on already compressed chunks
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_segmentby"
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot change configuration on already compressed chunks
--Errors with compression policy on caggs--
select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ;
job_id
@ -570,6 +573,7 @@ select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interva
SELECT add_compression_policy('i2980_cagg', '8 day'::interval);
ERROR: compression not enabled on continuous aggregate "i2980_cagg"
ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress );
NOTICE: defaulting compress_orderby to time_bucket
SELECT add_compression_policy('i2980_cagg', '8 day'::interval);
ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg
SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval);

View File

@ -624,12 +624,13 @@ NOTICE: continuous aggregate "i2980_cagg2" is already up-to-date
--now enable compression with invalid parameters
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress,
timescaledb.compress_segmentby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_segmentby"
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot use column "bucket" for both ordering and segmenting
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress,
timescaledb.compress_orderby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_orderby"
--enable compression and test re-enabling compression
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress);
NOTICE: defaulting compress_orderby to bucket
insert into i2980 select now();
call refresh_continuous_aggregate('i2980_cagg2', NULL, NULL);
SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch;
@ -641,9 +642,11 @@ SELECT compress_chunk(ch) FROM show_chunks('i2980_cagg2') ch;
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'false');
ERROR: cannot change configuration on already compressed chunks
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress = 'true');
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot change configuration on already compressed chunks
ALTER MATERIALIZED VIEW i2980_cagg2 SET ( timescaledb.compress, timescaledb.compress_segmentby = 'bucket');
ERROR: unrecognized parameter "timescaledb.compress_segmentby"
NOTICE: defaulting compress_orderby to bucket
ERROR: cannot change configuration on already compressed chunks
--Errors with compression policy on caggs--
select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interval '2 day' ,'4h') AS job_id ;
job_id
@ -654,6 +657,7 @@ select add_continuous_aggregate_policy('i2980_cagg2', interval '10 day', interva
SELECT add_compression_policy('i2980_cagg', '8 day'::interval);
ERROR: compression not enabled on continuous aggregate "i2980_cagg"
ALTER MATERIALIZED VIEW i2980_cagg SET ( timescaledb.compress );
NOTICE: defaulting compress_orderby to time_bucket
SELECT add_compression_policy('i2980_cagg', '8 day'::interval);
ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for i2980_cagg
SELECT add_continuous_aggregate_policy('i2980_cagg2', '10 day'::interval, '6 day'::interval);

View File

@ -214,6 +214,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
@ -256,6 +257,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 10 other obj
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -400,6 +402,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -503,6 +506,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 10 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
@ -889,6 +893,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
@ -931,6 +936,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other obje
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -1063,6 +1069,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -1166,6 +1173,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1014 not found, skipping
@ -1547,6 +1555,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
@ -1589,6 +1598,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other obje
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -1721,6 +1731,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -1824,6 +1835,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1026 not found, skipping

View File

@ -249,6 +249,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
SELECT add_continuous_aggregate_policy('conditions_summary_daily', '30 days'::interval, '1 day'::interval, '1 hour'::interval);
@ -291,6 +292,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 10 other obj
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('1008' AS integer), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -435,6 +437,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -538,6 +541,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('1008' AS integer), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 10 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1002 not found, skipping
@ -924,6 +928,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
@ -966,6 +971,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other obje
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -1098,6 +1104,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -1201,6 +1208,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp without time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1014 not found, skipping
@ -1582,6 +1590,7 @@ psql:include/cagg_migrate_common.sql:155: ERROR: plan already exists for contin
\set ON_ERROR_STOP 1
-- policies for test
ALTER MATERIALIZED VIEW conditions_summary_daily SET (timescaledb.compress=true);
psql:include/cagg_migrate_common.sql:159: NOTICE: defaulting compress_orderby to bucket
\if :IS_TIME_DIMENSION
SELECT add_retention_policy('conditions_summary_daily', '30 days'::interval);
add_retention_policy
@ -1624,6 +1633,7 @@ psql:include/cagg_migrate_common.sql:178: NOTICE: drop cascades to 6 other obje
TRUNCATE _timescaledb_catalog.continuous_agg_migrate_plan RESTART IDENTITY CASCADE;
psql:include/cagg_migrate_common.sql:179: NOTICE: truncate cascades to table "continuous_agg_migrate_plan_step"
CALL cagg_migrate('conditions_summary_daily');
psql:include/cagg_migrate_common.sql:180: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:180: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily_new', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
SELECT
ca.raw_hypertable_id AS "NEW_RAW_HYPERTABLE_ID",
@ -1756,6 +1766,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE);
psql:include/cagg_migrate_common.sql:232: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:232: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
-- cagg with the new format because it was overriden
\d+ conditions_summary_daily
@ -1859,6 +1870,7 @@ SELECT * FROM cagg_jobs WHERE schema = 'public' AND name = 'conditions_summary_d
(3 rows)
CALL cagg_migrate('conditions_summary_daily', override => TRUE, drop_old => TRUE);
psql:include/cagg_migrate_common.sql:254: NOTICE: defaulting compress_orderby to bucket
psql:include/cagg_migrate_common.sql:254: WARNING: refresh the continuous aggregate after the migration executing this statement: "CALL public.refresh_continuous_aggregate('public.conditions_summary_daily', CAST('Sun Jan 01 00:00:00 2023' AS timestamp with time zone), NULL);"
psql:include/cagg_migrate_common.sql:254: NOTICE: drop cascades to 6 other objects
psql:include/cagg_migrate_common.sql:254: NOTICE: job 1026 not found, skipping

View File

@ -48,6 +48,8 @@ SELECT count(*) FROM _timescaledb_config.bgw_job;
\set VERBOSITY default
-- Test 1 step policy for integer type buckets
ALTER materialized view mat_m1 set (timescaledb.compress = true);
NOTICE: defaulting compress_segmentby to a
NOTICE: defaulting compress_orderby to time_partition_col
-- No policy is added if one errors out
SELECT timescaledb_experimental.add_policies('mat_m1', refresh_start_offset => 1, refresh_end_offset => 10, compress_after => 11, drop_after => 20);
ERROR: policy refresh window too small
@ -282,6 +284,7 @@ CREATE MATERIALIZED VIEW max_mat_view_date
\set VERBOSITY default
-- Test 1 step policy for timestamp type buckets
ALTER materialized view max_mat_view_date set (timescaledb.compress = true);
NOTICE: defaulting compress_orderby to time_bucket
-- Only works for cagg
SELECT timescaledb_experimental.add_policies('continuous_agg_max_mat_date', refresh_start_offset => '1 day'::interval, refresh_end_offset => '2 day'::interval, compress_after => '20 days'::interval, drop_after => '25 days'::interval);
ERROR: "continuous_agg_max_mat_date" is not a continuous aggregate
@ -760,6 +763,7 @@ GROUP BY 1 WITH NO DATA;
\set VERBOSITY default
-- Test 1 step policy for smallint type buckets
ALTER materialized view mat_smallint set (timescaledb.compress = true);
NOTICE: defaulting compress_orderby to a
-- All policies are added in one step
SELECT timescaledb_experimental.add_policies('mat_smallint', refresh_start_offset => 10::smallint, refresh_end_offset => 1::smallint, compress_after => 11::smallint, drop_after => 20::smallint);
add_policies
@ -962,6 +966,7 @@ FROM bigint_tab
GROUP BY 1 WITH NO DATA;
-- Test 1 step policy for bigint type buckets
ALTER materialized view mat_bigint set (timescaledb.compress = true);
NOTICE: defaulting compress_orderby to a
-- All policies are added in one step
SELECT timescaledb_experimental.add_policies('mat_bigint', refresh_start_offset => 10::bigint, refresh_end_offset => 1::bigint, compress_after => 11::bigint, drop_after => 20::bigint);
add_policies
@ -1032,7 +1037,9 @@ SELECT * FROM mat_bigint WHERE a>100 ORDER BY 1;
(1 row)
ALTER MATERIALIZED VIEW mat_bigint SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to a
ALTER MATERIALIZED VIEW mat_smallint SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to a
\set ON_ERROR_STOP 0
SELECT add_compression_policy('mat_smallint', 0::smallint);
ERROR: compress_after value for compression policy should be greater than the start of the refresh window of continuous aggregate policy for mat_smallint
@ -1105,6 +1112,8 @@ SELECT add_compression_policy('metrics_cagg', '8 day'::interval) AS "COMP_JOB" ;
ERROR: compression not enabled on continuous aggregate "metrics_cagg"
\set ON_ERROR_STOP 1
ALTER MATERIALIZED VIEW metrics_cagg SET (timescaledb.compress);
NOTICE: defaulting compress_segmentby to device_id
NOTICE: defaulting compress_orderby to dayb
SELECT add_compression_policy('metrics_cagg', '8 day'::interval) AS "COMP_JOB" ;
COMP_JOB
----------

View File

@ -1272,8 +1272,7 @@ Indexes:
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._compressed_hypertable_23 FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker()
DROP TABLE metric CASCADE;
-- Creating hypertable
-- #5290 Compression can't be enabled on caggs
CREATE TABLE "tEst2" (
"Id" uuid NOT NULL,
"Time" timestamp with time zone NOT NULL,
@ -1298,3 +1297,43 @@ FROM public."tEst2"
GROUP BY "Idd", "bUcket";
NOTICE: continuous aggregate "tEst2_mv" is already up-to-date
ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
NOTICE: defaulting compress_segmentby to "Idd"
NOTICE: defaulting compress_orderby to "bUcket"
-- #5161 segmentby param
CREATE MATERIALIZED VIEW test1_cont_view2
WITH (timescaledb.continuous,
timescaledb.materialized_only=true
)
AS SELECT time_bucket('1 hour', "Time") as t, SUM(intcol) as sum,txtcol as "iDeA"
FROM test1
GROUP BY 1,txtcol WITH NO DATA;
\set ON_ERROR_STOP 0
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_segmentby = 'invalid_column'
);
NOTICE: defaulting compress_orderby to t
ERROR: column "invalid_column" does not exist
\set ON_ERROR_STOP 1
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true
);
NOTICE: defaulting compress_segmentby to "iDeA"
NOTICE: defaulting compress_orderby to t
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_segmentby = '"iDeA"'
);
NOTICE: defaulting compress_orderby to t
\set ON_ERROR_STOP 0
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_orderby = '"iDeA"'
);
NOTICE: defaulting compress_segmentby to "iDeA"
ERROR: cannot use column "iDeA" for both ordering and segmenting
\set ON_ERROR_STOP 1
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = false
);
DROP TABLE metric CASCADE;

View File

@ -1498,6 +1498,8 @@ SELECT mat_htid AS "MAT_HTID"
FROM cagg_compression_status
WHERE cagg_name = 'search_query_count_3' \gset
ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true');
NOTICE: defaulting compress_segmentby to search_query
NOTICE: defaulting compress_orderby to bucket
SELECT cagg_name, mat_table_name
FROM cagg_compression_status where cagg_name = 'search_query_count_3';
cagg_name | mat_table_name
@ -1642,6 +1644,7 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2)
FROM test_morecols GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_morecols_cagg"
ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
compress_chunk
------------------------------------------

View File

@ -1498,6 +1498,8 @@ SELECT mat_htid AS "MAT_HTID"
FROM cagg_compression_status
WHERE cagg_name = 'search_query_count_3' \gset
ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true');
NOTICE: defaulting compress_segmentby to search_query
NOTICE: defaulting compress_orderby to bucket
SELECT cagg_name, mat_table_name
FROM cagg_compression_status where cagg_name = 'search_query_count_3';
cagg_name | mat_table_name
@ -1642,6 +1644,7 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2)
FROM test_morecols GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_morecols_cagg"
ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
compress_chunk
------------------------------------------

View File

@ -1498,6 +1498,8 @@ SELECT mat_htid AS "MAT_HTID"
FROM cagg_compression_status
WHERE cagg_name = 'search_query_count_3' \gset
ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true');
NOTICE: defaulting compress_segmentby to search_query
NOTICE: defaulting compress_orderby to bucket
SELECT cagg_name, mat_table_name
FROM cagg_compression_status where cagg_name = 'search_query_count_3';
cagg_name | mat_table_name
@ -1642,6 +1644,7 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2)
FROM test_morecols GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_morecols_cagg"
ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
compress_chunk
------------------------------------------

View File

@ -1498,6 +1498,8 @@ SELECT mat_htid AS "MAT_HTID"
FROM cagg_compression_status
WHERE cagg_name = 'search_query_count_3' \gset
ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true');
NOTICE: defaulting compress_segmentby to search_query
NOTICE: defaulting compress_orderby to bucket
SELECT cagg_name, mat_table_name
FROM cagg_compression_status where cagg_name = 'search_query_count_3';
cagg_name | mat_table_name
@ -1642,6 +1644,7 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2)
FROM test_morecols GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_morecols_cagg"
ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
compress_chunk
------------------------------------------

View File

@ -1525,6 +1525,8 @@ SELECT mat_htid AS "MAT_HTID"
FROM cagg_compression_status
WHERE cagg_name = 'search_query_count_3' \gset
ALTER MATERIALIZED VIEW search_query_count_3 SET (timescaledb.compress = 'true');
NOTICE: defaulting compress_segmentby to grp_5_5,search_query
NOTICE: defaulting compress_orderby to bucket
SELECT cagg_name, mat_table_name
FROM cagg_compression_status where cagg_name = 'search_query_count_3';
cagg_name | mat_table_name
@ -1677,6 +1679,7 @@ AS SELECT time_bucket('30 days',time), avg(val1), count(val2)
FROM test_morecols GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_morecols_cagg"
ALTER MATERIALIZED VIEW test_morecols_cagg SET (timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
SELECT compress_chunk(ch) FROM show_chunks('test_morecols_cagg') ch;
compress_chunk
------------------------------------------

View File

@ -1174,6 +1174,7 @@ SELECT * FROM conditions_dist_1m_manual ORDER BY bucket;
(3 rows)
ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress );
NOTICE: defaulting compress_orderby to bucket
SELECT compress_chunk(ch)
FROM show_chunks('conditions_dist_1m_manual') ch limit 1;
compress_chunk

View File

@ -611,6 +611,7 @@ SELECT * FROM conditions_dist_1m_manual ORDER BY bucket;
-- Compression on top of distributed hypertables
ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress );
NOTICE: defaulting compress_orderby to bucket
SELECT compress_chunk(ch)
FROM show_chunks('conditions_dist_1m_manual') ch limit 1;
compress_chunk

View File

@ -818,6 +818,7 @@ ORDER BY month;
-- Check compatibility with compressed distributed hypertables
ALTER MATERIALIZED VIEW conditions_dist_1m_manual SET ( timescaledb.compress );
NOTICE: defaulting compress_orderby to bucket
SELECT compress_chunk(ch)
FROM show_chunks('conditions_dist_1m_manual') ch limit 1;
compress_chunk

View File

@ -375,6 +375,8 @@ FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
(4 rows)
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
NOTICE: defaulting compress_segmentby to device
NOTICE: defaulting compress_orderby to hour
SELECT compress_chunk(c)
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
compress_chunk

View File

@ -375,6 +375,8 @@ FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
(4 rows)
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
NOTICE: defaulting compress_segmentby to device
NOTICE: defaulting compress_orderby to hour
SELECT compress_chunk(c)
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
compress_chunk

View File

@ -375,6 +375,8 @@ FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
(4 rows)
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
NOTICE: defaulting compress_segmentby to device
NOTICE: defaulting compress_orderby to hour
SELECT compress_chunk(c)
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
compress_chunk

View File

@ -375,6 +375,8 @@ FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
(4 rows)
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
NOTICE: defaulting compress_segmentby to device
NOTICE: defaulting compress_orderby to hour
SELECT compress_chunk(c)
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
compress_chunk

View File

@ -52,6 +52,8 @@ WHERE
(1 row)
ALTER MATERIALIZED VIEW metrics_compressed_summary SET (timescaledb.compress);
psql:include/cagg_compression_setup.sql:42: NOTICE: defaulting compress_segmentby to device_id
psql:include/cagg_compression_setup.sql:42: NOTICE: defaulting compress_orderby to bucket
CALL refresh_continuous_aggregate('metrics_compressed_summary', NULL, '2000-01-15 23:55:00+0');
SELECT CASE WHEN res is NULL THEN NULL
ELSE 'compressed'
@ -112,6 +114,8 @@ WHERE
(1 row)
ALTER MATERIALIZED VIEW metrics_summary SET (timescaledb.compress);
psql:include/cagg_compression_setup.sql:94: NOTICE: defaulting compress_segmentby to device_id
psql:include/cagg_compression_setup.sql:94: NOTICE: defaulting compress_orderby to bucket
CALL refresh_continuous_aggregate('metrics_summary', NULL, '2000-01-15 23:55:00+0');
SELECT CASE WHEN res is NULL THEN NULL
ELSE 'compressed'
@ -347,5 +351,6 @@ SELECT time_bucket('1 week', time) AS bucket FROM comp_rename GROUP BY 1;
NOTICE: continuous aggregate "comp_rename_cagg" is already up-to-date
ALTER MATERIALIZED VIEW comp_rename_cagg RENAME COLUMN bucket to "time";
ALTER MATERIALIZED VIEW comp_rename_cagg SET ( timescaledb.compress='true');
NOTICE: defaulting compress_orderby to "time"
DROP TABLE comp_rename CASCADE;
NOTICE: drop cascades to 3 other objects

View File

@ -547,9 +547,7 @@ WHERE uc_hypertable.table_name like 'metric' \gset
-- get definition of compressed hypertable and notice the index
\d :COMP_SCHEMA_NAME.:COMP_TABLE_NAME
DROP TABLE metric CASCADE;
-- Creating hypertable
-- #5290 Compression can't be enabled on caggs
CREATE TABLE "tEst2" (
"Id" uuid NOT NULL,
"Time" timestamp with time zone NOT NULL,
@ -572,3 +570,43 @@ FROM public."tEst2"
GROUP BY "Idd", "bUcket";
ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
-- #5161 segmentby param
CREATE MATERIALIZED VIEW test1_cont_view2
WITH (timescaledb.continuous,
timescaledb.materialized_only=true
)
AS SELECT time_bucket('1 hour', "Time") as t, SUM(intcol) as sum,txtcol as "iDeA"
FROM test1
GROUP BY 1,txtcol WITH NO DATA;
\set ON_ERROR_STOP 0
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_segmentby = 'invalid_column'
);
\set ON_ERROR_STOP 1
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true
);
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_segmentby = '"iDeA"'
);
\set ON_ERROR_STOP 0
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = true,
timescaledb.compress_orderby = '"iDeA"'
);
\set ON_ERROR_STOP 1
ALTER MATERIALIZED VIEW test1_cont_view2 SET (
timescaledb.compress = false
);
DROP TABLE metric CASCADE;