Compression can't be enabled on caggs

Enabling compression on a continuous aggregate failed when its
segmentby/orderby columns needed quoting.
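
A minimal way to hit the bug (distilled from the regression test added
in this commit; all names come from that test):

    CREATE TABLE "tEst2" ("Id" uuid NOT NULL, "Time" timestamptz NOT NULL);
    SELECT create_hypertable('"tEst2"', 'Time');
    CREATE MATERIALIZED VIEW "tEst2_mv" WITH (timescaledb.continuous) AS
      SELECT "Id" AS "Idd", time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
      FROM "tEst2" GROUP BY "Idd", "bUcket";
    -- Failed before this fix: the generated compress_orderby/compress_segmentby
    -- option values contained the mixed-case column names without quotes.
    ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
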
Zoltan Haindrich 2023-02-07 09:53:09 +00:00 committed by Zoltan Haindrich
parent 1eb8aa3f14
commit cad2440b58
4 changed files with 57 additions and 32 deletions

@@ -16,6 +16,7 @@ accidentally triggering the load of a previous DB version.**
 * #4926 Fix corruption when inserting into compressed chunks
 * #5218 Add role-level security to job error log
 * #5214 Fix use of prepared statement in async module
+* #5290 Compression can't be enabled on continuous aggregates when segmentby/orderby columns need quotation
 
 ## 2.9.3 (2023-02-03)

@@ -156,7 +156,7 @@ cagg_get_compression_params(ContinuousAgg *agg, Hypertable *mat_ht)
 {
 	List *defelems = NIL;
 	const Dimension *mat_ht_dim = hyperspace_get_open_dimension(mat_ht->space, 0);
-	const char *mat_ht_timecolname = NameStr(mat_ht_dim->fd.column_name);
+	const char *mat_ht_timecolname = quote_identifier(NameStr(mat_ht_dim->fd.column_name));
 	DefElem *ordby = makeDefElemExtended("timescaledb",
 										 "compress_orderby",
 										 (Node *) makeString((char *) mat_ht_timecolname),
@@ -166,49 +166,25 @@ cagg_get_compression_params(ContinuousAgg *agg, Hypertable *mat_ht)
 	List *grp_colnames = cagg_find_groupingcols(agg, mat_ht);
 	if (grp_colnames)
 	{
+		StringInfo info = makeStringInfo();
 		ListCell *lc;
-		/* we have column names. they are guaranteed to be at most
-		 * NAMEDATALEN
-		 */
-		int seglen = ((NAMEDATALEN + 1) * list_length(grp_colnames)) + 1;
-		char *segmentby = (char *) palloc(seglen);
-		int segidx = 0;
 		foreach (lc, grp_colnames)
 		{
-			int collen;
 			char *grpcol = (char *) lfirst(lc);
 			/* skip time dimension col if it appears in group-by list */
 			if (namestrcmp((Name) & (mat_ht_dim->fd.column_name), grpcol) == 0)
 				continue;
-			if (segidx > 0 && (seglen - segidx) > 1)
-			{
-				strlcpy(segmentby + segidx, ",", 2);
-				segidx = segidx + 1;
-			}
-			collen = strlen(grpcol);
-			if (seglen - segidx > collen)
-			{
-				strlcpy(segmentby + segidx, grpcol, collen + 1);
-				segidx = segidx + collen;
-			}
-			else
-			{
-				ereport(ERROR,
-						(errcode(ERRCODE_INTERNAL_ERROR),
-						 errmsg("%s not enough space to copy segment by column (%d %d %d)",
-								__func__,
-								seglen,
-								segidx,
-								collen)));
-			}
+			if (info->len > 0)
+				appendStringInfoString(info, ",");
+			appendStringInfoString(info, quote_identifier(grpcol));
 		}
-		if (segidx != 0)
+		if (info->len > 0)
 		{
 			DefElem *segby;
-			segmentby[segidx] = '\0';
 			segby = makeDefElemExtended("timescaledb",
 										"compress_segmentby",
-										(Node *) makeString(segmentby),
+										(Node *) makeString(info->data),
 										DEFELEM_UNSPEC,
 										-1);
 			defelems = lappend(defelems, segby);

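The rewrite above drops the hand-rolled fixed-size buffer (sized at
(NAMEDATALEN + 1) bytes per column, with strlcpy bounds checks and an
ereport on overflow) in favor of a growable StringInfo, and passes every
column name through quote_identifier(), which double-quotes an identifier
only when necessary. Its SQL-level counterpart quote_ident() shows the
behavior the fix relies on (an illustration, not part of the patch):

    SELECT quote_ident('id');        -- id          (already valid: left bare)
    SELECT quote_ident('Id');        -- "Id"        (would be case-folded: quoted)
    SELECT quote_ident('two words'); -- "two words" (invalid characters: quoted)
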
@@ -1273,3 +1273,28 @@ Triggers:
     ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._compressed_hypertable_23 FOR EACH ROW EXECUTE FUNCTION _timescaledb_internal.insert_blocker()
 DROP TABLE metric CASCADE;
+-- Creating hypertable
+CREATE TABLE "tEst2" (
+    "Id" uuid NOT NULL,
+    "Time" timestamp with time zone NOT NULL,
+    CONSTRAINT "test2_pkey" PRIMARY KEY ("Id", "Time")
+);
+SELECT create_hypertable(
+    '"tEst2"',
+    'Time',
+    chunk_time_interval => INTERVAL '1 day'
+);
+ create_hypertable
+---------------------
+ (24,public,tEst2,t)
+(1 row)
+
+alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"');
+CREATE MATERIALIZED VIEW "tEst2_mv"
+  WITH (timescaledb.continuous) AS
+SELECT "Id" as "Idd",
+  time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
+FROM public."tEst2"
+GROUP BY "Idd", "bUcket";
+NOTICE: continuous aggregate "tEst2_mv" is already up-to-date
+ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);
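
One way to double-check the options the fix generated (not part of the
committed test; uses the standard timescaledb_information view):

    SELECT attname, segmentby_column_index, orderby_column_index
    FROM timescaledb_information.compression_settings;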

@@ -549,3 +549,26 @@ WHERE uc_hypertable.table_name like 'metric' \gset
 DROP TABLE metric CASCADE;
+-- Creating hypertable
+CREATE TABLE "tEst2" (
+    "Id" uuid NOT NULL,
+    "Time" timestamp with time zone NOT NULL,
+    CONSTRAINT "test2_pkey" PRIMARY KEY ("Id", "Time")
+);
+SELECT create_hypertable(
+    '"tEst2"',
+    'Time',
+    chunk_time_interval => INTERVAL '1 day'
+);
+alter table "tEst2" set (timescaledb.compress=true, timescaledb.compress_segmentby='"Id"');
+CREATE MATERIALIZED VIEW "tEst2_mv"
+  WITH (timescaledb.continuous) AS
+SELECT "Id" as "Idd",
+  time_bucket(INTERVAL '1 day', "Time") AS "bUcket"
+FROM public."tEst2"
+GROUP BY "Idd", "bUcket";
+ALTER MATERIALIZED VIEW "tEst2_mv" SET (timescaledb.compress = true);