mirror of https://github.com/timescale/timescaledb.git
error compressing wide table
Consider a hypertable with many columns (more than 600, say). In a call to compress_chunk(), the compressed tuple size can exceed 8K, which fails with an error like "row is too big: size 10856, maximum size 8160." This patch estimates the tuple size of the compressed hypertable and reports a warning when compression is enabled on the hypertable, so the user is made aware of the problem before calling compress_chunk(). Fixes #4398
parent ffd9dfb7eb
commit d00a55772c
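To see why a ~600-column table trips the limit, here is a quick back-of-the-envelope check (a standalone sketch, not code from this patch; it assumes a 64-bit build and the default 8K block size, for which PostgreSQL's MaxHeapTupleSize is 8160 bytes):

/* Sketch: reproduce the row-size estimate used by this patch. Each
 * varlena column is charged the size of an on-disk TOAST pointer. */
#include <stdio.h>

int main(void)
{
	const int header = 24;    /* MAXALIGN(SizeofHeapTupleHeader) on 64-bit */
	const int toast_ptr = 18; /* on-disk TOAST pointer size */
	const int ncols = 600;    /* the "wide" table from the report */

	/* 24 + 600 * 18 = 10824 > 8160, so the new WARNING would fire */
	printf("estimated row size: %d bytes\n", header + ncols * toast_ptr);
	return 0;
}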
@@ -22,6 +22,7 @@ argument or resolve the type ambiguity by casting to the intended type.
* #4673 Fix now() constification for VIEWs
* #4681 Fix compression_chunk_size primary key
* #4685 Improve chunk exclusion for space dimensions
* #4696 Report warning when enabling compression on hypertable

**Thanks**
* @maxtwardowski for reporting problems with chunk exclusion and space dimensions
@@ -2549,8 +2549,29 @@ ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
	NameData schema_name, table_name, associated_schema_name;
	ChunkSizingInfo *chunk_sizing_info;
	Relation rel;

	rel = table_open(table_relid, AccessExclusiveLock);
	int32 row_size = MAXALIGN(SizeofHeapTupleHeader);
	/* estimate tuple width of compressed hypertable */
	for (int i = 1; i <= RelationGetNumberOfAttributes(rel); i++)
	{
		bool is_varlena = false;
		Oid outfunc;
		Form_pg_attribute att = TupleDescAttr(rel->rd_att, i - 1);
		getTypeOutputInfo(att->atttypid, &outfunc, &is_varlena);
		if (is_varlena)
			/* variable-length value: assume it is toasted down to an
			 * 18-byte on-disk TOAST pointer */
			row_size += 18;
		else
			row_size += att->attlen;
	}
	if (row_size > MaxHeapTupleSize)
	{
		ereport(WARNING,
				(errmsg("compressed row size might exceed maximum row size"),
				 errdetail("Estimated row size of compressed hypertable is %u. This exceeds the "
						   "maximum size of %zu and can cause compression of chunks to fail.",
						   row_size,
						   MaxHeapTupleSize)));
	}
	/*
	 * Check that the user has permissions to make this table into a
	 * compressed hypertable
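The 18-byte constant above matches the size of PostgreSQL's on-disk TOAST pointer: a 1-byte varlena header, a 1-byte tag, and the 16-byte varatt_external body. A minimal sketch that checks the arithmetic (the struct mirrors the shape of varatt_external from postgres.h, with fixed-width stand-ins for the int32 and Oid typedefs):

#include <stdint.h>
#include <stdio.h>

/* Shape of postgres.h's varatt_external, the body of an on-disk
 * TOAST pointer (fixed-width stand-ins for int32/Oid). */
typedef struct
{
	int32_t va_rawsize;     /* original data size, including header */
	uint32_t va_extinfo;    /* external size and compression info */
	uint32_t va_valueid;    /* unique ID of the value in the TOAST table */
	uint32_t va_toastrelid; /* OID of the owning TOAST table */
} varatt_external_sketch;

int main(void)
{
	/* 1-byte varlena header + 1-byte tag + 16-byte body = 18 bytes */
	printf("TOAST pointer size: %zu bytes\n",
		   2 + sizeof(varatt_external_sketch));
	return 0;
}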
File diff suppressed because one or more lines are too long
@@ -347,4 +347,17 @@ SELECT COMPRESS_CHUNK(X) FROM SHOW_CHUNKS('test') X;

--below query should pass after chunks are compressed
SELECT 1 FROM test GROUP BY enum_col;
EXPLAIN SELECT DISTINCT 1 FROM test;
EXPLAIN SELECT DISTINCT 1 FROM test;

--github issue 4398
SELECT format('CREATE TABLE data_table AS SELECT now() AS tm, %s', array_to_string(array_agg(format('125 AS c%s',a)), ', ')) FROM generate_series(1,550)a \gexec
CREATE TABLE ts_table (LIKE data_table);
SELECT * FROM create_hypertable('ts_table', 'tm');
--should report a warning
\set VERBOSITY terse
ALTER TABLE ts_table SET(timescaledb.compress, timescaledb.compress_segmentby = 'c1',
	timescaledb.compress_orderby = 'tm');
INSERT INTO ts_table SELECT * FROM data_table;
--cleanup tables
DROP TABLE data_table cascade;
DROP TABLE ts_table cascade;