Add decompress_chunk function

This is the inverse of compress_chunk.
This commit is contained in:
Matvey Arye 2019-08-14 23:09:11 -04:00 committed by Matvey Arye
parent bdc599793c
commit a078781c2e
16 changed files with 261 additions and 34 deletions

View File

@ -5,7 +5,6 @@ set(PRE_INSTALL_SOURCE_FILES
pre_install/schemas.sql # Must be first pre_install/schemas.sql # Must be first
pre_install/types.sql # Must be before tables.sql pre_install/types.sql # Must be before tables.sql
pre_install/tables.sql pre_install/tables.sql
pre_install/types.sql
pre_install/insert_data.sql pre_install/insert_data.sql
pre_install/bgw_scheduler_startup.sql pre_install/bgw_scheduler_startup.sql
) )

View File

@ -20,5 +20,9 @@ CREATE OR REPLACE FUNCTION move_chunk(
) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_move_chunk' LANGUAGE C VOLATILE; ) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_move_chunk' LANGUAGE C VOLATILE;
CREATE OR REPLACE FUNCTION compress_chunk( CREATE OR REPLACE FUNCTION compress_chunk(
chunk REGCLASS uncompressed_chunk REGCLASS
) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C VOLATILE; ) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C VOLATILE;
-- Inverse of compress_chunk(): restores the rows of an uncompressed chunk
-- from its compressed counterpart (implemented by ts_decompress_chunk in
-- the loadable module).
CREATE OR REPLACE FUNCTION decompress_chunk(
uncompressed_chunk REGCLASS
) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_decompress_chunk' LANGUAGE C VOLATILE;

View File

@ -328,7 +328,7 @@ CREATE TABLE IF NOT EXISTS _timescaledb_catalog.compression_chunk_size (
compressed_heap_size BIGINT NOT NULL, compressed_heap_size BIGINT NOT NULL,
compressed_toast_size BIGINT NOT NULL, compressed_toast_size BIGINT NOT NULL,
compressed_index_size BIGINT NOT NULL, compressed_index_size BIGINT NOT NULL,
PRIMARY KEY( chunk_id, compressed_chunk_id) PRIMARY KEY(chunk_id, compressed_chunk_id)
); );
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', ''); SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');

View File

@ -1998,6 +1998,27 @@ chunks_return_srf(FunctionCallInfo fcinfo)
SRF_RETURN_DONE(funcctx); SRF_RETURN_DONE(funcctx);
} }
/*
 * Drop a chunk: delete its metadata row from the TimescaleDB catalog and
 * then drop the backing table itself.
 *
 * chunk     - the chunk to drop
 * cascade   - forwarded to performDeletion() as the drop behavior
 *             (presumably false -> DROP_RESTRICT, true -> DROP_CASCADE,
 *             relying on the enum's integer values -- TODO confirm)
 * log_level - elog() level for the "dropping chunk" message; pass a
 *             negative value to suppress the message entirely
 */
void
ts_chunk_drop(Chunk *chunk, bool cascade, int32 log_level)
{
	ObjectAddress objaddr = {
		.classId = RelationRelationId,
		.objectId = chunk->table_id,
	};

	if (log_level >= 0)
		elog(log_level,
			 "dropping chunk %s.%s",
			 chunk->fd.schema_name.data,
			 chunk->fd.table_name.data);

	/* Remove the chunk from the hypertable table */
	ts_chunk_delete_by_relid(chunk->table_id);

	/* Drop the table */
	performDeletion(&objaddr, cascade, 0);
}
List * List *
ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum, ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_than_datum,
Oid older_than_type, Oid newer_than_type, bool cascade, Oid older_than_type, Oid newer_than_type, bool cascade,
@ -2040,34 +2061,20 @@ ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, Datum newer_tha
for (; i < num_chunks; i++) for (; i < num_chunks; i++)
{ {
size_t len; size_t len;
char *chunk_name; char *chunk_name;
ObjectAddress objaddr = { /* store chunk name for output */
.classId = RelationRelationId, schema_name = quote_identifier(chunks[i]->fd.schema_name.data);
.objectId = chunks[i]->table_id, table_name = quote_identifier(chunks[i]->fd.table_name.data);
};
elog(log_level, len = strlen(schema_name) + strlen(table_name) + 2;
"dropping chunk %s.%s", chunk_name = palloc(len);
chunks[i]->fd.schema_name.data,
chunks[i]->fd.table_name.data);
/* Store chunk name for output */ snprintf(chunk_name, len, "%s.%s", schema_name, table_name);
schema_name = quote_identifier(chunks[i]->fd.schema_name.data); dropped_chunk_names = lappend(dropped_chunk_names, chunk_name);
table_name = quote_identifier(chunks[i]->fd.table_name.data);
len = strlen(schema_name) + strlen(table_name) + 2; ts_chunk_drop(chunks[i], cascade, log_level);
chunk_name = palloc(len);
snprintf(chunk_name, len, "%s.%s", schema_name, table_name);
dropped_chunk_names = lappend(dropped_chunk_names, chunk_name);
/* Remove the chunk from the hypertable table */
ts_chunk_delete_by_relid(chunks[i]->table_id);
/* Drop the table */
performDeletion(&objaddr, cascade, 0);
} }
if (cascades_to_materializations) if (cascades_to_materializations)

View File

@ -111,6 +111,7 @@ extern List *ts_chunk_get_window(int32 dimension_id, int64 point, int count, Mem
extern void ts_chunks_rename_schema_name(char *old_schema, char *new_schema); extern void ts_chunks_rename_schema_name(char *old_schema, char *new_schema);
extern TSDLLEXPORT bool ts_chunk_set_compressed_chunk(Chunk *chunk, int32 compressed_chunk_id, extern TSDLLEXPORT bool ts_chunk_set_compressed_chunk(Chunk *chunk, int32 compressed_chunk_id,
bool isnull); bool isnull);
extern TSDLLEXPORT void ts_chunk_drop(Chunk *chunk, bool cascade, int32 log_level);
extern TSDLLEXPORT List *ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum, extern TSDLLEXPORT List *ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum,
Datum newer_than_datum, Oid older_than_type, Datum newer_than_datum, Oid older_than_type,
Oid newer_than_type, bool cascade, Oid newer_than_type, bool cascade,

View File

@ -25,6 +25,7 @@ TS_FUNCTION_INFO_V1(ts_finalize_agg_sfunc);
TS_FUNCTION_INFO_V1(ts_finalize_agg_ffunc); TS_FUNCTION_INFO_V1(ts_finalize_agg_ffunc);
TS_FUNCTION_INFO_V1(ts_continuous_agg_invalidation_trigger); TS_FUNCTION_INFO_V1(ts_continuous_agg_invalidation_trigger);
TS_FUNCTION_INFO_V1(ts_compress_chunk); TS_FUNCTION_INFO_V1(ts_compress_chunk);
TS_FUNCTION_INFO_V1(ts_decompress_chunk);
TS_FUNCTION_INFO_V1(ts_compressed_data_decompress_forward); TS_FUNCTION_INFO_V1(ts_compressed_data_decompress_forward);
TS_FUNCTION_INFO_V1(ts_compressed_data_decompress_reverse); TS_FUNCTION_INFO_V1(ts_compressed_data_decompress_reverse);
@ -201,6 +202,12 @@ ts_compress_chunk(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(ts_cm_functions->compress_chunk(fcinfo)); PG_RETURN_DATUM(ts_cm_functions->compress_chunk(fcinfo));
} }
/*
 * SQL-facing stub for decompress_chunk(): dispatches to the loaded
 * cross-module (TSL) implementation through ts_cm_functions. When no
 * module is loaded this resolves to the default error function.
 */
Datum
ts_decompress_chunk(PG_FUNCTION_ARGS)
{
	PG_RETURN_DATUM(ts_cm_functions->decompress_chunk(fcinfo));
}
/* /*
* casting a function pointer to a pointer of another type is undefined * casting a function pointer to a pointer of another type is undefined
* behavior, so we need one of these for every function type we have * behavior, so we need one of these for every function type we have
@ -386,6 +393,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.array_compressor_finish = error_no_default_fn_pg_community, .array_compressor_finish = error_no_default_fn_pg_community,
.process_compress_table = process_compress_table_default, .process_compress_table = process_compress_table_default,
.compress_chunk = error_no_default_fn_pg_enterprise, .compress_chunk = error_no_default_fn_pg_enterprise,
.decompress_chunk = error_no_default_fn_pg_enterprise,
}; };
TSDLLEXPORT CrossModuleFunctions *ts_cm_functions = &ts_cm_functions_default; TSDLLEXPORT CrossModuleFunctions *ts_cm_functions = &ts_cm_functions_default;

View File

@ -91,6 +91,7 @@ typedef struct CrossModuleFunctions
bool (*process_compress_table)(AlterTableCmd *cmd, Hypertable *ht, bool (*process_compress_table)(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options); WithClauseResult *with_clause_options);
PGFunction compress_chunk; PGFunction compress_chunk;
PGFunction decompress_chunk;
} CrossModuleFunctions; } CrossModuleFunctions;
extern TSDLLEXPORT CrossModuleFunctions *ts_cm_functions; extern TSDLLEXPORT CrossModuleFunctions *ts_cm_functions;

View File

@ -23,6 +23,7 @@ ORDER BY proname;
chunk_relation_size_pretty chunk_relation_size_pretty
compress_chunk compress_chunk
create_hypertable create_hypertable
decompress_chunk
detach_tablespace detach_tablespace
detach_tablespaces detach_tablespaces
drop_chunks drop_chunks

View File

@ -22,6 +22,8 @@
#include "compress_utils.h" #include "compress_utils.h"
#include "compression.h" #include "compression.h"
#include "compat.h" #include "compat.h"
#include "scanner.h"
#include "scan_iterator.h"
#if !PG96 #if !PG96
#include <utils/fmgrprotos.h> #include <utils/fmgrprotos.h>
@ -64,6 +66,34 @@ compute_chunk_size(Oid chunk_relid)
return ret; return ret;
} }
/*
 * Set up a scan over _timescaledb_catalog.compression_chunk_size using its
 * primary-key index, with an equality key on chunk_id (the uncompressed
 * chunk's id, which is the leading primary-key column).
 */
static void
init_scan_by_uncompressed_chunk_id(ScanIterator *iterator, int32 uncompressed_chunk_id)
{
	iterator->ctx.index =
		catalog_get_index(ts_catalog_get(), COMPRESSION_CHUNK_SIZE, COMPRESSION_CHUNK_SIZE_PKEY);
	ts_scan_iterator_scan_key_init(iterator,
								   Anum_compression_chunk_size_pkey_chunk_id,
								   BTEqualStrategyNumber,
								   F_INT4EQ,
								   Int32GetDatum(uncompressed_chunk_id));
}
/*
 * Delete the compression_chunk_size catalog row(s) for the given
 * uncompressed chunk.
 *
 * Returns the number of rows deleted (normally 0 or 1, since chunk_id is
 * the leading column of the table's primary key).
 *
 * Fix: the original declared `count`, never incremented it, and returned
 * it -- so the function unconditionally returned 0. It now counts the
 * rows actually deleted.
 */
static int
compression_chunk_size_delete(int32 uncompressed_chunk_id)
{
	ScanIterator iterator =
		ts_scan_iterator_create(COMPRESSION_CHUNK_SIZE, RowExclusiveLock, CurrentMemoryContext);
	int count = 0;

	init_scan_by_uncompressed_chunk_id(&iterator, uncompressed_chunk_id);
	ts_scanner_foreach(&iterator)
	{
		TupleInfo *ti = ts_scan_iterator_tuple_info(&iterator);
		ts_catalog_delete(ti->scanrel, ti->tuple);
		count++;
	}
	return count;
}
static void static void
compression_chunk_size_catalog_insert(int32 src_chunk_id, ChunkSize *src_size, compression_chunk_size_catalog_insert(int32 src_chunk_id, ChunkSize *src_size,
int32 compress_chunk_id, ChunkSize *compress_size) int32 compress_chunk_id, ChunkSize *compress_size)
@ -186,6 +216,60 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
ts_cache_release(hcache); ts_cache_release(hcache);
} }
/*
 * Implementation of decompress_chunk(): restore the rows of an
 * uncompressed chunk from its compressed counterpart, then clean up.
 *
 * Validates that the relids identify a chunk belonging to the given
 * hypertable, that the hypertable has compression configured, and that
 * the chunk is currently compressed. On success it:
 *   1. decompresses the data back into the uncompressed chunk table,
 *   2. deletes the chunk's compression_chunk_size catalog row,
 *   3. clears the chunk's compressed_chunk_id (stored as NULL), and
 *   4. drops the now-unused compressed chunk (log_level -1 = no message).
 *
 * Fix: removed a stray empty statement (`;`) left after the "is not a
 * compressed" ereport.
 */
static void
decompress_chunk_impl(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_relid)
{
	Cache *hcache = ts_hypertable_cache_pin();
	Hypertable *uncompressed_hypertable =
		ts_hypertable_cache_get_entry(hcache, uncompressed_hypertable_relid);
	Hypertable *compressed_hypertable;
	Chunk *uncompressed_chunk;
	Chunk *compressed_chunk;

	if (uncompressed_hypertable == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_TS_HYPERTABLE_NOT_EXIST),
				 errmsg("table \"%s\" is not a hypertable",
						get_rel_name(uncompressed_hypertable_relid))));

	ts_hypertable_permissions_check(uncompressed_hypertable->main_table_relid, GetUserId());

	compressed_hypertable =
		ts_hypertable_get_by_id(uncompressed_hypertable->fd.compressed_hypertable_id);
	if (compressed_hypertable == NULL)
		ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("missing compressed hypertable")));

	uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_relid, 0, true);
	if (uncompressed_chunk == NULL)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 errmsg("table \"%s\" is not a chunk", get_rel_name(uncompressed_chunk_relid))));

	if (uncompressed_chunk->fd.hypertable_id != uncompressed_hypertable->fd.id)
		elog(ERROR, "hypertable and chunk do not match");

	if (uncompressed_chunk->fd.compressed_chunk_id == INVALID_CHUNK_ID)
		ereport(ERROR,
				(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
				 /* NOTE(review): wording should probably be "is not
				  * compressed"; kept as-is because regression test output
				  * depends on the current message */
				 errmsg("chunk \"%s\" is not a compressed",
						get_rel_name(uncompressed_chunk_relid))));

	compressed_chunk = ts_chunk_get_by_id(uncompressed_chunk->fd.compressed_chunk_id, 0, true);

	/* acquire locks on src and compress hypertable and src chunk */
	/* NOTE(review): AccessShareLock only -- assumes stronger locks are
	 * taken where needed before modification ("upgrade when needed");
	 * confirm against the callees */
	LockRelationOid(uncompressed_hypertable->main_table_relid, AccessShareLock);
	LockRelationOid(compressed_hypertable->main_table_relid, AccessShareLock);
	LockRelationOid(uncompressed_chunk->table_id, AccessShareLock);

	decompress_chunk(compressed_chunk->table_id, uncompressed_chunk->table_id);
	compression_chunk_size_delete(uncompressed_chunk->fd.id);
	/* isnull=true stores NULL, marking the chunk as no longer compressed */
	ts_chunk_set_compressed_chunk(uncompressed_chunk, 0, true);
	/* negative log_level suppresses the "dropping chunk" message */
	ts_chunk_drop(compressed_chunk, false, -1);
	ts_cache_release(hcache);
}
Datum Datum
tsl_compress_chunk(PG_FUNCTION_ARGS) tsl_compress_chunk(PG_FUNCTION_ARGS)
{ {
@ -198,3 +282,14 @@ tsl_compress_chunk(PG_FUNCTION_ARGS)
compress_chunk_impl(srcchunk->hypertable_relid, chunk_id); compress_chunk_impl(srcchunk->hypertable_relid, chunk_id);
PG_RETURN_VOID(); PG_RETURN_VOID();
} }
Datum
tsl_decompress_chunk(PG_FUNCTION_ARGS)
{
Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, 0, true);
if (NULL == uncompressed_chunk)
elog(ERROR, "unkown chunk id %d", uncompressed_chunk_id);
decompress_chunk_impl(uncompressed_chunk->hypertable_relid, uncompressed_chunk_id);
PG_RETURN_VOID();
}

View File

@ -7,5 +7,6 @@
#define TIMESCALEDB_TSL_COMPRESSION_UTILS_H #define TIMESCALEDB_TSL_COMPRESSION_UTILS_H
extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS); extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
#endif // TIMESCALEDB_TSL_COMPRESSION_UTILS_H #endif // TIMESCALEDB_TSL_COMPRESSION_UTILS_H

View File

@ -248,6 +248,9 @@ compress_chunk_populate_keys(Oid in_table, const ColumnCompressionInfo **columns
*n_keys_out += 1; *n_keys_out += 1;
} }
if (*n_keys_out == 0)
elog(ERROR, "compression should be configured with an orderby or segment by");
*keys_out = palloc(sizeof(**keys_out) * *n_keys_out); *keys_out = palloc(sizeof(**keys_out) * *n_keys_out);
for (i = 0; i < n_columns; i++) for (i = 0; i < n_columns; i++)
@ -844,8 +847,6 @@ decompress_chunk(Oid in_table, Oid out_table)
FreeBulkInsertState(decompressor.bistate); FreeBulkInsertState(decompressor.bistate);
} }
truncate_relation(in_table);
RelationClose(out_rel); RelationClose(out_rel);
RelationClose(in_rel); RelationClose(in_rel);
} }

View File

@ -108,6 +108,7 @@ CrossModuleFunctions tsl_cm_functions = {
.array_compressor_finish = tsl_array_compressor_finish, .array_compressor_finish = tsl_array_compressor_finish,
.process_compress_table = tsl_process_compress_table, .process_compress_table = tsl_process_compress_table,
.compress_chunk = tsl_compress_chunk, .compress_chunk = tsl_compress_chunk,
.decompress_chunk = tsl_decompress_chunk,
}; };
TS_FUNCTION_INFO_V1(ts_module_init); TS_FUNCTION_INFO_V1(ts_module_init);

View File

@ -123,6 +123,7 @@ SELECT count(*) from :CHUNK_NAME;
21 21
(1 row) (1 row)
SELECT count(*) as "ORIGINAL_CHUNK_COUNT" from :CHUNK_NAME \gset
select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid; select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
tableoid | count tableoid | count
----------------------------------------+------- ----------------------------------------+-------
@ -223,3 +224,39 @@ compressed_toast_bytes | 16 kB
compressed_total_bytes | 32 kB compressed_total_bytes | 32 kB
\x \x
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
decompress_chunk
------------------
(2 rows)
SELECT count(*), count(*) = :'ORIGINAL_CHUNK_COUNT' from :CHUNK_NAME;
count | ?column?
-------+----------
21 | t
(1 row)
--check that the compressed chunk is dropped
\set ON_ERROR_STOP 0
SELECT count(*) from :COMPRESSED_CHUNK_NAME;
ERROR: relation "_timescaledb_internal.compress_hyper_4_9_chunk" does not exist at character 22
\set ON_ERROR_STOP 1
--size information is gone too
select count(*) from timescaledb_information.compressed_chunk_size
where hypertable_name::text like 'conditions';
count
-------
0
(1 row)
--make sure compressed_chunk_id is reset to NULL
select ch1.compressed_chunk_id IS NULL
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'
?column?
----------
t
t
(2 rows)

View File

@ -11,6 +11,15 @@ NOTICE: adding not-null constraint to column "a"
foo2 foo2
(1 row) (1 row)
create table non_compressed (a integer, "bacB toD" integer, c integer, d integer);
select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10);
NOTICE: adding not-null constraint to column "a"
table_name
----------------
non_compressed
(1 row)
insert into non_compressed values( 3 , 16 , 20, 4);
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c');
ERROR: cannot use the same column c in compress_orderby and compress_segmentby ERROR: cannot use the same column c in compress_orderby and compress_segmentby
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd'); ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd');
@ -67,3 +76,25 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c des
ERROR: unexpected token descend in compress_orderby list ERROR: unexpected token descend in compress_orderby list
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c');
ERROR: column c asc in compress_segmentby list does not exist ERROR: column c asc in compress_segmentby list does not exist
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a');
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
ERROR: chunk "_hyper_4_2_chunk" is not a compressed
--should succeed
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
compress_chunk
----------------
(1 row)
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
ERROR: chunk is already compressed
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' limit 1;
ERROR: chunks can be compressed only if compression property is set on the hypertable
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' limit 1;
ERROR: missing compressed hypertable

View File

@ -55,6 +55,7 @@ FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch
LIMIT 1 \gset LIMIT 1 \gset
SELECT count(*) from :CHUNK_NAME; SELECT count(*) from :CHUNK_NAME;
SELECT count(*) as "ORIGINAL_CHUNK_COUNT" from :CHUNK_NAME \gset
select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid; select tableoid::regclass, count(*) from conditions group by tableoid order by tableoid;
@ -84,3 +85,20 @@ order by hypertable_name, chunk_name;
select * from timescaledb_information.compressed_hypertable_size select * from timescaledb_information.compressed_hypertable_size
order by hypertable_name; order by hypertable_name;
\x \x
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
SELECT count(*), count(*) = :'ORIGINAL_CHUNK_COUNT' from :CHUNK_NAME;
--check that the compressed chunk is dropped
\set ON_ERROR_STOP 0
SELECT count(*) from :COMPRESSED_CHUNK_NAME;
\set ON_ERROR_STOP 1
--size information is gone too
select count(*) from timescaledb_information.compressed_chunk_size
where hypertable_name::text like 'conditions';
--make sure compressed_chunk_id is reset to NULL
select ch1.compressed_chunk_id IS NULL
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions'

View File

@ -8,6 +8,10 @@
create table foo2 (a integer, "bacB toD" integer, c integer, d integer); create table foo2 (a integer, "bacB toD" integer, c integer, d integer);
select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10); select table_name from create_hypertable('foo2', 'a', chunk_time_interval=> 10);
create table non_compressed (a integer, "bacB toD" integer, c integer, d integer);
select table_name from create_hypertable('non_compressed', 'a', chunk_time_interval=> 10);
insert into non_compressed values( 3 , 16 , 20, 4);
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c'); ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'c');
ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd'); ALTER TABLE foo2 set (timescaledb.compress, timescaledb.compress_segmentby = '"bacB toD",c' , timescaledb.compress_orderby = 'd');
select * from _timescaledb_catalog.hypertable_compression order by attname; select * from _timescaledb_catalog.hypertable_compression order by attname;
@ -39,3 +43,21 @@ ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c des
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend'); ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'c descend');
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c'); ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_segmentby = 'c asc' , timescaledb.compress_orderby = 'c');
--should succeed
ALTER TABLE foo set (timescaledb.compress, timescaledb.compress_orderby = 'a');
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
--should succeed
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'foo' limit 1;
select compress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' limit 1;
select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'non_compressed' limit 1;