Add ability to create the compressed hypertable

This happens when compression is turned on for regular hypertables.
This commit is contained in:
gayyappan 2019-07-17 16:12:57 -04:00 committed by Matvey Arye
parent 584f5d1061
commit 1c6aacc374
46 changed files with 961 additions and 213 deletions

View File

@ -44,13 +44,17 @@ CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable (
table_name NAME NOT NULL,
associated_schema_name NAME NOT NULL,
associated_table_prefix NAME NOT NULL,
num_dimensions SMALLINT NOT NULL CHECK (num_dimensions > 0),
num_dimensions SMALLINT NOT NULL,
chunk_sizing_func_schema NAME NOT NULL,
chunk_sizing_func_name NAME NOT NULL,
chunk_target_size BIGINT NOT NULL CHECK (chunk_target_size >= 0), -- size in bytes
compressed BOOLEAN NOT NULL DEFAULT false,
compressed_hypertable_id INTEGER REFERENCES _timescaledb_catalog.hypertable(id),
UNIQUE (id, schema_name),
UNIQUE (schema_name, table_name),
UNIQUE (associated_schema_name, associated_table_prefix)
UNIQUE (associated_schema_name, associated_table_prefix),
constraint hypertable_dim_compress_check check ( num_dimensions > 0 or compressed = true ),
constraint hypertable_compress_check check ( compressed = false or (compressed = true and compressed_hypertable_id is null ))
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.hypertable','id'), '');
@ -119,6 +123,7 @@ CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk (
hypertable_id INT NOT NULL REFERENCES _timescaledb_catalog.hypertable(id),
schema_name NAME NOT NULL,
table_name NAME NOT NULL,
compressed_chunk_id INTEGER REFERENCES _timescaledb_catalog.chunk(id),
UNIQUE (schema_name, table_name)
);
CREATE INDEX IF NOT EXISTS chunk_hypertable_id_idx

View File

@ -111,6 +111,15 @@ GRANT SELECT ON _timescaledb_internal.bgw_job_stat TO PUBLIC;
ALTER TABLE _timescaledb_catalog.hypertable add column compressed boolean NOT NULL default false;
ALTER TABLE _timescaledb_catalog.hypertable add column compressed_hypertable_id INTEGER REFERENCES _timescaledb_catalog.hypertable(id);
ALTER TABLE _timescaledb_catalog.hypertable drop constraint hypertable_num_dimensions_check;
ALTER TABLE _timescaledb_catalog.hypertable add constraint hypertable_dim_compress_check check ( num_dimensions > 0 or compressed = true );
alter table _timescaledb_catalog.hypertable add constraint hypertable_compress_check check ( compressed = false or (compressed = true and compressed_hypertable_id is null ));
ALTER TABLE _timescaledb_catalog.chunk add column compressed_chunk_id integer references _timescaledb_catalog.chunk(id);
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.compression_algorithm(
id SMALLINT PRIMARY KEY,
version SMALLINT NOT NULL,

View File

@ -17,6 +17,8 @@ set(SOURCES
constraint_aware_append.c
cross_module_fn.c
copy.c
compress_hypertable.h
compress_hypertable.c
dimension.c
dimension_slice.c
dimension_vector.c

View File

@ -109,6 +109,8 @@ enum Anum_hypertable
Anum_hypertable_chunk_sizing_func_schema,
Anum_hypertable_chunk_sizing_func_name,
Anum_hypertable_chunk_target_size,
Anum_hypertable_compressed,
Anum_hypertable_compressed_hypertable_id,
_Anum_hypertable_max,
};
@ -125,6 +127,8 @@ typedef struct FormData_hypertable
NameData chunk_sizing_func_schema;
NameData chunk_sizing_func_name;
int64 chunk_target_size;
bool compressed;
int32 compressed_hypertable_id;
} FormData_hypertable;
typedef FormData_hypertable *Form_hypertable;
@ -293,6 +297,7 @@ enum Anum_chunk
Anum_chunk_hypertable_id,
Anum_chunk_schema_name,
Anum_chunk_table_name,
Anum_chunk_compressed_chunk_id,
_Anum_chunk_max,
};
@ -304,6 +309,7 @@ typedef struct FormData_chunk
int32 hypertable_id;
NameData schema_name;
NameData table_name;
int32 compressed_chunk_id;
} FormData_chunk;
typedef FormData_chunk *Form_chunk;

View File

@ -94,14 +94,21 @@ chunk_insert_relation(Relation rel, Chunk *chunk)
Int32GetDatum(chunk->fd.hypertable_id);
values[AttrNumberGetAttrOffset(Anum_chunk_schema_name)] = NameGetDatum(&chunk->fd.schema_name);
values[AttrNumberGetAttrOffset(Anum_chunk_table_name)] = NameGetDatum(&chunk->fd.table_name);
/*when we insert a chunk the compressed chunk id is always NULL */
if (chunk->fd.compressed_chunk_id == INVALID_CHUNK_ID)
nulls[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] = true;
else
{
values[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] =
Int32GetDatum(chunk->fd.compressed_chunk_id);
}
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
ts_catalog_insert_values(rel, desc, values, nulls);
ts_catalog_restore_user(&sec_ctx);
}
static void
chunk_insert_lock(Chunk *chunk, LOCKMODE lock)
void
ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock)
{
Catalog *catalog = ts_catalog_get();
Relation rel;
@ -112,9 +119,20 @@ chunk_insert_lock(Chunk *chunk, LOCKMODE lock)
}
static void
chunk_fill(Chunk *chunk, HeapTuple tuple)
chunk_fill(Chunk *chunk, HeapTuple tuple, TupleDesc desc)
{
bool isnull;
Datum compress_id;
memcpy(&chunk->fd, GETSTRUCT(tuple), sizeof(FormData_chunk));
/* this is valid because NULLS are only at the end of the struct */
/* compressed_chunk_id can be null, so retrieve it */
compress_id = heap_getattr(tuple, Anum_chunk_compressed_chunk_id, desc, &isnull);
if (isnull)
chunk->fd.compressed_chunk_id = INVALID_CHUNK_ID;
else
chunk->fd.compressed_chunk_id = DatumGetInt32(compress_id);
chunk->table_id = get_relname_relid(chunk->fd.table_name.data,
get_namespace_oid(chunk->fd.schema_name.data, true));
chunk->hypertable_relid = ts_inheritance_parent_relid(chunk->table_id);
@ -526,8 +544,8 @@ create_toast_table(CreateStmt *stmt, Oid chunk_oid)
* table creation will fail. If the schema doesn't yet exist, the table owner
* instead needs the proper permissions on the database to create the schema.
*/
static Oid
chunk_create_table(Chunk *chunk, Hypertable *ht)
Oid
ts_chunk_create_table(Chunk *chunk, Hypertable *ht)
{
Relation rel;
ObjectAddress objaddr;
@ -617,7 +635,7 @@ chunk_create_after_lock(Hypertable *ht, Point *p, const char *schema, const char
snprintf(chunk->fd.table_name.data, NAMEDATALEN, "%s_%d_chunk", prefix, chunk->fd.id);
/* Insert chunk */
chunk_insert_lock(chunk, RowExclusiveLock);
ts_chunk_insert_lock(chunk, RowExclusiveLock);
/* Insert any new dimension slices */
ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
@ -626,7 +644,7 @@ chunk_create_after_lock(Hypertable *ht, Point *p, const char *schema, const char
chunk_add_constraints(chunk);
/* Create the actual table relation for the chunk */
chunk->table_id = chunk_create_table(chunk, ht);
chunk->table_id = ts_chunk_create_table(chunk, ht);
if (!OidIsValid(chunk->table_id))
elog(ERROR, "could not create chunk table");
@ -679,6 +697,7 @@ ts_chunk_create_stub(int32 id, int16 num_constraints)
chunk = palloc0(sizeof(Chunk));
chunk->fd.id = id;
chunk->fd.compressed_chunk_id = INVALID_CHUNK_ID;
if (num_constraints > 0)
chunk->constraints = ts_chunk_constraints_alloc(num_constraints, CurrentMemoryContext);
@ -691,7 +710,7 @@ chunk_tuple_found(TupleInfo *ti, void *arg)
{
Chunk *chunk = arg;
chunk_fill(chunk, ti->tuple);
chunk_fill(chunk, ti->tuple, ti->desc);
return SCAN_DONE;
}
@ -1821,6 +1840,64 @@ ts_chunk_set_schema(Chunk *chunk, const char *newschema)
return chunk_update_form(&chunk->fd);
}
/*
 * Scanner tuple-found callback: rewrite the compressed_chunk_id column of
 * the current chunk catalog tuple.
 *
 * 'data' points to an int32 holding the new compressed chunk id; the
 * sentinel INVALID_CHUNK_ID stores SQL NULL instead.  Returns SCAN_DONE so
 * the scan stops after the first match (chunk id is unique).
 */
static ScanTupleResult
chunk_set_compressed_id_in_tuple(TupleInfo *ti, void *data)
{
bool nulls[Natts_chunk];
Datum values[Natts_chunk];
/* only the compressed_chunk_id column is marked for replacement */
bool repl[Natts_chunk] = { false };
CatalogSecurityContext sec_ctx;
HeapTuple tuple;
int32 compressed_chunk_id = *((int32 *) data);
heap_deform_tuple(ti->tuple, ti->desc, values, nulls);
if (compressed_chunk_id == INVALID_CHUNK_ID)
{
/* INVALID_CHUNK_ID maps to SQL NULL in the catalog */
nulls[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] = true;
}
else
{
nulls[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] = false;
values[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] =
Int32GetDatum(compressed_chunk_id);
}
repl[AttrNumberGetAttrOffset(Anum_chunk_compressed_chunk_id)] = true;
/* catalog writes must run as the extension/catalog owner */
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
tuple = heap_modify_tuple(ti->tuple, ti->desc, values, nulls, repl);
ts_catalog_update(ti->scanrel, tuple);
heap_freetuple(tuple);
ts_catalog_restore_user(&sec_ctx);
return SCAN_DONE;
}
/*
 * Set (or clear) the compressed_chunk_id of 'chunk' in the catalog.
 *
 * When isnull is true the column is set to SQL NULL (represented internally
 * by the INVALID_CHUNK_ID sentinel); otherwise it is set to
 * compressed_chunk_id.  Returns true if a matching chunk row was found and
 * updated.
 *
 * Assumes permissions are already checked.
 */
bool
ts_chunk_set_compressed_chunk(Chunk *chunk, int32 compressed_chunk_id, bool isnull)
{
	int32 compress_id;
	ScanKeyData scankey[1];

	ScanKeyInit(&scankey[0],
				Anum_chunk_idx_id,
				BTEqualStrategyNumber,
				F_INT4EQ,
				Int32GetDatum(chunk->fd.id));
	if (isnull)
		compress_id = INVALID_CHUNK_ID;
	else
		compress_id = compressed_chunk_id;
	/*
	 * The tuple-found callback updates the catalog row, so the scan must
	 * take RowExclusiveLock (as ts_hypertable_set_compressed_id does), not
	 * AccessShareLock, which is only appropriate for read-only scans.
	 */
	return chunk_scan_internal(CHUNK_ID_INDEX,
							   scankey,
							   1,
							   chunk_set_compressed_id_in_tuple,
							   &compress_id,
							   0,
							   ForwardScanDirection,
							   RowExclusiveLock,
							   CurrentMemoryContext) > 0;
}
/* Used as a tuple found function */
static ScanTupleResult
chunk_rename_schema_name(TupleInfo *ti, void *data)

View File

@ -16,6 +16,8 @@
#include "chunk_constraint.h"
#include "hypertable.h"
#define INVALID_CHUNK_ID 0
typedef struct Hypercube Hypercube;
typedef struct Point Point;
typedef struct Hyperspace Hyperspace;
@ -79,7 +81,7 @@ typedef struct ChunkScanEntry
} ChunkScanEntry;
extern Chunk *ts_chunk_create(Hypertable *ht, Point *p, const char *schema, const char *prefix);
extern Chunk *ts_chunk_create_stub(int32 id, int16 num_constraints);
extern TSDLLEXPORT Chunk *ts_chunk_create_stub(int32 id, int16 num_constraints);
extern Chunk *ts_chunk_find(Hyperspace *hs, Point *p);
extern Chunk **ts_chunk_find_all(Hyperspace *hs, List *dimension_vecs, LOCKMODE lockmode,
unsigned int *num_chunks);
@ -90,6 +92,9 @@ extern Chunk *ts_chunk_get_by_name_with_memory_context(const char *schema_name,
const char *table_name,
int16 num_constraints, MemoryContext mctx,
bool fail_if_not_found);
extern TSDLLEXPORT void ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock);
extern TSDLLEXPORT Oid ts_chunk_create_table(Chunk *chunk, Hypertable *ht);
extern TSDLLEXPORT Chunk *ts_chunk_get_by_id(int32 id, int16 num_constraints,
bool fail_if_not_found);
extern TSDLLEXPORT Chunk *ts_chunk_get_by_relid(Oid relid, int16 num_constraints,
@ -104,6 +109,8 @@ extern bool ts_chunk_set_name(Chunk *chunk, const char *newname);
extern bool ts_chunk_set_schema(Chunk *chunk, const char *newschema);
extern List *ts_chunk_get_window(int32 dimension_id, int64 point, int count, MemoryContext mctx);
extern void ts_chunks_rename_schema_name(char *old_schema, char *new_schema);
extern TSDLLEXPORT bool ts_chunk_set_compressed_chunk(Chunk *chunk, int32 compressed_chunk_id,
bool isnull);
extern TSDLLEXPORT List *ts_chunk_do_drop_chunks(Oid table_relid, Datum older_than_datum,
Datum newer_than_datum, Oid older_than_type,
Oid newer_than_type, bool cascade,

View File

@ -603,8 +603,8 @@ ts_calculate_chunk_interval(PG_FUNCTION_ARGS)
*
* Parameter 'info' will be updated with the function's information
*/
static void
chunk_sizing_func_validate(regproc func, ChunkSizingInfo *info)
void
ts_chunk_sizing_func_validate(regproc func, ChunkSizingInfo *info)
{
HeapTuple tuple;
Form_pg_proc form;
@ -699,7 +699,7 @@ ts_chunk_adaptive_sizing_info_validate(ChunkSizingInfo *info)
(errcode(ERRCODE_UNDEFINED_COLUMN),
errmsg("column \"%s\" does not exist", info->colname)));
chunk_sizing_func_validate(info->func, info);
ts_chunk_sizing_func_validate(info->func, info);
if (NULL == info->target_size)
info->target_size_bytes = 0;
@ -790,7 +790,7 @@ ts_chunk_adaptive_set(PG_FUNCTION_ARGS)
}
else if (OidIsValid(ht->chunk_sizing_func))
{
chunk_sizing_func_validate(ht->chunk_sizing_func, &info);
ts_chunk_sizing_func_validate(ht->chunk_sizing_func, &info);
values[0] = ObjectIdGetDatum(ht->chunk_sizing_func);
}
else

View File

@ -24,7 +24,7 @@ typedef struct ChunkSizingInfo
} ChunkSizingInfo;
extern void ts_chunk_adaptive_sizing_info_validate(ChunkSizingInfo *info);
extern void ts_chunk_sizing_func_validate(regproc func, ChunkSizingInfo *info);
extern TSDLLEXPORT ChunkSizingInfo *ts_chunk_sizing_info_get_default_disabled(Oid table_relid);
#endif /* TIMESCALEDB_CHUNK_ADAPTIVE_H */

View File

@ -35,8 +35,8 @@ extern void ts_chunk_index_create_from_adjusted_index_info(int32 hypertable_id,
Relation hypertable_idxrel,
int32 chunk_id, Relation chunkrel,
IndexInfo *indexinfo);
extern void ts_chunk_index_create_all(int32 hypertable_id, Oid hypertable_relid, int32 chunk_id,
Oid chunkrelid);
extern TSDLLEXPORT void ts_chunk_index_create_all(int32 hypertable_id, Oid hypertable_relid,
int32 chunk_id, Oid chunkrelid);
extern Oid ts_chunk_index_create_from_stmt(IndexStmt *stmt, int32 chunk_id, Oid chunkrelid,
int32 hypertable_id, Oid hypertable_indexrelid);
extern int ts_chunk_index_delete(Chunk *chunk, Oid chunk_indexrelid, bool drop_index);

45
src/compress_hypertable.c Normal file
View File

@ -0,0 +1,45 @@
/*
* This file and its contents are licensed under the Apache License 2.0.
* Please see the included NOTICE for copyright information and
* LICENSE-APACHE for a copy of the license.
*/
#include <postgres.h>
#include <fmgr.h>
#include <access/htup_details.h>
#include <catalog/dependency.h>
#include <catalog/namespace.h>
#include <catalog/pg_type.h>
#include <catalog/pg_trigger.h>
#include <commands/trigger.h>
#include <storage/lmgr.h>
#include <utils/builtins.h>
#include <utils/lsyscache.h>
#include "compat.h"
#include "compress_hypertable.h"
/*
 * WITH-clause options recognized when enabling compression through
 * ALTER TABLE ... SET: "compress" (boolean), "segment_by" (text array) and
 * "order_by" (text).  The array is indexed by CompressHypertableOption so
 * the parse results can be looked up by enum value.
 */
static const WithClauseDefinition compress_hypertable_with_clause_def[] = {
[CompressEnabled] = {
.arg_name = "compress",
.type_id = BOOLOID,
/* compression stays off unless explicitly requested */
.default_val = BoolGetDatum(false),
},
[CompressSegmentBy] = {
.arg_name = "segment_by",
.type_id = TEXTARRAYOID,
},
[CompressOrderBy] = {
.arg_name = "order_by",
.type_id = TEXTOID,
},
};
WithClauseResult *
ts_compress_hypertable_set_clause_parse(const List *defelems)
{
return ts_with_clauses_parse(defelems,
compress_hypertable_with_clause_def,
TS_ARRAY_LEN(compress_hypertable_with_clause_def));
}

25
src/compress_hypertable.h Normal file
View File

@ -0,0 +1,25 @@
/*
* This file and its contents are licensed under the Apache License 2.0.
* Please see the included NOTICE for copyright information and
* LICENSE-APACHE for a copy of the license.
*/
#ifndef TIMESCALEDB_COMPRESS_HYPERTABLE_H
#define TIMESCALEDB_COMPRESS_HYPERTABLE_H
#include <postgres.h>
#include <catalog/pg_type.h>
#include <catalog.h>
#include <chunk.h>
#include "with_clause_parser.h"
typedef enum CompressHypertableOption
{
CompressEnabled = 0,
CompressSegmentBy,
CompressOrderBy,
} CompressHypertableOption;
WithClauseResult *ts_compress_hypertable_set_clause_parse(const List *defelems);
#endif

View File

@ -268,6 +268,14 @@ cagg_materialize_default_fn(int32 materialization_id, bool verbose)
pg_unreachable();
}
/*
 * Default implementation of the process_compress_table cross-module hook.
 * Raises the standard "no default function" error for enterprise-licensed
 * features and never returns (hence pg_unreachable()).
 */
static bool
process_compress_table_default(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options)
{
error_no_default_fn_enterprise();
pg_unreachable();
}
static Datum
error_no_default_fn_pg_community(PG_FUNCTION_ARGS)
{
@ -368,6 +376,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.dictionary_compressor_finish = error_no_default_fn_pg_community,
.array_compressor_append = error_no_default_fn_pg_community,
.array_compressor_finish = error_no_default_fn_pg_community,
.process_compress_table = process_compress_table_default,
};
TSDLLEXPORT CrossModuleFunctions *ts_cm_functions = &ts_cm_functions_default;

View File

@ -86,6 +86,8 @@ typedef struct CrossModuleFunctions
PGFunction dictionary_compressor_finish;
PGFunction array_compressor_append;
PGFunction array_compressor_finish;
bool (*process_compress_table)(AlterTableCmd *cmd, Hypertable *ht,
WithClauseResult *with_clause_options);
} CrossModuleFunctions;
extern TSDLLEXPORT CrossModuleFunctions *ts_cm_functions;

View File

@ -118,11 +118,28 @@ ts_hypertable_permissions_check_by_id(int32 hypertable_id)
ts_hypertable_permissions_check(table_relid, GetUserId());
}
/*
 * Populate h->fd from a hypertable catalog tuple.
 *
 * The fixed-width leading columns are copied wholesale via GETSTRUCT();
 * compressed_hypertable_id is nullable and therefore fetched explicitly,
 * mapping SQL NULL to the INVALID_HYPERTABLE_ID sentinel.
 */
static void
hypertable_fill(Hypertable *h, HeapTuple tuple, TupleDesc desc)
{
bool isnull;
Datum compress_id;
memcpy((void *) &h->fd, GETSTRUCT(tuple), sizeof(FormData_hypertable));
/* this is valid because NULLS are only at the end of the struct */
/* compressed_hypertable_id can be null, so retrieve it */
compress_id = heap_getattr(tuple, Anum_hypertable_compressed_hypertable_id, desc, &isnull);
if (isnull)
h->fd.compressed_hypertable_id = INVALID_HYPERTABLE_ID;
else
h->fd.compressed_hypertable_id = DatumGetInt32(compress_id);
}
static Hypertable *
hypertable_from_tuple(HeapTuple tuple, MemoryContext mctx, TupleDesc desc)
{
Oid namespace_oid;
Hypertable *h = STRUCT_FROM_TUPLE(tuple, mctx, Hypertable, FormData_hypertable);
Hypertable *h = MemoryContextAllocZero(mctx, sizeof(Hypertable));
hypertable_fill(h, tuple, desc);
namespace_oid = get_namespace_oid(NameStr(h->fd.schema_name), false);
h->main_table_relid = get_relname_relid(NameStr(h->fd.table_name), namespace_oid);
@ -362,8 +379,17 @@ hypertable_tuple_update(TupleInfo *ti, void *data)
}
else
{
nulls[AttrNumberGetAttrOffset(Anum_hypertable_chunk_sizing_func_schema)] = true;
nulls[AttrNumberGetAttrOffset(Anum_hypertable_chunk_sizing_func_name)] = true;
elog(ERROR, "hypertable_tuple_update chunk_sizing_function cannot be NULL");
}
values[AttrNumberGetAttrOffset(Anum_hypertable_compressed)] = BoolGetDatum(ht->fd.compressed);
if (ht->fd.compressed_hypertable_id == INVALID_HYPERTABLE_ID)
{
nulls[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] = true;
}
else
{
values[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] =
Int32GetDatum(ht->fd.compressed_hypertable_id);
}
copy = heap_form_tuple(ti->desc, values, nulls);
@ -724,7 +750,7 @@ static void
hypertable_insert_relation(Relation rel, int32 hypertable_id, Name schema_name, Name table_name,
Name associated_schema_name, Name associated_table_prefix,
Name chunk_sizing_func_schema, Name chunk_sizing_func_name,
int64 chunk_target_size, int16 num_dimensions)
int64 chunk_target_size, int16 num_dimensions, bool compressed)
{
TupleDesc desc = RelationGetDescr(rel);
Datum values[Natts_hypertable];
@ -771,6 +797,9 @@ hypertable_insert_relation(Relation rel, int32 hypertable_id, Name schema_name,
values[AttrNumberGetAttrOffset(Anum_hypertable_associated_table_prefix)] =
NameGetDatum(&default_associated_table_prefix);
}
values[AttrNumberGetAttrOffset(Anum_hypertable_compressed)] = BoolGetDatum(compressed);
/* associated compressed hypertable id is always NULL when we create a new hypertable*/
nulls[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] = true;
ts_catalog_insert_values(rel, desc, values, nulls);
ts_catalog_restore_user(&sec_ctx);
@ -780,7 +809,7 @@ static void
hypertable_insert(int32 hypertable_id, Name schema_name, Name table_name,
Name associated_schema_name, Name associated_table_prefix,
Name chunk_sizing_func_schema, Name chunk_sizing_func_name,
int64 chunk_target_size, int16 num_dimensions)
int64 chunk_target_size, int16 num_dimensions, bool compressed)
{
Catalog *catalog = ts_catalog_get();
Relation rel;
@ -795,7 +824,8 @@ hypertable_insert(int32 hypertable_id, Name schema_name, Name table_name,
chunk_sizing_func_schema,
chunk_sizing_func_name,
chunk_target_size,
num_dimensions);
num_dimensions,
compressed);
heap_close(rel, RowExclusiveLock);
}
@ -1719,7 +1749,8 @@ ts_hypertable_create_from_info(Oid table_relid, int32 hypertable_id, uint32 flag
&chunk_sizing_info->func_schema,
&chunk_sizing_info->func_name,
chunk_sizing_info->target_size_bytes,
DIMENSION_INFO_IS_SET(space_dim_info) ? 2 : 1);
DIMENSION_INFO_IS_SET(space_dim_info) ? 2 : 1,
false);
/* Get the a Hypertable object via the cache */
hcache = ts_hypertable_cache_pin();
@ -1980,3 +2011,128 @@ ts_hypertable_set_integer_now_func(PG_FUNCTION_ARGS)
ts_cache_release(hcache);
PG_RETURN_NULL();
}
/*
 * Scanner tuple-found callback: store a compressed hypertable's id in the
 * compressed_hypertable_id column of the current hypertable catalog tuple.
 *
 * 'data' points to an int32 with the id of the compressed hypertable.
 * Returns SCAN_DONE so the (unique-key) scan stops after the first match.
 */
static ScanTupleResult
hypertable_set_compressed_id_in_tuple(TupleInfo *ti, void *data)
{
bool nulls[Natts_hypertable];
Datum values[Natts_hypertable];
/* only the compressed_hypertable_id column is marked for replacement */
bool repl[Natts_hypertable] = { false };
CatalogSecurityContext sec_ctx;
HeapTuple tuple;
int32 compressed_hypertable_id = *((int32 *) data);
heap_deform_tuple(ti->tuple, ti->desc, values, nulls);
/* only a regular (non-compressed) hypertable may point at a compressed one */
Assert(DatumGetBool(values[AttrNumberGetAttrOffset(Anum_hypertable_compressed)]) == false);
nulls[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] = false;
values[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] =
Int32GetDatum(compressed_hypertable_id);
repl[AttrNumberGetAttrOffset(Anum_hypertable_compressed_hypertable_id)] = true;
/* catalog writes must run as the extension/catalog owner */
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
tuple = heap_modify_tuple(ti->tuple, ti->desc, values, nulls, repl);
ts_catalog_update(ti->scanrel, tuple);
heap_freetuple(tuple);
ts_catalog_restore_user(&sec_ctx);
return SCAN_DONE;
}
/*
 * Record 'compressed_hypertable_id' as the compressed counterpart of 'ht'
 * in the hypertable catalog.  Returns true if the hypertable row was found
 * and updated.
 *
 * Assumes permissions are already checked.
 */
bool
ts_hypertable_set_compressed_id(Hypertable *ht, int32 compressed_hypertable_id)
{
	ScanKeyData key[1];

	/* look up the hypertable row by its primary key */
	ScanKeyInit(&key[0],
				Anum_hypertable_pkey_idx_id,
				BTEqualStrategyNumber,
				F_INT4EQ,
				Int32GetDatum(ht->fd.id));
	return hypertable_scan_limit_internal(key,
										  1,
										  HYPERTABLE_ID_INDEX,
										  hypertable_set_compressed_id_in_tuple,
										  &compressed_hypertable_id,
										  1,
										  RowExclusiveLock,
										  false,
										  CurrentMemoryContext) > 0;
}
/* create a compressed hypertable
 * table_relid - already created table which we are going to
 * set up as a compressed hypertable
 * hypertable_id - id to be used while creating hypertable with
 * compression property set
 * NOTE:
 * compressed hypertable has no dimensions of its own; it shares the
 * dimensions of the hypertable it compresses.
 */
bool
ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id)
{
	Oid user_oid = GetUserId();
	Oid tspc_oid = get_rel_tablespace(table_relid);
	NameData schema_name, table_name, associated_schema_name;
	ChunkSizingInfo *chunk_sizing_info;
	Relation rel;

	rel = heap_open(table_relid, AccessExclusiveLock);
	/*
	 * Check that the user has permissions to make this table into a
	 * compressed hypertable
	 */
	ts_hypertable_permissions_check(table_relid, user_oid);
	/*
	 * ereport(ERROR, ...) does not return, so no cleanup call is needed (or
	 * reachable) after it; transaction abort releases the relation lock.
	 */
	if (ts_is_hypertable(table_relid))
		ereport(ERROR,
				(errcode(ERRCODE_TS_HYPERTABLE_EXISTS),
				 errmsg("table \"%s\" is already a hypertable", get_rel_name(table_relid))));

	/* Checks pass, now we can create the catalog information */
	namestrcpy(&schema_name, get_namespace_name(get_rel_namespace(table_relid)));
	namestrcpy(&table_name, get_rel_name(table_relid));
	namestrcpy(&associated_schema_name, INTERNAL_SCHEMA_NAME);
	/* we don't use the chunk sizing info for managing the compressed table,
	 * but we need it to satisfy hypertable constraints
	 */
	chunk_sizing_info = ts_chunk_sizing_info_get_default_disabled(table_relid);
	ts_chunk_sizing_func_validate(chunk_sizing_info->func, chunk_sizing_info);
	/* compressed hypertable has no dimensions of its own, shares the original
	 * hypertable dims
	 */
	hypertable_insert(hypertable_id,
					  &schema_name,
					  &table_name,
					  &associated_schema_name,
					  NULL,
					  &chunk_sizing_info->func_schema,
					  &chunk_sizing_info->func_name,
					  chunk_sizing_info->target_size_bytes,
					  0 /*num_dimensions*/,
					  true);
	/* No indexes are created for the compressed hypertable here */
	/* Attach tablespace, if any */
	if (OidIsValid(tspc_oid))
	{
		NameData tspc_name;

		namestrcpy(&tspc_name, get_tablespace_name(tspc_oid));
		ts_tablespace_attach_internal(&tspc_name, table_relid, false);
	}
	insert_blocker_trigger_add(table_relid);
	/* lock will be released after the transaction is done */
	heap_close(rel, NoLock);
	return true;
}

View File

@ -53,7 +53,7 @@ enum Anum_create_hypertable
extern int ts_number_of_hypertables(void);
extern Oid ts_rel_get_owner(Oid relid);
extern TSDLLEXPORT Oid ts_rel_get_owner(Oid relid);
extern List *ts_hypertable_get_all(void);
typedef enum HypertableCreateFlags
@ -69,7 +69,7 @@ extern TSDLLEXPORT bool ts_hypertable_create_from_info(Oid table_relid, int32 hy
Name associated_schema_name,
Name associated_table_prefix,
ChunkSizingInfo *chunk_sizing_info);
extern TSDLLEXPORT bool ts_hypertable_create_compressed(Oid table_relid, int32 hypertable_id);
extern TSDLLEXPORT Hypertable *ts_hypertable_get_by_id(int32 hypertable_id);
extern Hypertable *ts_hypertable_get_by_name(char *schema, char *name);
extern bool ts_hypertable_has_privs_of(Oid hypertable_oid, Oid userid);
@ -109,6 +109,8 @@ extern bool ts_hypertable_has_tuples(Oid table_relid, LOCKMODE lockmode);
extern void ts_hypertables_rename_schema_name(const char *old_name, const char *new_name);
extern List *ts_hypertable_get_all_by_name(Name schema_name, Name table_name, MemoryContext mctx);
extern bool ts_is_partitioning_column(Hypertable *ht, Index column_attno);
extern TSDLLEXPORT bool ts_hypertable_set_compressed_id(Hypertable *ht,
int32 compressed_hypertable_id);
#define hypertable_scan(schema, table, tuple_found, data, lockmode, tuplock) \
ts_hypertable_scan_with_memory_context(schema, \

View File

@ -62,6 +62,7 @@
#include "with_clause_parser.h"
#include "cross_module_fn.h"
#include "continuous_agg.h"
#include "compress_hypertable.h"
#include "partitioning.h"
#include "cross_module_fn.h"
@ -72,6 +73,7 @@ void _process_utility_fini(void);
static ProcessUtility_hook_type prev_ProcessUtility_hook;
static bool expect_chunk_modification = false;
static bool process_altertable_set_options(AlterTableCmd *cmd, Hypertable *ht);
/* Call the default ProcessUtility and handle PostgreSQL version differences */
static void
@ -2142,7 +2144,7 @@ process_altertable_end_index(Node *parsetree, CollectedCommand *cmd)
ts_cache_release(hcache);
}
static void
static bool
process_altertable_start_table(ProcessUtilityArgs *args)
{
AlterTableStmt *stmt = (AlterTableStmt *) args->parsetree;
@ -2150,9 +2152,11 @@ process_altertable_start_table(ProcessUtilityArgs *args)
Cache *hcache;
Hypertable *ht;
ListCell *lc;
bool handled = false;
int num_cmds;
if (!OidIsValid(relid))
return;
return false;
check_chunk_alter_table_operation_allowed(relid, stmt);
@ -2165,7 +2169,7 @@ process_altertable_start_table(ProcessUtilityArgs *args)
relation_not_only(stmt->relation);
process_add_hypertable(args, ht);
}
num_cmds = list_length(stmt->cmds);
foreach (lc, stmt->cmds)
{
AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lc);
@ -2240,14 +2244,32 @@ process_altertable_start_table(ProcessUtilityArgs *args)
errmsg("hypertables do not support native "
"postgres partitioning")));
}
break;
}
#endif
case AT_SetRelOptions:
{
if (num_cmds != 1)
{
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("ALTER TABLE <hypertable> SET does not support multiple "
"clauses")));
}
if (ht != NULL)
{
handled = process_altertable_set_options(cmd, ht);
}
break;
}
default:
break;
}
}
ts_cache_release(hcache);
return handled;
}
static void
@ -2343,8 +2365,7 @@ process_altertable_start(ProcessUtilityArgs *args)
switch (stmt->relkind)
{
case OBJECT_TABLE:
process_altertable_start_table(args);
return false;
return process_altertable_start_table(args);
case OBJECT_VIEW:
return process_altertable_start_view(args);
default:
@ -2652,10 +2673,40 @@ process_create_rule_start(ProcessUtilityArgs *args)
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("hypertables do not support rules")));
}
/*
 * Handle ALTER TABLE <name> SET ( timescaledb.compress, ... ).
 *
 * Returns true when the statement enables compression and was dispatched to
 * the cross-module handler; false when it carries no timescaledb.compress
 * request and should fall through to plain PostgreSQL processing.  Mixing
 * timescaledb compression options with other SET options is an error.
 */
static bool
process_altertable_set_options(AlterTableCmd *cmd, Hypertable *ht)
{
	List *ts_options = NIL;
	List *postgres_options = NIL;
	WithClauseResult *parsed = NULL;

	/* is this a compress table stmt */
	Assert(IsA(cmd->def, List));
	ts_with_clause_filter((List *) cmd->def, &ts_options, &postgres_options);
	if (ts_options == NIL)
		return false;
	parsed = ts_compress_hypertable_set_clause_parse(ts_options);
	if (!DatumGetBool(parsed[CompressEnabled].parsed))
		return false;
	if (postgres_options != NIL)
		ereport(ERROR,
				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
				 errmsg("only timescaledb.compress parameters allowed when specifying compression "
						"parameters for hypertable")));
	ts_cm_functions->process_compress_table(cmd, ht, parsed);
	return true;
}
static bool
process_viewstmt(ProcessUtilityArgs *args)
{
WithClauseResult *parse_results;
WithClauseResult *parse_results = NULL;
bool is_cagg = false;
Node *parsetree = args->parsetree;
ViewStmt *stmt = (ViewStmt *) parsetree;

View File

@ -28,7 +28,8 @@ extern Tablespace *ts_tablespaces_add(Tablespaces *tablespaces, FormData_tablesp
Oid tspc_oid);
extern bool ts_tablespaces_contain(Tablespaces *tspcs, Oid tspc_oid);
extern Tablespaces *ts_tablespace_scan(int32 hypertable_id);
extern void ts_tablespace_attach_internal(Name tspcname, Oid hypertable_oid, bool if_not_attached);
extern TSDLLEXPORT void ts_tablespace_attach_internal(Name tspcname, Oid hypertable_oid,
bool if_not_attached);
extern int ts_tablespace_delete(int32 hypertable_id, const char *tspcname);
extern int ts_tablespace_count_attached(const char *tspcname);
extern void ts_tablespace_validate_revoke(GrantStmt *stmt);

View File

@ -17,7 +17,7 @@
extern void ts_trigger_create_on_chunk(Oid trigger_oid, char *chunk_schema_name,
char *chunk_table_name);
extern void ts_trigger_create_all_on_chunk(Hypertable *ht, Chunk *chunk);
extern TSDLLEXPORT void ts_trigger_create_all_on_chunk(Hypertable *ht, Chunk *chunk);
extern bool ts_relation_has_transition_table_trigger(Oid relid);
#endif /* TIMESCALEDB_TRIGGER_H */

View File

@ -185,25 +185,25 @@ SELECT relname, reloptions FROM pg_class WHERE relname IN ('_hyper_2_3_chunk','_
-- Need superuser to ALTER chunks in _timescaledb_internal schema
\c :TEST_DBNAME :ROLE_SUPERUSER
SELECT * FROM _timescaledb_catalog.chunk WHERE id = 2;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk |
(1 row)
-- Rename chunk
ALTER TABLE _timescaledb_internal._hyper_2_2_chunk RENAME TO new_chunk_name;
SELECT * FROM _timescaledb_catalog.chunk WHERE id = 2;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+----------------
2 | 2 | _timescaledb_internal | new_chunk_name
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+----------------+---------------------
2 | 2 | _timescaledb_internal | new_chunk_name |
(1 row)
-- Set schema
ALTER TABLE _timescaledb_internal.new_chunk_name SET SCHEMA public;
SELECT * FROM _timescaledb_catalog.chunk WHERE id = 2;
id | hypertable_id | schema_name | table_name
----+---------------+-------------+----------------
2 | 2 | public | new_chunk_name
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-------------+----------------+---------------------
2 | 2 | public | new_chunk_name |
(1 row)
-- Test that we cannot rename chunk columns
@ -620,16 +620,16 @@ ALTER SCHEMA my_associated_schema RENAME TO new_associated_schema;
INSERT INTO my_table (date, quantity) VALUES ('2018-08-10T23:00:00+00:00', 20);
-- Make sure the schema name is changed in both catalog tables
SELECT * from _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
11 | public | my_table | new_associated_schema | _hyper_11 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
11 | public | my_table | new_associated_schema | _hyper_11 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * from _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+--------------------
22 | 11 | new_associated_schema | _hyper_11_22_chunk
23 | 11 | new_associated_schema | _hyper_11_23_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+--------------------+---------------------
22 | 11 | new_associated_schema | _hyper_11_22_chunk |
23 | 11 | new_associated_schema | _hyper_11_23_chunk |
(2 rows)
DROP TABLE my_table;

View File

@ -134,12 +134,12 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(4 rows)
CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c");

View File

@ -134,12 +134,12 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(4 rows)
CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c");

View File

@ -134,12 +134,12 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+--------------+---------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | one_Partition | one_Partition | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | public | 1dim | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
3 | public | Hypertable_1 | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
4 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(4 rows)
CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c");

View File

@ -86,9 +86,9 @@ select add_dimension('test_schema.test_table', 'location', 4);
(1 row)
select * from _timescaledb_catalog.hypertable where table_name = 'test_table';
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
2 | test_schema | test_table | chunk_schema | _hyper_2 | 3 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
2 | test_schema | test_table | chunk_schema | _hyper_2 | 3 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
select * from _timescaledb_catalog.dimension;
@ -149,9 +149,9 @@ NOTICE: adding not-null constraint to column "id"
(1 row)
select * from _timescaledb_catalog.hypertable where table_name = 'test_table';
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
2 | test_schema | test_table | chunk_schema | _hyper_2 | 4 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
2 | test_schema | test_table | chunk_schema | _hyper_2 | 4 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
select * from _timescaledb_catalog.dimension;
@ -391,19 +391,19 @@ NOTICE: migrating data to chunks
--there should be two new chunks
select * from _timescaledb_catalog.hypertable where table_name = 'test_migrate';
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
8 | test_schema | test_migrate | _timescaledb_internal | _hyper_8 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
8 | test_schema | test_migrate | _timescaledb_internal | _hyper_8 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
select * from _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk
2 | 2 | chunk_schema | _hyper_2_2_chunk
3 | 5 | _timescaledb_internal | _hyper_5_3_chunk
4 | 8 | _timescaledb_internal | _hyper_8_4_chunk
5 | 8 | _timescaledb_internal | _hyper_8_5_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk |
2 | 2 | chunk_schema | _hyper_2_2_chunk |
3 | 5 | _timescaledb_internal | _hyper_5_3_chunk |
4 | 8 | _timescaledb_internal | _hyper_8_4_chunk |
5 | 8 | _timescaledb_internal | _hyper_8_5_chunk |
(5 rows)
select * from test_schema.test_migrate;

View File

@ -43,10 +43,10 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(2 rows)
CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c");

View File

@ -43,10 +43,10 @@ SELECT * FROM create_hypertable('"customSchema"."Hypertable_1"', 'time', NULL, 1
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+--------------+--------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | Hypertable_1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | customSchema | Hypertable_1 | _timescaledb_internal | _hyper_2 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(2 rows)
CREATE INDEX ON PUBLIC."Hypertable_1" (time, "temp_c");

View File

@ -10,9 +10,9 @@ NOTICE: adding not-null constraint to column "time"
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
INSERT INTO drop_test VALUES('Mon Mar 20 09:17:00.936242 2017', 23.4, 'dev1');
@ -55,9 +55,9 @@ SELECT create_hypertable('drop_test', 'time', 'device', 2);
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | drop_test | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
INSERT INTO drop_test VALUES('Mon Mar 20 09:18:19.100462 2017', 22.1, 'dev1');

View File

@ -2,8 +2,8 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
SELECT * from _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+------------+--------------------------
(0 rows)
SELECT * from _timescaledb_catalog.dimension;
@ -73,9 +73,9 @@ NOTICE: table "should_drop" is already a hypertable, skipping
(1 row)
SELECT * from _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | should_drop | _timescaledb_internal | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | should_drop | _timescaledb_internal | _hyper_1 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * from _timescaledb_catalog.dimension;
@ -95,9 +95,9 @@ NOTICE: adding not-null constraint to column "time"
INSERT INTO should_drop VALUES (now(), 1.0);
SELECT * from _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
4 | public | should_drop | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+-------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
4 | public | should_drop | _timescaledb_internal | _hyper_4 | 1 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * from _timescaledb_catalog.dimension;

View File

@ -25,42 +25,42 @@ NOTICE: adding not-null constraint to column "time"
INSERT INTO hypertable_schema.superuser VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------------+-------------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | hypertable_schema | default_perm_user | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(2 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk |
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk |
(2 rows)
DROP OWNED BY :ROLE_DEFAULT_PERM_USER;
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
2 | hypertable_schema | superuser | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
2 | 2 | _timescaledb_internal | _hyper_2_2_chunk |
(1 row)
DROP TABLE hypertable_schema.superuser;
--everything should be cleaned up
SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+------------+--------------------------
(0 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-------------+------------
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-------------+------------+---------------------
(0 rows)
SELECT * FROM _timescaledb_catalog.dimension;

View File

@ -146,9 +146,9 @@ SELECT * FROM "newname";
(12 rows)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
\c :TEST_DBNAME :ROLE_SUPERUSER
@ -173,15 +173,15 @@ SELECT * FROM "newschema"."newname";
(12 rows)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | newschema | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | newschema | newname | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
DROP TABLE "newschema"."newname";
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+------------+--------------------------
(0 rows)
\dt "public".*

View File

@ -30,17 +30,17 @@ NOTICE: adding not-null constraint to column "time"
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
INSERT INTO hypertable_schema.test2 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | hypertable_schema | test1 | chunk_schema1 | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | hypertable_schema | test1 | chunk_schema1 | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(2 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+---------------+------------------
1 | 1 | chunk_schema1 | _hyper_1_1_chunk
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+---------------+------------------+---------------------
1 | 1 | chunk_schema1 | _hyper_1_1_chunk |
2 | 2 | chunk_schema2 | _hyper_2_2_chunk |
(2 rows)
RESET ROLE;
@ -53,25 +53,25 @@ SET ROLE :ROLE_DEFAULT_PERM_USER;
--show that the metadata for the table using the dropped schema is
--changed. The other table is not affected.
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | hypertable_schema | test1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | hypertable_schema | test1 | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(2 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+---------------+------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+---------------+------------------+---------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk |
(1 row)
--new chunk should be created in the internal associated schema
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk |
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk |
(2 rows)
RESET ROLE;
@ -86,13 +86,13 @@ NOTICE: drop cascades to 4 other objects
SET ROLE :ROLE_DEFAULT_PERM_USER;
--everything should be cleaned up
SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+------------------------+-------------------+------------+--------------------------
(0 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-------------+------------
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-------------+------------+---------------------
(0 rows)
SELECT * FROM _timescaledb_catalog.dimension;

View File

@ -67,9 +67,9 @@ List of tables
\echo 'List of hypertables'
List of hypertables
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
\echo 'List of chunk indexes'

View File

@ -157,12 +157,12 @@ SELECT * FROM test.show_indexesp('_timescaledb_internal._hyper%');
(28 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk |
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk |
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk |
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk |
(4 rows)
SELECT * FROM "two_Partitions" ORDER BY "timeCustom", device_id, series_0, series_1;

View File

@ -242,22 +242,22 @@ SELECT * FROM "1dim_neg";
(7 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+-------------------
1 | 1 | one_Partition | _hyper_1_1_chunk
2 | 1 | one_Partition | _hyper_1_2_chunk
3 | 1 | one_Partition | _hyper_1_3_chunk
4 | 2 | _timescaledb_internal | _hyper_2_4_chunk
5 | 3 | _timescaledb_internal | _hyper_3_5_chunk
6 | 3 | _timescaledb_internal | _hyper_3_6_chunk
7 | 3 | _timescaledb_internal | _hyper_3_7_chunk
8 | 3 | _timescaledb_internal | _hyper_3_8_chunk
10 | 5 | _timescaledb_internal | _hyper_5_10_chunk
11 | 6 | _timescaledb_internal | _hyper_6_11_chunk
12 | 6 | _timescaledb_internal | _hyper_6_12_chunk
13 | 6 | _timescaledb_internal | _hyper_6_13_chunk
14 | 6 | _timescaledb_internal | _hyper_6_14_chunk
15 | 6 | _timescaledb_internal | _hyper_6_15_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+-------------------+---------------------
1 | 1 | one_Partition | _hyper_1_1_chunk |
2 | 1 | one_Partition | _hyper_1_2_chunk |
3 | 1 | one_Partition | _hyper_1_3_chunk |
4 | 2 | _timescaledb_internal | _hyper_2_4_chunk |
5 | 3 | _timescaledb_internal | _hyper_3_5_chunk |
6 | 3 | _timescaledb_internal | _hyper_3_6_chunk |
7 | 3 | _timescaledb_internal | _hyper_3_7_chunk |
8 | 3 | _timescaledb_internal | _hyper_3_8_chunk |
10 | 5 | _timescaledb_internal | _hyper_5_10_chunk |
11 | 6 | _timescaledb_internal | _hyper_6_11_chunk |
12 | 6 | _timescaledb_internal | _hyper_6_12_chunk |
13 | 6 | _timescaledb_internal | _hyper_6_13_chunk |
14 | 6 | _timescaledb_internal | _hyper_6_14_chunk |
15 | 6 | _timescaledb_internal | _hyper_6_15_chunk |
(14 rows)
SELECT * FROM _timescaledb_catalog.dimension_slice;

View File

@ -80,6 +80,14 @@ SELECT _timescaledb_internal.test_install_timestamp() = :'timestamp_1' as timest
-- Now make sure that only the exported_uuid is exported on pg_dump
\c postgres :ROLE_SUPERUSER
\! ${PG_BINDIR}/pg_dump -h ${TEST_PGHOST} -U super_user -Fc "${TEST_DBNAME}" > dump/instmeta.sql
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: hypertable
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: chunk
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
\! ${PG_BINDIR}/dropdb -h ${TEST_PGHOST} -U super_user "${TEST_DBNAME}"
\! ${PG_BINDIR}/createdb -h ${TEST_PGHOST} -U super_user "${TEST_DBNAME}"
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='on';

View File

@ -72,9 +72,9 @@ WARNING: target chunk size for adaptive chunking is less than 10 MB
-- Chunk sizing func set
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------
1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+------------+--------------------------
1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576 | f |
(1 row)
SELECT proname, pronamespace, pronargs
@ -262,6 +262,14 @@ SELECT * FROM _timescaledb_catalog.chunk_constraint;
-- environmental variables that originally called this psql command. Sadly
-- vars passed to psql do not work in \! commands so we can't do it that way.
\! utils/pg_dump_aux_dump.sh dump/pg_dump.sql
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: hypertable
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: chunk
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
\c :TEST_DBNAME
SET client_min_messages = ERROR;
CREATE EXTENSION timescaledb CASCADE;
@ -478,9 +486,9 @@ SELECT * FROM _timescaledb_catalog.chunk_constraint;
--Chunk sizing function should have been restored
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------
1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+---------------------------------+-------------------+------------+--------------------------
1 | test_schema | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | public | custom_calculate_chunk_interval | 1048576 | f |
(1 row)
SELECT proname, pronamespace, pronargs

View File

@ -9,6 +9,14 @@ CREATE USER dump_unprivileged CREATEDB;
\c template1 dump_unprivileged
CREATE database dump_unprivileged;
\! utils/pg_dump_unprivileged.sh
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: hypertable
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: chunk
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
Database dumped successfully
\c template1 :ROLE_SUPERUSER
DROP EXTENSION timescaledb;

View File

@ -38,11 +38,11 @@ NOTICE: adding not-null constraint to column "time"
(1 row)
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | test_ts | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
2 | public | test_tz | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
3 | public | test_dt | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | test_ts | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
2 | public | test_tz | _timescaledb_internal | _hyper_2 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
3 | public | test_dt | _timescaledb_internal | _hyper_3 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(3 rows)
INSERT INTO test_ts VALUES('Mon Mar 20 09:17:00.936242 2017', 23.4, 'dev1');

View File

@ -35,18 +35,18 @@ INSERT INTO "two_Partitions"("timeCustom", device_id, series_0, series_1) VALUES
\set QUIET on
\o
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk |
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk |
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk |
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk |
(4 rows)
SELECT * FROM test.show_subtables('"two_Partitions"');
@ -78,14 +78,14 @@ SELECT * FROM "two_Partitions";
SET client_min_messages = WARNING;
TRUNCATE "two_Partitions";
SELECT * FROM _timescaledb_catalog.hypertable;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size | compressed | compressed_hypertable_id
----+-------------+----------------+------------------------+-------------------------+----------------+--------------------------+--------------------------+-------------------+------------+--------------------------
1 | public | two_Partitions | _timescaledb_internal | _hyper_1 | 2 | _timescaledb_internal | calculate_chunk_interval | 0 | f |
(1 row)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-------------+------------
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-------------+------------+---------------------
(0 rows)
-- should be empty
@ -105,11 +105,11 @@ INSERT INTO public."two_Partitions"("timeCustom", device_id, series_0, series_1)
(1257894000000000000, 'dev2', 1.5, 1),
(1257894002000000000, 'dev1', 2.5, 3);
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
5 | 1 | _timescaledb_internal | _hyper_1_5_chunk
6 | 1 | _timescaledb_internal | _hyper_1_6_chunk
7 | 1 | _timescaledb_internal | _hyper_1_7_chunk
id | hypertable_id | schema_name | table_name | compressed_chunk_id
----+---------------+-----------------------+------------------+---------------------
5 | 1 | _timescaledb_internal | _hyper_1_5_chunk |
6 | 1 | _timescaledb_internal | _hyper_1_6_chunk |
7 | 1 | _timescaledb_internal | _hyper_1_7_chunk |
(3 rows)
CREATE VIEW dependent_view AS SELECT * FROM _timescaledb_internal._hyper_1_5_chunk;

View File

@ -1,9 +1,9 @@
# Compression sources compiled into the TSL library (kept alphabetized).
set(SOURCES
    ${CMAKE_CURRENT_SOURCE_DIR}/array.c
    ${CMAKE_CURRENT_SOURCE_DIR}/compression.c
    ${CMAKE_CURRENT_SOURCE_DIR}/create.c
    ${CMAKE_CURRENT_SOURCE_DIR}/deltadelta.c
    ${CMAKE_CURRENT_SOURCE_DIR}/dictionary.c
    ${CMAKE_CURRENT_SOURCE_DIR}/gorilla.c
)
target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES})

View File

@ -0,0 +1,235 @@
/*
* This file and its contents are licensed under the Timescale License.
* Please see the included NOTICE for copyright information and
* LICENSE-TIMESCALE for a copy of the license.
*/
#include <postgres.h>
#include <miscadmin.h>
#include <access/heapam.h>
#include <access/reloptions.h>
#include <access/tupdesc.h>
#include <access/xact.h>
#include <catalog/pg_type.h>
#include <catalog/toasting.h>
#include <commands/tablecmds.h>
#include <commands/tablespace.h>
#include <nodes/makefuncs.h>
#include <utils/builtins.h>
#include <utils/rel.h>
#include "catalog.h"
#include "compat.h"
#include "create.h"
#include "chunk.h"
#include "chunk_index.h"
#include "trigger.h"
#include "scan_iterator.h"
#include "hypertable_cache.h"
/* entrypoint
* tsl_process_compress_table : is the entry point.
*/
/*
 * Format the internal compressed-hypertable relation name into buf
 * (which must be at least NAMEDATALEN bytes). prefix is a printf format
 * string taking the hypertable id as its single %d argument.
 *
 * snprintf returns the number of characters that *would* have been
 * written (excluding the NUL), so truncation occurred whenever the
 * return value is >= the buffer size; both encoding errors (< 0) and
 * truncation are rejected. No trailing semicolon after while (0) so the
 * macro behaves like a single statement at call sites.
 */
#define PRINT_COMPRESSION_TABLE_NAME(buf, prefix, hypertable_id)                                   \
	do                                                                                             \
	{                                                                                              \
		int ret = snprintf(buf, NAMEDATALEN, prefix, hypertable_id);                               \
		if (ret < 0 || ret >= NAMEDATALEN)                                                         \
		{                                                                                          \
			ereport(ERROR,                                                                         \
					(errcode(ERRCODE_INTERNAL_ERROR),                                              \
					 errmsg("bad compression hypertable internal name")));                         \
		}                                                                                          \
	} while (0)
static void test_compresschunk(Hypertable *ht, int32 compress_htid);
#define COMPRESSEDDATA_TYPE_NAME "_timescaledb_internal.compressed_data"
/* return ColumnDef list - dups columns of passed in relid
* new columns have BYTEA type
*/
/*
 * Build the column list for a compressed table by mirroring the columns
 * of srctbl_relid: one ColumnDef per live (non-dropped) column, each with
 * its type replaced by _timescaledb_internal.compressed_data.
 *
 * Returns a freshly allocated List of ColumnDef pointers. Errors out if
 * the compressed_data type has not been installed.
 */
static List *
get_compress_columndef_from_table(Oid srctbl_relid)
{
	Relation source_rel;
	TupleDesc desc;
	int i;
	List *column_defs = NIL;
	const Oid compresseddata_oid =
		DatumGetObjectId(DirectFunctionCall1(regtypein, CStringGetDatum(COMPRESSEDDATA_TYPE_NAME)));

	if (!OidIsValid(compresseddata_oid))
		ereport(ERROR,
				(errcode(ERRCODE_UNDEFINED_OBJECT),
				 errmsg("type \"%s\" does not exist", COMPRESSEDDATA_TYPE_NAME)));

	/* Walk the source table's tuple descriptor under a share lock. */
	source_rel = relation_open(srctbl_relid, AccessShareLock);
	desc = source_rel->rd_att;
	for (i = 0; i < desc->natts; i++)
	{
		Form_pg_attribute attr = TupleDescAttr(desc, i);

		if (attr->attisdropped)
			continue;

		column_defs = lappend(column_defs,
							  makeColumnDef(NameStr(attr->attname),
											compresseddata_oid,
											-1 /*typmod*/,
											0 /*collation*/));
	}
	relation_close(source_rel, AccessShareLock);
	return column_defs;
}
/*
 * Create the internal relation that backs a compressed hypertable.
 *
 * relid: the source hypertable's main table; its columns determine the
 *        compressed table's column list (all retyped to compressed_data).
 * owner: role that will own the new relation.
 *
 * Reserves a hypertable catalog id, creates the table as
 * _timescaledb_internal._compressed_hypertable_<id>, builds its TOAST
 * table, registers it in the catalog via ts_hypertable_create_compressed,
 * and returns the reserved id.
 *
 * NOTE: the statement order below matters — catalog-owner context must
 * bracket the id reservation and relation creation, and the TOAST table
 * is built only after CommandCounterIncrement makes the new relation
 * visible.
 */
static int32
create_compression_table(Oid relid, Oid owner)
{
	ObjectAddress tbladdress;
	char relnamebuf[NAMEDATALEN];
	CatalogSecurityContext sec_ctx;
	Datum toast_options;
	static char *validnsps[] = HEAP_RELOPT_NAMESPACES;
	Oid compress_relid;
	CreateStmt *create;
	RangeVar *compress_rel;
	List *collist;
	int32 compress_hypertable_id;

	/* Mirror the source table's columns, retyped to compressed_data. */
	collist = get_compress_columndef_from_table(relid);
	create = makeNode(CreateStmt);
	create->tableElts = collist;
	create->inhRelations = NIL;
	create->ofTypename = NULL;
	create->constraints = NIL;
	create->options = NULL;
	create->oncommit = ONCOMMIT_NOOP;
	create->tablespacename = NULL;
	create->if_not_exists = false;

	/* create the compression table */
	/* NewRelationCreateToastTable calls CommandCounterIncrement */
	ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
	/* Reserve the catalog id first: it is part of the relation's name. */
	compress_hypertable_id = ts_catalog_table_next_seq_id(ts_catalog_get(), HYPERTABLE);
	PRINT_COMPRESSION_TABLE_NAME(relnamebuf, "_compressed_hypertable_%d", compress_hypertable_id);
	compress_rel = makeRangeVar(pstrdup(INTERNAL_SCHEMA_NAME), pstrdup(relnamebuf), -1);

	create->relation = compress_rel;
	tbladdress = DefineRelationCompat(create, RELKIND_RELATION, owner, NULL, NULL);
	/* Make the new relation visible before creating its TOAST table. */
	CommandCounterIncrement();
	compress_relid = tbladdress.objectId;
	toast_options =
		transformRelOptions((Datum) 0, create->options, "toast", validnsps, true, false);
	(void) heap_reloptions(RELKIND_TOASTVALUE, toast_options, true);
	NewRelationCreateToastTable(compress_relid, toast_options);
	ts_catalog_restore_user(&sec_ctx);
	/* Register the new relation as a compressed hypertable in the catalog. */
	ts_hypertable_create_compressed(compress_relid, compress_hypertable_id);
	return compress_hypertable_id;
}
/* this function will change in the follow up PR. Please do not review */
/*
 * Create a chunk in the compressed hypertable corresponding to src_chunk.
 *
 * NOTE(review): compress_hypertable_id is currently unused — the chunk's
 * hypertable_id is taken from compress_ht->space instead; confirm the
 * parameter can be dropped or should be asserted equal.
 * NOTE(review): the new chunk shares src_chunk->cube rather than copying
 * it — verify the aliased hypercube's lifetime outlives this chunk's use.
 * Constraint creation is intentionally commented out pending follow-up.
 */
static Chunk *
create_compress_chunk(Hypertable *compress_ht, int32 compress_hypertable_id, Chunk *src_chunk)
{
	Hyperspace *hs = compress_ht->space;
	Catalog *catalog = ts_catalog_get();
	CatalogSecurityContext sec_ctx;
	// Hypercube *cube;
	Chunk *compress_chunk;

	/* Create a new chunk based on the hypercube */
	/* Reserve the chunk id under catalog-owner privileges. */
	ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
	compress_chunk =
		ts_chunk_create_stub(ts_catalog_table_next_seq_id(catalog, CHUNK), hs->num_dimensions);
	ts_catalog_restore_user(&sec_ctx);

	compress_chunk->fd.hypertable_id = hs->hypertable_id;
	compress_chunk->cube = src_chunk->cube;
	compress_chunk->hypertable_relid = compress_ht->main_table_relid;
	namestrcpy(&compress_chunk->fd.schema_name, INTERNAL_SCHEMA_NAME);
	snprintf(compress_chunk->fd.table_name.data,
			 NAMEDATALEN,
			 "compress_%s_%d_chunk",
			 NameStr(compress_ht->fd.associated_table_prefix),
			 compress_chunk->fd.id);
	compress_chunk->constraints = NULL;

	/* Insert chunk */
	ts_chunk_insert_lock(compress_chunk, RowExclusiveLock);

	/* Create the actual table relation for the chunk */
	compress_chunk->table_id = ts_chunk_create_table(compress_chunk, compress_ht);

	if (!OidIsValid(compress_chunk->table_id))
		elog(ERROR, "could not create chunk table");
	/* Create the chunk's constraints, triggers, and indexes */
	/* ts_chunk_constraints_create(compress_chunk->constraints,
								compress_chunk->table_id,
								compress_chunk->fd.id,
								compress_chunk->hypertable_relid,
								compress_chunk->fd.hypertable_id);
	*/
	ts_trigger_create_all_on_chunk(compress_ht, compress_chunk);

	ts_chunk_index_create_all(compress_chunk->fd.hypertable_id,
							  compress_chunk->hypertable_relid,
							  compress_chunk->fd.id,
							  compress_chunk->table_id);

	return compress_chunk;
}
/*
 * enables compression for the passed in table by
 * creating a compression hypertable with special properties
Note:
 caller should check security permissions
 *
 * NOTE(review): cmd and with_clause_options are currently unused here —
 * presumably they will drive compression settings in a follow-up; confirm.
 * Returns true on success (errors are raised via ereport/elog).
 */
bool
tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
						   WithClauseResult *with_clause_options)
{
	int32 compress_htid;
	Oid ownerid = ts_rel_get_owner(ht->main_table_relid);

	/* Create the backing compressed hypertable and link it to ht. */
	compress_htid = create_compression_table(ht->main_table_relid, ownerid);
	ts_hypertable_set_compressed_id(ht, compress_htid);
	// TODO remove this after we have compress_chunks function
	test_compresschunk(ht, compress_htid);
	return true;
}
/*
 * Collect the catalog ids of all chunks belonging to hypertable_id.
 *
 * Scans the chunk catalog table and filters by hypertable id; returns a
 * List of int chunk ids (NIL when the hypertable has no chunks).
 */
static List *
get_chunk_ids(int32 hypertable_id)
{
	List *result = NIL;
	ScanIterator it = ts_scan_iterator_create(CHUNK, AccessShareLock, CurrentMemoryContext);

	ts_scanner_foreach(&it)
	{
		FormData_chunk *chunk_form =
			(FormData_chunk *) GETSTRUCT(ts_scan_iterator_tuple(&it));

		if (chunk_form->hypertable_id == hypertable_id)
			result = lappend_int(result, chunk_form->id);
	}
	return result;
}
/*
 * Temporary driver: create a compressed chunk for every existing chunk of
 * ht and record the link in the catalog.
 * NOTE(review): to be removed once a compress_chunks SQL function exists
 * (see TODO at the call site in tsl_process_compress_table).
 */
static void
test_compresschunk(Hypertable *ht, int32 compress_htid)
{
	Cache *hcache = ts_hypertable_cache_pin();
	Hypertable *compress_ht = ts_hypertable_cache_get_entry_by_id(hcache, compress_htid);
	/* compress each chunk from the origin table */
	List *ht_chks = get_chunk_ids(ht->fd.id);
	ListCell *lc;

	foreach (lc, ht_chks)
	{
		int chkid = lfirst_int(lc);
		Chunk *src_chunk = ts_chunk_get_by_id(chkid, 0, true);
		/* Create the mirror chunk, then point the source chunk at it. */
		Chunk *compress_chunk = create_compress_chunk(compress_ht, compress_ht->fd.id, src_chunk);

		ts_chunk_set_compressed_chunk(src_chunk, compress_chunk->fd.id, false);
	}
	ts_cache_release(hcache);
}

View File

@ -0,0 +1,17 @@
/*
 * This file and its contents are licensed under the Timescale License.
 * Please see the included NOTICE for copyright information and
 * LICENSE-TIMESCALE for a copy of the license.
 */
#ifndef TIMESCALEDB_TSL_COMPRESSION_CREATE_H
#define TIMESCALEDB_TSL_COMPRESSION_CREATE_H

#include <postgres.h>
#include <nodes/parsenodes.h>

#include "with_clause_parser.h"
#include "hypertable.h"

/* Entry point for ALTER TABLE ... SET (timescaledb.compress): creates the
 * compressed hypertable backing ht. The caller is responsible for
 * permission checks. Returns true on success. */
bool tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
								WithClauseResult *with_clause_options);

#endif /* TIMESCALEDB_TSL_COMPRESSION_CREATE_H */

View File

@ -31,6 +31,8 @@
#include "continuous_aggs/materialize.h"
#include "continuous_aggs/options.h"
#include "process_utility.h"
#include "hypertable.h"
#include "compression/create.h"
#ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC;
@ -102,6 +104,7 @@ CrossModuleFunctions tsl_cm_functions = {
.dictionary_compressor_finish = tsl_dictionary_compressor_finish,
.array_compressor_append = tsl_array_compressor_append,
.array_compressor_finish = tsl_array_compressor_finish,
.process_compress_table = tsl_process_compress_table,
};
TS_FUNCTION_INFO_V1(ts_module_init);

View File

@ -0,0 +1,38 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
--TEST1 ---
--basic test with count
create table foo (a integer, b integer, c integer);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
NOTICE: adding not-null constraint to column "a"
table_name
------------
foo
(1 row)
insert into foo values( 3 , 16 , 20);
insert into foo values( 10 , 10 , 20);
insert into foo values( 20 , 11 , 20);
insert into foo values( 30 , 12 , 20);
alter table foo set (timescaledb.compress);
select id, schema_name, table_name, compressed, compressed_hypertable_id from
_timescaledb_catalog.hypertable order by id;
id | schema_name | table_name | compressed | compressed_hypertable_id
----+-----------------------+--------------------------+------------+--------------------------
1 | public | foo | f | 2
2 | _timescaledb_internal | _compressed_hypertable_2 | t |
(2 rows)
-- should error out --
\set ON_ERROR_STOP 0
ALTER TABLE foo ALTER b SET NOT NULL, set (timescaledb.compress);
ERROR: ALTER TABLE <hypertable> SET does not support multiple clauses
\set ON_ERROR_STOP 1
ALTER TABLE foo ALTER b SET NOT NULL;
select attname, attnotnull from pg_attribute where attrelid = (select oid from pg_class where relname like 'foo') and attname like 'b';
attname | attnotnull
---------+------------
b | t
(1 row)

View File

@ -145,6 +145,14 @@ SELECT count(*) FROM conditions_after;
--dump & restore
\c postgres :ROLE_SUPERUSER
\! utils/pg_dump_aux_dump.sh dump/pg_dump.sql
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: hypertable
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
pg_dump: NOTICE: there are circular foreign-key constraints on this table:
pg_dump: chunk
pg_dump: You might not be able to restore the dump without using --disable-triggers or temporarily dropping the constraints.
pg_dump: Consider using a full dump instead of a --data-only dump to avoid this problem.
--\! cp dump/pg_dump.sql /tmp/dump.sql
ALTER DATABASE :TEST_DBNAME SET timescaledb.restoring='on';
\! utils/pg_dump_aux_restore.sh dump/pg_dump.sql

View File

@ -18,6 +18,7 @@ set(TEST_FILES_DEBUG
continuous_aggs_bgw.sql
continuous_aggs_materialize.sql
continuous_aggs_multi.sql
compression.sql
ddl_hook.sql
tsl_tables.sql
)

View File

@ -0,0 +1,25 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
--TEST1 ---
--basic test with count
create table foo (a integer, b integer, c integer);
select table_name from create_hypertable('foo', 'a', chunk_time_interval=> 10);
insert into foo values( 3 , 16 , 20);
insert into foo values( 10 , 10 , 20);
insert into foo values( 20 , 11 , 20);
insert into foo values( 30 , 12 , 20);
alter table foo set (timescaledb.compress);
select id, schema_name, table_name, compressed, compressed_hypertable_id from
_timescaledb_catalog.hypertable order by id;
-- should error out --
\set ON_ERROR_STOP 0
ALTER TABLE foo ALTER b SET NOT NULL, set (timescaledb.compress);
\set ON_ERROR_STOP 1
ALTER TABLE foo ALTER b SET NOT NULL;
select attname, attnotnull from pg_attribute where attrelid = (select oid from pg_class where relname like 'foo') and attname like 'b';