Add ability to create chunk from existing table

The `create_chunk` API has been extended to allow creating a chunk
from an existing relational table. The table is turned into a chunk by
attaching it to the root hypertable via inheritance.

The purpose of this functionality is to allow copying a chunk to
another node. First, the chunk table and its data are copied. After
that, `create_chunk` can be executed to make the new table part of the
hypertable.
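
For example, on the node receiving the chunk, the copied table can be
turned into a chunk roughly as follows (a minimal sketch; the table
names and the slice value are illustrative, taken from the tests in
this commit):

    CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float);
    -- ... copy the chunk's rows into newchunk ...
    SELECT * FROM _timescaledb_internal.create_chunk(
        'chunkapi',
        '{"time": [1514419200000000, 1515024000000000]}',
        '_timescaledb_internal',
        '_hyper_1_1_chunk',
        'newchunk');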

Currently, the relational table used to create the chunk has to match
the hypertable in terms of constraints, triggers, etc. PostgreSQL
itself enforces the existence of same-named CHECK constraints, but no
enforcement currently exists for other objects, including triggers and
UNIQUE, PRIMARY KEY, or FOREIGN KEY constraints. Such enforcement can
be implemented in the future, if deemed necessary. Another option is
to automatically add all the required objects (triggers, constraints)
based on the hypertable equivalents. However, that might also lead to
duplicate objects if some of them already exist on the table prior to
creating the chunk.
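
For example, if the hypertable has a CHECK constraint named
chunkapi_temp_check, the table must carry a constraint with the same
name before it can become a chunk (as the tests below do):

    ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0);
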
Erik Nordström 2021-04-29 21:03:51 +02:00 committed by Dmitry Simonenko
parent 3651e6e102
commit b8ff780c50
8 changed files with 367 additions and 61 deletions


@@ -40,12 +40,18 @@ CREATE OR REPLACE FUNCTION _timescaledb_internal.show_chunk(chunk REGCLASS)
RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, schema_name NAME, table_name NAME, relkind "char", slices JSONB)
AS '@MODULE_PATHNAME@', 'ts_chunk_show' LANGUAGE C VOLATILE;
-- Create a chunk with the given dimensional constraints (slices) as given in the JSONB.
-- Create a chunk with the given dimensional constraints (slices) as
-- given in the JSONB. If chunk_table is a valid relation, it will be
-- attached to the hypertable and used as the data table for the new
-- chunk. Note that schema_name and table_name need not be the same as
-- the existing schema and name for chunk_table. The provided chunk
-- table will be renamed and/or moved as necessary.
CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk(
hypertable REGCLASS,
slices JSONB,
slices JSONB,
schema_name NAME = NULL,
table_name NAME = NULL)
table_name NAME = NULL,
chunk_table REGCLASS = NULL)
RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, schema_name NAME, table_name NAME, relkind "char", slices JSONB, created BOOLEAN)
AS '@MODULE_PATHNAME@', 'ts_chunk_create' LANGUAGE C VOLATILE;
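-- A sketch of calling the extended function with the new chunk_table
-- argument (relation and slice values below are illustrative):
--
--   SELECT * FROM _timescaledb_internal.create_chunk(
--       'chunkapi',
--       '{"time": [1514419200000000, 1515024000000000]}',
--       '_timescaledb_internal',
--       '_hyper_1_1_chunk',
--       'newchunk');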


@@ -3,3 +3,4 @@ GRANT USAGE ON SCHEMA timescaledb_experimental TO PUBLIC;
DROP FUNCTION IF EXISTS _timescaledb_internal.block_new_chunks;
DROP FUNCTION IF EXISTS _timescaledb_internal.allow_new_chunks;
DROP FUNCTION IF EXISTS _timescaledb_internal.refresh_continuous_aggregate;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk;


@@ -5,6 +5,7 @@ DROP FUNCTION IF EXISTS _timescaledb_internal.refresh_continuous_aggregate;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_table;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_replica_table;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_drop_replica;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk;
-- We need to rewrite all continuous aggregates to make sure that the
-- queries do not contain qualification. They will be re-written in


@@ -593,7 +593,7 @@ chunk_collision_resolve(const Hypertable *ht, Hypercube *cube, const Point *p)
}
static int
chunk_add_constraints(Chunk *chunk)
chunk_add_constraints(const Chunk *chunk)
{
int num_added;
@@ -1188,12 +1188,115 @@ chunk_create_from_hypercube_after_lock(const Hypertable *ht, Hypercube *cube,
chunk_add_constraints(chunk);
chunk_insert_into_metadata_after_lock(chunk);
chunk_create_table_constraints(chunk);
return chunk;
}
/*
* Make a chunk table inherit a hypertable.
*
* Execution happens via a high-level ALTER TABLE statement. This includes
* numerous checks to ensure that the chunk table has all the prerequisites to
* properly inherit the hypertable.
*/
static void
chunk_add_inheritance(Chunk *chunk, const Hypertable *ht)
{
AlterTableCmd altercmd = {
.type = T_AlterTableCmd,
.subtype = AT_AddInherit,
.def = (Node *) makeRangeVar((char *) NameStr(ht->fd.schema_name),
(char *) NameStr(ht->fd.table_name),
0),
.missing_ok = false,
};
AlterTableStmt alterstmt = {
.type = T_AlterTableStmt,
.cmds = list_make1(&altercmd),
.missing_ok = false,
#if PG14_GE
.objtype = OBJECT_TABLE,
#else
.relkind = OBJECT_TABLE,
#endif
.relation = makeRangeVar((char *) NameStr(chunk->fd.schema_name),
(char *) NameStr(chunk->fd.table_name),
0),
};
LOCKMODE lockmode = AlterTableGetLockLevel(alterstmt.cmds);
#if PG13_GE
AlterTableUtilityContext atcontext = {
.relid = AlterTableLookupRelation(&alterstmt, lockmode),
};
AlterTable(&alterstmt, lockmode, &atcontext);
#else
AlterTable(AlterTableLookupRelation(&alterstmt, lockmode), lockmode, &alterstmt);
#endif
}
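For reference, the command assembled above corresponds roughly to the
following SQL (chunk and hypertable names are illustrative):

    ALTER TABLE _timescaledb_internal._hyper_1_1_chunk INHERIT public.chunkapi;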
static Chunk *
chunk_create_from_hypercube_and_table_after_lock(const Hypertable *ht, Hypercube *cube,
Oid chunk_table_relid, const char *schema_name,
const char *table_name, const char *prefix)
{
Oid current_chunk_schemaid = get_rel_namespace(chunk_table_relid);
Oid new_chunk_schemaid = InvalidOid;
Chunk *chunk;
Assert(OidIsValid(chunk_table_relid));
Assert(OidIsValid(current_chunk_schemaid));
/* Insert any new dimension slices into metadata */
ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix, get_next_chunk_id());
chunk->table_id = chunk_table_relid;
chunk->hypertable_relid = ht->main_table_relid;
Assert(OidIsValid(ht->main_table_relid));
new_chunk_schemaid = get_namespace_oid(NameStr(chunk->fd.schema_name), false);
if (current_chunk_schemaid != new_chunk_schemaid)
{
Relation chunk_rel = table_open(chunk_table_relid, AccessExclusiveLock);
ObjectAddresses *objects;
CheckSetNamespace(current_chunk_schemaid, new_chunk_schemaid);
objects = new_object_addresses();
AlterTableNamespaceInternal(chunk_rel, current_chunk_schemaid, new_chunk_schemaid, objects);
free_object_addresses(objects);
table_close(chunk_rel, NoLock);
/* Make changes visible */
CommandCounterIncrement();
}
if (namestrcmp(&chunk->fd.table_name, get_rel_name(chunk_table_relid)) != 0)
{
/* Renaming will acquire and keep an AccessExclusiveLock on the chunk
* table */
RenameRelationInternal(chunk_table_relid, NameStr(chunk->fd.table_name), true, false);
/* Make changes visible */
CommandCounterIncrement();
}
/* Note that we do not automatically add constraints and triggers to the
* chunk table when the chunk is created from an existing table. However,
* PostgreSQL currently validates that CHECK constraints exist, but no
* validation is done for other objects, including triggers, UNIQUE,
* PRIMARY KEY, and FOREIGN KEY constraints. We might want to either
* enforce that these constraints exist prior to creating the chunk from a
* table, or ensure that they are automatically added when the chunk is
* created. However, for the latter case, we risk duplicating constraints
* and triggers if some of them already exist on the chunk table prior to
* creating the chunk from it. */
chunk_add_constraints(chunk);
chunk_insert_into_metadata_after_lock(chunk);
chunk_add_inheritance(chunk, ht);
return chunk;
}
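The schema move and rename performed internally above correspond
roughly to the following SQL (names are illustrative):

    ALTER TABLE public.newchunk SET SCHEMA _timescaledb_internal;
    ALTER TABLE _timescaledb_internal.newchunk RENAME TO _hyper_1_1_chunk;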
static Chunk *
chunk_create_from_point_after_lock(const Hypertable *ht, const Point *p, const char *schema_name,
const char *table_name, const char *prefix)
@@ -1229,7 +1332,7 @@ chunk_create_from_point_after_lock(const Hypertable *ht, const Point *p, const c
Chunk *
ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const char *schema_name,
const char *table_name, bool *created)
const char *table_name, Oid chunk_table_relid, bool *created)
{
ChunkStub *stub;
Chunk *chunk = NULL;
@@ -1257,7 +1360,16 @@ ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const
* commit since we won't create those slices ourselves. */
ts_hypercube_find_existing_slices(hc, &tuplock);
chunk = chunk_create_from_hypercube_after_lock(ht, hc, schema_name, table_name, NULL);
if (OidIsValid(chunk_table_relid))
chunk = chunk_create_from_hypercube_and_table_after_lock(ht,
hc,
chunk_table_relid,
schema_name,
table_name,
NULL);
else
chunk =
chunk_create_from_hypercube_after_lock(ht, hc, schema_name, table_name, NULL);
if (NULL != created)
*created = true;
@@ -1470,7 +1582,7 @@ chunk_tuple_found(TupleInfo *ti, void *arg)
* the data table and related objects. */
chunk->table_id = get_relname_relid(chunk->fd.table_name.data,
get_namespace_oid(chunk->fd.schema_name.data, true));
chunk->hypertable_relid = ts_inheritance_parent_relid(chunk->table_id);
chunk->hypertable_relid = ts_hypertable_id_to_relid(chunk->fd.hypertable_id);
chunk->relkind = get_rel_relkind(chunk->table_id);
if (chunk->relkind == RELKIND_FOREIGN_TABLE)


@@ -168,10 +168,9 @@ extern TSDLLEXPORT void ts_chunk_drop_preserve_catalog_row(const Chunk *chunk,
DropBehavior behavior, int32 log_level);
extern TSDLLEXPORT List *ts_chunk_do_drop_chunks(Hypertable *ht, int64 older_than, int64 newer_than,
int32 log_level, List **affected_data_nodes);
extern TSDLLEXPORT Chunk *ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc,
const char *schema_name,
const char *table_name,
bool *created);
extern TSDLLEXPORT Chunk *
ts_chunk_find_or_create_without_cuts(const Hypertable *ht, Hypercube *hc, const char *schema_name,
const char *table_name, Oid chunk_table_relid, bool *created);
extern TSDLLEXPORT Chunk *ts_chunk_get_compressed_chunk_parent(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_is_unordered(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_is_compressed(const Chunk *chunk);


@@ -298,6 +298,37 @@ chunk_show(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
static void
check_privileges_for_creating_chunk(Oid hyper_relid)
{
AclResult acl_result;
acl_result = pg_class_aclcheck(hyper_relid, GetUserId(), ACL_INSERT);
if (acl_result != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for table \"%s\"", get_rel_name(hyper_relid)),
errdetail("Insert privileges required on \"%s\" to create chunks.",
get_rel_name(hyper_relid))));
}
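As the error detail indicates, the calling role needs INSERT privilege
on the hypertable, which could be granted for example as follows (the
role name is illustrative):

    GRANT INSERT ON chunkapi TO chunk_copy_role;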
static Hypercube *
get_hypercube_from_slices(Jsonb *slices, const Hypertable *ht)
{
Hypercube *hc;
const char *parse_err;
hc = hypercube_from_jsonb(slices, ht->space, &parse_err);
if (hc == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid hypercube for hypertable \"%s\"",
get_rel_name(ht->main_table_relid)),
errdetail("%s", parse_err)));
return hc;
}
Datum
chunk_create(PG_FUNCTION_ARGS)
{
@@ -305,6 +336,7 @@ chunk_create(PG_FUNCTION_ARGS)
Jsonb *slices = PG_ARGISNULL(1) ? NULL : PG_GETARG_JSONB_P(1);
const char *schema_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_CSTRING(2);
const char *table_name = PG_ARGISNULL(3) ? NULL : PG_GETARG_CSTRING(3);
Oid chunk_table_relid = PG_ARGISNULL(4) ? InvalidOid : PG_GETARG_OID(4);
Cache *hcache = ts_hypertable_cache_pin();
Hypertable *ht = ts_hypertable_cache_get_entry(hcache, hypertable_relid, CACHE_FLAG_NONE);
Hypercube *hc;
@@ -312,18 +344,10 @@
TupleDesc tupdesc;
HeapTuple tuple;
bool created;
const char *parse_err;
AclResult acl_result;
Assert(NULL != ht);
acl_result = pg_class_aclcheck(hypertable_relid, GetUserId(), ACL_INSERT);
if (acl_result != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for table \"%s\"", get_rel_name(hypertable_relid)),
errdetail("Insert privileges required on \"%s\" to create chunks.",
get_rel_name(hypertable_relid))));
Assert(OidIsValid(ht->main_table_relid));
check_privileges_for_creating_chunk(hypertable_relid);
if (NULL == slices)
ereport(ERROR, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("invalid slices")));
@@ -334,16 +358,14 @@
errmsg("function returning record called in context "
"that cannot accept type record")));
hc = hypercube_from_jsonb(slices, ht->space, &parse_err);
if (NULL == hc)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid hypercube for hypertable \"%s\"", get_rel_name(hypertable_relid)),
errdetail("%s", parse_err)));
chunk = ts_chunk_find_or_create_without_cuts(ht, hc, schema_name, table_name, &created);
hc = get_hypercube_from_slices(slices, ht);
Assert(NULL != hc);
chunk = ts_chunk_find_or_create_without_cuts(ht,
hc,
schema_name,
table_name,
chunk_table_relid,
&created);
Assert(NULL != chunk);
tuple = chunk_form_tuple(chunk, ht, tupdesc, created);
@@ -358,12 +380,15 @@
}
#define CREATE_CHUNK_FUNCTION_NAME "create_chunk"
#define CREATE_CHUNK_NUM_ARGS 5
#define CHUNK_CREATE_STMT \
"SELECT * FROM " INTERNAL_SCHEMA_NAME "." CREATE_CHUNK_FUNCTION_NAME "($1, $2, $3, $4)"
"SELECT * FROM " INTERNAL_SCHEMA_NAME "." CREATE_CHUNK_FUNCTION_NAME "($1, $2, $3, $4, $5)"
#define ESTIMATE_JSON_STR_SIZE(num_dims) (60 * (num_dims))
static Oid create_chunk_argtypes[4] = { REGCLASSOID, JSONBOID, NAMEOID, NAMEOID };
static Oid create_chunk_argtypes[CREATE_CHUNK_NUM_ARGS] = {
REGCLASSOID, JSONBOID, NAMEOID, NAMEOID, REGCLASSOID
};
/*
* Fill in / get the TupleDesc for the result type of the create_chunk()
@@ -374,7 +399,7 @@ get_create_chunk_result_type(TupleDesc *tupdesc)
{
Oid funcoid = ts_get_function_oid(CREATE_CHUNK_FUNCTION_NAME,
INTERNAL_SCHEMA_NAME,
4,
CREATE_CHUNK_NUM_ARGS,
create_chunk_argtypes);
if (get_func_result_type(funcoid, NULL, tupdesc) != TYPEFUNC_COMPOSITE)
@@ -421,11 +446,12 @@ void
chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht)
{
AsyncRequestSet *reqset = async_request_set_create();
const char *params[4] = {
const char *params[CREATE_CHUNK_NUM_ARGS] = {
quote_qualified_identifier(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name)),
chunk_api_dimension_slices_json(chunk, ht),
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name),
NULL,
};
AsyncResponseResult *res;
ListCell *lc;
@@ -444,7 +470,8 @@ chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht)
req = async_request_send_with_params(conn,
CHUNK_CREATE_STMT,
stmt_params_create_from_values(params, 4),
stmt_params_create_from_values(params,
CREATE_CHUNK_NUM_ARGS),
FORMAT_TEXT);
async_request_attach_user_data(req, cdn);
@@ -1629,8 +1656,6 @@ chunk_create_empty_table(PG_FUNCTION_ARGS)
Cache *const hcache = ts_hypertable_cache_pin();
Hypertable *ht;
Hypercube *hc;
const char *parse_err;
AclResult acl_result;
GETARG_NOTNULL_OID(hypertable_relid, 0, "hypertable");
GETARG_NOTNULL_NULLABLE(slices, 1, "slices", JSONB_P);
@@ -1639,23 +1664,9 @@
ht = ts_hypertable_cache_get_entry(hcache, hypertable_relid, CACHE_FLAG_NONE);
Assert(ht != NULL);
acl_result = pg_class_aclcheck(hypertable_relid, GetUserId(), ACL_INSERT);
if (acl_result != ACLCHECK_OK)
ereport(ERROR,
(errcode(ERRCODE_INSUFFICIENT_PRIVILEGE),
errmsg("permission denied for table \"%s\"", get_rel_name(hypertable_relid)),
errdetail("Insert privileges required on \"%s\" to create chunks.",
get_rel_name(hypertable_relid))));
hc = hypercube_from_jsonb(slices, ht->space, &parse_err);
if (hc == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("invalid hypercube for hypertable \"%s\"", get_rel_name(hypertable_relid)),
errdetail("%s", parse_err)));
check_privileges_for_creating_chunk(hypertable_relid);
hc = get_hypercube_from_slices(slices, ht);
Assert(NULL != hc);
ts_chunk_create_only_table(ht, hc, schema_name, table_name);
ts_cache_release(hcache);


@@ -780,7 +780,9 @@ SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME';
-- Use the time partition to calculate the tablespace id to use
DROP TABLE chunkapi;
DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME;
CREATE TABLE chunkapi (time timestamptz not null, device int, temp float);
CREATE TABLE devices (id int PRIMARY KEY);
INSERT INTO devices VALUES (1);
CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK (temp > 0));
SELECT * FROM create_hypertable('chunkapi', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
@@ -820,8 +822,116 @@ SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME';
tablespace1
(1 row)
-- Now create the complete chunk from the chunk table
SELECT _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME',
format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
create_chunk
---------------------------------------------------------------------------------------------------------
(11,10,_timescaledb_internal,_hyper_10_10_chunk,r,"{""time"": [1514419200000000, 1515024000000000]}",t)
(1 row)
SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
---------------------+------+---------+-------+--------------------------------+------------+----------+-----------
chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t
(1 row)
-- The chunk should inherit the hypertable
SELECT relname
FROM pg_catalog.pg_inherits, pg_class
WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid;
relname
----------
chunkapi
(1 row)
-- Show chunks attached to the table
SELECT
:'CHUNK_SCHEMA' AS expected_schema,
:'CHUNK_NAME' AS expected_table_name,
(_timescaledb_internal.show_chunk(ch)).*
FROM show_chunks('chunkapi') ch;
expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices
-----------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------
_timescaledb_internal | _hyper_10_10_chunk | 11 | 10 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]}
(1 row)
DROP TABLE chunkapi;
DROP TABLE devices;
-- Test creating a chunk from an existing chunk table that was not
-- created via create_chunk_table and that has a different name.
CREATE TABLE devices (id int PRIMARY KEY);
INSERT INTO devices VALUES (1);
CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK(temp > 0));
SELECT * FROM create_hypertable('chunkapi', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
11 | public | chunkapi | t
(1 row)
CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float);
SELECT * FROM test.show_constraints('newchunk');
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
------------+------+---------+-------+------+------------+----------+-----------
(0 rows)
INSERT INTO newchunk VALUES ('2018-01-01 05:00:00-8', 1, 23.4);
\set ON_ERROR_STOP 0
-- Creating the chunk without required CHECK constraints on a table
-- should fail. Currently, PostgreSQL only enforces presence of CHECK
-- constraints, but not foreign key, unique, or primary key
-- constraints. We should probably add checks to enforce the latter
-- too or auto-create all constraints.
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk');
ERROR: child table is missing constraint "chunkapi_temp_check"
\set ON_ERROR_STOP 1
-- Add the missing CHECK constraint. Note that the name must be the
-- same as on the parent table.
ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0);
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created
----------+---------------+-----------------------+--------------------+---------+------------------------------------------------+---------
13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]} | t
(1 row)
-- Show the chunk and that names are what we'd expect
SELECT
:'CHUNK_SCHEMA' AS expected_schema,
:'CHUNK_NAME' AS expected_table_name,
(_timescaledb_internal.show_chunk(ch)).*
FROM show_chunks('chunkapi') ch;
expected_schema | expected_table_name | chunk_id | hypertable_id | schema_name | table_name | relkind | slices
-----------------------+---------------------+----------+---------------+-----------------------+--------------------+---------+------------------------------------------------
_timescaledb_internal | _hyper_10_10_chunk | 13 | 11 | _timescaledb_internal | _hyper_10_10_chunk | r | {"time": [1514419200000000, 1515024000000000]}
(1 row)
-- The chunk should inherit the hypertable
SELECT relname
FROM pg_catalog.pg_inherits, pg_class
WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid;
relname
----------
chunkapi
(1 row)
-- Test that it is possible to query the data via the hypertable
SELECT * FROM chunkapi ORDER BY 1,2,3;
time | device | temp
------------------------------+--------+------
Mon Jan 01 05:00:00 2018 PST | 1 | 23.4
(1 row)
-- Show that the chunk has all the necessary constraints. These
-- include inheritable constraints and dimensional constraints, which
-- are specific to the chunk. Currently, foreign key, unique, and
-- primary key constraints are not inherited or auto-created.
SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
---------------------+------+---------+-------+--------------------------------+------------+----------+-----------
chunkapi_temp_check | c | {temp} | - | (temp > (0)::double precision) | f | f | t
(1 row)
DROP TABLE chunkapi;
DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME;
\c :TEST_DBNAME :ROLE_SUPERUSER
SET client_min_messages = ERROR;
DROP TABLESPACE tablespace1;


@@ -390,8 +390,9 @@ SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME';
DROP TABLE chunkapi;
DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME;
CREATE TABLE chunkapi (time timestamptz not null, device int, temp float);
CREATE TABLE devices (id int PRIMARY KEY);
INSERT INTO devices VALUES (1);
CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK (temp > 0));
SELECT * FROM create_hypertable('chunkapi', 'time');
INSERT INTO chunkapi VALUES ('2018-01-01 05:00:00-8', 1, 23.4);
@@ -413,8 +414,73 @@ SELECT count(*) FROM
SELECT tablespace FROM pg_tables WHERE tablename = :'CHUNK_NAME';
-- Now create the complete chunk from the chunk table
SELECT _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME',
format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
-- The chunk should inherit the hypertable
SELECT relname
FROM pg_catalog.pg_inherits, pg_class
WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid;
-- Show chunks attached to the table
SELECT
:'CHUNK_SCHEMA' AS expected_schema,
:'CHUNK_NAME' AS expected_table_name,
(_timescaledb_internal.show_chunk(ch)).*
FROM show_chunks('chunkapi') ch;
DROP TABLE chunkapi;
DROP TABLE devices;
-- Test creating a chunk from an existing chunk table that was not
-- created via create_chunk_table and that has a different name.
CREATE TABLE devices (id int PRIMARY KEY);
INSERT INTO devices VALUES (1);
CREATE TABLE chunkapi (time timestamptz NOT NULL PRIMARY KEY, device int REFERENCES devices(id), temp float CHECK(temp > 0));
SELECT * FROM create_hypertable('chunkapi', 'time');
CREATE TABLE newchunk (time timestamptz NOT NULL, device int, temp float);
SELECT * FROM test.show_constraints('newchunk');
INSERT INTO newchunk VALUES ('2018-01-01 05:00:00-8', 1, 23.4);
\set ON_ERROR_STOP 0
-- Creating the chunk without required CHECK constraints on a table
-- should fail. Currently, PostgreSQL only enforces presence of CHECK
-- constraints, but not foreign key, unique, or primary key
-- constraints. We should probably add checks to enforce the latter
-- too or auto-create all constraints.
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk');
\set ON_ERROR_STOP 1
-- Add the missing CHECK constraint. Note that the name must be the
-- same as on the parent table.
ALTER TABLE newchunk ADD CONSTRAINT chunkapi_temp_check CHECK (temp > 0);
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi', :'SLICES', :'CHUNK_SCHEMA', :'CHUNK_NAME', 'newchunk');
-- Show the chunk and that names are what we'd expect
SELECT
:'CHUNK_SCHEMA' AS expected_schema,
:'CHUNK_NAME' AS expected_table_name,
(_timescaledb_internal.show_chunk(ch)).*
FROM show_chunks('chunkapi') ch;
-- The chunk should inherit the hypertable
SELECT relname
FROM pg_catalog.pg_inherits, pg_class
WHERE inhrelid = (:'CHUNK_SCHEMA'||'.'||:'CHUNK_NAME')::regclass AND inhparent = oid;
-- Test that it is possible to query the data via the hypertable
SELECT * FROM chunkapi ORDER BY 1,2,3;
-- Show that the chunk has all the necessary constraints. These
-- include inheritable constraints and dimensional constraints, which
-- are specific to the chunk. Currently, foreign key, unique, and
-- primary key constraints are not inherited or auto-created.
SELECT * FROM test.show_constraints(format('%I.%I', :'CHUNK_SCHEMA', :'CHUNK_NAME')::regclass);
DROP TABLE chunkapi;
DROP TABLE :CHUNK_SCHEMA.:CHUNK_NAME;
\c :TEST_DBNAME :ROLE_SUPERUSER
SET client_min_messages = ERROR;