Create chunks on remote servers

This change ensures that chunk replicas are created on remote
(datanode) servers whenever a chunk is created in a local distributed
hypertable.

Remote chunks are created using the `create_chunk()` function, which
has been slightly refactored to allow specifying an explicit chunk
table name. The local server making the remote call also records the
resulting remote chunk IDs in its `chunk_server` mappings table.

Since remote command invocation without super-user permissions
requires password authentication, the test configuration files have
been updated to require password authentication for a cluster test
user that is used in tests.
This commit is contained in:
Erik Nordström 2019-01-25 18:49:49 +01:00 committed by Erik Nordström
parent db82c25d44
commit e2371558f7
23 changed files with 563 additions and 154 deletions

View File

@ -50,6 +50,6 @@ CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk(
hypertable REGCLASS,
slices JSONB,
schema_name NAME = NULL,
table_prefix NAME = NULL)
table_name NAME = NULL)
RETURNS TABLE(chunk_id INTEGER, hypertable_id INTEGER, schema_name NAME, table_name NAME, relkind "char", slices JSONB, created BOOLEAN)
AS '@MODULE_PATHNAME@', 'ts_chunk_create' LANGUAGE C VOLATILE;

View File

@ -143,7 +143,7 @@ CREATE OR REPLACE FUNCTION add_server(
server_name NAME,
host TEXT = 'localhost',
database NAME = current_database(),
port INTEGER = 5432,
port INTEGER = inet_server_port(),
local_user REGROLE = NULL,
remote_user NAME = NULL,
password TEXT = NULL,

View File

@ -767,11 +767,13 @@ ts_chunk_create_table(Chunk *chunk, Hypertable *ht, const char *tablespacename)
* setting to work
*/
create_toast_table(&stmt.base, objaddr.objectId);
if (uid != saved_uid)
SetUserIdAndSecContext(saved_uid, sec_ctx);
}
else if (chunk->relkind == RELKIND_FOREIGN_TABLE)
{
ChunkServer *cs;
ListCell *lc;
if (list_length(chunk->servers) == 0)
ereport(ERROR,
@ -790,24 +792,22 @@ ts_chunk_create_table(Chunk *chunk, Hypertable *ht, const char *tablespacename)
/* Create the foreign table catalog information */
CreateForeignTable(&stmt, objaddr.objectId);
/* Create the corresponding chunks on the remote servers */
foreach (lc, chunk->servers)
{
cs = lfirst(lc);
/*
* Need to restore security context to execute remote commands as the
* original user
*/
if (uid != saved_uid)
SetUserIdAndSecContext(saved_uid, sec_ctx);
/* TODO: create chunk on server and get local ID */
/* Create the corresponding chunk replicas on the remote servers */
ts_cm_functions->create_chunk_on_servers(chunk, ht);
/* Set a bogus server_chunk_id for now */
cs->fd.server_chunk_id = chunk->fd.id;
ts_chunk_server_insert(cs);
}
/* Record the remote server chunk ID mappings */
ts_chunk_server_insert_multi(chunk->servers);
}
else
elog(ERROR, "invalid relkind \"%c\" when creating chunk", chunk->relkind);
if (uid != saved_uid)
SetUserIdAndSecContext(saved_uid, sec_ctx);
set_attoptions(rel, objaddr.objectId);
table_close(rel, AccessShareLock);
@ -878,9 +878,18 @@ get_chunk_name_suffix(const char relkind)
return "chunk";
}
/*
* Create a chunk from the dimensional constraints in the given hypercube.
*
* The table name for the chunk can be given explicitly, or generated if
* table_name is NULL. If the table name is generated, it will use the given
* prefix or, if NULL, use the hypertable's associated table prefix. Similarly,
* if schema_name is NULL it will use the hypertable's associated schema for
* the chunk.
*/
static Chunk *
chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *schema,
const char *prefix)
chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *schema_name,
const char *table_name, const char *prefix)
{
Hyperspace *hs = ht->space;
Catalog *catalog = ts_catalog_get();
@ -888,6 +897,9 @@ chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *sc
Chunk *chunk;
const char relkind = hypertable_chunk_relkind(ht);
if (NULL == schema_name || schema_name[0] == '\0')
schema_name = NameStr(ht->fd.associated_schema_name);
/* Create a new chunk based on the hypercube */
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
chunk = ts_chunk_create_base(ts_catalog_table_next_seq_id(catalog, CHUNK),
@ -899,14 +911,28 @@ chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *sc
chunk->fd.hypertable_id = hs->hypertable_id;
chunk->cube = cube;
chunk->hypertable_relid = ht->main_table_relid;
namestrcpy(&chunk->fd.schema_name, schema);
snprintf(chunk->fd.table_name.data,
namestrcpy(&chunk->fd.schema_name, schema_name);
if (NULL == table_name || table_name[0] == '\0')
{
int len;
if (NULL == prefix)
prefix = NameStr(ht->fd.associated_table_prefix);
len = snprintf(chunk->fd.table_name.data,
NAMEDATALEN,
"%s_%d_%s",
prefix,
chunk->fd.id,
get_chunk_name_suffix(relkind));
if (len >= NAMEDATALEN)
elog(ERROR, "chunk table name too long");
}
else
namestrcpy(&chunk->fd.table_name, table_name);
/* Insert chunk */
ts_chunk_insert_lock(chunk, RowExclusiveLock);
@ -967,12 +993,12 @@ init_scan_by_chunk_id(ScanIterator *iterator, int32 chunk_id)
}
static Chunk *
chunk_create_from_hypercube_after_lock(Hypertable *ht, Hypercube *cube, const char *schema,
const char *prefix)
chunk_create_from_hypercube_after_lock(Hypertable *ht, Hypercube *cube, const char *schema_name,
const char *table_name, const char *prefix)
{
Chunk *chunk;
chunk = chunk_create_metadata_after_lock(ht, cube, schema, prefix);
chunk = chunk_create_metadata_after_lock(ht, cube, schema_name, table_name, prefix);
Assert(chunk != NULL);
chunk_create_table_after_lock(chunk, ht);
@ -980,7 +1006,8 @@ chunk_create_from_hypercube_after_lock(Hypertable *ht, Hypercube *cube, const ch
}
static Chunk *
chunk_create_from_point_after_lock(Hypertable *ht, Point *p, const char *schema, const char *prefix)
chunk_create_from_point_after_lock(Hypertable *ht, Point *p, const char *schema_name,
const char *table_name, const char *prefix)
{
Hyperspace *hs = ht->space;
Hypercube *cube;
@ -997,12 +1024,12 @@ chunk_create_from_point_after_lock(Hypertable *ht, Point *p, const char *schema,
/* Resolve collisions with other chunks by cutting the new hypercube */
chunk_collision_resolve(ht, cube, p);
return chunk_create_from_hypercube_after_lock(ht, cube, schema, prefix);
return chunk_create_from_hypercube_after_lock(ht, cube, schema_name, table_name, prefix);
}
Chunk *
ts_chunk_find_or_create_without_cuts(Hypertable *ht, Hypercube *hc, const char *schema,
const char *prefix, bool *created)
ts_chunk_find_or_create_without_cuts(Hypertable *ht, Hypercube *hc, const char *schema_name,
const char *table_name, bool *created)
{
ChunkStub *stub;
Chunk *chunk = NULL;
@ -1013,7 +1040,7 @@ ts_chunk_find_or_create_without_cuts(Hypertable *ht, Hypercube *hc, const char *
if (NULL == stub)
{
chunk = chunk_create_from_hypercube_after_lock(ht, hc, schema, prefix);
chunk = chunk_create_from_hypercube_after_lock(ht, hc, schema_name, table_name, NULL);
if (NULL != created)
*created = true;
@ -1057,7 +1084,7 @@ ts_chunk_create_from_point(Hypertable *ht, Point *p, const char *schema, const c
chunk = chunk_find(ht, p, true);
if (NULL == chunk)
chunk = chunk_create_from_point_after_lock(ht, p, schema, prefix);
chunk = chunk_create_from_point_after_lock(ht, p, schema, NULL, prefix);
ASSERT_IS_VALID_CHUNK(chunk);

View File

@ -159,8 +159,9 @@ ts_chunk_get_chunks_in_time_range(Oid table_relid, Datum older_than_datum, Datum
Oid older_than_type, Oid newer_than_type, char *caller_name,
MemoryContext mctx, uint64 *num_chunks_returned);
extern TSDLLEXPORT Chunk *ts_chunk_find_or_create_without_cuts(Hypertable *ht, Hypercube *hc,
const char *schema,
const char *prefix, bool *created);
const char *schema_name,
const char *table_name,
bool *created);
extern TSDLLEXPORT bool ts_chunk_contains_compressed_data(Chunk *chunk);
extern List *ts_chunk_servers_copy(Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_can_be_compressed(int32 chunk_id);

View File

@ -443,6 +443,12 @@ empty_fn(PG_FUNCTION_ARGS)
PG_RETURN_VOID();
}
static void
create_chunk_on_servers_default(Chunk *chunk, Hypertable *ht)
{
error_no_default_fn_community();
}
/*
* Define cross-module functions' default values:
* If the submodule isn't activated, using one of the cm functions will throw an
@ -508,6 +514,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.delete_server = error_no_default_fn_pg_community,
.show_chunk = error_no_default_fn_pg_community,
.create_chunk = error_no_default_fn_pg_community,
.create_chunk_on_servers = create_chunk_on_servers_default,
.get_servername_list = get_servername_list_default_fn,
.hypertable_make_distributed = hypertable_make_distributed_default_fn,
.timescaledb_fdw_handler = error_no_default_fn_pg_community,

View File

@ -31,6 +31,7 @@
typedef struct JsonbParseState JsonbParseState;
typedef struct Hypertable Hypertable;
typedef struct Chunk Chunk;
typedef struct CrossModuleFunctions
{
@ -117,6 +118,7 @@ typedef struct CrossModuleFunctions
void (*cache_syscache_invalidate)(Datum arg, int cacheid, uint32 hashvalue);
PGFunction remote_txn_id_in;
PGFunction remote_txn_id_out;
void (*create_chunk_on_servers)(Chunk *chunk, Hypertable *ht);
} CrossModuleFunctions;
extern TSDLLEXPORT CrossModuleFunctions *ts_cm_functions;

View File

@ -498,6 +498,44 @@ ts_create_struct_from_tuple(HeapTuple tuple, MemoryContext mctx, size_t alloc_si
return struct_ptr;
}
bool
ts_function_types_equal(Oid left[], Oid right[], int nargs)
{
int arg_index;
for (arg_index = 0; arg_index < nargs; arg_index++)
{
if (left[arg_index] != right[arg_index])
return false;
}
return true;
}
Oid
ts_get_function_oid(const char *funcname, const char *schema_name, int nargs, Oid arg_types[])
{
List *qualified_funcname =
list_make2(makeString(pstrdup(schema_name)), makeString(pstrdup(funcname)));
FuncCandidateList func_candidates;
func_candidates = FuncnameGetCandidates(qualified_funcname, nargs, NIL, false, false, false);
while (func_candidates != NULL)
{
if (func_candidates->nargs == nargs &&
ts_function_types_equal(func_candidates->args, arg_types, nargs))
return func_candidates->oid;
func_candidates = func_candidates->next;
}
elog(ERROR,
"failed to find function %s with %d args in schema \"%s\"",
funcname,
nargs,
schema_name);
return InvalidOid;
}
/*
* Find a partitioning function with a given schema and name.
*

View File

@ -73,6 +73,10 @@ extern Oid ts_inheritance_parent_relid(Oid relid);
extern Oid ts_lookup_proc_filtered(const char *schema, const char *funcname, Oid *rettype,
proc_filter filter, void *filter_arg);
extern Oid ts_get_operator(const char *name, Oid namespace, Oid left, Oid right);
extern bool ts_function_types_equal(Oid left[], Oid right[], int nargs);
extern TSDLLEXPORT Oid ts_get_function_oid(const char *funcname, const char *schema_name, int nargs,
Oid arg_types[]);
extern TSDLLEXPORT Oid ts_get_cast_func(Oid source, Oid target);

10
test/pg_hba.conf.in Normal file
View File

@ -0,0 +1,10 @@
# TYPE DATABASE USER ADDRESS METHOD
# "local" is for Unix domain socket connections only
local all all trust
# IPv4 local connections:
host all @TEST_ROLE_DEFAULT_CLUSTER_USER@ 127.0.0.1/32 password
host all all 127.0.0.1/32 trust
# IPv6 local connections:
host all @TEST_ROLE_DEFAULT_CLUSTER_USER@ ::1/128 password
host all all ::1/128 trust

View File

@ -15,3 +15,4 @@ log_line_prefix='%u [%p] %d '
# numbers. Setting extra_float_digits=0 retains the old behavior which
# is needed to make our tests work for multiple PostgreSQL versions.
extra_float_digits=0
hba_file='@TEST_OUTPUT_DIR@/pg_hba.conf'

View File

@ -34,6 +34,7 @@ TEST_SPINWAIT_ITERS=${TEST_SPINWAIT_ITERS:-100}
TEST_ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER:-super_user}
TEST_ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER:-default_perm_user}
TEST_ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2:-default_perm_user_2}
TEST_ROLE_DEFAULT_CLUSTER_USER=${TEST_ROLE_DEFAULT_CLUSTER_USER:-default_cluster_user}
shift
@ -74,6 +75,7 @@ ${PSQL} -U ${TEST_PGUSER} \
-v ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER} \
-v ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER} \
-v ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2} \
-v ROLE_DEFAULT_CLUSTER_USER=${TEST_ROLE_DEFAULT_CLUSTER_USER} \
-v MODULE_PATHNAME="'timescaledb-${EXT_VERSION}'" \
-v TSL_MODULE_PATHNAME="'timescaledb-tsl-${EXT_VERSION}'" \
$@ -d ${TEST_DBNAME} 2>&1 | sed -e '/<exclude_from_test>/,/<\/exclude_from_test>/d' -e 's! Memory: [0-9]\{1,\}kB!!' -e 's! Memory Usage: [0-9]\{1,\}kB!!'

View File

@ -1,10 +1,13 @@
set(TEST_ROLE_SUPERUSER super_user)
set(TEST_ROLE_DEFAULT_PERM_USER default_perm_user)
set(TEST_ROLE_DEFAULT_PERM_USER_2 default_perm_user_2)
set(TEST_ROLE_DEFAULT_CLUSTER_USER default_cluster_user)
set(TEST_INPUT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
set(TEST_OUTPUT_DIR ${CMAKE_CURRENT_BINARY_DIR})
set(TEST_CLUSTER ${TEST_OUTPUT_DIR}/testcluster)
configure_file(postgresql.conf.in postgresql.conf)
configure_file(${PRIMARY_TEST_DIR}/pg_hba.conf.in pg_hba.conf)
# Basic connection info for test instance
set(TEST_PGPORT_LOCAL 5432 CACHE STRING "The port of a running PostgreSQL instance")
@ -22,7 +25,7 @@ set(PG_REGRESS_OPTS_BASE
--dlpath=${PROJECT_BINARY_DIR}/src)
set(PG_REGRESS_OPTS_EXTRA
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2}
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2},${TEST_ROLE_DEFAULT_CLUSTER_USER}
--dbname=${TEST_DBNAME}
--launcher=${PRIMARY_TEST_DIR}/runner.sh)
@ -32,7 +35,7 @@ set(PG_REGRESS_SHARED_OPTS_EXTRA
--launcher=${PRIMARY_TEST_DIR}/runner_shared.sh)
set(PG_ISOLATION_REGRESS_OPTS_EXTRA
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2}
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2},${TEST_ROLE_DEFAULT_CLUSTER_USER}
--dbname=${TEST_DBNAME})
set(PG_REGRESS_OPTS_INOUT
@ -52,7 +55,7 @@ set(PG_ISOLATION_REGRESS_OPTS_INOUT
set(PG_REGRESS_OPTS_TEMP_INSTANCE
--port=${TEST_PGPORT_TEMP_INSTANCE}
--temp-instance=${TEST_CLUSTER}
--temp-config=${TEST_INPUT_DIR}/postgresql.conf
--temp-config=${TEST_OUTPUT_DIR}/postgresql.conf
)
set(PG_REGRESS_OPTS_TEMP_INSTANCE_PGTEST
@ -71,6 +74,7 @@ if(PG_REGRESS)
TEST_ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER}
TEST_ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER}
TEST_ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2}
TEST_ROLE_DEFAULT_CLUSTER_USER=${TEST_ROLE_DEFAULT_CLUSTER_USER}
TEST_DBNAME=${TEST_DBNAME}
TEST_INPUT_DIR=${TEST_INPUT_DIR}
TEST_OUTPUT_DIR=${TEST_OUTPUT_DIR}
@ -85,6 +89,7 @@ if(PG_ISOLATION_REGRESS)
TEST_ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER}
TEST_ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER}
TEST_ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2}
TEST_ROLE_DEFAULT_CLUSTER_USER=${TEST_ROLE_DEFAULT_CLUSTER_USER}
TEST_DBNAME=${TEST_DBNAME}
TEST_INPUT_DIR=${TEST_INPUT_DIR}
TEST_OUTPUT_DIR=${TEST_OUTPUT_DIR}

View File

@ -30,6 +30,11 @@ set_target_properties(${TSL_LIBRARY_NAME} PROPERTIES
OUTPUT_NAME ${TSL_LIBRARY_NAME}-${PROJECT_VERSION_MOD}
PREFIX "")
target_include_directories(${TSL_LIBRARY_NAME} PRIVATE ${PG_INCLUDEDIR})
if (USE_OPENSSL)
target_include_directories(${TSL_LIBRARY_NAME} PRIVATE ${OPENSSL_INCLUDE_DIR})
endif (USE_OPENSSL)
target_compile_definitions(${TSL_LIBRARY_NAME} PUBLIC TS_TSL)
target_compile_definitions(${TSL_LIBRARY_NAME} PUBLIC TS_SUBMODULE)

View File

@ -7,16 +7,22 @@
#include <utils/jsonb.h>
#include <utils/lsyscache.h>
#include <utils/builtins.h>
#include <catalog/pg_type.h>
#include <fmgr.h>
#include <funcapi.h>
#include <miscadmin.h>
#include <catalog.h>
#include <compat.h>
#include <chunk.h>
#include <chunk_server.h>
#include <errors.h>
#include <hypercube.h>
#include <hypertable.h>
#include <hypertable_cache.h>
#include "remote/async.h"
#include "remote/dist_txn.h"
#include "chunk_api.h"
/*
@ -267,8 +273,8 @@ chunk_create(PG_FUNCTION_ARGS)
{
Oid hypertable_relid = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
Jsonb *slices = PG_ARGISNULL(1) ? NULL : PG_GETARG_JSONB_P(1);
Name schema_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_NAME(2);
Name table_prefix = PG_ARGISNULL(3) ? NULL : PG_GETARG_NAME(3);
const char *schema_name = PG_ARGISNULL(2) ? NULL : PG_GETARG_CSTRING(2);
const char *table_name = PG_ARGISNULL(3) ? NULL : PG_GETARG_CSTRING(3);
Cache *hcache = ts_hypertable_cache_pin();
Hypertable *ht = ts_hypertable_cache_get_entry(hcache, hypertable_relid, CACHE_FLAG_NONE);
Hypercube *hc;
@ -297,17 +303,7 @@ chunk_create(PG_FUNCTION_ARGS)
errmsg("invalid hypercube for hypertable \"%s\"", get_rel_name(hypertable_relid)),
errdetail("%s", parse_err)));
if (NULL == schema_name)
schema_name = &ht->fd.associated_schema_name;
if (NULL == table_prefix)
table_prefix = &ht->fd.associated_table_prefix;
chunk = ts_chunk_find_or_create_without_cuts(ht,
hc,
NameStr(*schema_name),
NameStr(*table_prefix),
&created);
chunk = ts_chunk_find_or_create_without_cuts(ht, hc, schema_name, table_name, &created);
Assert(NULL != chunk);
@ -321,3 +317,131 @@ chunk_create(PG_FUNCTION_ARGS)
PG_RETURN_DATUM(HeapTupleGetDatum(tuple));
}
#if !PG96 /* Remote chunk creation only supported on \
* PG10 and above */
#define CREATE_CHUNK_FUNCTION_NAME "create_chunk"
#define CHUNK_CREATE_STMT \
"SELECT * FROM " INTERNAL_SCHEMA_NAME "." CREATE_CHUNK_FUNCTION_NAME "($1, $2, $3, $4)"
#define ESTIMATE_JSON_STR_SIZE(num_dims) (60 * (num_dims))
static Oid create_chunk_argtypes[4] = { REGCLASSOID, JSONBOID, NAMEOID, NAMEOID };
/*
* Fill in / get the TupleDesc for the result type of the create_chunk()
* function.
*/
static void
get_create_chunk_result_type(TupleDesc *tupdesc)
{
Oid funcoid = ts_get_function_oid(CREATE_CHUNK_FUNCTION_NAME,
INTERNAL_SCHEMA_NAME,
4,
create_chunk_argtypes);
if (get_func_result_type(funcoid, NULL, tupdesc) != TYPEFUNC_COMPOSITE)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("function returning record called in context "
"that cannot accept type record")));
}
static void
get_result_datums(Datum *values, bool *nulls, unsigned int numvals, AttInMetadata *attinmeta,
PGresult *res)
{
unsigned int i;
memset(nulls, 0, sizeof(bool) * numvals);
for (i = 0; i < numvals; i++)
{
if (PQgetisnull(res, 0, i))
nulls[i] = true;
else
values[i] = InputFunctionCall(&attinmeta->attinfuncs[i],
PQgetvalue(res, 0, i),
attinmeta->attioparams[i],
attinmeta->atttypmods[i]);
}
}
/*
* Create a replica of a chunk on all its assigned servers.
*/
void
chunk_api_create_on_servers(Chunk *chunk, Hypertable *ht)
{
AsyncRequestSet *reqset = async_request_set_create();
JsonbParseState *ps = NULL;
JsonbValue *jv = hypercube_to_jsonb_value(chunk->cube, ht->space, &ps);
Jsonb *hcjson = JsonbValueToJsonb(jv);
const char *params[4] = {
quote_qualified_identifier(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name)),
JsonbToCString(NULL, &hcjson->root, ESTIMATE_JSON_STR_SIZE(ht->space->num_dimensions)),
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name),
};
AsyncResponseResult *res;
ListCell *lc;
TupleDesc tupdesc;
AttInMetadata *attinmeta;
get_create_chunk_result_type(&tupdesc);
attinmeta = TupleDescGetAttInMetadata(tupdesc);
foreach (lc, chunk->servers)
{
ChunkServer *cs = lfirst(lc);
UserMapping *um = GetUserMapping(GetUserId(), cs->foreign_server_oid);
PGconn *conn = remote_dist_txn_get_connection(um, REMOTE_TXN_NO_PREP_STMT);
AsyncRequest *req;
req = async_request_send_with_params(conn, CHUNK_CREATE_STMT, 4, params);
async_request_attach_user_data(req, cs);
async_request_set_add(reqset, req);
}
while ((res = async_request_set_wait_ok_result(reqset)) != NULL)
{
PGresult *pgres = async_response_result_get_pg_result(res);
ChunkServer *cs = async_response_result_get_user_data(res);
Datum *values = palloc0(sizeof(Datum) * tupdesc->natts);
bool *nulls = palloc0(sizeof(bool) * tupdesc->natts);
const char *schema_name, *table_name;
bool created;
get_result_datums(values, nulls, tupdesc->natts, attinmeta, pgres);
created = DatumGetBool(values[AttrNumberGetAttrOffset(Anum_create_chunk_created)]);
/*
* Sanity check the result. Use error rather than an assert since this
* is the result of a remote call to a server that could potentially
* run a different version of the remote function than we'd expect.
*/
if (!created)
elog(ERROR, "chunk creation failed on server \"%s\"", NameStr(cs->fd.server_name));
if (nulls[AttrNumberGetAttrOffset(Anum_create_chunk_id)] ||
nulls[AttrNumberGetAttrOffset(Anum_create_chunk_schema_name)] ||
nulls[AttrNumberGetAttrOffset(Anum_create_chunk_table_name)])
elog(ERROR, "unexpected chunk creation result on remote server");
schema_name =
DatumGetCString(values[AttrNumberGetAttrOffset(Anum_create_chunk_schema_name)]);
table_name = DatumGetCString(values[AttrNumberGetAttrOffset(Anum_create_chunk_table_name)]);
if (namestrcmp(&chunk->fd.schema_name, schema_name) != 0 ||
namestrcmp(&chunk->fd.table_name, table_name) != 0)
elog(ERROR, "remote chunk has mismatching schema or table name");
cs->fd.server_chunk_id =
DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_create_chunk_id)]);
}
}
#endif /* !PG96 */

View File

@ -10,5 +10,8 @@
extern Datum chunk_show(PG_FUNCTION_ARGS);
extern Datum chunk_create(PG_FUNCTION_ARGS);
#if !PG96
extern void chunk_api_create_on_servers(Chunk *chunk, Hypertable *ht);
#endif
#endif /* TIMESCALEDB_TSL_CHUNK_API_H */

View File

@ -143,6 +143,9 @@ CrossModuleFunctions tsl_cm_functions = {
.delete_server = server_delete,
.show_chunk = chunk_show,
.create_chunk = chunk_create,
#if !PG96
.create_chunk_on_servers = chunk_api_create_on_servers,
#endif
.get_servername_list = server_get_servername_list,
.hypertable_make_distributed = hypertable_make_distributed,
.timescaledb_fdw_handler = timescaledb_fdw_handler,

View File

@ -1,6 +1,9 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
GRANT CREATE ON DATABASE :TEST_DBNAME TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE TABLE chunkapi (time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2);
NOTICE: adding not-null constraint to column "time"
@ -63,27 +66,34 @@ DETAIL: Token "device" is invalid.
CONTEXT: JSON data, line 1: {"time: [1515024000000000] "device...
\set ON_ERROR_STOP 1
\set VERBOSITY terse
-- Create a chunk that does not collide
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', 'public', 'my_chunk_prefix');
-- Create a chunk that does not collide and with custom schema and name
CREATE SCHEMA "ChunkSchema";
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', 'ChunkSchema', 'My_chunk_Table_name');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices | created
----------+---------------+-------------+-------------------------+---------+----------------------------------------------------------------------------------------------+---------
2 | 1 | public | my_chunk_prefix_2_chunk | r | {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]} | t
----------+---------------+-------------+---------------------+---------+----------------------------------------------------------------------------------------------+---------
2 | 1 | ChunkSchema | My_chunk_Table_name | r | {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]} | t
(1 row)
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('chunkapi');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices
----------+---------------+-----------------------+-------------------------+---------+----------------------------------------------------------------------------------------------
----------+---------------+-----------------------+---------------------+---------+----------------------------------------------------------------------------------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | r | {"time": [1514419200000000, 1515024000000000], "device": [-9223372036854775808, 1073741823]}
2 | 1 | public | my_chunk_prefix_2_chunk | r | {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}
2 | 1 | ChunkSchema | My_chunk_Table_name | r | {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}
(2 rows)
-- Show the new chunk in the public schema
\dt
-- Show the new chunks
\dt public.*
List of relations
Schema | Name | Type | Owner
--------+-------------------------+-------+-------------------
--------+----------+-------+-------------------
public | chunkapi | table | default_perm_user
public | my_chunk_prefix_2_chunk | table | default_perm_user
(2 rows)
(1 row)
\dt "ChunkSchema".*
List of relations
Schema | Name | Type | Owner
-------------+---------------------+-------+-------------------
ChunkSchema | My_chunk_Table_name | table | default_perm_user
(1 row)

View File

@ -4,29 +4,32 @@
-- Need to be super user to create extension and add servers
\c :TEST_DBNAME :ROLE_SUPERUSER;
-- Need explicit password for non-super users to connect
ALTER ROLE :ROLE_DEFAULT_PERM_USER PASSWORD 'perm_user_pass';
GRANT USAGE ON FOREIGN DATA WRAPPER timescaledb_fdw TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
-- Add servers using TimescaleDB server management API
SELECT * FROM add_server('server_1', database => 'server_1', password => 'perm_user_pass');
ALTER ROLE :ROLE_DEFAULT_CLUSTER_USER CREATEDB PASSWORD 'pass';
GRANT USAGE ON FOREIGN DATA WRAPPER timescaledb_fdw TO :ROLE_DEFAULT_CLUSTER_USER;
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
CREATE DATABASE server_1;
CREATE DATABASE server_2;
CREATE DATABASE server_3;
-- Add servers using the TimescaleDB server management API
SELECT * FROM add_server('server_1', database => 'server_1', password => 'pass');
server_name | host | port | database | username | server_username | created
-------------+-----------+------+----------+-------------------+-------------------+---------
server_1 | localhost | 5432 | server_1 | default_perm_user | default_perm_user | t
-------------+-----------+-------+----------+----------------------+----------------------+---------
server_1 | localhost | 15432 | server_1 | default_cluster_user | default_cluster_user | t
(1 row)
SELECT * FROM add_server('server_2', database => 'server_2', password => 'perm_user_pass');
SELECT * FROM add_server('server_2', database => 'server_2', password => 'pass');
server_name | host | port | database | username | server_username | created
-------------+-----------+------+----------+-------------------+-------------------+---------
server_2 | localhost | 5432 | server_2 | default_perm_user | default_perm_user | t
-------------+-----------+-------+----------+----------------------+----------------------+---------
server_2 | localhost | 15432 | server_2 | default_cluster_user | default_cluster_user | t
(1 row)
SELECT * FROM add_server('server_3', database => 'server_3', password => 'perm_user_pass');
SELECT * FROM add_server('server_3', database => 'server_3', port => inet_server_port(), password => 'pass');
server_name | host | port | database | username | server_username | created
-------------+-----------+------+----------+-------------------+-------------------+---------
server_3 | localhost | 5432 | server_3 | default_perm_user | default_perm_user | t
-------------+-----------+-------+----------+----------------------+----------------------+---------
server_3 | localhost | 15432 | server_3 | default_cluster_user | default_cluster_user | t
(1 row)
-- Create a distributed hypertable. Add a trigger and primary key
-- Create distributed hypertables. Add a trigger and primary key
-- constraint to test how those work
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 1);
@ -35,6 +38,69 @@ SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 1);
1 | public | disttable | t
(1 row)
-- An underreplicated table that will has a replication_factor > num_servers
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4);
NOTICE: adding not-null constraint to column "time"
hypertable_id | schema_name | table_name | created
---------------+-------------+-----------------+---------
2 | public | underreplicated | t
(1 row)
-- Create tables on remote servers
\c server_1
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
1 | public | disttable | t
(1 row)
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+-----------------+---------
2 | public | underreplicated | t
(1 row)
\c server_2
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
1 | public | disttable | t
(1 row)
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+-----------------+---------
2 | public | underreplicated | t
(1 row)
\c server_3
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
1 | public | disttable | t
(1 row)
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
hypertable_id | schema_name | table_name | created
---------------+-------------+-----------------+---------
2 | public | underreplicated | t
(1 row)
\c :TEST_DBNAME :ROLE_SUPERUSER
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
CREATE OR REPLACE FUNCTION test_trigger()
RETURNS TRIGGER LANGUAGE PLPGSQL AS
$BODY$
@ -60,7 +126,10 @@ SELECT * FROM _timescaledb_catalog.hypertable_server;
1 | | server_1
1 | | server_2
1 | | server_3
(3 rows)
2 | | server_1
2 | | server_2
2 | | server_3
(6 rows)
SELECT * FROM _timescaledb_catalog.chunk_server;
chunk_id | server_chunk_id | server_name
@ -110,6 +179,42 @@ FROM show_chunks('disttable');
3 | 1 | _timescaledb_internal | _hyper_1_3_dist_chunk | f | {"time": [1545868800000000, 1546473600000000]}
(3 rows)
-- Show that there are assigned server_chunk_id:s in chunk server mappings
SELECT * FROM _timescaledb_catalog.chunk_server;
chunk_id | server_chunk_id | server_name
----------+-----------------+-------------
1 | 1 | server_1
2 | 1 | server_2
3 | 1 | server_3
(3 rows)
-- Show that chunks are created on remote servers
\c server_1
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices
----------+---------------+-----------------------+-----------------------+---------+------------------------------------------------
1 | 1 | _timescaledb_internal | _hyper_1_1_dist_chunk | r | {"time": [1482969600000000, 1483574400000000]}
(1 row)
\c server_2
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices
----------+---------------+-----------------------+-----------------------+---------+------------------------------------------------
1 | 1 | _timescaledb_internal | _hyper_1_2_dist_chunk | r | {"time": [1514419200000000, 1515024000000000]}
(1 row)
\c server_3
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
chunk_id | hypertable_id | schema_name | table_name | relkind | slices
----------+---------------+-----------------------+-----------------------+---------+------------------------------------------------
1 | 1 | _timescaledb_internal | _hyper_1_3_dist_chunk | r | {"time": [1545868800000000, 1546473600000000]}
(1 row)
\c :TEST_DBNAME :ROLE_SUPERUSER
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
-- The constraints, indexes, and triggers on foreign chunks. Only
-- check constraints should recurse to foreign chunks (although they
-- aren't enforced on a foreign table)
@ -142,8 +247,8 @@ SELECT * FROM _timescaledb_catalog.chunk_server;
chunk_id | server_chunk_id | server_name
----------+-----------------+-------------
1 | 1 | server_1
2 | 2 | server_2
3 | 3 | server_3
2 | 1 | server_2
3 | 1 | server_3
(3 rows)
-- Adding a new trigger should not recurse to foreign chunks
@ -291,14 +396,6 @@ NOTICE: BEGIN FOREIGN MODIFY on chunk "_hyper_1_3_dist_chunk" [server_3]: DELET
NOTICE: BEGIN FOREIGN MODIFY on chunk "_hyper_1_4_dist_chunk" [server_2]: DELETE FROM _timescaledb_internal._hyper_1_4_dist_chunk WHERE ctid = $1
NOTICE: BEGIN FOREIGN MODIFY on chunk "_hyper_1_5_dist_chunk" [server_3]: DELETE FROM _timescaledb_internal._hyper_1_5_dist_chunk WHERE ctid = $1
-- Test underreplicated chunk warning
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4);
NOTICE: adding not-null constraint to column "time"
hypertable_id | schema_name | table_name | created
---------------+-------------+-----------------+---------
2 | public | underreplicated | t
(1 row)
INSERT INTO underreplicated VALUES ('2017-01-01 06:01', 1, 1.1);
WARNING: under-replicated chunk 6, lacks 1 server(s)
NOTICE: BEGIN FOREIGN MODIFY on chunk "_hyper_2_6_dist_chunk" [server_1,server_2,server_3]: INSERT INTO public.underreplicated("time", device, temp) VALUES ($1, $2, $3)
@ -306,13 +403,13 @@ SELECT * FROM _timescaledb_catalog.chunk_server;
chunk_id | server_chunk_id | server_name
----------+-----------------+-------------
1 | 1 | server_1
2 | 2 | server_2
3 | 3 | server_3
4 | 4 | server_2
5 | 5 | server_3
6 | 6 | server_1
6 | 6 | server_2
6 | 6 | server_3
2 | 1 | server_2
3 | 1 | server_3
4 | 2 | server_2
5 | 2 | server_3
6 | 2 | server_1
6 | 3 | server_2
6 | 3 | server_3
(8 rows)
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
@ -322,3 +419,17 @@ FROM show_chunks('underreplicated');
6 | 2 | _timescaledb_internal | _hyper_2_6_dist_chunk | f | {"time": [1482969600000000, 1483574400000000]}
(1 row)
-- Show chunk server mappings
SELECT * FROM _timescaledb_catalog.chunk_server;
chunk_id | server_chunk_id | server_name
----------+-----------------+-------------
1 | 1 | server_1
2 | 1 | server_2
3 | 1 | server_3
4 | 2 | server_2
5 | 2 | server_3
6 | 2 | server_1
6 | 3 | server_2
6 | 3 | server_3
(8 rows)

View File

@ -19,8 +19,8 @@ CREATE USER MAPPING FOR :ROLE_SUPERUSER SERVER server_1 OPTIONS (user 'cluster_u
-- Add servers using TimescaleDB server management API
SELECT * FROM add_server('server_2', password => 'perm_user_pass');
server_name | host | port | database | username | server_username | created
-------------+-----------+------+-----------+-------------------+-------------------+---------
server_2 | localhost | 5432 | db_server | default_perm_user | default_perm_user | t
-------------+-----------+-------+-----------+-------------------+-------------------+---------
server_2 | localhost | 15432 | db_server | default_perm_user | default_perm_user | t
(1 row)
\set ON_ERROR_STOP 0
@ -37,16 +37,16 @@ ERROR: invalid server name
-- Should not generate error with if_not_exists option
SELECT * FROM add_server('server_2', password => 'perm_user_pass', if_not_exists => true);
server_name | host | port | database | username | server_username | created
-------------+-----------+------+-----------+-------------------+-------------------+---------
server_2 | localhost | 5432 | db_server | default_perm_user | default_perm_user | f
-------------+-----------+-------+-----------+-------------------+-------------------+---------
server_2 | localhost | 15432 | db_server | default_perm_user | default_perm_user | f
(1 row)
RESET ROLE;
-- Superuser requires no password
SELECT * FROM add_server('server_3', host => '192.168.3.4', database => 'server_2', remote_user => 'cluster_user_2');
server_name | host | port | database | username | server_username | created
-------------+-------------+------+----------+------------+-----------------+---------
server_3 | 192.168.3.4 | 5432 | server_2 | super_user | cluster_user_2 | t
-------------+-------------+-------+----------+------------+-----------------+---------
server_3 | 192.168.3.4 | 15432 | server_2 | super_user | cluster_user_2 | t
(1 row)
SET ROLE :ROLE_DEFAULT_PERM_USER;
@ -57,16 +57,16 @@ OPTIONS (host 'localhost', port '5432', dbname 'server_4');
SELECT * FROM add_server('server_4', password => 'perm_user_pass', if_not_exists => true);
NOTICE: adding user mapping for "default_perm_user" to server "server_4"
server_name | host | port | database | username | server_username | created
-------------+-----------+------+-----------+-------------------+-------------------+---------
server_4 | localhost | 5432 | db_server | default_perm_user | default_perm_user | f
-------------+-----------+-------+-----------+-------------------+-------------------+---------
server_4 | localhost | 15432 | db_server | default_perm_user | default_perm_user | f
(1 row)
SELECT * FROM show_servers();
server_name | host | port | dbname
-------------+-------------+------+-----------
-------------+-------------+-------+-----------
server_1 | localhost | 5432 | server_1
server_2 | localhost | 5432 | db_server
server_3 | 192.168.3.4 | 5432 | server_2
server_2 | localhost | 15432 | db_server
server_3 | 192.168.3.4 | 15432 | server_2
server_4 | localhost | 5432 | server_4
(4 rows)
@ -74,10 +74,10 @@ SELECT * FROM show_servers();
SELECT server_name, options FROM timescaledb_information.server
ORDER BY server_name;
server_name | options
-------------+----------------------------------------------
-------------+-----------------------------------------------
server_1 | {host=localhost,port=5432,dbname=server_1}
server_2 | {host=localhost,port=5432,dbname=db_server}
server_3 | {host=192.168.3.4,port=5432,dbname=server_2}
server_2 | {host=localhost,port=15432,dbname=db_server}
server_3 | {host=192.168.3.4,port=15432,dbname=server_2}
server_4 | {host=localhost,port=5432,dbname=server_4}
(4 rows)
@ -86,10 +86,10 @@ SELECT srvname, srvoptions
FROM pg_foreign_server
ORDER BY srvname;
srvname | srvoptions
----------+----------------------------------------------
----------+-----------------------------------------------
server_1 | {host=localhost,port=5432,dbname=server_1}
server_2 | {host=localhost,port=5432,dbname=db_server}
server_3 | {host=192.168.3.4,port=5432,dbname=server_2}
server_2 | {host=localhost,port=15432,dbname=db_server}
server_3 | {host=192.168.3.4,port=15432,dbname=server_2}
server_4 | {host=localhost,port=5432,dbname=server_4}
(4 rows)
@ -130,9 +130,9 @@ SET ROLE :ROLE_DEFAULT_PERM_USER;
SELECT srvname, srvoptions
FROM pg_foreign_server;
srvname | srvoptions
----------+---------------------------------------------
----------+----------------------------------------------
server_1 | {host=localhost,port=5432,dbname=server_1}
server_2 | {host=localhost,port=5432,dbname=db_server}
server_2 | {host=localhost,port=15432,dbname=db_server}
server_4 | {host=localhost,port=5432,dbname=server_4}
(3 rows)
@ -162,9 +162,9 @@ SELECT * FROM delete_server('server_3', if_exists => true);
SELECT * FROM show_servers();
server_name | host | port | dbname
-------------+-----------+------+-----------
-------------+-----------+-------+-----------
server_1 | localhost | 5432 | server_1
server_2 | localhost | 5432 | db_server
server_2 | localhost | 15432 | db_server
server_4 | localhost | 5432 | server_4
(3 rows)

View File

@ -17,3 +17,4 @@ log_line_prefix='%u [%p] %d '
# numbers. Setting extra_float_digits=0 retains the old behavior which
# is needed to make our tests work for multiple PostgreSQL versions.
extra_float_digits=0
hba_file='@TEST_OUTPUT_DIR@/pg_hba.conf'

View File

@ -95,8 +95,8 @@ endforeach(TEMPLATE_FILE)
if (NOT ${PG_VERSION} VERSION_LESS "10")
list(APPEND TEST_FILES
hypertable_distributed.sql
chunk_api.sql
hypertable_distributed.sql
timescaledb_fdw.sql
)
list(APPEND TEST_FILES_DEBUG

View File

@ -2,6 +2,10 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
GRANT CREATE ON DATABASE :TEST_DBNAME TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE TABLE chunkapi (time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('chunkapi', 'time', 'device', 2);
@ -36,11 +40,14 @@ SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time: [151502400
\set ON_ERROR_STOP 1
\set VERBOSITY terse
-- Create a chunk that does not collide
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', 'public', 'my_chunk_prefix');
-- Create a chunk that does not collide and with custom schema and name
CREATE SCHEMA "ChunkSchema";
SELECT * FROM _timescaledb_internal.create_chunk('chunkapi',' {"time": [1515024000000000, 1519024000000000], "device": [-9223372036854775808, 1073741823]}', 'ChunkSchema', 'My_chunk_Table_name');
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('chunkapi');
-- Show the new chunk in the public schema
\dt
-- Show the new chunks
\dt public.*
\dt "ChunkSchema".*

View File

@ -5,21 +5,53 @@
-- Need to be super user to create extension and add servers
\c :TEST_DBNAME :ROLE_SUPERUSER;
-- Need explicit password for non-super users to connect
ALTER ROLE :ROLE_DEFAULT_PERM_USER PASSWORD 'perm_user_pass';
GRANT USAGE ON FOREIGN DATA WRAPPER timescaledb_fdw TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
ALTER ROLE :ROLE_DEFAULT_CLUSTER_USER CREATEDB PASSWORD 'pass';
GRANT USAGE ON FOREIGN DATA WRAPPER timescaledb_fdw TO :ROLE_DEFAULT_CLUSTER_USER;
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
-- Add servers using TimescaleDB server management API
SELECT * FROM add_server('server_1', database => 'server_1', password => 'perm_user_pass');
SELECT * FROM add_server('server_2', database => 'server_2', password => 'perm_user_pass');
SELECT * FROM add_server('server_3', database => 'server_3', password => 'perm_user_pass');
CREATE DATABASE server_1;
CREATE DATABASE server_2;
CREATE DATABASE server_3;
-- Create a distributed hypertable. Add a trigger and primary key
-- Add servers using the TimescaleDB server management API
SELECT * FROM add_server('server_1', database => 'server_1', password => 'pass');
SELECT * FROM add_server('server_2', database => 'server_2', password => 'pass');
SELECT * FROM add_server('server_3', database => 'server_3', port => inet_server_port(), password => 'pass');
-- Create distributed hypertables. Add a trigger and primary key
-- constraint to test how those work
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 1);
-- An underreplicated table that will has a replication_factor > num_servers
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4);
-- Create tables on remote servers
\c server_1
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
\c server_2
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
\c server_3
SET client_min_messages TO ERROR;
CREATE EXTENSION timescaledb;
CREATE TABLE disttable(time timestamptz PRIMARY KEY, device int CHECK (device > 0), color int, temp float);
SELECT * FROM create_hypertable('disttable', 'time');
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time');
\c :TEST_DBNAME :ROLE_SUPERUSER
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
CREATE OR REPLACE FUNCTION test_trigger()
RETURNS TRIGGER LANGUAGE PLPGSQL AS
$BODY$
@ -64,6 +96,22 @@ INSERT INTO disttable VALUES
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
-- Show that there are assigned server_chunk_id:s in chunk server mappings
SELECT * FROM _timescaledb_catalog.chunk_server;
-- Show that chunks are created on remote servers
\c server_1
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
\c server_2
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
\c server_3
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('disttable');
\c :TEST_DBNAME :ROLE_SUPERUSER
SET ROLE :ROLE_DEFAULT_CLUSTER_USER;
-- The constraints, indexes, and triggers on foreign chunks. Only
-- check constraints should recurse to foreign chunks (although they
-- aren't enforced on a foreign table)
@ -152,11 +200,11 @@ UPDATE disttable SET tableoid = 4 WHERE device = 2;
DELETE FROM disttable WHERE device = 3;
-- Test underreplicated chunk warning
CREATE TABLE underreplicated(time timestamptz, device int, temp float);
SELECT * FROM create_hypertable('underreplicated', 'time', replication_factor => 4);
INSERT INTO underreplicated VALUES ('2017-01-01 06:01', 1, 1.1);
SELECT * FROM _timescaledb_catalog.chunk_server;
SELECT (_timescaledb_internal.show_chunk(show_chunks)).*
FROM show_chunks('underreplicated');
-- Show chunk server mappings
SELECT * FROM _timescaledb_catalog.chunk_server;