Create chunk table from access node

Creates a table for a chunk replica on the given data node. The table
gets the same schema and name as the chunk. The created chunk replica
table is not added to the metadata on either the access node or the data node.

The primary goal is to use it during chunk copy/move operations.
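
For illustration only, a minimal call to the new internal function might look as
follows; the chunk and data node names are placeholders (not taken from this
commit's tests) and assume a distributed hypertable with a chunk that is missing
from the target node:

-- Hypothetical usage sketch; chunk and data node names are placeholders.
SELECT _timescaledb_internal.create_chunk_replica_table(
    '_timescaledb_internal._dist_hyper_1_1_chunk',  -- chunk (REGCLASS)
    'data_node_2'                                   -- data_node_name (NAME)
);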
Ruslan Fomkin 2021-04-26 14:15:49 +02:00 committed by Dmitry Simonenko
parent fac03f8882
commit 404f1cdbad
17 changed files with 364 additions and 31 deletions

View File

@ -8,3 +8,8 @@ AS '@MODULE_PATHNAME@', 'ts_chunk_index_clone' LANGUAGE C VOLATILE STRICT;
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_index_replace(chunk_index_oid_old OID, chunk_index_oid_new OID) RETURNS VOID
AS '@MODULE_PATHNAME@', 'ts_chunk_index_replace' LANGUAGE C VOLATILE STRICT;
CREATE OR REPLACE FUNCTION _timescaledb_internal.create_chunk_replica_table(
chunk REGCLASS,
data_node_name NAME
) RETURNS VOID AS '@MODULE_PATHNAME@', 'ts_chunk_create_replica_table' LANGUAGE C VOLATILE;

View File

@ -3,6 +3,7 @@ DROP FUNCTION IF EXISTS _timescaledb_internal.block_new_chunks;
DROP FUNCTION IF EXISTS _timescaledb_internal.allow_new_chunks;
DROP FUNCTION IF EXISTS _timescaledb_internal.refresh_continuous_aggregate;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_table;
DROP FUNCTION IF EXISTS _timescaledb_internal.create_chunk_replica_table;
-- We need to rewrite all continuous aggregates to make sure that the
-- queries do not contain qualification. They will be re-written in

View File

@ -84,6 +84,7 @@ CROSSMODULE_WRAPPER(chunk_set_default_data_node);
CROSSMODULE_WRAPPER(chunk_get_relstats);
CROSSMODULE_WRAPPER(chunk_get_colstats);
CROSSMODULE_WRAPPER(chunk_create_empty_table);
CROSSMODULE_WRAPPER(chunk_create_replica_table);
CROSSMODULE_WRAPPER(timescaledb_fdw_handler);
CROSSMODULE_WRAPPER(timescaledb_fdw_validator);
@ -401,6 +402,7 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
.chunk_get_relstats = error_no_default_fn_pg_community,
.chunk_get_colstats = error_no_default_fn_pg_community,
.chunk_create_empty_table = error_no_default_fn_pg_community,
.chunk_create_replica_table = error_no_default_fn_pg_community,
.hypertable_distributed_set_replication_factor = error_no_default_fn_pg_community,
.update_compressed_chunk_relstats = update_compressed_chunk_relstats_default,
};

View File

@ -161,6 +161,7 @@ typedef struct CrossModuleFunctions
PGFunction chunk_get_colstats;
PGFunction hypertable_distributed_set_replication_factor;
PGFunction chunk_create_empty_table;
PGFunction chunk_create_replica_table;
void (*update_compressed_chunk_relstats)(Oid uncompressed_relid, Oid compressed_relid);
CompressSingleRowState *(*compress_row_init)(int srcht_id, Relation in_rel, Relation out_rel);
TupleTableSlot *(*compress_row_exec)(CompressSingleRowState *cr, TupleTableSlot *slot);

View File

@ -32,12 +32,35 @@
#include <chunk_data_node.h>
#include <extension.h>
#include <errors.h>
#include <error_utils.h>
#include <hypertable_cache.h>
#include "chunk.h"
#include "chunk_api.h"
#include "data_node.h"
#include "deparse.h"
#include "remote/dist_commands.h"
static bool
chunk_match_data_node_by_server(const Chunk *chunk, const ForeignServer *server)
{
bool server_found = false;
ListCell *lc;
foreach (lc, chunk->data_nodes)
{
ChunkDataNode *cdn = lfirst(lc);
if (cdn->foreign_server_oid == server->serverid)
{
server_found = true;
break;
}
}
return server_found;
}
static bool
chunk_set_foreign_server(Chunk *chunk, ForeignServer *new_server)
{
@ -49,21 +72,8 @@ chunk_set_foreign_server(Chunk *chunk, ForeignServer *new_server)
CatalogSecurityContext sec_ctx;
Oid old_server_id;
long updated;
ListCell *lc;
bool new_server_found = false;
foreach (lc, chunk->data_nodes)
{
ChunkDataNode *cdn = lfirst(lc);
if (cdn->foreign_server_oid == new_server->serverid)
{
new_server_found = true;
break;
}
}
if (!new_server_found)
if (!chunk_match_data_node_by_server(chunk, new_server))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("chunk \"%s\" does not exist on data node \"%s\"",
@ -259,3 +269,65 @@ chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type)
return num_results;
}
static bool
chunk_is_distributed(const Chunk *chunk)
{
return chunk->relkind == RELKIND_FOREIGN_TABLE;
}
Datum
chunk_create_replica_table(PG_FUNCTION_ARGS)
{
Oid chunk_relid;
const char *data_node_name;
const Chunk *chunk;
const Hypertable *ht;
const ForeignServer *server;
Cache *hcache = ts_hypertable_cache_pin();
TS_PREVENT_FUNC_IF_READ_ONLY();
GETARG_NOTNULL_OID(chunk_relid, 0, "chunk");
GETARG_NOTNULL_NULLABLE(data_node_name, 1, "data node name", CSTRING);
chunk = ts_chunk_get_by_relid(chunk_relid, false);
if (chunk == NULL)
{
const char *rel_name = get_rel_name(chunk_relid);
if (rel_name == NULL)
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("oid \"%u\" is not a chunk", chunk_relid)));
else
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("relation \"%s\" is not a chunk", rel_name)));
}
if (!chunk_is_distributed(chunk))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("chunk \"%s\" doesn't belong to a distributed hypertable",
get_rel_name(chunk_relid))));
ht = ts_hypertable_cache_get_entry(hcache, chunk->hypertable_relid, CACHE_FLAG_NONE);
ts_hypertable_permissions_check(ht->main_table_relid, GetUserId());
/* Check that the given data node exists */
server = data_node_get_foreign_server(data_node_name, ACL_USAGE, true, false);
/* Check that the hypertable is attached to the data node; error out otherwise */
data_node_hypertable_get_by_node_name(ht, data_node_name, true);
if (chunk_match_data_node_by_server(chunk, server))
ereport(ERROR,
(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
errmsg("chunk \"%s\" already exists on data node \"%s\"",
get_rel_name(chunk_relid),
data_node_name)));
chunk_api_call_create_empty_chunk_table(ht, chunk, data_node_name);
ts_cache_release(hcache);
PG_RETURN_VOID();
}

View File

@ -14,5 +14,6 @@
extern void chunk_update_foreign_server_if_needed(int32 chunk_id, Oid existing_server_id);
extern Datum chunk_set_default_data_node(PG_FUNCTION_ARGS);
extern int chunk_invoke_drop_chunks(Oid relid, Datum older_than, Datum older_than_type);
extern Datum chunk_create_replica_table(PG_FUNCTION_ARGS);
#endif /* TIMESCALEDB_TSL_CHUNK_H */

View File

@ -27,12 +27,10 @@
#include <catalog.h>
#include <compat.h>
#include <chunk.h>
#include <chunk_data_node.h>
#include <errors.h>
#include <error_utils.h>
#include <hypercube.h>
#include <hypertable.h>
#include <hypertable_cache.h>
#include "remote/async.h"
@ -1664,3 +1662,24 @@ chunk_create_empty_table(PG_FUNCTION_ARGS)
PG_RETURN_BOOL(true);
}
#define CREATE_CHUNK_TABLE_NAME "create_chunk_table"
void
chunk_api_call_create_empty_chunk_table(const Hypertable *ht, const Chunk *chunk,
const char *node_name)
{
const char *create_cmd =
psprintf("SELECT %s.%s($1, $2, $3, $4)", INTERNAL_SCHEMA_NAME, CREATE_CHUNK_TABLE_NAME);
const char *params[4] = { quote_qualified_identifier(NameStr(ht->fd.schema_name),
NameStr(ht->fd.table_name)),
chunk_api_dimension_slices_json(chunk, ht),
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name) };
ts_dist_cmd_close_response(
ts_dist_cmd_params_invoke_on_data_nodes(create_cmd,
stmt_params_create_from_values(params, 4),
list_make1((void *) node_name),
true));
}

View File

@ -10,6 +10,8 @@
#include <chunk.h>
#include "hypertable_data_node.h"
extern Datum chunk_show(PG_FUNCTION_ARGS);
extern Datum chunk_create(PG_FUNCTION_ARGS);
extern void chunk_api_create_on_data_nodes(const Chunk *chunk, const Hypertable *ht);
@ -17,5 +19,7 @@ extern Datum chunk_api_get_chunk_relstats(PG_FUNCTION_ARGS);
extern Datum chunk_api_get_chunk_colstats(PG_FUNCTION_ARGS);
extern void chunk_api_update_distributed_hypertable_stats(Oid relid);
extern Datum chunk_create_empty_table(PG_FUNCTION_ARGS);
extern void chunk_api_call_create_empty_chunk_table(const Hypertable *ht, const Chunk *chunk,
const char *node_name);
#endif /* TIMESCALEDB_TSL_CHUNK_API_H */

View File

@ -40,7 +40,6 @@
#include "remote/connection_cache.h"
#include "data_node.h"
#include "remote/utils.h"
#include "hypertable.h"
#include "hypertable_cache.h"
#include "errors.h"
#include "dist_util.h"
@ -1158,16 +1157,18 @@ data_node_detach_hypertable_data_nodes(const char *node_name, List *hypertable_d
repartition);
}
static HypertableDataNode *
get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check, bool attach_check)
HypertableDataNode *
data_node_hypertable_get_by_node_name(const Hypertable *ht, const char *node_name,
bool attach_check)
{
HypertableDataNode *hdn = NULL;
Cache *hcache = ts_hypertable_cache_pin();
Hypertable *ht = ts_hypertable_cache_get_entry(hcache, table_id, CACHE_FLAG_NONE);
ListCell *lc;
if (owner_check)
ts_hypertable_permissions_check(table_id, GetUserId());
if (!hypertable_is_distributed(ht))
ereport(ERROR,
(errcode(ERRCODE_TS_HYPERTABLE_NOT_DISTRIBUTED),
errmsg("hypertable \"%s\" is not distributed",
get_rel_name(ht->main_table_relid))));
foreach (lc, ht->data_nodes)
{
@ -1185,16 +1186,31 @@ get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check,
(errcode(ERRCODE_TS_DATA_NODE_NOT_ATTACHED),
errmsg("data node \"%s\" is not attached to hypertable \"%s\"",
node_name,
get_rel_name(table_id))));
get_rel_name(ht->main_table_relid))));
else
ereport(NOTICE,
(errcode(ERRCODE_TS_DATA_NODE_NOT_ATTACHED),
errmsg("data node \"%s\" is not attached to hypertable \"%s\", "
"skipping",
node_name,
get_rel_name(table_id))));
get_rel_name(ht->main_table_relid))));
}
return hdn;
}
static HypertableDataNode *
get_hypertable_data_node(Oid table_id, const char *node_name, bool owner_check, bool attach_check)
{
HypertableDataNode *hdn = NULL;
Cache *hcache = ts_hypertable_cache_pin();
const Hypertable *ht = ts_hypertable_cache_get_entry(hcache, table_id, CACHE_FLAG_NONE);
if (owner_check)
ts_hypertable_permissions_check(table_id, GetUserId());
hdn = data_node_hypertable_get_by_node_name(ht, node_name, attach_check);
ts_cache_release(hcache);
return hdn;

View File

@ -8,7 +8,10 @@
#include <foreign/foreign.h>
#include <hypertable_data_node.h>
#include "catalog.h"
#include "hypertable.h"
#include "remote/dist_txn.h"
/* Used to skip ACL checks */
@ -39,6 +42,10 @@ extern List *data_node_oids_to_node_name_list(List *data_node_oids, AclMode mode
extern void data_node_name_list_check_acl(List *data_node_names, AclMode mode);
extern Datum data_node_ping(PG_FUNCTION_ARGS);
extern HypertableDataNode *data_node_hypertable_get_by_node_name(const Hypertable *ht,
const char *node_name,
bool attach_check);
/* This should only be used for testing */
extern Datum data_node_add_without_dist_id(PG_FUNCTION_ARGS);

View File

@ -192,6 +192,7 @@ CrossModuleFunctions tsl_cm_functions = {
.chunk_get_relstats = chunk_api_get_chunk_relstats,
.chunk_get_colstats = chunk_api_get_chunk_colstats,
.chunk_create_empty_table = chunk_create_empty_table,
.chunk_create_replica_table = chunk_create_replica_table,
.hypertable_distributed_set_replication_factor = hypertable_set_replication_factor,
.cache_syscache_invalidate = cache_syscache_invalidate,
.update_compressed_chunk_relstats = update_compressed_chunk_relstats,

View File

@ -15,7 +15,6 @@
#include "dist_commands.h"
#include "dist_txn.h"
#include "connection_cache.h"
#include "async.h"
#include "data_node.h"
#include "dist_util.h"
#include "miscadmin.h"
@ -81,7 +80,8 @@ ts_dist_cmd_collect_responses(List *requests)
* server OIDs.
*/
DistCmdResult *
ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transactional)
ts_dist_cmd_params_invoke_on_data_nodes(const char *sql, StmtParams *params, List *data_nodes,
bool transactional)
{
ListCell *lc;
List *requests = NIL;
@ -113,7 +113,11 @@ ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transac
ereport(DEBUG2, (errmsg_internal("sending \"%s\" to data node \"%s\"", sql, node_name)));
req = async_request_send(connection, sql);
if (params == NULL)
req = async_request_send(connection, sql);
else
req = async_request_send_with_params(connection, sql, params, FORMAT_TEXT);
async_request_attach_user_data(req, (char *) node_name);
requests = lappend(requests, req);
}
@ -125,6 +129,12 @@ ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transac
return results;
}
DistCmdResult *
ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transactional)
{
return ts_dist_cmd_params_invoke_on_data_nodes(sql, NULL, data_nodes, transactional);
}
DistCmdResult *
ts_dist_cmd_invoke_on_data_nodes_using_search_path(const char *sql, const char *search_path,
List *node_names, bool transactional)

View File

@ -7,13 +7,16 @@
#define TIMESCALEDB_TSL_REMOTE_DIST_COMMANDS_H
#include <catalog.h>
#include <libpq-fe.h>
#include "async.h"
typedef struct DistCmdResult DistCmdResult;
typedef struct List PreparedDistCmd;
extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *node_names,
bool transactional);
extern DistCmdResult *ts_dist_cmd_params_invoke_on_data_nodes(const char *sql, StmtParams *params,
List *data_nodes, bool transactional);
extern DistCmdResult *ts_dist_cmd_invoke_on_data_nodes_using_search_path(const char *sql,
const char *search_path,
List *node_names,

View File

@ -0,0 +1,114 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Test function _timescaledb_internal.create_chunk_replica_table
-- A table for the first chunk will be created on a data node where it is not yet present.
SELECT chunk_name, data_nodes
FROM timescaledb_information.chunks
WHERE hypertable_name = 'dist_chunk_copy';
chunk_name | data_nodes
-------------------------+---------------------------
_dist_hyper_X_X_chunk | {data_node_1,data_node_2}
_dist_hyper_X_X_chunk | {data_node_2,data_node_3}
_dist_hyper_X_X_chunk | {data_node_1,data_node_3}
_dist_hyper_X_X_chunk | {data_node_1,data_node_2}
_dist_hyper_X_X_chunk | {data_node_2,data_node_3}
(5 rows)
SELECT compress_chunk('_timescaledb_internal._dist_hyper_X_X_chunk');
compress_chunk
-----------------------------------------------
_timescaledb_internal._dist_hyper_X_X_chunk
(1 row)
SELECT compress_chunk('_timescaledb_internal._dist_hyper_X_X_chunk');
compress_chunk
-----------------------------------------------
_timescaledb_internal._dist_hyper_X_X_chunk
(1 row)
-- A non-distributed chunk will be used to test an error
SELECT chunk_name
FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions';
chunk_name
--------------------
_hyper_X_X_chunk
_hyper_X_X_chunk
(2 rows)
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.create_chunk_replica_table(NULL, 'data_node_1');
ERROR: chunk cannot be NULL
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', NULL);
ERROR: data node name cannot be NULL
SELECT _timescaledb_internal.create_chunk_replica_table(1234, 'data_node_1');
ERROR: oid "1234" is not a chunk
SELECT _timescaledb_internal.create_chunk_replica_table('metrics_int', 'data_node_1');
ERROR: relation "metrics_int" is not a chunk
SELECT _timescaledb_internal.create_chunk_replica_table('conditions', 'data_node_1');
ERROR: relation "conditions" is not a chunk
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._hyper_X_X_chunk', 'data_node_1');
ERROR: chunk "_hyper_X_X_chunk" doesn't belong to a distributed hypertable
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_1');
ERROR: chunk "_dist_hyper_X_X_chunk" already exists on data node "data_node_1"
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_1');
ERROR: relation "_timescaledb_internal._dist_hyper_X_X_chunk" does not exist at character 57
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_4');
ERROR: server "data_node_4" does not exist
BEGIN READ ONLY;
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3');
ERROR: cannot execute create_chunk_replica_table() in a read-only transaction
COMMIT;
\set ON_ERROR_STOP 1
\c data_node_3
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '_timescaledb_internal' AND
(table_name LIKE '_dist_hyper_15_%' OR table_name LIKE 'compress_hyper_5_%');
table_name
--------------------------
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
compress_hyper_X_X_chunk
(4 rows)
\c :TEST_DBNAME
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3');
create_chunk_replica_table
----------------------------
(1 row)
-- Test that the table cannot be created since it was already created on the data node
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3');
ERROR: [data_node_3]: relation "_dist_hyper_X_X_chunk" already exists
\set ON_ERROR_STOP 1
-- Creating a chunk replica table currently ignores compression:
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_X_X_chunk', 'data_node_3');
create_chunk_replica_table
----------------------------
(1 row)
\c data_node_3
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '_timescaledb_internal' AND
(table_name LIKE '_dist_hyper_15_%' OR table_name LIKE 'compress_hyper_5_%');
table_name
--------------------------
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
_dist_hyper_X_X_chunk
compress_hyper_X_X_chunk
(6 rows)
\c :TEST_DBNAME
DROP TABLE dist_chunk_copy;
CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_X_X_chunk $$, '{"data_node_3"}');
CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_X_X_chunk $$, '{"data_node_3"}');

View File

@ -1,6 +1,10 @@
set(TEST_FILES_SHARED
constify_timestamptz_op_interval.sql constraint_exclusion_prepared.sql
decompress_placeholdervar.sql dist_gapfill.sql dist_insert.sql
constify_timestamptz_op_interval.sql
constraint_exclusion_prepared.sql
decompress_placeholdervar.sql
dist_chunk.sql
dist_gapfill.sql
dist_insert.sql
dist_distinct.sql)
if(CMAKE_BUILD_TYPE MATCHES Debug)

View File

@ -0,0 +1,61 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- Test function _timescaledb_internal.create_chunk_replica_table
-- A table for the first chunk will be created on a data node where it is not yet present.
SELECT chunk_name, data_nodes
FROM timescaledb_information.chunks
WHERE hypertable_name = 'dist_chunk_copy';
SELECT compress_chunk('_timescaledb_internal._dist_hyper_15_68_chunk');
SELECT compress_chunk('_timescaledb_internal._dist_hyper_15_70_chunk');
-- A non-distributed chunk will be used to test an error
SELECT chunk_name
FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions';
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.create_chunk_replica_table(NULL, 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', NULL);
SELECT _timescaledb_internal.create_chunk_replica_table(1234, 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('metrics_int', 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('conditions', 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._hyper_10_51_chunk', 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_27_chunk', 'data_node_1');
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_4');
BEGIN READ ONLY;
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_3');
COMMIT;
\set ON_ERROR_STOP 1
\c data_node_3
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '_timescaledb_internal' AND
(table_name LIKE '_dist_hyper_15_%' OR table_name LIKE 'compress_hyper_5_%');
\c :TEST_DBNAME
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_3');
-- Test that the table cannot be created since it was already created on the data node
\set ON_ERROR_STOP 0
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_67_chunk', 'data_node_3');
\set ON_ERROR_STOP 1
-- Creating a chunk replica table currently ignores compression:
SELECT _timescaledb_internal.create_chunk_replica_table('_timescaledb_internal._dist_hyper_15_70_chunk', 'data_node_3');
\c data_node_3
SELECT table_name
FROM information_schema.tables
WHERE table_schema = '_timescaledb_internal' AND
(table_name LIKE '_dist_hyper_15_%' OR table_name LIKE 'compress_hyper_5_%');
\c :TEST_DBNAME
DROP TABLE dist_chunk_copy;
CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_15_67_chunk $$, '{"data_node_3"}');
CALL distributed_exec($$ DROP TABLE _timescaledb_internal._dist_hyper_15_70_chunk $$, '{"data_node_3"}');

View File

@ -213,3 +213,15 @@ INSERT INTO metrics_int_dist1 VALUES
(5,1,2,10.0),
(100,1,1,0.0),
(100,1,2,-100.0);
CREATE TABLE dist_chunk_copy (
time timestamptz NOT NULL,
value integer);
SELECT create_distributed_hypertable('dist_chunk_copy', 'time', replication_factor => 2);
ALTER TABLE dist_chunk_copy SET (timescaledb.compress);
SELECT setseed(0);
INSERT INTO dist_chunk_copy
SELECT t, random() * 20
FROM generate_series('2020-01-01'::timestamp, '2020-01-25'::timestamp, '1d') t;