Move copy/move chunk tests to separate file

Fix the flaky data_node test by moving the tests associated with copy/move
chunk functionality into a separate test file/group and running them with an
increased number of background workers (using max_bgw_8.conf).

Issue: #4047
Author: Dmitry Simonenko, 2022-02-21 11:11:28 +03:00 (committed by Dmitry Simonenko)
Parent: eedaaecc46
Commit: 0f7ab056c2
7 changed files with 492 additions and 474 deletions
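
For context, the relocated tests exercise TimescaleDB's experimental chunk copy/move API. A condensed sketch of the pattern, taken from the calls in the new dist_move_chunk test below (the chunk and data node names are simply the values used by that test):

-- Copy a chunk of a distributed hypertable to a second data node, then move
-- the replica on to a third node. Both procedures are experimental, must be
-- run on the access node, and cannot run inside a transaction block.
CALL timescaledb_experimental.copy_chunk(
    chunk => '_timescaledb_internal._dist_hyper_1_1_chunk',
    source_node => 'data_node_1',
    destination_node => 'data_node_2');
CALL timescaledb_experimental.move_chunk(
    chunk => '_timescaledb_internal._dist_hyper_1_1_chunk',
    source_node => 'data_node_2',
    destination_node => 'data_node_3');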

@@ -1361,341 +1361,6 @@ SELECT * FROM delete_data_node('data_node_6');
 t
(1 row)
--
-- Tests for copy/move chunk API
--
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;
DROP DATABASE :DN_DBNAME_3;
SELECT * FROM add_data_node('data_node_1', host => 'localhost',
database => :'DN_DBNAME_1');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------+--------------+------------------+-------------------
data_node_1 | localhost | 55432 | db_data_node_1 | t | t | t
(1 row)
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
database => :'DN_DBNAME_2');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------+--------------+------------------+-------------------
data_node_2 | localhost | 55432 | db_data_node_2 | t | t | t
(1 row)
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
database => :'DN_DBNAME_3');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------+--------------+------------------+-------------------
data_node_3 | localhost | 55432 | db_data_node_3 | t | t | t
(1 row)
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
SET ROLE :ROLE_1;
CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3);
create_distributed_hypertable
-------------------------------
(9,public,dist_test,t)
(1 row)
INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('dist_test');
show_chunks
----------------------------------------------
_timescaledb_internal._dist_hyper_9_12_chunk
_timescaledb_internal._dist_hyper_9_13_chunk
_timescaledb_internal._dist_hyper_9_14_chunk
_timescaledb_internal._dist_hyper_9_15_chunk
(4 rows)
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_12_chunk
_timescaledb_internal._dist_hyper_9_15_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_13_chunk
(1 row)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_14_chunk
(1 row)
remote_exec
-------------
(1 row)
SELECT sum(device) FROM dist_test;
sum
-----
846
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
NOTICE: [data_node_1]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk
NOTICE: [data_node_1]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
-- ensure data node name is provided and has proper type
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> null, destination_node => 'data_node_2');
ERROR: invalid source or destination node
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => null);
ERROR: invalid source or destination node
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 2);
ERROR: procedure timescaledb_experimental.copy_chunk(chunk => unknown, source_node => unknown, destination_node => integer) does not exist at character 6
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node => 'data_node_1');
ERROR: invalid source or destination node
\set ON_ERROR_STOP 1
-- ensure functions can't be run in read only mode
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: cannot execute move_chunk() in a read-only transaction
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: cannot execute copy_chunk() in a read-only transaction
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
-- ensure functions can't be run in an active multi-statement transaction
\set ON_ERROR_STOP 0
BEGIN;
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: move_chunk cannot run inside a transaction block
ROLLBACK;
BEGIN;
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: copy_chunk cannot run inside a transaction block
ROLLBACK;
\set ON_ERROR_STOP 1
-- must be superuser to copy/move chunks
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: must be superuser to copy/move chunk to data node
\set ON_ERROR_STOP 1
SET ROLE :ROLE_1;
-- can't run copy/move chunk on a data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: function must be run on the access node only
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: function must be run on the access node only
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- ensure that hypertable chunks are distributed
CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_hypertable('nondist_test', 'time', 'device', 3);
create_hypertable
----------------------------
(10,public,nondist_test,t)
(1 row)
INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('nondist_test');
show_chunks
------------------------------------------
_timescaledb_internal._hyper_10_16_chunk
_timescaledb_internal._hyper_10_17_chunk
_timescaledb_internal._hyper_10_18_chunk
_timescaledb_internal._hyper_10_19_chunk
(4 rows)
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_hyper_10_16_chunk" is not a valid remote chunk
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_hyper_10_16_chunk" is not a valid remote chunk
\set ON_ERROR_STOP 1
-- ensure that distributed chunk is not compressed
ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
SELECT compress_chunk('_timescaledb_internal._dist_hyper_9_15_chunk');
compress_chunk
----------------------------------------------
_timescaledb_internal._dist_hyper_9_15_chunk
(1 row)
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_dist_hyper_9_15_chunk" is a compressed remote chunk. Chunk copy/move not supported currently on compressed chunks
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_dist_hyper_9_15_chunk" is a compressed remote chunk. Chunk copy/move not supported currently on compressed chunks
\set ON_ERROR_STOP 1
-- ensure that chunk exists on a source data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_13_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: chunk "_dist_hyper_9_13_chunk" does not exist on source data node "data_node_1"
\set ON_ERROR_STOP 1
-- do the actual copy
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_12_chunk
_timescaledb_internal._dist_hyper_9_15_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_13_chunk
_timescaledb_internal._dist_hyper_9_12_chunk
(2 rows)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_14_chunk
(1 row)
remote_exec
-------------
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
NOTICE: [data_node_2]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk
NOTICE: [data_node_2]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
-- ensure that chunk exists on a destination data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: chunk "_dist_hyper_9_12_chunk" already exists on destination data node "data_node_2"
\set ON_ERROR_STOP 1
-- now try to move the same chunk from data node 2 to 3
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_12_chunk
_timescaledb_internal._dist_hyper_9_15_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_13_chunk
(1 row)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
--------------------------------------------
_timescaledb_internal._dist_hyper_9_14_chunk
_timescaledb_internal._dist_hyper_9_12_chunk
(2 rows)
remote_exec
-------------
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
NOTICE: [data_node_3]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk
NOTICE: [data_node_3]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
SELECT sum(device) FROM dist_test;
sum
-----
846
(1 row)
-- Check that they can be called from inside a procedure without
-- generating warnings or error messages (#3495).
CREATE OR REPLACE PROCEDURE copy_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.copy_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
CREATE OR REPLACE PROCEDURE move_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.move_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
chunk_name | replica_nodes | non_replica_nodes
------------------------+---------------------------+---------------------------
_dist_hyper_9_12_chunk | {data_node_1,data_node_3} | {data_node_2}
_dist_hyper_9_13_chunk | {data_node_2} | {data_node_1,data_node_3}
_dist_hyper_9_14_chunk | {data_node_3} | {data_node_1,data_node_2}
_dist_hyper_9_15_chunk | {data_node_1} | {data_node_2,data_node_3}
(4 rows)
CALL copy_wrapper('_timescaledb_internal._dist_hyper_9_14_chunk', 'data_node_3', 'data_node_2');
CALL move_wrapper('_timescaledb_internal._dist_hyper_9_13_chunk', 'data_node_2', 'data_node_1');
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
chunk_name | replica_nodes | non_replica_nodes
------------------------+---------------------------+---------------------------
_dist_hyper_9_12_chunk | {data_node_1,data_node_3} | {data_node_2}
_dist_hyper_9_13_chunk | {data_node_1} | {data_node_2,data_node_3}
_dist_hyper_9_14_chunk | {data_node_3,data_node_2} | {data_node_1}
_dist_hyper_9_15_chunk | {data_node_1} | {data_node_2,data_node_3}
(4 rows)
DROP PROCEDURE copy_wrapper;
DROP PROCEDURE move_wrapper;
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;

@@ -0,0 +1,341 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
\set DN_DBNAME_3 :TEST_DBNAME _3
SELECT * FROM add_data_node('data_node_1', host => 'localhost',
database => :'DN_DBNAME_1');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------------+--------------+------------------+-------------------
data_node_1 | localhost | 55432 | db_dist_move_chunk_1 | t | t | t
(1 row)
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
database => :'DN_DBNAME_2');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------------+--------------+------------------+-------------------
data_node_2 | localhost | 55432 | db_dist_move_chunk_2 | t | t | t
(1 row)
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
database => :'DN_DBNAME_3');
node_name | host | port | database | node_created | database_created | extension_created
-------------+-----------+-------+----------------------+--------------+------------------+-------------------
data_node_3 | localhost | 55432 | db_dist_move_chunk_3 | t | t | t
(1 row)
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
SET ROLE :ROLE_1;
CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3);
create_distributed_hypertable
-------------------------------
(1,public,dist_test,t)
(1 row)
INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('dist_test');
show_chunks
---------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
_timescaledb_internal._dist_hyper_1_2_chunk
_timescaledb_internal._dist_hyper_1_3_chunk
_timescaledb_internal._dist_hyper_1_4_chunk
(4 rows)
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
_timescaledb_internal._dist_hyper_1_4_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_2_chunk
(1 row)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_3_chunk
(1 row)
remote_exec
-------------
(1 row)
SELECT sum(device) FROM dist_test;
sum
-----
846
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
NOTICE: [data_node_1]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk
NOTICE: [data_node_1]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
-- ensure data node name is provided and has proper type
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> null, destination_node => 'data_node_2');
ERROR: invalid source or destination node
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => null);
ERROR: invalid source or destination node
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 2);
ERROR: procedure timescaledb_experimental.copy_chunk(chunk => unknown, source_node => unknown, destination_node => integer) does not exist at character 6
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node => 'data_node_1');
ERROR: invalid source or destination node
\set ON_ERROR_STOP 1
-- ensure functions can't be run in read only mode
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: cannot execute move_chunk() in a read-only transaction
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: cannot execute copy_chunk() in a read-only transaction
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
-- ensure functions can't be run in an active multi-statement transaction
\set ON_ERROR_STOP 0
BEGIN;
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: move_chunk cannot run inside a transaction block
ROLLBACK;
BEGIN;
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: copy_chunk cannot run inside a transaction block
ROLLBACK;
\set ON_ERROR_STOP 1
-- must be superuser to copy/move chunks
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: must be superuser to copy/move chunk to data node
\set ON_ERROR_STOP 1
SET ROLE :ROLE_1;
-- can't run copy/move chunk on a data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: function must be run on the access node only
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: function must be run on the access node only
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- ensure that hypertable chunks are distributed
CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_hypertable('nondist_test', 'time', 'device', 3);
create_hypertable
---------------------------
(2,public,nondist_test,t)
(1 row)
INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('nondist_test');
show_chunks
----------------------------------------
_timescaledb_internal._hyper_2_5_chunk
_timescaledb_internal._hyper_2_6_chunk
_timescaledb_internal._hyper_2_7_chunk
_timescaledb_internal._hyper_2_8_chunk
(4 rows)
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_2_5_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_hyper_2_5_chunk" is not a valid remote chunk
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_2_5_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_hyper_2_5_chunk" is not a valid remote chunk
\set ON_ERROR_STOP 1
-- ensure that distributed chunk is not compressed
ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
SELECT compress_chunk('_timescaledb_internal._dist_hyper_1_4_chunk');
compress_chunk
---------------------------------------------
_timescaledb_internal._dist_hyper_1_4_chunk
(1 row)
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_4_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_dist_hyper_1_4_chunk" is a compressed remote chunk. Chunk copy/move not supported currently on compressed chunks
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_4_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: "_dist_hyper_1_4_chunk" is a compressed remote chunk. Chunk copy/move not supported currently on compressed chunks
\set ON_ERROR_STOP 1
-- ensure that chunk exists on a source data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_2_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: chunk "_dist_hyper_1_2_chunk" does not exist on source data node "data_node_1"
\set ON_ERROR_STOP 1
-- do the actual copy
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
_timescaledb_internal._dist_hyper_1_4_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_2_chunk
_timescaledb_internal._dist_hyper_1_1_chunk
(2 rows)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_3_chunk
(1 row)
remote_exec
-------------
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
NOTICE: [data_node_2]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk
NOTICE: [data_node_2]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
-- ensure that chunk exists on a destination data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ERROR: chunk "_dist_hyper_1_1_chunk" already exists on destination data node "data_node_2"
\set ON_ERROR_STOP 1
-- now try to move the same chunk from data node 2 to 3
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
NOTICE: [data_node_1]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_1]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
_timescaledb_internal._dist_hyper_1_4_chunk
(2 rows)
NOTICE: [data_node_2]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_2]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_2_chunk
(1 row)
NOTICE: [data_node_3]: SELECT * from show_chunks('dist_test')
NOTICE: [data_node_3]:
show_chunks
-------------------------------------------
_timescaledb_internal._dist_hyper_1_3_chunk
_timescaledb_internal._dist_hyper_1_1_chunk
(2 rows)
remote_exec
-------------
(1 row)
SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
NOTICE: [data_node_3]: SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk
NOTICE: [data_node_3]:
sum
---
406
(1 row)
remote_exec
-------------
(1 row)
SELECT sum(device) FROM dist_test;
sum
-----
846
(1 row)
-- Check that they can be called from inside a procedure without
-- generating warnings or error messages (#3495).
CREATE OR REPLACE PROCEDURE copy_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.copy_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
CREATE OR REPLACE PROCEDURE move_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.move_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
chunk_name | replica_nodes | non_replica_nodes
-----------------------+---------------------------+---------------------------
_dist_hyper_1_1_chunk | {data_node_1,data_node_3} | {data_node_2}
_dist_hyper_1_2_chunk | {data_node_2} | {data_node_1,data_node_3}
_dist_hyper_1_3_chunk | {data_node_3} | {data_node_1,data_node_2}
_dist_hyper_1_4_chunk | {data_node_1} | {data_node_2,data_node_3}
(4 rows)
CALL copy_wrapper('_timescaledb_internal._dist_hyper_1_3_chunk', 'data_node_3', 'data_node_2');
CALL move_wrapper('_timescaledb_internal._dist_hyper_1_2_chunk', 'data_node_2', 'data_node_1');
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
chunk_name | replica_nodes | non_replica_nodes
-----------------------+---------------------------+---------------------------
_dist_hyper_1_1_chunk | {data_node_1,data_node_3} | {data_node_2}
_dist_hyper_1_2_chunk | {data_node_1} | {data_node_2,data_node_3}
_dist_hyper_1_3_chunk | {data_node_3,data_node_2} | {data_node_1}
_dist_hyper_1_4_chunk | {data_node_1} | {data_node_2,data_node_3}
(4 rows)
DROP PROCEDURE copy_wrapper;
DROP PROCEDURE move_wrapper;
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;
DROP DATABASE :DN_DBNAME_3;

@@ -1,6 +1,6 @@
# This section has to be equivalent to test/postgresql.conf
shared_preload_libraries=timescaledb
-max_worker_processes=16
+max_worker_processes=24
autovacuum=false
random_page_cost=1.0
@TELEMETRY_DEFAULT_SETTING@

@@ -1,6 +1,6 @@
# This section has to be equivalent to test/postgresql.conf
shared_preload_libraries=timescaledb
-max_worker_processes=16
+max_worker_processes=24
autovacuum=false
random_page_cost=1.0
timescaledb.telemetry_level=off

@@ -110,7 +110,7 @@ set(SOLO_TESTS_postgresql
set(TEST_FILES_max_bgw_8)
set(SOLO_TESTS_max_bgw_8 bgw_db_scheduler)
if(CMAKE_BUILD_TYPE MATCHES Debug)
-  list(APPEND TEST_FILES_max_bgw_8 bgw_db_scheduler.sql)
+  list(APPEND TEST_FILES_max_bgw_8 bgw_db_scheduler.sql dist_move_chunk.sql)
endif()
set(TEST_TEMPLATES

@@ -703,142 +703,6 @@ SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN
SELECT * FROM add_data_node('data_node_6', host => 'localhost', database => :'DN_DBNAME_6', password => :'ROLE_3_PASS');
SELECT * FROM delete_data_node('data_node_6');
--
-- Tests for copy/move chunk API
--
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;
DROP DATABASE :DN_DBNAME_3;
SELECT * FROM add_data_node('data_node_1', host => 'localhost',
database => :'DN_DBNAME_1');
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
database => :'DN_DBNAME_2');
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
database => :'DN_DBNAME_3');
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
SET ROLE :ROLE_1;
CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3);
INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('dist_test');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT sum(device) FROM dist_test;
SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
-- ensure data node name is provided and has proper type
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> null, destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => null);
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 2);
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node => 'data_node_1');
\set ON_ERROR_STOP 1
-- ensure functions can't be run in read only mode
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
-- ensure functions can't be run in an active multi-statement transaction
\set ON_ERROR_STOP 0
BEGIN;
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ROLLBACK;
BEGIN;
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ROLLBACK;
\set ON_ERROR_STOP 1
-- must be superuser to copy/move chunks
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
SET ROLE :ROLE_1;
-- can't run copy/move chunk on a data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- ensure that hypertable chunks are distributed
CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_hypertable('nondist_test', 'time', 'device', 3);
INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('nondist_test');
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_10_16_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- ensure that distributed chunk is not compressed
ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
SELECT compress_chunk('_timescaledb_internal._dist_hyper_9_15_chunk');
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_15_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- ensure that chunk exists on a source data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_13_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- do the actual copy
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
-- ensure that chunk exists on a destination data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- now try to move the same chunk from data node 2 to 3
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_9_12_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_9_12_chunk; $$);
SELECT sum(device) FROM dist_test;
-- Check that they can be called from inside a procedure without
-- generating warnings or error messages (#3495).
CREATE OR REPLACE PROCEDURE copy_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.copy_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
CREATE OR REPLACE PROCEDURE move_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.move_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
CALL copy_wrapper('_timescaledb_internal._dist_hyper_9_14_chunk', 'data_node_3', 'data_node_2');
CALL move_wrapper('_timescaledb_internal._dist_hyper_9_13_chunk', 'data_node_2', 'data_node_1');
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
DROP PROCEDURE copy_wrapper;
DROP PROCEDURE move_wrapper;
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;

@@ -0,0 +1,148 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
\unset ECHO
\o /dev/null
\ir include/remote_exec.sql
\o
\set ECHO all
\set DN_DBNAME_1 :TEST_DBNAME _1
\set DN_DBNAME_2 :TEST_DBNAME _2
\set DN_DBNAME_3 :TEST_DBNAME _3
SELECT * FROM add_data_node('data_node_1', host => 'localhost',
database => :'DN_DBNAME_1');
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
database => :'DN_DBNAME_2');
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
database => :'DN_DBNAME_3');
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO PUBLIC;
SET ROLE :ROLE_1;
CREATE TABLE dist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_distributed_hypertable('dist_test', 'time', 'device', 3);
INSERT INTO dist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('dist_test');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT sum(device) FROM dist_test;
SELECT * FROM test.remote_exec(ARRAY['data_node_1'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
-- ensure data node name is provided and has proper type
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> null, destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => null);
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 2);
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node => 'data_node_1');
\set ON_ERROR_STOP 1
-- ensure functions can't be run in read only mode
SET default_transaction_read_only TO on;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
SET default_transaction_read_only TO off;
-- ensure functions can't be run in an active multi-statement transaction
\set ON_ERROR_STOP 0
BEGIN;
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ROLLBACK;
BEGIN;
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
ROLLBACK;
\set ON_ERROR_STOP 1
-- must be superuser to copy/move chunks
SET ROLE :ROLE_DEFAULT_PERM_USER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
SET ROLE :ROLE_1;
-- can't run copy/move chunk on a data node
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- ensure that hypertable chunks are distributed
CREATE TABLE nondist_test(time timestamp NOT NULL, device int, temp float);
SELECT create_hypertable('nondist_test', 'time', 'device', 3);
INSERT INTO nondist_test SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, 0.10 FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-08 1:00', '1 hour') t;
SELECT * from show_chunks('nondist_test');
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._hyper_2_5_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._hyper_2_5_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- ensure that distributed chunk is not compressed
ALTER TABLE dist_test SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
SELECT compress_chunk('_timescaledb_internal._dist_hyper_1_4_chunk');
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_4_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_4_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- ensure that chunk exists on a source data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_2_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- do the actual copy
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT * FROM test.remote_exec(ARRAY['data_node_2'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
-- ensure that chunk exists on a destination data node
\set ON_ERROR_STOP 0
CALL timescaledb_experimental.copy_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_1', destination_node => 'data_node_2');
\set ON_ERROR_STOP 1
-- now try to move the same chunk from data node 2 to 3
CALL timescaledb_experimental.move_chunk(chunk=>'_timescaledb_internal._dist_hyper_1_1_chunk', source_node=> 'data_node_2', destination_node => 'data_node_3');
SELECT * FROM test.remote_exec(NULL, $$ SELECT * from show_chunks('dist_test'); $$);
SELECT * FROM test.remote_exec(ARRAY['data_node_3'], $$ SELECT sum(device) FROM _timescaledb_internal._dist_hyper_1_1_chunk; $$);
SELECT sum(device) FROM dist_test;
-- Check that they can be called from inside a procedure without
-- generating warnings or error messages (#3495).
CREATE OR REPLACE PROCEDURE copy_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.copy_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
CREATE OR REPLACE PROCEDURE move_wrapper(regclass, text, text)
AS $$
BEGIN
CALL timescaledb_experimental.move_chunk($1, $2, $3);
END
$$
LANGUAGE PLPGSQL;
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
CALL copy_wrapper('_timescaledb_internal._dist_hyper_1_3_chunk', 'data_node_3', 'data_node_2');
CALL move_wrapper('_timescaledb_internal._dist_hyper_1_2_chunk', 'data_node_2', 'data_node_1');
SELECT chunk_name, replica_nodes, non_replica_nodes
FROM timescaledb_experimental.chunk_replication_status;
DROP PROCEDURE copy_wrapper;
DROP PROCEDURE move_wrapper;
RESET ROLE;
DROP DATABASE :DN_DBNAME_1;
DROP DATABASE :DN_DBNAME_2;
DROP DATABASE :DN_DBNAME_3;