From 1e7b9bc558b1e6f9d036cf10baabc214f8eeb8d6 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Erik=20Nordstr=C3=B6m?=
Date: Thu, 22 Dec 2022 10:24:42 +0100
Subject: [PATCH] Fix issue with deleting data node and dropping database

When deleting a data node with the option `drop_database=>true`, the
database is deleted even if the command fails.

Fix this behavior by dropping the remote database at the end of the
delete data node operation so that other checks fail first.
---
 CHANGELOG.md                              |  1 +
 tsl/src/data_node.c                       |  4 +-
 tsl/test/expected/data_node.out           |  8 +--
 tsl/test/expected/data_node_bootstrap.out | 72 +++++++++++++++++++++--
 tsl/test/sql/data_node.sql                |  9 +--
 tsl/test/sql/data_node_bootstrap.sql      | 37 +++++++++++-
 6 files changed, 115 insertions(+), 16 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 055df8cad..ff2f5bd8b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -8,6 +8,7 @@ accidentally triggering the load of a previous DB version.**
 
 **Bugfixes**
 * #4926 Fix corruption when inserting into compressed chunks
+* #5114 Fix issue with deleting data node and dropping database
 * #5130 Fix CAgg on CAgg variable bucket size validation
 * #5133 Fix CAgg on CAgg using different column order on the original hypertable
 * #5152 Fix adding column with NULL constraint to compressed hypertable
diff --git a/tsl/src/data_node.c b/tsl/src/data_node.c
index f8faaba4d..9439d7f53 100644
--- a/tsl/src/data_node.c
+++ b/tsl/src/data_node.c
@@ -1841,7 +1841,6 @@ data_node_delete(PG_FUNCTION_ARGS)
 	if (drop_database)
 	{
 		TS_PREVENT_IN_TRANSACTION_BLOCK(true);
-		drop_data_node_database(server);
 	}
 
 	/* close any pending connections */
@@ -1873,6 +1872,9 @@ data_node_delete(PG_FUNCTION_ARGS)
 
 	parsetree = (Node *) &stmt;
 
+	if (drop_database)
+		drop_data_node_database(server);
+
 	/* Make sure event triggers are invoked so that all dropped objects
 	 * are collected during a cascading drop. This ensures all dependent
 	 * objects get cleaned up. */
diff --git a/tsl/test/expected/data_node.out b/tsl/test/expected/data_node.out
index d6c5670f1..d7ddf3fa3 100644
--- a/tsl/test/expected/data_node.out
+++ b/tsl/test/expected/data_node.out
@@ -2117,6 +2117,10 @@ SELECT * FROM alter_data_node('data_node_1');
  data_node_1 | foo.bar | 8989 | new_db | t
 (1 row)
 
+DROP TABLE hyper1;
+DROP TABLE hyper2;
+DROP TABLE hyper3;
+DROP TABLE hyper_1dim;
 \set ON_ERROR_STOP 0
 -- test some error cases
 SELECT * FROM alter_data_node(NULL);
@@ -2144,10 +2148,6 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
  data_node_3 | {host=localhost,port=55432,dbname=db_data_node_3,available=true}
 (3 rows)
 
-DROP TABLE hyper1;
-DROP TABLE hyper2;
-DROP TABLE hyper3;
-DROP TABLE hyper_1dim;
 DROP VIEW chunk_query_data_node;
 -- create new session to clear out connection cache
 \c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
diff --git a/tsl/test/expected/data_node_bootstrap.out b/tsl/test/expected/data_node_bootstrap.out
index 34f4017fe..556f72544 100644
--- a/tsl/test/expected/data_node_bootstrap.out
+++ b/tsl/test/expected/data_node_bootstrap.out
@@ -124,11 +124,6 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
 ERROR: delete_data_node() cannot run inside a transaction block
 ROLLBACK;
 \set ON_ERROR_STOP 1
--- Using the drop_database option when there are active connections to
--- the data node should fail. But any connections in the current
--- session should be cleared when dropping the database. To test that
--- the connection is cleared, first create a connection in the
--- connection cache by inserting some data
 CREATE TABLE conditions (time timestamptz, device int, temp float);
 SELECT create_distributed_hypertable('conditions', 'time', 'device');
 WARNING: only one data node was assigned to the hypertable
@@ -138,6 +133,73 @@ NOTICE: adding not-null constraint to column "time"
  (1,public,conditions,t)
 (1 row)
 
+\set ON_ERROR_STOP 0
+-- Should fail because the data node is the last one
+SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
+ERROR: insufficient number of data nodes for distributed hypertable "conditions"
+\set ON_ERROR_STOP 1
+-- Add another data node
+SELECT node_name, database, node_created, database_created, extension_created
+FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
+ node_name | database | node_created | database_created | extension_created
+------------------+------------------+--------------+------------------+-------------------
+ bootstrap_test_2 | bootstrap_test_2 | t | t | t
+(1 row)
+
+SELECT attach_data_node('bootstrap_test_2', 'conditions');
+NOTICE: the number of partitions in dimension "device" was increased to 2
+ attach_data_node
+------------------------
+ (1,1,bootstrap_test_2)
+(1 row)
+
+-- Insert some data into the node
+INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);
+\set ON_ERROR_STOP 0
+-- Should fail because the data node still holds data
+SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
+ERROR: insufficient number of data nodes
+\set ON_ERROR_STOP 1
+-- Data node's database still exists after failure to delete
+SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
+ count
+-------
+ 1
+(1 row)
+
+-- Delete the chunks so that we can delete the data node
+SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);
+ drop_chunks
+---------------------------------------------
+ _timescaledb_internal._dist_hyper_1_1_chunk
+(1 row)
+
+SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
+NOTICE: the number of partitions in dimension "device" of hypertable "conditions" was decreased to 1
+ delete_data_node
+------------------
+ t
+(1 row)
+
+-- The data node's database is dropped
+SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
+ count
+-------
+ 0
+(1 row)
+
+SELECT data_nodes FROM timescaledb_information.hypertables
+WHERE hypertable_name = 'conditions';
+ data_nodes
+------------------
+ {bootstrap_test}
+(1 row)
+
+-- Using the drop_database option when there are active connections to
+-- the data node should fail. But any connections in the current
+-- session should be cleared when dropping the database. To test that
+-- the connection is cleared, first create a connection in the
+-- connection cache by inserting some data
 INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
 DROP TABLE conditions;
 -- Now drop the data node and it should clear the connection from the
diff --git a/tsl/test/sql/data_node.sql b/tsl/test/sql/data_node.sql
index 44b6bea12..e8f8ae436 100644
--- a/tsl/test/sql/data_node.sql
+++ b/tsl/test/sql/data_node.sql
@@ -978,6 +978,11 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
 -- just show current options:
 SELECT * FROM alter_data_node('data_node_1');
 
+DROP TABLE hyper1;
+DROP TABLE hyper2;
+DROP TABLE hyper3;
+DROP TABLE hyper_1dim;
+
 \set ON_ERROR_STOP 0
 -- test some error cases
 SELECT * FROM alter_data_node(NULL);
@@ -991,10 +996,6 @@ SELECT delete_data_node('data_node_1', drop_database=>true);
 SELECT * FROM alter_data_node('data_node_1', host=>'localhost', port=>:old_port, database=>:'DN_DBNAME_1');
 SELECT node_name, options FROM timescaledb_information.data_nodes order by node_name;
 
-DROP TABLE hyper1;
-DROP TABLE hyper2;
-DROP TABLE hyper3;
-DROP TABLE hyper_1dim;
 DROP VIEW chunk_query_data_node;
 
 -- create new session to clear out connection cache
diff --git a/tsl/test/sql/data_node_bootstrap.sql b/tsl/test/sql/data_node_bootstrap.sql
index e47ac982b..be32ec4ca 100644
--- a/tsl/test/sql/data_node_bootstrap.sql
+++ b/tsl/test/sql/data_node_bootstrap.sql
@@ -82,13 +82,46 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
 ROLLBACK;
 \set ON_ERROR_STOP 1
 
+CREATE TABLE conditions (time timestamptz, device int, temp float);
+SELECT create_distributed_hypertable('conditions', 'time', 'device');
+
+\set ON_ERROR_STOP 0
+-- Should fail because the data node is the last one
+SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
+\set ON_ERROR_STOP 1
+
+-- Add another data node
+SELECT node_name, database, node_created, database_created, extension_created
+FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
+SELECT attach_data_node('bootstrap_test_2', 'conditions');
+
+-- Insert some data into the node
+INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);
+
+\set ON_ERROR_STOP 0
+-- Should fail because the data node still holds data
+SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
+\set ON_ERROR_STOP 1
+
+-- Data node's database still exists after failure to delete
+SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
+
+-- Delete the chunks so that we can delete the data node
+SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);
+
+SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
+
+-- The data node's database is dropped
+SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
+
+SELECT data_nodes FROM timescaledb_information.hypertables
+WHERE hypertable_name = 'conditions';
+
 -- Using the drop_database option when there are active connections to
 -- the data node should fail. But any connections in the current
 -- session should be cleared when dropping the database. To test that
 -- the connection is cleared, first create a connection in the
 -- connection cache by inserting some data
-CREATE TABLE conditions (time timestamptz, device int, temp float);
-SELECT create_distributed_hypertable('conditions', 'time', 'device');
 INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
 DROP TABLE conditions;