Fix issue with deleting data node and dropping database
When deleting a data node with the option `drop_database=>true`, the remote database is dropped even if the command subsequently fails. Fix this by dropping the remote database at the end of the delete data node operation, so that all other checks run first and a failed call leaves the database intact.
parent 4118a72575
commit 1e7b9bc558
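As an illustration of the guarantee this change provides (a sketch, not part of the commit): with the drop moved to the end of the operation, a `delete_data_node(..., drop_database => true)` call that fails one of the earlier checks must leave the remote database untouched. The snippet below assumes a multi-node setup in which data node `bootstrap_test_2` still holds chunks of a distributed hypertable, mirroring the regression test added further down.

\set ON_ERROR_STOP 0
-- Expected to fail because the data node still holds data; before this fix
-- the remote database could already have been dropped at this point.
SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
\set ON_ERROR_STOP 1
-- After the failed call, the data node's database must still exist
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';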
@@ -8,6 +8,7 @@ accidentally triggering the load of a previous DB version.**

**Bugfixes**
* #4926 Fix corruption when inserting into compressed chunks
* #5114 Fix issue with deleting data node and dropping database
* #5130 Fix CAgg on CAgg variable bucket size validation
* #5133 Fix CAgg on CAgg using different column order on the original hypertable
* #5152 Fix adding column with NULL constraint to compressed hypertable
@@ -1841,7 +1841,6 @@ data_node_delete(PG_FUNCTION_ARGS)
	if (drop_database)
	{
		TS_PREVENT_IN_TRANSACTION_BLOCK(true);
		drop_data_node_database(server);
	}

	/* close any pending connections */
@@ -1873,6 +1872,9 @@ data_node_delete(PG_FUNCTION_ARGS)

	parsetree = (Node *) &stmt;

	if (drop_database)
		drop_data_node_database(server);

	/* Make sure event triggers are invoked so that all dropped objects
	 * are collected during a cascading drop. This ensures all dependent
	 * objects get cleaned up. */
@@ -2117,6 +2117,10 @@ SELECT * FROM alter_data_node('data_node_1');
data_node_1 | foo.bar | 8989 | new_db | t
(1 row)

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
\set ON_ERROR_STOP 0
-- test some error cases
SELECT * FROM alter_data_node(NULL);
@@ -2144,10 +2148,6 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
data_node_3 | {host=localhost,port=55432,dbname=db_data_node_3,available=true}
(3 rows)

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
DROP VIEW chunk_query_data_node;
-- create new session to clear out connection cache
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
@@ -124,11 +124,6 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ERROR: delete_data_node() cannot run inside a transaction block
ROLLBACK;
\set ON_ERROR_STOP 1
-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');
WARNING: only one data node was assigned to the hypertable
@@ -138,6 +133,73 @@ NOTICE: adding not-null constraint to column "time"
(1,public,conditions,t)
(1 row)

\set ON_ERROR_STOP 0
-- Should fail because the data node is the last one
SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ERROR: insufficient number of data nodes for distributed hypertable "conditions"
\set ON_ERROR_STOP 1
-- Add another data node
SELECT node_name, database, node_created, database_created, extension_created
FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
node_name | database | node_created | database_created | extension_created
------------------+------------------+--------------+------------------+-------------------
bootstrap_test_2 | bootstrap_test_2 | t | t | t
(1 row)

SELECT attach_data_node('bootstrap_test_2', 'conditions');
NOTICE: the number of partitions in dimension "device" was increased to 2
attach_data_node
------------------------
(1,1,bootstrap_test_2)
(1 row)

-- Insert some data into the node
INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);
\set ON_ERROR_STOP 0
-- Should fail because the data node still holds data
SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
ERROR: insufficient number of data nodes
\set ON_ERROR_STOP 1
-- Data node's database still exists after failure to delete
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
count
-------
1
(1 row)

-- Delete the chunks so that we can delete the data node
SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);
drop_chunks
---------------------------------------------
_timescaledb_internal._dist_hyper_1_1_chunk
(1 row)

SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
NOTICE: the number of partitions in dimension "device" of hypertable "conditions" was decreased to 1
delete_data_node
------------------
t
(1 row)

-- The data node's database is dropped
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';
count
-------
0
(1 row)

SELECT data_nodes FROM timescaledb_information.hypertables
WHERE hypertable_name = 'conditions';
data_nodes
------------------
{bootstrap_test}
(1 row)

-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
DROP TABLE conditions;
-- Now drop the data node and it should clear the connection from the
@@ -978,6 +978,11 @@ SELECT node_name, options FROM timescaledb_information.data_nodes order by node_
-- just show current options:
SELECT * FROM alter_data_node('data_node_1');

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;

\set ON_ERROR_STOP 0
-- test some error cases
SELECT * FROM alter_data_node(NULL);
@@ -991,10 +996,6 @@ SELECT delete_data_node('data_node_1', drop_database=>true);
SELECT * FROM alter_data_node('data_node_1', host=>'localhost', port=>:old_port, database=>:'DN_DBNAME_1');
SELECT node_name, options FROM timescaledb_information.data_nodes order by node_name;

DROP TABLE hyper1;
DROP TABLE hyper2;
DROP TABLE hyper3;
DROP TABLE hyper_1dim;
DROP VIEW chunk_query_data_node;

-- create new session to clear out connection cache
@@ -82,13 +82,46 @@ SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
ROLLBACK;
\set ON_ERROR_STOP 1

CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');

\set ON_ERROR_STOP 0
-- Should fail because the data node is the last one
SELECT * FROM delete_data_node('bootstrap_test', drop_database => true);
\set ON_ERROR_STOP 1

-- Add another data node
SELECT node_name, database, node_created, database_created, extension_created
FROM add_data_node('bootstrap_test_2', host => 'localhost', database => 'bootstrap_test_2', bootstrap => true);
SELECT attach_data_node('bootstrap_test_2', 'conditions');

-- Insert some data into the node
INSERT INTO conditions VALUES ('2021-12-01 10:30', 2, 20.3);

\set ON_ERROR_STOP 0
-- Should fail because the data node still holds data
SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);
\set ON_ERROR_STOP 1

-- Data node's database still exists after failure to delete
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';

-- Delete the chunks so that we can delete the data node
SELECT drop_chunks('conditions', older_than => '2022-01-01'::timestamptz);

SELECT * FROM delete_data_node('bootstrap_test_2', drop_database => true);

-- The data node's database is dropped
SELECT count(*) FROM pg_database WHERE datname = 'bootstrap_test_2';

SELECT data_nodes FROM timescaledb_information.hypertables
WHERE hypertable_name = 'conditions';

-- Using the drop_database option when there are active connections to
-- the data node should fail. But any connections in the current
-- session should be cleared when dropping the database. To test that
-- the connection is cleared, first create a connection in the
-- connection cache by inserting some data
CREATE TABLE conditions (time timestamptz, device int, temp float);
SELECT create_distributed_hypertable('conditions', 'time', 'device');
INSERT INTO conditions VALUES ('2021-12-01 10:30', 1, 20.3);
DROP TABLE conditions;