diff --git a/tsl/src/data_node.c b/tsl/src/data_node.c
index 90d9f7bc9..14c96c477 100644
--- a/tsl/src/data_node.c
+++ b/tsl/src/data_node.c
@@ -849,6 +849,9 @@ data_node_attach(PG_FUNCTION_ARGS)
 	List *result;
 	int num_nodes;
 	ListCell *lc;
+	Oid uid, saved_uid;
+	int sec_ctx;
+	Relation rel;
 
 	TS_PREVENT_FUNC_IF_READ_ONLY();
 
@@ -899,6 +902,24 @@ data_node_attach(PG_FUNCTION_ARGS)
 		}
 	}
 
+	/*
+	 * Change to the hypertable owner so that the same permissions will be set up on the
+	 * datanode being attached to as well. We need to do this explicitly because the
+	 * caller of this function could be a superuser and we definitely don't want to create
+	 * this hypertable with superuser ownership on the datanode being attached to!
+	 *
+	 * We retain the lock on the hypertable till the end of the transaction to avoid any
+	 * possibility of a concurrent "ALTER TABLE OWNER TO" changing the owner underneath
+	 * us.
+	 */
+	rel = table_open(ht->main_table_relid, AccessShareLock);
+	uid = rel->rd_rel->relowner;
+	table_close(rel, NoLock);
+	GetUserIdAndSecContext(&saved_uid, &sec_ctx);
+
+	if (uid != saved_uid)
+		SetUserIdAndSecContext(uid, sec_ctx | SECURITY_LOCAL_USERID_CHANGE);
+
 	result = hypertable_assign_data_nodes(ht->fd.id, list_make1((char *) node_name));
 	Assert(result->length == 1);
 
@@ -948,6 +969,10 @@ data_node_attach(PG_FUNCTION_ARGS)
 	node = linitial(result);
 	ts_cache_release(hcache);
 
+	/* Need to restore security context */
+	if (uid != saved_uid)
+		SetUserIdAndSecContext(saved_uid, sec_ctx);
+
 	PG_RETURN_DATUM(create_hypertable_data_node_datum(fcinfo, node));
 }
diff --git a/tsl/test/expected/data_node.out b/tsl/test/expected/data_node.out
index be68dede2..4ff8087a1 100644
--- a/tsl/test/expected/data_node.out
+++ b/tsl/test/expected/data_node.out
@@ -700,6 +700,10 @@ SELECT * FROM add_data_node('data_node_4', host => 'localhost', database => :'DN
  data_node_4 | localhost | 55432 | db_data_node_4 | t | t | t
 (1 row)
 
+-- Now let ROLE_1 use data_node_4 since it owns this "disttable"
+GRANT USAGE
+   ON FOREIGN SERVER data_node_4
+   TO :ROLE_1;
 SELECT * FROM attach_data_node('data_node_4', 'disttable');
 NOTICE: the number of partitions in dimension "device" was increased to 2
  hypertable_id | node_hypertable_id | node_name
@@ -707,6 +711,29 @@ NOTICE: the number of partitions in dimension "device" was increased to 2
  3 | 1 | data_node_4
 (1 row)
 
+-- Recheck that ownership on data_node_4 is proper
+SELECT * FROM test.remote_exec(NULL, $$ SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'; $$);
+NOTICE: [data_node_1]: SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'
+NOTICE: [data_node_1]:
+tablename|tableowner
+---------+-----------
+disttable|test_role_1
+(1 row)
+
+
+NOTICE: [data_node_4]: SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'
+NOTICE: [data_node_4]:
+tablename|tableowner
+---------+-----------
+disttable|test_role_1
+(1 row)
+
+
+ remote_exec
+-------------
+ 
+(1 row)
+
 -- Show updated number of slices in 'device' dimension.
 SELECT column_name, num_slices
 FROM _timescaledb_catalog.dimension
diff --git a/tsl/test/sql/data_node.sql b/tsl/test/sql/data_node.sql
index 0ec43baba..6260835f0 100644
--- a/tsl/test/sql/data_node.sql
+++ b/tsl/test/sql/data_node.sql
@@ -368,7 +368,13 @@ AND column_name = 'device';
 
 SET ROLE :ROLE_CLUSTER_SUPERUSER;
 SELECT * FROM add_data_node('data_node_4', host => 'localhost', database => :'DN_DBNAME_4', if_not_exists => true);
+-- Now let ROLE_1 use data_node_4 since it owns this "disttable"
+GRANT USAGE
+   ON FOREIGN SERVER data_node_4
+   TO :ROLE_1;
 SELECT * FROM attach_data_node('data_node_4', 'disttable');
+-- Recheck that ownership on data_node_4 is proper
+SELECT * FROM test.remote_exec(NULL, $$ SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'; $$);
 -- Show updated number of slices in 'device' dimension.
 SELECT column_name, num_slices
 FROM _timescaledb_catalog.dimension