Mirror of https://github.com/timescale/timescaledb.git
Retain hypertable ownership on attach_data_node
If a superuser is used to invoke attach_data_node on a hypertable, we need to ensure that the object created on the data node being attached retains the original ownership permissions. Fixes #4433
parent d83db1578c
commit ed55654a32
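The fix relies on the standard PostgreSQL idiom of temporarily switching the effective user to the table owner before creating the remote objects and restoring the saved identity afterwards. Below is a minimal, self-contained sketch of that idiom; the wrapper name run_as_table_owner and the do_remote_attach_work placeholder are illustrative only and not part of the patch, which inlines the same steps directly into data_node_attach() as shown in the diff.

#include "postgres.h"
#include "access/table.h"
#include "miscadmin.h"
#include "utils/rel.h"

/* Placeholder for work that must run with the table owner's permissions. */
static void
do_remote_attach_work(void)
{
	/* e.g. create the hypertable objects on the data node being attached */
}

/*
 * Sketch of the owner-switch idiom: look up the owner of the table,
 * impersonate that role (keeping the caller's security context flags plus
 * SECURITY_LOCAL_USERID_CHANGE), do the work, then restore the caller's
 * identity. The AccessShareLock taken on the table is held until the end of
 * the transaction so a concurrent ALTER TABLE ... OWNER TO cannot change the
 * owner underneath us.
 */
static void
run_as_table_owner(Oid table_relid)
{
	Relation rel;
	Oid uid, saved_uid;
	int sec_ctx;

	rel = table_open(table_relid, AccessShareLock);
	uid = rel->rd_rel->relowner;
	table_close(rel, NoLock);

	GetUserIdAndSecContext(&saved_uid, &sec_ctx);
	if (uid != saved_uid)
		SetUserIdAndSecContext(uid, sec_ctx | SECURITY_LOCAL_USERID_CHANGE);

	do_remote_attach_work();

	/* Restore the caller's security context */
	if (uid != saved_uid)
		SetUserIdAndSecContext(saved_uid, sec_ctx);
}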
@@ -849,6 +849,9 @@ data_node_attach(PG_FUNCTION_ARGS)
 	List *result;
 	int num_nodes;
 	ListCell *lc;
+	Oid uid, saved_uid;
+	int sec_ctx;
+	Relation rel;
 
 	TS_PREVENT_FUNC_IF_READ_ONLY();
 
@@ -899,6 +902,24 @@ data_node_attach(PG_FUNCTION_ARGS)
 		}
 	}
 
+	/*
+	 * Change to the hypertable owner so that the same permissions will be set up on the
+	 * datanode being attached to as well. We need to do this explicitly because the
+	 * caller of this function could be a superuser and we definitely don't want to create
+	 * this hypertable with superuser ownership on the datanode being attached to!
+	 *
+	 * We retain the lock on the hypertable till the end of the transaction to avoid any
+	 * possibility of a concurrent "ALTER TABLE OWNER TO" changing the owner underneath
+	 * us.
+	 */
+	rel = table_open(ht->main_table_relid, AccessShareLock);
+	uid = rel->rd_rel->relowner;
+	table_close(rel, NoLock);
+	GetUserIdAndSecContext(&saved_uid, &sec_ctx);
+
+	if (uid != saved_uid)
+		SetUserIdAndSecContext(uid, sec_ctx | SECURITY_LOCAL_USERID_CHANGE);
+
 	result = hypertable_assign_data_nodes(ht->fd.id, list_make1((char *) node_name));
 	Assert(result->length == 1);
 
@@ -948,6 +969,10 @@ data_node_attach(PG_FUNCTION_ARGS)
 	node = linitial(result);
 	ts_cache_release(hcache);
 
+	/* Need to restore security context */
+	if (uid != saved_uid)
+		SetUserIdAndSecContext(saved_uid, sec_ctx);
+
 	PG_RETURN_DATUM(create_hypertable_data_node_datum(fcinfo, node));
 }
 
@@ -700,6 +700,10 @@ SELECT * FROM add_data_node('data_node_4', host => 'localhost', database => :'DN
  data_node_4 | localhost | 55432 | db_data_node_4 | t | t | t
 (1 row)
 
+-- Now let ROLE_1 use data_node_4 since it owns this "disttable"
+GRANT USAGE
+   ON FOREIGN SERVER data_node_4
+   TO :ROLE_1;
 SELECT * FROM attach_data_node('data_node_4', 'disttable');
 NOTICE: the number of partitions in dimension "device" was increased to 2
  hypertable_id | node_hypertable_id | node_name
@@ -707,6 +711,29 @@ NOTICE: the number of partitions in dimension "device" was increased to 2
              3 |                  1 | data_node_4
 (1 row)
 
+-- Recheck that ownership on data_node_4 is proper
+SELECT * FROM test.remote_exec(NULL, $$ SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'; $$);
+NOTICE: [data_node_1]: SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'
+NOTICE: [data_node_1]:
+tablename|tableowner
+---------+-----------
+disttable|test_role_1
+(1 row)
+
+
+NOTICE: [data_node_4]: SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'
+NOTICE: [data_node_4]:
+tablename|tableowner
+---------+-----------
+disttable|test_role_1
+(1 row)
+
+
+ remote_exec
+-------------
+ 
+(1 row)
+
 -- Show updated number of slices in 'device' dimension.
 SELECT column_name, num_slices
 FROM _timescaledb_catalog.dimension
@@ -368,7 +368,13 @@ AND column_name = 'device';
 SET ROLE :ROLE_CLUSTER_SUPERUSER;
 SELECT * FROM add_data_node('data_node_4', host => 'localhost', database => :'DN_DBNAME_4',
                             if_not_exists => true);
+-- Now let ROLE_1 use data_node_4 since it owns this "disttable"
+GRANT USAGE
+   ON FOREIGN SERVER data_node_4
+   TO :ROLE_1;
 SELECT * FROM attach_data_node('data_node_4', 'disttable');
+-- Recheck that ownership on data_node_4 is proper
+SELECT * FROM test.remote_exec(NULL, $$ SELECT tablename, tableowner from pg_catalog.pg_tables where tablename = 'disttable'; $$);
 
 -- Show updated number of slices in 'device' dimension.
 SELECT column_name, num_slices