Fix GRANT/REVOKE ALL IN SCHEMA handling

Fix the "GRANT/REVOKE ALL IN SCHEMA" handling uniformly across
single-node and multi-node.

Even though this is a SCHEMA-specific activity, we decided to
include a hypertable's chunks even if they are part of another
SCHEMA, so they also end up getting/resetting the same privileges.

Includes test case changes for both single-node and multi-node use
cases.
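
As a minimal illustration (role name hypothetical; the table and chunk
schema are taken from the test cases below), the fixed semantics are:

-- 'conditions' is a hypertable in schema 'public'; its chunks live in
-- '_timescaledb_internal'. The schema-wide GRANT now reaches those
-- chunks even though they sit in a different schema:
GRANT ALL ON ALL TABLES IN SCHEMA public TO some_user;
-- ...and the schema-wide REVOKE resets them again:
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM some_user;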
Nikhil Sontakke 2021-10-18 18:58:34 +05:30 committed by Nikhils
parent b0886c1b6d
commit 68697859df
8 changed files with 851 additions and 35 deletions


@ -45,7 +45,6 @@
#include "hypertable_cache.h"
#include "trigger.h"
#include "scanner.h"
#include "scan_iterator.h"
#include "catalog.h"
#include "dimension_slice.h"
#include "dimension_vector.h"
@ -170,8 +169,8 @@ hypertable_formdata_make_tuple(const FormData_hypertable *fd, TupleDesc desc)
return heap_form_tuple(desc, values, nulls);
}
static void
hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti)
void
ts_hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti)
{
bool nulls[Natts_hypertable];
Datum values[Natts_hypertable];
@ -242,7 +241,7 @@ ts_hypertable_from_tupleinfo(const TupleInfo *ti)
Oid namespace_oid;
Hypertable *h = MemoryContextAllocZero(ti->mctx, sizeof(Hypertable));
hypertable_formdata_fill(&h->fd, ti);
ts_hypertable_formdata_fill(&h->fd, ti);
namespace_oid = get_namespace_oid(NameStr(h->fd.schema_name), false);
h->main_table_relid = get_relname_relid(NameStr(h->fd.table_name), namespace_oid);
h->space = ts_dimension_scan(h->fd.id, h->main_table_relid, h->fd.num_dimensions, ti->mctx);
@ -261,7 +260,7 @@ hypertable_tuple_get_relid(TupleInfo *ti, void *data)
FormData_hypertable fd;
Oid schema_oid;
hypertable_formdata_fill(&fd, ti);
ts_hypertable_formdata_fill(&fd, ti);
schema_oid = get_namespace_oid(NameStr(fd.schema_name), true);
if (OidIsValid(schema_oid))
@ -785,7 +784,7 @@ reset_associated_tuple_found(TupleInfo *ti, void *data)
FormData_hypertable fd;
CatalogSecurityContext sec_ctx;
hypertable_formdata_fill(&fd, ti);
ts_hypertable_formdata_fill(&fd, ti);
namestrcpy(&fd.associated_schema_name, INTERNAL_SCHEMA_NAME);
new_tuple = hypertable_formdata_make_tuple(&fd, ts_scanner_get_tupledesc(ti));
ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
@ -1037,22 +1036,27 @@ ts_hypertable_get_by_name(const char *schema, const char *name)
return ht;
}
static void
hypertable_scan_by_name(ScanIterator *iterator, const char *schema, const char *name)
void
ts_hypertable_scan_by_name(ScanIterator *iterator, const char *schema, const char *name)
{
iterator->ctx.index = catalog_get_index(ts_catalog_get(), HYPERTABLE, HYPERTABLE_NAME_INDEX);
ts_scan_iterator_scan_key_init(iterator,
Anum_hypertable_name_idx_table,
BTEqualStrategyNumber,
F_NAMEEQ,
CStringGetDatum(name));
/* name and schema cannot both be NULL */
Assert(name != NULL || schema != NULL);
ts_scan_iterator_scan_key_init(iterator,
Anum_hypertable_name_idx_schema,
BTEqualStrategyNumber,
F_NAMEEQ,
CStringGetDatum(schema));
if (name)
ts_scan_iterator_scan_key_init(iterator,
Anum_hypertable_name_idx_table,
BTEqualStrategyNumber,
F_NAMEEQ,
CStringGetDatum(name));
if (schema)
ts_scan_iterator_scan_key_init(iterator,
Anum_hypertable_name_idx_schema,
BTEqualStrategyNumber,
F_NAMEEQ,
CStringGetDatum(schema));
}
/*
@ -1070,11 +1074,11 @@ ts_hypertable_get_attributes_by_name(const char *schema, const char *name,
ScanIterator iterator =
ts_scan_iterator_create(HYPERTABLE, AccessShareLock, CurrentMemoryContext);
hypertable_scan_by_name(&iterator, schema, name);
ts_hypertable_scan_by_name(&iterator, schema, name);
ts_scanner_foreach(&iterator)
{
TupleInfo *ti = ts_scan_iterator_tuple_info(&iterator);
hypertable_formdata_fill(form, ti);
ts_hypertable_formdata_fill(form, ti);
ts_scan_iterator_close(&iterator);
return true;
}
@ -2250,7 +2254,7 @@ hypertable_rename_schema_name(TupleInfo *ti, void *data)
bool updated = false;
FormData_hypertable fd;
hypertable_formdata_fill(&fd, ti);
ts_hypertable_formdata_fill(&fd, ti);
/*
* Because we are doing a heap scan with no scankey, we don't know which


@ -15,6 +15,7 @@
#include "dimension.h"
#include "export.h"
#include "scanner.h"
#include "scan_iterator.h"
#include "tablespace.h"
#define OLD_INSERT_BLOCKER_NAME "insert_blocker"
@ -176,6 +177,9 @@ extern TSDLLEXPORT Datum ts_hypertable_get_open_dim_max_value(const Hypertable *
int dimension_index, bool *isnull);
extern TSDLLEXPORT bool ts_hypertable_has_compression_table(const Hypertable *ht);
extern TSDLLEXPORT void ts_hypertable_formdata_fill(FormData_hypertable *fd, const TupleInfo *ti);
extern TSDLLEXPORT void ts_hypertable_scan_by_name(ScanIterator *iterator, const char *schema,
const char *name);
#define hypertable_scan(schema, table, tuple_found, data, lockmode, tuplock) \
ts_hypertable_scan_with_memory_context(schema, \


@ -288,14 +288,39 @@ add_hypertable_to_process_args(ProcessUtilityArgs *args, const Hypertable *ht)
args->hypertable_list = lappend_oid(args->hypertable_list, ht->main_table_relid);
}
static bool
check_table_in_rangevar_list(List *rvlist, Name schema_name, Name table_name)
{
ListCell *l;
foreach (l, rvlist)
{
RangeVar *rvar = lfirst_node(RangeVar, l);
if (strcmp(rvar->relname, NameStr(*table_name)) == 0 &&
strcmp(rvar->schemaname, NameStr(*schema_name)) == 0)
return true;
}
return false;
}
static void
add_chunk_oid(Hypertable *ht, Oid chunk_relid, void *vargs)
{
ProcessUtilityArgs *args = vargs;
GrantStmt *stmt = castNode(GrantStmt, args->parsetree);
Chunk *chunk = ts_chunk_get_by_relid(chunk_relid, true);
RangeVar *rv = makeRangeVar(NameStr(chunk->fd.schema_name), NameStr(chunk->fd.table_name), -1);
stmt->objects = lappend(stmt->objects, rv);
/*
* If the chunk is in the same schema as the hypertable, it could already be
* part of the objects list, for example in the case of "GRANT ALL IN SCHEMA"
*/
if (!check_table_in_rangevar_list(stmt->objects, &chunk->fd.schema_name, &chunk->fd.table_name))
{
RangeVar *rv =
makeRangeVar(NameStr(chunk->fd.schema_name), NameStr(chunk->fd.table_name), -1);
stmt->objects = lappend(stmt->objects, rv);
}
}
static bool
@ -1282,10 +1307,103 @@ process_grant_add_by_rel(GrantStmt *stmt, RangeVar *relation)
stmt->objects = lappend(stmt->objects, relation);
}
/*
* If it is a "GRANT/REVOKE ON ALL TABLES IN SCHEMA" operation, then we need to
* check whether the rangevar was already added when we added all objects inside
* the SCHEMA.
*
* This could get a little expensive for schemas containing a lot of objects.
*/
static void
process_grant_add_by_name(GrantStmt *stmt, Name schema_name, Name table_name)
process_grant_add_by_name(GrantStmt *stmt, bool was_schema_op, Name schema_name, Name table_name)
{
process_grant_add_by_rel(stmt, makeRangeVar(NameStr(*schema_name), NameStr(*table_name), -1));
bool already_added = false;
if (was_schema_op)
already_added = check_table_in_rangevar_list(stmt->objects, schema_name, table_name);
if (!already_added)
process_grant_add_by_rel(stmt,
makeRangeVar(NameStr(*schema_name), NameStr(*table_name), -1));
}
static void
process_relations_in_namespace(GrantStmt *stmt, Name schema_name, Oid namespaceId, char relkind)
{
ScanKeyData key[2];
Relation rel;
TableScanDesc scan;
HeapTuple tuple;
ScanKeyInit(&key[0],
Anum_pg_class_relnamespace,
BTEqualStrategyNumber,
F_OIDEQ,
ObjectIdGetDatum(namespaceId));
ScanKeyInit(&key[1],
Anum_pg_class_relkind,
BTEqualStrategyNumber,
F_CHAREQ,
CharGetDatum(relkind));
rel = table_open(RelationRelationId, AccessShareLock);
scan = table_beginscan_catalog(rel, 2, key);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
{
Name relname = &((Form_pg_class) GETSTRUCT(tuple))->relname;
/* these are being added for the first time into this list */
process_grant_add_by_name(stmt, false, schema_name, relname);
}
table_endscan(scan);
table_close(rel, AccessShareLock);
return;
}
/*
* For "GRANT ALL ON ALL TABLES IN SCHEMA" GrantStmt, the targtype field is ACL_TARGET_ALL_IN_SCHEMA
* whereas in regular "GRANT ON TABLE table_name", the targtype field is ACL_TARGET_OBJECT. In the
* latter case the objects list contains a list of relation range vars whereas in the former it is
* the list of schema names.
*
* To make things work we change the targtype field from ACL_TARGET_ALL_IN_SCHEMA to
* ACL_TARGET_OBJECT and then create a new list of rangevars of all relation type entities in it and
* assign it to the "stmt->objects" field.
*
*/
static void
process_grant_add_by_schema(GrantStmt *stmt)
{
ListCell *cell;
List *nspnames = stmt->objects;
/*
* We will be adding rangevars to the "stmt->objects" field in the loop below. So
* we track the nspnames separately above and NIL out the objects list
*/
stmt->objects = NIL;
foreach (cell, nspnames)
{
char *nspname = strVal(lfirst(cell));
Oid namespaceId = LookupExplicitNamespace(nspname, false);
Name schema;
schema = (Name) palloc(NAMEDATALEN);
namestrcpy(schema, nspname);
/* Inspired by PG's objectsInSchemaToOids() function */
process_relations_in_namespace(stmt, schema, namespaceId, RELKIND_RELATION);
process_relations_in_namespace(stmt, schema, namespaceId, RELKIND_VIEW);
process_relations_in_namespace(stmt, schema, namespaceId, RELKIND_MATVIEW);
process_relations_in_namespace(stmt, schema, namespaceId, RELKIND_FOREIGN_TABLE);
process_relations_in_namespace(stmt, schema, namespaceId, RELKIND_PARTITIONED_TABLE);
}
/* change targtype to ACL_TARGET_OBJECT now */
stmt->targtype = ACL_TARGET_OBJECT;
}
/*
@ -1300,8 +1418,8 @@ process_grant_and_revoke(ProcessUtilityArgs *args)
DDLResult result = DDL_CONTINUE;
/* We let the calling function handle anything that is not
* ACL_TARGET_OBJECT (currently only ACL_TARGET_ALL_IN_SCHEMA) */
if (stmt->targtype != ACL_TARGET_OBJECT)
* ACL_TARGET_OBJECT or ACL_TARGET_ALL_IN_SCHEMA */
if (stmt->targtype != ACL_TARGET_OBJECT && stmt->targtype != ACL_TARGET_ALL_IN_SCHEMA)
return DDL_CONTINUE;
switch (stmt->objtype)
@ -1322,9 +1440,22 @@ process_grant_and_revoke(ProcessUtilityArgs *args)
* consider those when sending grants to other data nodes.
*/
{
Cache *hcache = ts_hypertable_cache_pin();
Cache *hcache;
ListCell *cell;
bool was_schema_op = false;
/*
* If it's a GRANT/REVOKE ALL IN SCHEMA, then we need to collect all
* objects in this schema and convert this into an ACL_TARGET_OBJECT
* entry with its objects field pointing to rangevars.
*/
if (stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA)
{
process_grant_add_by_schema(stmt);
was_schema_op = true;
}
hcache = ts_hypertable_cache_pin();
/* First process all continuous aggregates in the list and add
* the associated hypertables and views to the list of objects
* to process */
@ -1337,12 +1468,15 @@ process_grant_and_revoke(ProcessUtilityArgs *args)
Hypertable *mat_hypertable =
ts_hypertable_get_by_id(cagg->data.mat_hypertable_id);
process_grant_add_by_name(stmt,
was_schema_op,
&mat_hypertable->fd.schema_name,
&mat_hypertable->fd.table_name);
process_grant_add_by_name(stmt,
was_schema_op,
&cagg->data.direct_view_schema,
&cagg->data.direct_view_name);
process_grant_add_by_name(stmt,
was_schema_op,
&cagg->data.partial_view_schema,
&cagg->data.partial_view_name);
}
@ -1359,6 +1493,7 @@ process_grant_and_revoke(ProcessUtilityArgs *args)
ts_hypertable_get_by_id(hypertable->fd.compressed_hypertable_id);
Assert(compressed_hypertable);
process_grant_add_by_name(stmt,
was_schema_op,
&compressed_hypertable->fd.schema_name,
&compressed_hypertable->fd.table_name);
}
@ -3473,6 +3608,8 @@ process_altertable_end_subcmd(Hypertable *ht, Node *parsetree, ObjectAddress *ob
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("operation not supported on hypertables %d", cmd->subtype)));
break;
default:
break;
}
if (ts_cm_functions->process_altertable_cmd)
ts_cm_functions->process_altertable_cmd(ht, cmd);
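
In effect, process_grant_add_by_schema() rewrites the schema-targeted statement
into an object-targeted one before the hypertable expansion runs. As a rough
SQL-level sketch (table and role names hypothetical), the transformation is:

-- Incoming statement, targtype = ACL_TARGET_ALL_IN_SCHEMA:
GRANT SELECT ON ALL TABLES IN SCHEMA public TO some_user;
-- Internally treated as targtype = ACL_TARGET_OBJECT over explicit rangevars,
-- with chunks from other schemas appended by add_chunk_oid() afterwards:
GRANT SELECT ON public.t1, public.t2 TO some_user;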


@ -246,3 +246,145 @@ REVOKE INSERT ON conditions FROM PUBLIC;
| | | =rw/super_user | |
(1 row)
-- Check that GRANT ALL IN SCHEMA adds privileges to the parent
-- and also goes to chunks in another schema
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+----------------------------------------+-------------------+----------
public | conditions | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
(1 row)
\z _timescaledb_internal.*chunk
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
-----------------------+------------------+-------+----------------------------------------+-------------------+----------
_timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user +| |
| | | =w/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user +| |
| | | =rw/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
(7 rows)
-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent
-- and also goes to chunks in another schema
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-------------------------------+-------------------+----------
public | conditions | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
(1 row)
\z _timescaledb_internal.*chunk
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
-----------------------+------------------+-------+-------------------------------+-------------------+----------
_timescaledb_internal | _hyper_1_1_chunk | table | super_user=arwdDxt/super_user+| |
| | | =w/super_user | |
_timescaledb_internal | _hyper_1_2_chunk | table | super_user=arwdDxt/super_user | |
_timescaledb_internal | _hyper_1_3_chunk | table | super_user=arwdDxt/super_user+| |
| | | =rw/super_user | |
_timescaledb_internal | _hyper_1_4_chunk | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
_timescaledb_internal | _hyper_1_5_chunk | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
_timescaledb_internal | _hyper_1_6_chunk | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
_timescaledb_internal | _hyper_1_7_chunk | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
(7 rows)
-- Create chunks in the same schema as the hypertable and check that
-- they also get the same privileges as the hypertable
CREATE TABLE measurements(
time TIMESTAMPTZ NOT NULL,
device INTEGER,
temperature FLOAT
);
-- Create a hypertable with chunks in the same schema
SELECT * FROM create_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public');
hypertable_id | schema_name | table_name | created
---------------+-------------+--------------+---------
2 | public | measurements | t
(1 row)
INSERT INTO measurements
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- GRANT ALL and check privileges
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+----------------------------------------+-------------------+----------
public | measurements | table | super_user=arwdDxt/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+----------------------------------------+-------------------+----------
public | conditions | table | super_user=arwdDxt/super_user +| |
| | | =r/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
(1 row)
\z public.*chunk
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+-------------------+-------+----------------------------------------+-------------------+----------
public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user +| |
| | | default_perm_user_2=arwdDxt/super_user | |
(3 rows)
-- REVOKE ALL and check privileges
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+-------------------------------+-------------------+----------
public | measurements | table | super_user=arwdDxt/super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-------------------------------+-------------------+----------
public | conditions | table | super_user=arwdDxt/super_user+| |
| | | =r/super_user | |
(1 row)
\z public.*chunk
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+-------------------+-------+-------------------------------+-------------------+----------
public | _hyper_2_10_chunk | table | super_user=arwdDxt/super_user | |
public | _hyper_2_8_chunk | table | super_user=arwdDxt/super_user | |
public | _hyper_2_9_chunk | table | super_user=arwdDxt/super_user | |
(3 rows)


@ -73,3 +73,41 @@ GRANT INSERT ON conditions TO PUBLIC;
REVOKE INSERT ON conditions FROM PUBLIC;
\z conditions
\z _timescaledb_internal._hyper_1_3_chunk
-- Check that GRANT ALL IN SCHEMA adds privileges to the parent
-- and also goes to chunks in another schema
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2;
\z conditions
\z _timescaledb_internal.*chunk
-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent
-- and also goes to chunks in another schema
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2;
\z conditions
\z _timescaledb_internal.*chunk
-- Create chunks in the same schema as the hypertable and check that
-- they also get the same privileges as the hypertable
CREATE TABLE measurements(
time TIMESTAMPTZ NOT NULL,
device INTEGER,
temperature FLOAT
);
-- Create a hypertable with chunks in the same schema
SELECT * FROM create_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public');
INSERT INTO measurements
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- GRANT ALL and check privileges
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER_2;
\z measurements
\z conditions
\z public.*chunk
-- REVOKE ALL and check privileges
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER_2;
\z measurements
\z conditions
\z public.*chunk


@ -15,6 +15,7 @@
#include <annotations.h>
#include <guc.h>
#include "data_node.h"
#include "hypertable_data_node.h"
#include "chunk_index.h"
#include "chunk_api.h"
@ -23,6 +24,7 @@
#include "remote/dist_commands.h"
#include "remote/dist_ddl.h"
#include "remote/connection_cache.h"
#include "scan_iterator.h"
#include "dist_util.h"
/* DDL Query execution type */
@ -221,11 +223,12 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
NodeTag tag = nodeTag(args->parsetree);
int hypertable_list_length = list_length(args->hypertable_list);
Cache *hcache;
Hypertable *ht;
Hypertable *ht = NULL;
Oid relid = InvalidOid;
unsigned int num_hypertables;
unsigned int num_dist_hypertables;
unsigned int num_dist_hypertable_members;
bool allow_dist = false;
/*
* This function is executed for any Utility/DDL operation and for any
@ -309,6 +312,65 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
}
break;
}
case T_GrantStmt:
{
GrantStmt *stmt = castNode(GrantStmt, args->parsetree);
if (stmt->objtype == OBJECT_TABLE && stmt->targtype == ACL_TARGET_ALL_IN_SCHEMA)
{
bool exec_on_datanodes = false;
ListCell *cell;
/*
* Check if there are any distributed hypertables in the schemas;
* otherwise there is no need to execute the query on the datanodes.
*
* If more than one schema is specified, we ship the query if any of
* the schemas contains distributed hypertables. The onus is then on
* the user to ensure that all schemas exist on the datanodes as
* required; they can always run the statement on individual schemas
* one by one in that case.
*/
foreach (cell, stmt->objects)
{
char *nspname = strVal(lfirst(cell));
LookupExplicitNamespace(nspname, false);
/* no need to check further if any schema has distributed hypertables */
if (exec_on_datanodes)
break;
ScanIterator iterator = ts_scan_iterator_create(HYPERTABLE,
AccessShareLock,
CurrentMemoryContext);
ts_hypertable_scan_by_name(&iterator, nspname, NULL);
ts_scanner_foreach(&iterator)
{
FormData_hypertable fd;
TupleInfo *ti = ts_scan_iterator_tuple_info(&iterator);
ts_hypertable_formdata_fill(&fd, ti);
if (fd.replication_factor > 0)
{
exec_on_datanodes = true;
break;
}
}
ts_scan_iterator_close(&iterator);
}
if (exec_on_datanodes)
{
allow_dist = true;
set_dist_exec_type(DIST_DDL_EXEC_ON_START);
dist_ddl_state.data_node_list = data_node_get_node_name_list();
}
}
break;
}
/* Skip COPY here, since it has its own process path using
* cross module API. */
case T_CopyStmt:
@ -317,6 +379,23 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
break;
}
}
else /* more than one hypertable */
{
switch (tag)
{
case T_GrantStmt:
{
GrantStmt *stmt = castNode(GrantStmt, args->parsetree);
if (stmt->objtype == OBJECT_TABLE)
allow_dist = true;
break;
}
default:
break;
}
}
/*
* Iterate over hypertable list and reason about the type of hypertables
@ -345,14 +424,21 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
* distributed. Otherwise this makes query_string unusable for remote
* execution without deparsing.
*
* Also, raise the error only if "allow_dist" is not set.
*
* TODO: Support multiple tables inside statements.
*/
if (hypertable_list_length > 1)
if (hypertable_list_length > 1 && !allow_dist)
dist_ddl_error_raise_unsupported();
ht = ts_hypertable_cache_get_entry(hcache, relid, CACHE_FLAG_NONE);
Assert(ht != NULL);
Assert(hypertable_is_distributed(ht));
if (hypertable_list_length == 1)
{
ht = ts_hypertable_cache_get_entry(hcache, relid, CACHE_FLAG_NONE);
Assert(ht != NULL);
Assert(hypertable_is_distributed(ht));
}
else
Assert(allow_dist);
/* Block unsupported operations on distributed hypertables and
* decide on how to execute it. */
@ -514,7 +600,12 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
* during sql_drop and command_end triggers execution.
*/
if (dist_ddl_scheduled_for_execution())
dist_ddl_state.data_node_list = ts_hypertable_get_data_node_name_list(ht);
{
if (ht)
dist_ddl_state.data_node_list = ts_hypertable_get_data_node_name_list(ht);
else
dist_ddl_state.data_node_list = data_node_get_node_name_list();
}
ts_cache_release(hcache);
}


@ -709,6 +709,330 @@ SELECT current_user;
cluster_super_user
(1 row)
-- Check that GRANT ALL IN SCHEMA adds privileges to the parent
-- and also does so on the foreign chunks in another schema
CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2;
CALL distributed_exec($$ CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2; $$);
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user +| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1;
schema | name | access_privileges
-----------------------+--------------+-----------------------------------------------
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
(18 rows)
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user +| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1;
schema | name | access_privileges
-----------------------+--------------+-----------------------------------------------
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user +
| | default_perm_user=arwdDxt/cluster_super_user
(6 rows)
-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent
-- and also does so on foreign chunks in another schema
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1;
schema | name | access_privileges
-----------------------+--------------+-----------------------------------------------
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
(18 rows)
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_2 :ROLE_CLUSTER_SUPERUSER;
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1;
schema | name | access_privileges
-----------------------+--------------+-----------------------------------------------
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
_timescaledb_internal | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user_2=a/cluster_super_user
(6 rows)
-- Create chunks in the same schema as the hypertable and check that
-- they also get the same privileges as the hypertable
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
CREATE TABLE measurements(
time TIMESTAMPTZ NOT NULL,
device INTEGER,
temperature FLOAT
);
-- Create a distributed hypertable with chunks in the same schema
SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public');
hypertable_id | schema_name | table_name | created
---------------+-------------+--------------+---------
5 | public | measurements | t
(1 row)
INSERT INTO measurements
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- Create a local regular table
CREATE TABLE local(g int primary key, h int);
-- Create a local hypertable
CREATE TABLE conditions_lht(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FLOAT, humidity FLOAT);
SELECT * FROM create_hypertable('conditions_lht', 'time', chunk_time_interval => '5 days'::interval);
hypertable_id | schema_name | table_name | created
---------------+-------------+----------------+---------
6 | public | conditions_lht | t
(1 row)
INSERT INTO conditions_lht
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- GRANT ALL and check privileges of these mix of local table, local hypertable and distributed hypertable
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+-----------------------------------------------+-------------------+----------
public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user +| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
schema | name | access_privileges
--------+--------------+-----------------------------------------------
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user=arwdDxt/cluster_super_user
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user=arwdDxt/cluster_super_user
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user=arwdDxt/cluster_super_user
(3 rows)
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_3 :ROLE_CLUSTER_SUPERUSER;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+-----------------------------------------------+-------------------+----------
public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user +| |
| | | default_perm_user=arwdDxt/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
schema | name | access_privileges
--------+--------------+-----------------------------------------------
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user+
| | default_perm_user=arwdDxt/cluster_super_user
(1 row)
-- REVOKE ALL and check privileges
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+-----------------------------------------------+-------------------+----------
public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
schema | name | access_privileges
--------+--------------+-----------------------------------------------
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user
(3 rows)
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_4 :ROLE_CLUSTER_SUPERUSER;
\z measurements
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+--------------+-------+-----------------------------------------------+-------------------+----------
public | measurements | table | cluster_super_user=arwdDxt/cluster_super_user | |
(1 row)
\z conditions
Access privileges
Schema | Name | Type | Access privileges | Column privileges | Policies
--------+------------+-------+-----------------------------------------------+-------------------+----------
public | conditions | table | cluster_super_user=arwdDxt/cluster_super_user+| |
| | | default_perm_user_2=a/cluster_super_user | |
(1 row)
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
schema | name | access_privileges
--------+--------------+-----------------------------------------------
public | _dist_hyper_ | cluster_super_user=arwdDxt/cluster_super_user
(1 row)
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- Test GRANT on foreign server and data node authentication using a
-- user mapping
SET ROLE :ROLE_3;
@ -764,7 +1088,7 @@ SELECT * FROM create_distributed_hypertable('disttable_role_3', 'time', data_nod
NOTICE: adding not-null constraint to column "time"
hypertable_id | schema_name | table_name | created
---------------+-------------+------------------+---------
7 | public | disttable_role_3 | t
9 | public | disttable_role_3 | t
(1 row)
-- Test insert and query


@ -223,6 +223,82 @@ FROM generate_series('2019-01-01 00:00:00'::timestamptz, '2019-02-01 00:00:00',
RESET ROLE;
SELECT current_user;
-- Check that GRANT ALL IN SCHEMA adds privileges to the parent
-- and also does so on the foreign chunks in another schema
CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2;
CALL distributed_exec($$ CREATE VIEW CHUNK_QRY1 AS SELECT n.nspname as schema, substring(c.relname for 12) as name, pg_catalog.array_to_string(c.relacl, E'\n') AS Access_privileges FROM pg_catalog.pg_class c LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace WHERE c.relkind IN ('r','v','m','S','f','p') AND c.relname OPERATOR(pg_catalog.~) '^(_dist.*)$' COLLATE pg_catalog.default ORDER BY 1, 2; $$);
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER;
\z conditions
SELECT * FROM CHUNK_QRY1;
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_1 :ROLE_CLUSTER_SUPERUSER;
\z conditions
SELECT * FROM CHUNK_QRY1;
-- Check that REVOKE ALL IN SCHEMA removes privileges of the parent
-- and also does so on foreign chunks in another schema
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER;
\z conditions
SELECT * FROM CHUNK_QRY1;
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_2 :ROLE_CLUSTER_SUPERUSER;
\z conditions
SELECT * FROM CHUNK_QRY1;
-- Create chunks in the same schema as the hypertable and check that
-- they also get the same privileges as the hypertable
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
CREATE TABLE measurements(
time TIMESTAMPTZ NOT NULL,
device INTEGER,
temperature FLOAT
);
-- Create a distributed hypertable with chunks in the same schema
SELECT * FROM create_distributed_hypertable('public.measurements', 'time', chunk_time_interval => '5 days'::interval, associated_schema_name => 'public');
INSERT INTO measurements
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- Create a local regular table
CREATE TABLE local(g int primary key, h int);
-- Create a local hypertable
CREATE TABLE conditions_lht(time TIMESTAMPTZ NOT NULL, device INTEGER, temperature FLOAT, humidity FLOAT);
SELECT * FROM create_hypertable('conditions_lht', 'time', chunk_time_interval => '5 days'::interval);
INSERT INTO conditions_lht
SELECT time, (random()*30)::int, random()*80 - 40
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-10 00:00'::timestamp, '1h') AS time;
-- GRANT ALL and check privileges of these mix of local table, local hypertable and distributed hypertable
GRANT ALL ON ALL TABLES IN SCHEMA public TO :ROLE_DEFAULT_PERM_USER;
\z measurements
\z conditions
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_3 :ROLE_CLUSTER_SUPERUSER;
\z measurements
\z conditions
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
-- REVOKE ALL and check privileges
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
REVOKE ALL ON ALL TABLES IN SCHEMA public FROM :ROLE_DEFAULT_PERM_USER;
\z measurements
\z conditions
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
-- Check on one datanode, should be the same on others as well
\c :DN_DBNAME_4 :ROLE_CLUSTER_SUPERUSER;
\z measurements
\z conditions
SELECT * FROM CHUNK_QRY1 WHERE schema = 'public';
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
-- Test GRANT on foreign server and data node authentication using a
-- user mapping
SET ROLE :ROLE_3;