Handle DROP SCHEMA for hypertable and chunk schemas

Dropping a schema that a hypertable depends on should clean up
dependent metadata. There are two schemas that matter for hypertables:
the hypertable's schema and the associated schema where chunks are
stored.

This change deals with the above as follows:

- If the hypertable schema is dropped, the hypertable and all chunks
should be deleted as well, including metadata.
- If an associated schema is dropped, the hypertables that use that
associated schema will have their associated schemas reset to the
internal schema.
- Even if no hypertable currently uses the dropped schema as its
  associated schema, there might still be chunks that reside in the
  dropped schema (e.g., if the associated schema was changed for their
  hypertables), so the metadata for those chunks should be deleted.
This commit is contained in:
Erik Nordström 2018-01-30 12:41:27 +01:00 committed by Erik Nordström
parent b534a5a7d9
commit b9a6f890a1
8 changed files with 318 additions and 7 deletions

View File

@ -1053,6 +1053,19 @@ chunk_delete_by_hypertable_id(int32 hypertable_id)
RowExclusiveLock); RowExclusiveLock);
} }
int
chunk_delete_by_schema_name(const char *schema_name)
{
ScanKeyData scankey[1];
ScanKeyInit(&scankey[0], Anum_chunk_schema_name_idx_schema_name, BTEqualStrategyNumber,
F_NAMEEQ, DirectFunctionCall1(namein, CStringGetDatum(schema_name)));
return chunk_scan_internal(CHUNK_SCHEMA_NAME_INDEX, scankey, 1,
chunk_tuple_delete, NULL, 0,
RowExclusiveLock);
}
static bool static bool
chunk_recreate_constraint(ChunkScanCtx *ctx, Chunk *chunk) chunk_recreate_constraint(ChunkScanCtx *ctx, Chunk *chunk)
{ {

View File

@ -75,5 +75,6 @@ extern bool chunk_exists_relid(Oid relid);
extern void chunk_recreate_all_constraints_for_dimension(Hyperspace *hs, int32 dimension_id); extern void chunk_recreate_all_constraints_for_dimension(Hyperspace *hs, int32 dimension_id);
extern int chunk_delete_by_relid(Oid chunk_oid); extern int chunk_delete_by_relid(Oid chunk_oid);
extern int chunk_delete_by_hypertable_id(int32 hypertable_id); extern int chunk_delete_by_hypertable_id(int32 hypertable_id);
extern int chunk_delete_by_schema_name(const char *schema_name);
#endif /* TIMESCALEDB_CHUNK_H */ #endif /* TIMESCALEDB_CHUNK_H */

View File

@ -143,6 +143,8 @@ chunk_cache_entry_free(void *cce)
MemoryContextDelete(((ChunkCacheEntry *) cce)->mcxt); MemoryContextDelete(((ChunkCacheEntry *) cce)->mcxt);
} }
#define NOINDEX -1
static int static int
hypertable_scan_limit_internal(ScanKeyData *scankey, hypertable_scan_limit_internal(ScanKeyData *scankey,
int num_scankeys, int num_scankeys,
@ -156,8 +158,8 @@ hypertable_scan_limit_internal(ScanKeyData *scankey,
Catalog *catalog = catalog_get(); Catalog *catalog = catalog_get();
ScannerCtx scanctx = { ScannerCtx scanctx = {
.table = catalog->tables[HYPERTABLE].id, .table = catalog->tables[HYPERTABLE].id,
.index = catalog->tables[HYPERTABLE].index_ids[indexid], .index = (indexid == NOINDEX) ? 0 : catalog->tables[HYPERTABLE].index_ids[indexid],
.scantype = ScannerTypeIndex, .scantype = (indexid == NOINDEX) ? ScannerTypeHeap : ScannerTypeIndex,
.nkeys = num_scankeys, .nkeys = num_scankeys,
.scankey = scankey, .scankey = scankey,
.data = scandata, .data = scandata,
@ -267,12 +269,18 @@ static bool
hypertable_tuple_delete(TupleInfo *ti, void *data) hypertable_tuple_delete(TupleInfo *ti, void *data)
{ {
CatalogSecurityContext sec_ctx; CatalogSecurityContext sec_ctx;
bool isnull;
int hypertable_id = heap_getattr(ti->tuple, Anum_hypertable_id, ti->desc, &isnull);
tablespace_delete(hypertable_id, NULL);
chunk_delete_by_hypertable_id(hypertable_id);
dimension_delete_by_hypertable_id(hypertable_id, true);
catalog_become_owner(catalog_get(), &sec_ctx); catalog_become_owner(catalog_get(), &sec_ctx);
catalog_delete(ti->scanrel, ti->tuple); catalog_delete(ti->scanrel, ti->tuple);
catalog_restore_user(&sec_ctx); catalog_restore_user(&sec_ctx);
return false; return true;
} }
int int
@ -280,10 +288,6 @@ hypertable_delete_by_id(int32 hypertable_id)
{ {
ScanKeyData scankey[1]; ScanKeyData scankey[1];
tablespace_delete(hypertable_id, NULL);
chunk_delete_by_hypertable_id(hypertable_id);
dimension_delete_by_hypertable_id(hypertable_id, true);
ScanKeyInit(&scankey[0], Anum_hypertable_pkey_idx_id, ScanKeyInit(&scankey[0], Anum_hypertable_pkey_idx_id,
BTEqualStrategyNumber, F_INT4EQ, BTEqualStrategyNumber, F_INT4EQ,
Int32GetDatum(hypertable_id)); Int32GetDatum(hypertable_id));
@ -298,6 +302,65 @@ hypertable_delete_by_id(int32 hypertable_id)
false); false);
} }
int
hypertable_delete_by_schema_name(const char *schema_name)
{
ScanKeyData scankey[1];
ScanKeyInit(&scankey[0], Anum_hypertable_name_idx_schema,
BTEqualStrategyNumber, F_NAMEEQ,
DirectFunctionCall1(namein, CStringGetDatum(schema_name)));
return hypertable_scan_limit_internal(scankey,
1,
HYPERTABLE_NAME_INDEX,
hypertable_tuple_delete,
NULL,
0,
RowExclusiveLock,
false);
}
static bool
reset_associated_tuple_found(TupleInfo *ti, void *data)
{
HeapTuple tuple = heap_copytuple(ti->tuple);
FormData_hypertable *form = (FormData_hypertable *) GETSTRUCT(tuple);
CatalogSecurityContext sec_ctx;
namestrcpy(&form->associated_schema_name, INTERNAL_SCHEMA_NAME);
catalog_become_owner(catalog_get(), &sec_ctx);
catalog_update(ti->scanrel, tuple);
catalog_restore_user(&sec_ctx);
heap_freetuple(tuple);
return true;
}
/*
* Reset the matching associated schema to the internal schema.
*/
int
hypertable_reset_associated_schema_name(const char *associated_schema)
{
ScanKeyData scankey[1];
ScanKeyInit(&scankey[0], Anum_hypertable_associated_schema_name,
BTEqualStrategyNumber, F_NAMEEQ,
DirectFunctionCall1(namein, CStringGetDatum(associated_schema)));
return hypertable_scan_limit_internal(scankey,
1,
NOINDEX,
reset_associated_tuple_found,
NULL,
0,
RowExclusiveLock,
false);
}
static bool static bool
tuple_found_lock(TupleInfo *ti, void *data) tuple_found_lock(TupleInfo *ti, void *data)
{ {

View File

@ -32,6 +32,8 @@ extern int hypertable_set_name(Hypertable *ht, const char *newname);
extern int hypertable_set_schema(Hypertable *ht, const char *newname); extern int hypertable_set_schema(Hypertable *ht, const char *newname);
extern int hypertable_set_num_dimensions(Hypertable *ht, int16 num_dimensions); extern int hypertable_set_num_dimensions(Hypertable *ht, int16 num_dimensions);
extern int hypertable_delete_by_id(int32 hypertable_id); extern int hypertable_delete_by_id(int32 hypertable_id);
extern int hypertable_delete_by_schema_name(const char *schema_name);
extern int hypertable_reset_associated_schema_name(const char *associated_schema);
extern Oid hypertable_id_to_relid(int32 hypertable_id); extern Oid hypertable_id_to_relid(int32 hypertable_id);
extern Chunk *hypertable_get_chunk(Hypertable *h, Point *point); extern Chunk *hypertable_get_chunk(Hypertable *h, Point *point);
extern Oid hypertable_relid(RangeVar *rv); extern Oid hypertable_relid(RangeVar *rv);

View File

@ -399,6 +399,67 @@ process_truncate(ProcessUtilityArgs *args)
return true; return true;
} }
/*
* Handle DROP SCHEMA.
*
* There are three cases to handle. The DROP can be for a schema that:
*
* 1. Is the schema for one or more hypertables
* 2. Is an associated schema for chunks
* 3. Is a schema for one or more chunks (possibly different from
* the associated schema if it has been changed)
*/
static void
process_drop_schema(DropStmt *stmt)
{
ListCell *lc;
foreach(lc, stmt->objects)
{
int count;
#if PG10
const char *schema = strVal(lfirst(lc));;
#elif PG96
const char *schema = NameListToString(lfirst(lc));
#endif
if (strcmp(schema, INTERNAL_SCHEMA_NAME) == 0)
ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("cannot drop the internal schema for extension \"%s\"",
EXTENSION_NAME),
errhint("Use DROP EXTENSION to remove the extension and the schema.")));
/*
* For hypertables and chunks, we only care about this DROP if it
* cascades
*/
if (stmt->behavior == DROP_CASCADE)
{
/* Delete any hypertables that exists in the schema */
hypertable_delete_by_schema_name(schema);
/*
* There might be chunks that exist in the dropped schema although
* the hypertable does not
*/
chunk_delete_by_schema_name(schema);
}
/*
* Check for any remaining hypertables that use the schema as its
* associated schema. For matches, we reset their associated schema to
* the INTERNAL schema
*/
count = hypertable_reset_associated_schema_name(schema);
if (count > 0)
ereport(NOTICE,
(errmsg("the chunk storage schema changed to \"%s\" for %d hypertable%c",
INTERNAL_SCHEMA_NAME, count, (count > 1) ? 's' : '\0')));
}
}
static void static void
process_drop_table_chunk(Hypertable *ht, Oid chunk_relid, void *arg) process_drop_table_chunk(Hypertable *ht, Oid chunk_relid, void *arg)
{ {
@ -557,6 +618,9 @@ process_drop(Node *parsetree)
switch (stmt->removeType) switch (stmt->removeType)
{ {
case OBJECT_SCHEMA:
process_drop_schema(stmt);
break;
case OBJECT_TABLE: case OBJECT_TABLE:
process_drop_table(stmt); process_drop_table(stmt);
break; break;

View File

@ -0,0 +1,114 @@
\c single :ROLE_SUPERUSER
CREATE SCHEMA chunk_schema1;
CREATE SCHEMA chunk_schema2;
CREATE SCHEMA hypertable_schema;
CREATE SCHEMA extra_schema;
GRANT ALL ON SCHEMA hypertable_schema TO :ROLE_DEFAULT_PERM_USER;
GRANT ALL ON SCHEMA chunk_schema1 TO :ROLE_DEFAULT_PERM_USER;
GRANT ALL ON SCHEMA chunk_schema2 TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE TABLE hypertable_schema.test1 (time timestamptz, temp float, location int);
CREATE TABLE hypertable_schema.test2 (time timestamptz, temp float, location int);
--create two identical tables with their own chunk schemas
SELECT create_hypertable('hypertable_schema.test1', 'time', 'location', 2, associated_schema_name => 'chunk_schema1');
NOTICE: adding NOT NULL constraint to column "time"
create_hypertable
-------------------
(1 row)
SELECT create_hypertable('hypertable_schema.test2', 'time', 'location', 2, associated_schema_name => 'chunk_schema2');
NOTICE: adding NOT NULL constraint to column "time"
create_hypertable
-------------------
(1 row)
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
INSERT INTO hypertable_schema.test2 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
----+-------------------+------------+------------------------+-------------------------+----------------
1 | hypertable_schema | test1 | chunk_schema1 | _hyper_1 | 2
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2
(2 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+---------------+------------------
1 | 1 | chunk_schema1 | _hyper_1_1_chunk
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
(2 rows)
RESET ROLE;
--drop the associated schema. We drop the extra schema to show we can
--handle multi-schema drops
DROP SCHEMA chunk_schema1, extra_schema CASCADE;
NOTICE: the chunk storage schema changed to "_timescaledb_internal" for 1 hypertable
NOTICE: drop cascades to table chunk_schema1._hyper_1_1_chunk
SET ROLE :ROLE_DEFAULT_PERM_USER;
--show that the metadata for the table using the dropped schema is
--changed. The other table is not affected.
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
----+-------------------+------------+------------------------+-------------------------+----------------
1 | hypertable_schema | test1 | _timescaledb_internal | _hyper_1 | 2
2 | hypertable_schema | test2 | chunk_schema2 | _hyper_2 | 2
(2 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+---------------+------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
(1 row)
--new chunk should be created in the internal associated schema
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-----------------------+------------------
2 | 2 | chunk_schema2 | _hyper_2_2_chunk
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk
(2 rows)
RESET ROLE;
--dropping the internal schema should not work
\set ON_ERROR_STOP 0
DROP SCHEMA _timescaledb_internal CASCADE;
ERROR: cannot drop the internal schema for extension "timescaledb"
\set ON_ERROR_STOP 1
--dropping the hypertable schema should delete everything
DROP SCHEMA hypertable_schema CASCADE;
NOTICE: drop cascades to 4 other objects
SET ROLE :ROLE_DEFAULT_PERM_USER;
--everything should be cleaned up
SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id;
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
----+-------------+------------+------------------------+-------------------------+----------------
(0 rows)
SELECT * FROM _timescaledb_catalog.chunk;
id | hypertable_id | schema_name | table_name
----+---------------+-------------+------------
(0 rows)
SELECT * FROM _timescaledb_catalog.dimension;
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length
----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------
(0 rows)
SELECT * FROM _timescaledb_catalog.dimension_slice;
id | dimension_id | range_start | range_end
----+--------------+-------------+-----------
(0 rows)
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+------------+---------------+-----------------------
(0 rows)
SELECT * FROM _timescaledb_catalog.chunk_constraint;
chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name
----------+--------------------+-----------------+----------------------------
(0 rows)

View File

@ -17,6 +17,7 @@ set(TEST_FILES
ddl_single.sql ddl_single.sql
ddl.sql ddl.sql
delete.sql delete.sql
drop_schema.sql
drop_chunks.sql drop_chunks.sql
drop_extension.sql drop_extension.sql
drop_hypertable.sql drop_hypertable.sql

53
test/sql/drop_schema.sql Normal file
View File

@ -0,0 +1,53 @@
\c single :ROLE_SUPERUSER
CREATE SCHEMA chunk_schema1;
CREATE SCHEMA chunk_schema2;
CREATE SCHEMA hypertable_schema;
CREATE SCHEMA extra_schema;
GRANT ALL ON SCHEMA hypertable_schema TO :ROLE_DEFAULT_PERM_USER;
GRANT ALL ON SCHEMA chunk_schema1 TO :ROLE_DEFAULT_PERM_USER;
GRANT ALL ON SCHEMA chunk_schema2 TO :ROLE_DEFAULT_PERM_USER;
SET ROLE :ROLE_DEFAULT_PERM_USER;
CREATE TABLE hypertable_schema.test1 (time timestamptz, temp float, location int);
CREATE TABLE hypertable_schema.test2 (time timestamptz, temp float, location int);
--create two identical tables with their own chunk schemas
SELECT create_hypertable('hypertable_schema.test1', 'time', 'location', 2, associated_schema_name => 'chunk_schema1');
SELECT create_hypertable('hypertable_schema.test2', 'time', 'location', 2, associated_schema_name => 'chunk_schema2');
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
INSERT INTO hypertable_schema.test2 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
SELECT * FROM _timescaledb_catalog.chunk;
RESET ROLE;
--drop the associated schema. We drop the extra schema to show we can
--handle multi-schema drops
DROP SCHEMA chunk_schema1, extra_schema CASCADE;
SET ROLE :ROLE_DEFAULT_PERM_USER;
--show that the metadata for the table using the dropped schema is
--changed. The other table is not affected.
SELECT * FROM _timescaledb_catalog.hypertable ORDER BY id;
SELECT * FROM _timescaledb_catalog.chunk;
--new chunk should be created in the internal associated schema
INSERT INTO hypertable_schema.test1 VALUES ('2001-01-01 01:01:01', 23.3, 1);
SELECT * FROM _timescaledb_catalog.chunk;
RESET ROLE;
--dropping the internal schema should not work
\set ON_ERROR_STOP 0
DROP SCHEMA _timescaledb_internal CASCADE;
\set ON_ERROR_STOP 1
--dropping the hypertable schema should delete everything
DROP SCHEMA hypertable_schema CASCADE;
SET ROLE :ROLE_DEFAULT_PERM_USER;
--everything should be cleaned up
SELECT * FROM _timescaledb_catalog.hypertable GROUP BY id;
SELECT * FROM _timescaledb_catalog.chunk;
SELECT * FROM _timescaledb_catalog.dimension;
SELECT * FROM _timescaledb_catalog.dimension_slice;
SELECT * FROM _timescaledb_catalog.chunk_index;
SELECT * FROM _timescaledb_catalog.chunk_constraint;