Move index and constraints drop handling to event trigger

This fixes at least two bugs:

1) A drop of a referenced table used to drop the associated
FK constraint but not the metadata associated with the constraint.
Fixes #43.

2) A drop of a column removed any indexes associated with the column
but not the metadata associated with the index.
This commit is contained in:
Matvey Arye 2018-02-01 19:25:46 -05:00 committed by Matvey Arye
parent d6baccb9d7
commit 26965826f4
26 changed files with 650 additions and 184 deletions

View File

@ -1,8 +1,12 @@
-- Register the single C entry point used by both event triggers.
CREATE OR REPLACE FUNCTION _timescaledb_internal.process_ddl_event() RETURNS event_trigger
AS '@MODULE_PATHNAME@', 'timescaledb_process_ddl_event' LANGUAGE C;

DROP EVENT TRIGGER IF EXISTS timescaledb_ddl_command_end;

--EVENT TRIGGER MUST exclude the ALTER EXTENSION tag.
CREATE EVENT TRIGGER timescaledb_ddl_command_end ON ddl_command_end
WHEN TAG IN ('ALTER TABLE','CREATE TRIGGER','CREATE TABLE','CREATE INDEX','ALTER INDEX')
EXECUTE PROCEDURE _timescaledb_internal.process_ddl_event();

DROP EVENT TRIGGER IF EXISTS timescaledb_ddl_sql_drop;

-- sql_drop fires for every dropped object so index/constraint metadata
-- can be cleaned up even when the drop happens via cascade.
CREATE EVENT TRIGGER timescaledb_ddl_sql_drop ON sql_drop
EXECUTE PROCEDURE _timescaledb_internal.process_ddl_event();

View File

@ -0,0 +1 @@
DROP FUNCTION _timescaledb_internal.ddl_command_end();

View File

@ -71,6 +71,7 @@ static const TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES]
[CHUNK_CONSTRAINT] = {
.length = _MAX_CHUNK_CONSTRAINT_INDEX,
.names = (char *[]) {
[CHUNK_CONSTRAINT_CHUNK_ID_CONSTRAINT_NAME_IDX] = "chunk_constraint_chunk_id_constraint_name_key",
[CHUNK_CONSTRAINT_CHUNK_ID_DIMENSION_SLICE_ID_IDX] = "chunk_constraint_chunk_id_dimension_slice_id_idx",
}
},

View File

@ -34,6 +34,10 @@ typedef enum CatalogTable
} CatalogTable;
#define INVALID_CATALOG_TABLE _MAX_CATALOG_TABLES
#define INVALID_INDEXID -1
#define CATALOG_INDEX(catalog, tableid, indexid) \
(indexid == INVALID_INDEXID ? InvalidOid : (catalog)->tables[tableid].index_ids[indexid])
#define CatalogInternalCall1(func, datum1) \
OidFunctionCall1(catalog_get_internal_function_id(catalog_get(), func), datum1)
@ -329,7 +333,8 @@ typedef FormData_chunk_constraint *Form_chunk_constraint;
enum
{
CHUNK_CONSTRAINT_CHUNK_ID_DIMENSION_SLICE_ID_IDX = 0,
CHUNK_CONSTRAINT_CHUNK_ID_CONSTRAINT_NAME_IDX = 0,
CHUNK_CONSTRAINT_CHUNK_ID_DIMENSION_SLICE_ID_IDX,
_MAX_CHUNK_CONSTRAINT_INDEX,
};
@ -340,6 +345,13 @@ enum Anum_chunk_constraint_chunk_id_dimension_slice_id_idx
_Anum_chunk_constraint_chunk_id_dimension_slice_id_idx_max,
};
enum Anum_chunk_constraint_chunk_id_constraint_name_idx
{
Anum_chunk_constraint_chunk_id_constraint_name_idx_chunk_id = 1,
Anum_chunk_constraint_chunk_id_constraint_name_idx_constraint_name,
_Anum_chunk_constraint_chunk_id_constraint_name_idx_max,
};
/************************************
*
* Chunk index table definitions

View File

@ -566,7 +566,6 @@ chunk_fill_stub(Chunk *chunk_stub, bool tuplock)
ScannerCtx ctx = {
.table = catalog->tables[CHUNK].id,
.index = catalog->tables[CHUNK].index_ids[CHUNK_ID_INDEX],
.scantype = ScannerTypeIndex,
.nkeys = 1,
.scankey = scankey,
.data = chunk_stub,
@ -868,7 +867,6 @@ chunk_scan_internal(int indexid,
ScannerCtx ctx = {
.table = catalog->tables[CHUNK].id,
.index = catalog->tables[CHUNK].index_ids[indexid],
.scantype = ScannerTypeIndex,
.nkeys = nkeys,
.data = data,
.scankey = scankey,

View File

@ -356,8 +356,6 @@ chunk_constraint_tuple_found(TupleInfo *ti, void *data)
return true;
}
#define NO_INDEXSCAN -1
static int
chunk_constraint_scan_internal(int indexid,
ScanKeyData *scankey,
@ -370,7 +368,7 @@ chunk_constraint_scan_internal(int indexid,
Catalog *catalog = catalog_get();
ScannerCtx scanctx = {
.table = catalog->tables[CHUNK_CONSTRAINT].id,
.scantype = (indexid == NO_INDEXSCAN) ? ScannerTypeHeap : ScannerTypeIndex,
.index = CATALOG_INDEX(catalog, CHUNK_CONSTRAINT, indexid),
.nkeys = nkeys,
.scankey = scankey,
.data = data,
@ -380,9 +378,6 @@ chunk_constraint_scan_internal(int indexid,
.scandirection = ForwardScanDirection,
};
if (indexid != NO_INDEXSCAN)
scanctx.index = catalog->tables[CHUNK_CONSTRAINT].index_ids[indexid];
return scanner_scan(&scanctx);
}
@ -413,6 +408,38 @@ chunk_constraint_scan_by_chunk_id_internal(int32 chunk_id,
lockmode);
}
/*
 * Scan the chunk_constraint catalog table for the row matching a given
 * (chunk_id, constraint_name) pair, using the corresponding catalog index.
 *
 * tuple_found is invoked for each matching tuple (subject to tuple_filter,
 * which may be NULL). The return value of chunk_constraint_scan_internal is
 * propagated unchanged.
 */
static int
chunk_constraint_scan_by_chunk_id_constraint_name_internal(int32 chunk_id,
const char *constraint_name,
tuple_found_func tuple_found,
tuple_found_func tuple_filter,
void *data,
LOCKMODE lockmode)
{
ScanKeyData scankey[2];
/* Key 1: equality on the chunk ID column of the index */
ScanKeyInit(&scankey[0],
Anum_chunk_constraint_chunk_id_constraint_name_idx_chunk_id,
BTEqualStrategyNumber,
F_INT4EQ,
Int32GetDatum(chunk_id));
/* Key 2: equality on the constraint name (compared as a Name datum) */
ScanKeyInit(&scankey[1],
Anum_chunk_constraint_chunk_id_constraint_name_idx_constraint_name,
BTEqualStrategyNumber,
F_NAMEEQ,
DirectFunctionCall1(namein, CStringGetDatum(constraint_name)));
return chunk_constraint_scan_internal(CHUNK_CONSTRAINT_CHUNK_ID_CONSTRAINT_NAME_IDX,
scankey,
2,
tuple_found,
tuple_filter,
data,
lockmode);
}
/*
* Scan all the chunk's constraints given its chunk ID.
*
@ -625,6 +652,8 @@ typedef struct ConstraintInfo
{
const char *hypertable_constraint_name;
ChunkConstraints *ccs;
bool delete_metadata;
bool drop_constraint;
} ConstraintInfo;
typedef struct RenameHypertableConstraintInfo
@ -652,24 +681,29 @@ chunk_constraint_delete_tuple(TupleInfo *ti, void *data)
ObjectAddress constrobj = {
.classId = ConstraintRelationId,
.objectId = get_relation_constraint_oid(chunk->table_id,
NameStr(*DatumGetName(constrname)), false),
NameStr(*DatumGetName(constrname)), true),
};
Oid index_relid = get_constraint_index(constrobj.objectId);
/* Collect the deleted constraints */
if (NULL != info && NULL != info->ccs)
if (NULL != info->ccs)
chunk_constraint_tuple_found(ti, info->ccs);
/*
* If this is an index constraint, we need to cleanup the index metadata.
* Don't drop the index though, since that will happen when the
* constraint is dropped.
*/
if (OidIsValid(index_relid))
chunk_index_delete(chunk, index_relid, false);
if (info->delete_metadata)
{
/*
* If this is an index constraint, we need to cleanup the index
* metadata. Don't drop the index though, since that will happen when
* the constraint is dropped.
*/
if (OidIsValid(index_relid))
chunk_index_delete(chunk, index_relid, false);
catalog_delete(ti->scanrel, ti->tuple);
performDeletion(&constrobj, DROP_RESTRICT, 0);
catalog_delete(ti->scanrel, ti->tuple);
}
if (info->drop_constraint && OidIsValid(constrobj.objectId))
performDeletion(&constrobj, DROP_RESTRICT, 0);
return true;
}
@ -695,10 +729,13 @@ hypertable_constraint_tuple_filter(TupleInfo *ti, void *data)
int
chunk_constraint_delete_by_hypertable_constraint_name(int32 chunk_id,
char *hypertable_constraint_name)
char *hypertable_constraint_name,
bool delete_metadata, bool drop_constraint)
{
ConstraintInfo info = {
.hypertable_constraint_name = hypertable_constraint_name,
.delete_metadata = delete_metadata,
.drop_constraint = drop_constraint
};
return chunk_constraint_scan_by_chunk_id_internal(chunk_id,
@ -708,6 +745,23 @@ chunk_constraint_delete_by_hypertable_constraint_name(int32 chunk_id,
RowExclusiveLock);
}
/*
 * Delete a chunk constraint identified by chunk ID and constraint name.
 *
 * delete_metadata controls whether the catalog row (and associated index
 * metadata) is removed; drop_constraint controls whether the actual table
 * constraint is dropped as well.
 */
int
chunk_constraint_delete_by_constraint_name(int32 chunk_id, const char *constraint_name,
										   bool delete_metadata, bool drop_constraint)
{
	ConstraintInfo delete_info;

	delete_info.hypertable_constraint_name = NULL;
	delete_info.ccs = NULL;
	delete_info.delete_metadata = delete_metadata;
	delete_info.drop_constraint = drop_constraint;

	return chunk_constraint_scan_by_chunk_id_constraint_name_internal(chunk_id,
																	  constraint_name,
																	  chunk_constraint_delete_tuple,
																	  NULL,
																	  &delete_info,
																	  RowExclusiveLock);
}
/*
* Delete all constraints for a chunk. Optionally, collect the deleted constraints.
*/
@ -716,6 +770,8 @@ chunk_constraint_delete_by_chunk_id(int32 chunk_id, ChunkConstraints *ccs)
{
ConstraintInfo info = {
.ccs = ccs,
.delete_metadata = true,
.drop_constraint = true,
};
return chunk_constraint_scan_by_chunk_id_internal(chunk_id,
@ -728,6 +784,11 @@ chunk_constraint_delete_by_chunk_id(int32 chunk_id, ChunkConstraints *ccs)
int
chunk_constraint_delete_by_dimension_slice_id(int32 dimension_slice_id)
{
ConstraintInfo info = {
.delete_metadata = true,
.drop_constraint = true,
};
ScanKeyData scankey[1];
ScanKeyInit(&scankey[0],
@ -736,12 +797,12 @@ chunk_constraint_delete_by_dimension_slice_id(int32 dimension_slice_id)
F_INT4EQ,
Int32GetDatum(dimension_slice_id));
return chunk_constraint_scan_internal(NO_INDEXSCAN,
return chunk_constraint_scan_internal(INVALID_INDEXID,
scankey,
1,
chunk_constraint_delete_tuple,
NULL,
NULL,
&info,
RowExclusiveLock);
}

View File

@ -42,9 +42,10 @@ extern int chunk_constraints_add_dimension_constraints(ChunkConstraints *ccs, in
extern int chunk_constraints_add_inheritable_constraints(ChunkConstraints *ccs, int32 chunk_id, Oid hypertable_oid);
extern void chunk_constraints_create(ChunkConstraints *ccs, Oid chunk_oid, int32 chunk_id, Oid hypertable_oid, int32 hypertable_id);
extern void chunk_constraint_create_on_chunk(Chunk *chunk, Oid constraint_oid);
extern int chunk_constraint_delete_by_hypertable_constraint_name(int32 chunk_id, char *hypertable_constraint_name);
extern int chunk_constraint_delete_by_hypertable_constraint_name(int32 chunk_id, char *hypertable_constraint_name, bool delete_metadata, bool drop_constraint);
extern int chunk_constraint_delete_by_chunk_id(int32 chunk_id, ChunkConstraints *ccs);
extern int chunk_constraint_delete_by_dimension_slice_id(int32 dimension_slice_id);
extern int chunk_constraint_delete_by_constraint_name(int32 chunk_id, const char *constraint_name, bool delete_metadata, bool drop_constraint);
extern void chunk_constraint_recreate(ChunkConstraint *cc, Oid chunk_oid);
extern int chunk_constraint_rename_hypertable_constraint(int32 chunk_id, const char *oldname, const char *newname);

View File

@ -507,16 +507,16 @@ chunk_index_create_all(int32 hypertable_id, Oid hypertable_relid, int32 chunk_id
static int
chunk_index_scan(int indexid, ScanKeyData scankey[], int nkeys,
tuple_found_func tuple_found, void *data, LOCKMODE lockmode)
tuple_found_func tuple_found, tuple_filter_func tuple_filter, void *data, LOCKMODE lockmode)
{
Catalog *catalog = catalog_get();
ScannerCtx scanCtx = {
.table = catalog->tables[CHUNK_INDEX].id,
.index = catalog->tables[CHUNK_INDEX].index_ids[indexid],
.scantype = ScannerTypeIndex,
.index = CATALOG_INDEX(catalog, CHUNK_INDEX, indexid),
.nkeys = nkeys,
.scankey = scankey,
.tuple_found = tuple_found,
.filter = tuple_filter,
.data = data,
.lockmode = lockmode,
.scandirection = ForwardScanDirection,
@ -525,8 +525,8 @@ chunk_index_scan(int indexid, ScanKeyData scankey[], int nkeys,
return scanner_scan(&scanCtx);
}
#define chunk_index_scan_update(idxid, scankey, nkeys, tuple_found, data) \
chunk_index_scan(idxid, scankey, nkeys, tuple_found, data, RowExclusiveLock)
#define chunk_index_scan_update(idxid, scankey, nkeys, tuple_found, tuple_filter, data) \
chunk_index_scan(idxid, scankey, nkeys, tuple_found, tuple_filter, data, RowExclusiveLock)
static ChunkIndexMapping *
chunk_index_mapping_from_tuple(TupleInfo *ti, ChunkIndexMapping *cim)
@ -575,40 +575,78 @@ chunk_index_get_mappings(Hypertable *ht, Oid hypertable_indexrelid)
DirectFunctionCall1(namein, CStringGetDatum((indexname))));
chunk_index_scan(CHUNK_INDEX_HYPERTABLE_ID_HYPERTABLE_INDEX_NAME_IDX,
scankey, 2, chunk_index_collect, &mappings, AccessShareLock);
scankey, 2, chunk_index_collect, NULL, &mappings, AccessShareLock);
return mappings;
}
typedef struct ChunkIndexDeleteData
{
const char *index_name;
const char *schema;
bool drop_index;
} ChunkIndexDeleteData;
static bool
chunk_index_tuple_delete(TupleInfo *ti, void *data)
{
FormData_chunk_index *chunk_index = (FormData_chunk_index *) GETSTRUCT(ti->tuple);
Oid schemaid = chunk_index_get_schemaid(chunk_index);
bool *should_drop = data;
ChunkIndexDeleteData *cid = data;
catalog_delete(ti->scanrel, ti->tuple);
if (*should_drop)
if (cid->drop_index)
{
ObjectAddress idxobj = {
.classId = RelationRelationId,
.objectId = get_relname_relid(NameStr(chunk_index->index_name), schemaid),
};
Assert(OidIsValid(idxobj.objectId));
performDeletion(&idxobj, DROP_RESTRICT, 0);
if (OidIsValid(idxobj.objectId))
performDeletion(&idxobj, DROP_RESTRICT, 0);
}
return true;
}
/*
 * Scan filter that matches chunk_index rows against a (schema, index name)
 * pair coming from a sql_drop event.
 *
 * A row matches if the name refers either to the chunk-level index (and the
 * chunk lives in the given schema) or to the parent hypertable index (and
 * the hypertable lives in the given schema). Both checks are needed because
 * the drop event only provides a name and schema, not whether the dropped
 * index belonged to a chunk or a hypertable.
 */
static bool
chunk_index_name_and_schema_filter(TupleInfo *ti, void *data)
{
FormData_chunk_index *chunk_index = (FormData_chunk_index *) GETSTRUCT(ti->tuple);
ChunkIndexDeleteData *cid = data;
/* Case 1: the dropped index is a chunk index */
if (namestrcmp(&chunk_index->index_name, cid->index_name) == 0)
{
Chunk *chunk = chunk_get_by_id(chunk_index->chunk_id, 0, false);
if (NULL != chunk && namestrcmp(&chunk->fd.schema_name, cid->schema) == 0)
return true;
}
/* Case 2: the dropped index is a hypertable index */
if (namestrcmp(&chunk_index->hypertable_index_name, cid->index_name) == 0)
{
Hypertable *ht;
ht = hypertable_get_by_id(chunk_index->hypertable_id);
if (NULL != ht && namestrcmp(&ht->fd.schema_name, cid->schema) == 0)
return true;
}
return false;
}
int
chunk_index_delete_children_of(Hypertable *ht, Oid hypertable_indexrelid, bool should_drop)
{
ScanKeyData scankey[2];
const char *indexname = get_rel_name(hypertable_indexrelid);
ChunkIndexDeleteData data = {
.drop_index = should_drop,
};
ScanKeyInit(&scankey[0],
Anum_chunk_index_hypertable_id_hypertable_index_name_idx_hypertable_id,
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(ht->fd.id));
@ -618,7 +656,7 @@ chunk_index_delete_children_of(Hypertable *ht, Oid hypertable_indexrelid, bool s
DirectFunctionCall1(namein, CStringGetDatum((indexname))));
return chunk_index_scan_update(CHUNK_INDEX_HYPERTABLE_ID_HYPERTABLE_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_delete, &should_drop);
scankey, 2, chunk_index_tuple_delete, NULL, &data);
}
int
@ -626,6 +664,10 @@ chunk_index_delete(Chunk *chunk, Oid chunk_indexrelid, bool drop_index)
{
ScanKeyData scankey[2];
const char *indexname = get_rel_name(chunk_indexrelid);
ChunkIndexDeleteData data = {
.drop_index = drop_index,
};
ScanKeyInit(&scankey[0],
Anum_chunk_index_chunk_id_index_name_idx_chunk_id,
@ -636,33 +678,54 @@ chunk_index_delete(Chunk *chunk, Oid chunk_indexrelid, bool drop_index)
DirectFunctionCall1(namein, CStringGetDatum(indexname)));
return chunk_index_scan_update(CHUNK_INDEX_CHUNK_ID_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_delete, &drop_index);
scankey, 2, chunk_index_tuple_delete, NULL, &data);
}
/*
 * Delete chunk index metadata matching a (schema, index name) pair,
 * optionally dropping the index relations themselves.
 *
 * Performed as a heap scan with a name/schema filter since no catalog index
 * covers this lookup.
 */
void
chunk_index_delete_by_name(const char *schema, const char *index_name, bool drop_index)
{
	ChunkIndexDeleteData args = {
		.schema = schema,
		.index_name = index_name,
		.drop_index = drop_index,
	};

	chunk_index_scan_update(INVALID_INDEXID,
							NULL,
							0,
							chunk_index_tuple_delete,
							chunk_index_name_and_schema_filter,
							&args);
}
/*
 * Delete all chunk_index metadata rows for a given chunk, optionally
 * dropping the index relations as well (drop_index).
 *
 * Returns the number of rows processed by the scan.
 */
int
chunk_index_delete_by_chunk_id(int32 chunk_id, bool drop_index)
{
	ScanKeyData scankey[1];
	ChunkIndexDeleteData data = {
		.drop_index = drop_index,
	};

	ScanKeyInit(&scankey[0],
				Anum_chunk_index_chunk_id_index_name_idx_chunk_id,
				BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(chunk_id));

	/* The stale pre-update call line (passing &drop_index) is removed; the
	 * scan now passes the ChunkIndexDeleteData struct with no filter */
	return chunk_index_scan_update(CHUNK_INDEX_CHUNK_ID_INDEX_NAME_IDX,
								   scankey, 1, chunk_index_tuple_delete, NULL, &data);
}
/*
 * Delete all chunk_index metadata rows for a given hypertable, optionally
 * dropping the index relations as well (drop_index).
 *
 * Returns the number of rows processed by the scan.
 */
int
chunk_index_delete_by_hypertable_id(int32 hypertable_id, bool drop_index)
{
	ScanKeyData scankey[1];
	ChunkIndexDeleteData data = {
		.drop_index = drop_index,
	};

	ScanKeyInit(&scankey[0],
				Anum_chunk_index_hypertable_id_hypertable_index_name_idx_hypertable_id,
				BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(hypertable_id));

	/* The stale pre-update call line (passing &drop_index) is removed; the
	 * scan now passes the ChunkIndexDeleteData struct with no filter */
	return chunk_index_scan_update(CHUNK_INDEX_HYPERTABLE_ID_HYPERTABLE_INDEX_NAME_IDX,
								   scankey, 1, chunk_index_tuple_delete, NULL, &data);
}
static bool
@ -690,7 +753,7 @@ chunk_index_get_by_indexrelid(Chunk *chunk, Oid chunk_indexrelid)
BTEqualStrategyNumber, F_NAMEEQ, DirectFunctionCall1(namein, CStringGetDatum(indexname)));
chunk_index_scan(CHUNK_INDEX_CHUNK_ID_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_found, cim, AccessShareLock);
scankey, 2, chunk_index_tuple_found, NULL, cim, AccessShareLock);
return cim;
}
@ -759,7 +822,7 @@ chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *newname)
BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(indexname));
return chunk_index_scan_update(CHUNK_INDEX_CHUNK_ID_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_rename, &renameinfo);
scankey, 2, chunk_index_tuple_rename, NULL, &renameinfo);
}
int
@ -781,7 +844,7 @@ chunk_index_rename_parent(Hypertable *ht, Oid hypertable_indexrelid, const char
BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(indexname));
return chunk_index_scan_update(CHUNK_INDEX_HYPERTABLE_ID_HYPERTABLE_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_rename, &renameinfo);
scankey, 2, chunk_index_tuple_rename, NULL, &renameinfo);
}
static bool
@ -817,7 +880,7 @@ chunk_index_set_tablespace(Hypertable *ht, Oid hypertable_indexrelid, const char
BTEqualStrategyNumber, F_NAMEEQ, CStringGetDatum(indexname));
return chunk_index_scan_update(CHUNK_INDEX_HYPERTABLE_ID_HYPERTABLE_INDEX_NAME_IDX,
scankey, 2, chunk_index_tuple_set_tablespace,
scankey, 2, chunk_index_tuple_set_tablespace, NULL,
(char *) tablespace);
}

View File

@ -22,6 +22,7 @@ extern int chunk_index_delete_children_of(Hypertable *ht, Oid hypertable_indexre
extern int chunk_index_delete(Chunk *chunk, Oid chunk_indexrelid, bool drop_index);
extern int chunk_index_delete_by_chunk_id(int32 chunk_id, bool drop_index);
extern int chunk_index_delete_by_hypertable_id(int32 hypertable_id, bool drop_index);
extern void chunk_index_delete_by_name(const char *schema, const char *index_name, bool drop_index);
extern int chunk_index_rename(Chunk *chunk, Oid chunk_indexrelid, const char *newname);
extern int chunk_index_rename_parent(Hypertable *ht, Oid hypertable_indexrelid, const char *newname);
extern int chunk_index_set_tablespace(Hypertable *ht, Oid hypertable_indexrelid, const char *tablespace);

View File

@ -2,6 +2,7 @@
#include <funcapi.h>
#include "compat.h"
#include "extension.h"
/* Old functions that are no longer used but are needed for compatibility when
* updating the extension. */
@ -58,3 +59,15 @@ hypertable_validate_triggers(PG_FUNCTION_ARGS)
elog(ERROR, "Deprecated function should not be invoked");
PG_RETURN_NULL();
}
/*
 * Deprecated stub for the old ddl_command_end trigger function.
 *
 * Kept only so the C symbol still resolves during extension updates (see
 * the file header note on compatibility); it must never actually run.
 */
TS_FUNCTION_INFO_V1(timescaledb_ddl_command_end);
Datum
timescaledb_ddl_command_end(PG_FUNCTION_ARGS)
{
/* Quietly do nothing if the extension SQL is not yet (re)installed */
if (!extension_is_loaded())
PG_RETURN_NULL();
elog(ERROR, "Deprecated function should not be invoked");
/* Not reached: elog(ERROR) does not return */
PG_RETURN_NULL();
}

View File

@ -321,7 +321,6 @@ dimension_scan_internal(ScanKeyData *scankey,
ScannerCtx scanctx = {
.table = catalog->tables[DIMENSION].id,
.index = catalog->tables[DIMENSION].index_ids[DIMENSION_HYPERTABLE_ID_IDX],
.scantype = ScannerTypeIndex,
.nkeys = nkeys,
.limit = limit,
.scankey = scankey,
@ -367,7 +366,6 @@ dimension_scan_update(int32 dimension_id, tuple_found_func tuple_found, void *da
ScannerCtx scanctx = {
.table = catalog->tables[DIMENSION].id,
.index = catalog->tables[DIMENSION].index_ids[DIMENSION_ID_IDX],
.scantype = ScannerTypeIndex,
.nkeys = 1,
.limit = 1,
.scankey = scankey,

View File

@ -119,7 +119,6 @@ dimension_slice_scan_limit_internal(int indexid,
ScannerCtx scanCtx = {
.table = catalog->tables[DIMENSION_SLICE].id,
.index = catalog->tables[DIMENSION_SLICE].index_ids[indexid],
.scantype = ScannerTypeIndex,
.nkeys = nkeys,
.scankey = scankey,
.data = scandata,

View File

@ -1,16 +1,22 @@
#include <postgres.h>
#include <commands/event_trigger.h>
#include <utils/builtins.h>
#include <executor/executor.h>
#include <access/htup_details.h>
#include <catalog/pg_type.h>
#include <catalog/pg_constraint.h>
#include <catalog/pg_class.h>
#include "event_trigger.h"
#define DDL_INFO_NATTS 9
#define DROPPED_OBJECTS_NATTS 12
/* Function manager info for the event "pg_event_trigger_ddl_commands", which is
* used to retrieve information on executed DDL commands in an event
* trigger. The function manager info is initialized on extension load. */
static FmgrInfo ddl_commands_fmgrinfo;
static FmgrInfo dropped_objects_fmgrinfo;
/*
* Get a list of executed DDL commands in an event trigger.
@ -63,11 +69,145 @@ event_trigger_ddl_commands(void)
return objects;
}
/* Given a TEXT[] of addrnames return a list of heap allocated char *
 *
 * Similar to PostgreSQL's textarray_to_strvaluelist. Errors out on NULL
 * elements, which are not expected in address-name arrays produced by
 * pg_event_trigger_dropped_objects(). */
static List *
extract_addrnames(ArrayType *arr)
{
Datum *elems;
bool *nulls;
int nelems;
List *list = NIL;
int i;
/* TEXT elements: varlena (-1 typlen), not by-value, int-aligned ('i') */
deconstruct_array(arr, TEXTOID, -1, false, 'i',
&elems, &nulls, &nelems);
for (i = 0; i < nelems; i++)
{
if (nulls[i])
elog(ERROR, "unexpected null in name list");
/* TextDatumGetCString heap allocates the string */
list = lappend(list, TextDatumGetCString(elems[i]));
}
return list;
}
static EventTriggerDropTableConstraint *
makeEventTriggerDropTableConstraint(char *constraint_name, char *schema, char *table)
{
EventTriggerDropTableConstraint *obj = palloc(sizeof(EventTriggerDropTableConstraint));
*obj = (EventTriggerDropTableConstraint)
{
.obj =
{
.type = EVENT_TRIGGER_DROP_TABLE_CONSTRAINT
},
.constraint_name = constraint_name,
.schema = schema,
.table = table
};
return obj;
}
static EventTriggerDropIndex *
makeEventTriggerDropIndex(char *index_name, char *schema)
{
EventTriggerDropIndex *obj = palloc(sizeof(EventTriggerDropIndex));
*obj = (EventTriggerDropIndex)
{
.obj =
{
.type = EVENT_TRIGGER_DROP_INDEX
},
.index_name = index_name,
.schema = schema,
};
return obj;
}
/*
 * Get the list of objects dropped by the current command, for use in a
 * "sql_drop" event trigger.
 *
 * Materializes the result set of pg_event_trigger_dropped_objects() and
 * converts the rows describing table constraints and indexes into
 * EventTriggerDropTableConstraint / EventTriggerDropIndex descriptors.
 * Rows for all other object classes are ignored.
 */
List *
event_trigger_dropped_objects(void)
{
ReturnSetInfo rsinfo;
FunctionCallInfoData fcinfo;
TupleTableSlot *slot;
EState *estate = CreateExecutorState();
List *objects = NIL;
/* Invoke the set-returning function in materialize mode so that all rows
 * are collected into a tuplestore we can iterate below */
InitFunctionCallInfoData(fcinfo, &dropped_objects_fmgrinfo, 0, InvalidOid, NULL, NULL);
MemSet(&rsinfo, 0, sizeof(rsinfo));
rsinfo.type = T_ReturnSetInfo;
rsinfo.allowedModes = SFRM_Materialize;
rsinfo.econtext = CreateExprContext(estate);
fcinfo.resultinfo = (fmNodePtr) &rsinfo;
FunctionCallInvoke(&fcinfo);
slot = MakeSingleTupleTableSlot(rsinfo.setDesc);
while (tuplestore_gettupleslot(rsinfo.setResult, true, false, slot))
{
HeapTuple tuple = ExecFetchSlotTuple(slot);
Datum values[DROPPED_OBJECTS_NATTS];
bool nulls[DROPPED_OBJECTS_NATTS];
Oid class_id;
char *objtype;
heap_deform_tuple(tuple, rsinfo.setDesc, values, nulls);
/* Per the pg_event_trigger_dropped_objects() signature: column 0 is
 * classid, 6 is object_type, 10 is address_names */
class_id = DatumGetObjectId(values[0]);
switch (class_id)
{
case ConstraintRelationId:
objtype = TextDatumGetCString(values[6]);
if (objtype != NULL && strcmp(objtype, "table constraint") == 0)
{
/* address_names for a table constraint is
 * (schema, table, constraint name) */
List *addrnames = extract_addrnames(DatumGetArrayTypeP(values[10]));
objects = lappend(objects,
makeEventTriggerDropTableConstraint(lthird(addrnames),
linitial(addrnames),
lsecond(addrnames)));
}
break;
case RelationRelationId:
objtype = TextDatumGetCString(values[6]);
if (objtype != NULL && strcmp(objtype, "index") == 0)
{
/* address_names for an index is (schema, index name) */
List *addrnames = extract_addrnames(DatumGetArrayTypeP(values[10]));
objects = lappend(objects,
makeEventTriggerDropIndex(lsecond(addrnames),
linitial(addrnames)));
}
break;
default:
break;
}
}
FreeExprContext(rsinfo.econtext, false);
FreeExecutorState(estate);
ExecDropSingleTupleTableSlot(slot);
return objects;
}
void
_event_trigger_init(void)
{
fmgr_info(fmgr_internal_function("pg_event_trigger_ddl_commands"),
&ddl_commands_fmgrinfo);
fmgr_info(fmgr_internal_function("pg_event_trigger_dropped_objects"),
&dropped_objects_fmgrinfo);
}
void

View File

@ -4,6 +4,33 @@
#include <postgres.h>
#include <nodes/pg_list.h>
/* Kinds of dropped objects handled by the sql_drop event processing */
typedef enum EventTriggerDropType
{
EVENT_TRIGGER_DROP_TABLE_CONSTRAINT,
EVENT_TRIGGER_DROP_INDEX,
} EventTriggerDropType;
/* Common header embedded first in every dropped-object descriptor, so a
 * pointer to any descriptor can be treated as an EventTriggerDropObject */
typedef struct EventTriggerDropObject
{
EventTriggerDropType type;
} EventTriggerDropObject;
/* A dropped table constraint, identified by name plus the schema and table
 * it belonged to (names only, since the OIDs may already be gone) */
typedef struct EventTriggerDropTableConstraint
{
EventTriggerDropObject obj;
char *constraint_name;
char *schema;
char *table;
} EventTriggerDropTableConstraint;
/* A dropped index, identified by index name and schema */
typedef struct EventTriggerDropIndex
{
EventTriggerDropObject obj;
char *index_name;
char *schema;
} EventTriggerDropIndex;
extern List *event_trigger_dropped_objects(void);
extern List *event_trigger_ddl_commands(void);
extern void _event_trigger_init(void);
extern void _event_trigger_fini(void);

View File

@ -112,7 +112,6 @@ hypertable_id_to_relid(int32 hypertable_id)
ScannerCtx scanctx = {
.table = catalog->tables[HYPERTABLE].id,
.index = catalog->tables[HYPERTABLE].index_ids[HYPERTABLE_ID_INDEX],
.scantype = ScannerTypeIndex,
.nkeys = 1,
.scankey = scankey,
.tuple_found = hypertable_tuple_get_relid,
@ -143,8 +142,6 @@ chunk_cache_entry_free(void *cce)
MemoryContextDelete(((ChunkCacheEntry *) cce)->mcxt);
}
#define NOINDEX -1
static int
hypertable_scan_limit_internal(ScanKeyData *scankey,
int num_scankeys,
@ -158,8 +155,7 @@ hypertable_scan_limit_internal(ScanKeyData *scankey,
Catalog *catalog = catalog_get();
ScannerCtx scanctx = {
.table = catalog->tables[HYPERTABLE].id,
.index = (indexid == NOINDEX) ? 0 : catalog->tables[HYPERTABLE].index_ids[indexid],
.scantype = (indexid == NOINDEX) ? ScannerTypeHeap : ScannerTypeIndex,
.index = CATALOG_INDEX(catalog, HYPERTABLE, indexid),
.nkeys = num_scankeys,
.scankey = scankey,
.data = scandata,
@ -353,7 +349,7 @@ hypertable_reset_associated_schema_name(const char *associated_schema)
return hypertable_scan_limit_internal(scankey,
1,
NOINDEX,
INVALID_INDEXID,
reset_associated_tuple_found,
NULL,
0,
@ -518,6 +514,51 @@ hypertable_insert(Name schema_name,
heap_close(rel, RowExclusiveLock);
}
/*
 * Scanner callback: materialize the matched catalog tuple into a Hypertable
 * and stop the scan (returning false ends it after the first match).
 */
static bool
hypertable_tuple_found(TupleInfo *ti, void *data)
{
	Hypertable **result = data;

	*result = hypertable_from_tuple(ti->tuple);

	return false;
}
/*
 * Look up a hypertable by schema and table name.
 * Returns NULL when no matching hypertable exists.
 */
Hypertable *
hypertable_get_by_name(char *schema, char *name)
{
	Hypertable *result = NULL;

	hypertable_scan(schema, name, hypertable_tuple_found, &result,
					AccessShareLock, false);

	return result;
}
/*
 * Look up a hypertable by its catalog ID.
 * Returns NULL when the ID does not exist.
 */
Hypertable *
hypertable_get_by_id(int32 hypertable_id)
{
	Hypertable *result = NULL;
	ScanKeyData key[1];

	/* Equality match on the primary key (id) via the ID index */
	ScanKeyInit(&key[0], Anum_hypertable_pkey_idx_id,
				BTEqualStrategyNumber, F_INT4EQ,
				Int32GetDatum(hypertable_id));

	hypertable_scan_limit_internal(key,
								   1,
								   HYPERTABLE_ID_INDEX,
								   hypertable_tuple_found,
								   &result,
								   1,
								   AccessShareLock,
								   false);

	return result;
}
Chunk *
hypertable_get_chunk(Hypertable *h, Point *point)
{

View File

@ -23,6 +23,8 @@ typedef struct Hypertable
extern Oid rel_get_owner(Oid relid);
extern Hypertable *hypertable_get_by_id(int32 hypertable_id);
extern Hypertable *hypertable_get_by_name(char *schema, char *name);
extern bool hypertable_has_privs_of(Oid hypertable_oid, Oid userid);
extern Oid hypertable_permissions_check(Oid hypertable_oid, Oid userid);
extern Hypertable *hypertable_from_tuple(HeapTuple tuple);

View File

@ -9,6 +9,7 @@
#include <catalog/objectaddress.h>
#include <catalog/pg_trigger.h>
#include <catalog/pg_constraint_fn.h>
#include <catalog/pg_constraint.h>
#include <commands/copy.h>
#include <commands/vacuum.h>
#include <commands/defrem.h>
@ -564,53 +565,6 @@ process_drop_trigger(DropStmt *stmt)
cache_release(hcache);
}
/*
 * Handle DROP INDEX during statement processing: remove the TimescaleDB
 * metadata for any dropped hypertable or chunk index.
 */
static void
process_drop_index(DropStmt *stmt)
{
ListCell *lc;
foreach(lc, stmt->objects)
{
List *object = lfirst(lc);
RangeVar *relation = makeRangeVarFromNameList(object);
Oid idxrelid;
if (NULL == relation)
continue;
/* missing_ok = true: the index may not exist (e.g. DROP IF EXISTS) */
idxrelid = RangeVarGetRelid(relation, NoLock, true);
if (OidIsValid(idxrelid))
{
Oid tblrelid = IndexGetRelation(idxrelid, false);
Cache *hcache = hypertable_cache_pin();
Hypertable *ht = hypertable_cache_get_entry(hcache, tblrelid);
/*
 * Drop a hypertable index, i.e., all corresponding indexes on all
 * chunks
 */
if (NULL != ht)
chunk_index_delete_children_of(ht, idxrelid, true);
else
{
/* Drop an index on a chunk */
Chunk *chunk = chunk_get_by_relid(tblrelid, 0, false);
if (NULL != chunk)
/*
 * No need to DROP the index here since the DDL statement
 * already drops it (hence the 'false' parameter); just
 * delete the metadata
 */
chunk_index_delete(chunk, idxrelid, false);
}
cache_release(hcache);
}
}
}
static void
process_drop_tablespace(Node *parsetree)
{
@ -693,9 +647,6 @@ process_drop(Node *parsetree)
case OBJECT_TRIGGER:
process_drop_trigger(stmt);
break;
case OBJECT_INDEX:
process_drop_index(stmt);
break;
default:
break;
}
@ -945,44 +896,6 @@ process_altertable_add_constraint(Hypertable *ht, const char *constraint_name)
foreach_chunk(ht, process_add_constraint_chunk, &hypertable_constraint_oid);
}
/*
 * Per-chunk callback: delete the chunk constraint corresponding to the
 * given hypertable constraint name.
 */
static void
process_drop_constraint_chunk(Hypertable *ht, Oid chunk_relid, void *arg)
{
char *hypertable_constraint_name = arg;
/* NOTE(review): last arg presumably means fail-if-not-found — confirm
 * against chunk_get_by_relid's definition */
Chunk *chunk = chunk_get_by_relid(chunk_relid, ht->space->num_dimensions, true);
chunk_constraint_delete_by_hypertable_constraint_name(chunk->fd.id, hypertable_constraint_name);
}
/*
 * Handle ALTER TABLE ... DROP CONSTRAINT on a hypertable by removing the
 * corresponding constraint (and any index metadata) on every chunk.
 */
static void
process_altertable_drop_constraint(Hypertable *ht, AlterTableCmd *cmd)
{
char *constraint_name = NULL;
CatalogSecurityContext sec_ctx;
Oid hypertable_constraint_oid,
hypertable_constraint_index_oid;
constraint_name = cmd->name;
Assert(constraint_name != NULL);
hypertable_constraint_oid = get_relation_constraint_oid(ht->main_table_relid, constraint_name, false);
hypertable_constraint_index_oid = get_constraint_index(hypertable_constraint_oid);
/* Catalog changes require elevated privileges */
catalog_become_owner(catalog_get(), &sec_ctx);
/* Recurse to each chunk and drop the corresponding constraint */
foreach_chunk(ht, process_drop_constraint_chunk, constraint_name);
/*
 * If this is a constraint backed by an index, we need to delete
 * index-related metadata as well
 */
if (OidIsValid(hypertable_constraint_index_oid))
chunk_index_delete_children_of(ht, hypertable_constraint_index_oid, false);
catalog_restore_user(&sec_ctx);
}
static void
process_altertable_drop_not_null(Hypertable *ht, AlterTableCmd *cmd)
{
@ -1547,11 +1460,6 @@ process_altertable_start_table(Node *parsetree)
if (ht != NULL)
process_altertable_drop_not_null(ht, cmd);
break;
case AT_DropConstraint:
case AT_DropConstraintRecurse:
if (ht != NULL)
process_altertable_drop_constraint(ht, cmd);
break;
case AT_AddConstraint:
case AT_AddConstraintRecurse:
Assert(IsA(cmd->def, Constraint));
@ -1886,6 +1794,75 @@ process_ddl_command_end(CollectedCommand *cmd)
}
}
/*
 * Per-chunk callback used when a hypertable constraint is dropped: remove
 * the corresponding constraint on the chunk.
 */
static void
process_drop_constraint_on_chunk(Hypertable *ht, Oid chunk_relid, void *arg)
{
	char *constraint_name = arg;
	Chunk *chunk;

	chunk = chunk_get_by_relid(chunk_relid, ht->space->num_dimensions, true);

	/* drop both metadata and table; sql_drop won't be called recursively */
	chunk_constraint_delete_by_hypertable_constraint_name(chunk->fd.id,
														  constraint_name,
														  true, true);
}
/*
 * Handle a dropped table constraint reported by the sql_drop event.
 *
 * If the table is a hypertable, drop the corresponding constraint on every
 * chunk. Otherwise, if it is itself a chunk, delete only the constraint's
 * metadata (the actual constraint was already dropped by the command).
 */
static void
process_drop_table_constraint(EventTriggerDropObject *obj)
{
EventTriggerDropTableConstraint *constraint;
Hypertable *ht;
Assert(obj->type == EVENT_TRIGGER_DROP_TABLE_CONSTRAINT);
constraint = (EventTriggerDropTableConstraint *) obj;
/* do not use relids because underlying table could be gone */
ht = hypertable_get_by_name(constraint->schema, constraint->table);
if (ht != NULL)
{
CatalogSecurityContext sec_ctx;
/* Catalog changes require elevated privileges */
catalog_become_owner(catalog_get(), &sec_ctx);
/* Recurse to each chunk and drop the corresponding constraint */
foreach_chunk(ht, process_drop_constraint_on_chunk, constraint->constraint_name);
catalog_restore_user(&sec_ctx);
}
else
{
Chunk *chunk = chunk_get_by_name(constraint->schema, constraint->table, 0, false);
if (NULL != chunk)
{
/* delete_metadata = true, drop_constraint = false: the constraint
 * itself is already gone */
chunk_constraint_delete_by_constraint_name(chunk->fd.id, constraint->constraint_name, true, false);
}
}
}
/*
 * Handle a dropped index reported by the sql_drop event: clean up any
 * chunk index metadata that referenced it.
 */
static void
process_drop_index(EventTriggerDropObject *obj)
{
	EventTriggerDropIndex *dropped = (EventTriggerDropIndex *) obj;

	Assert(obj->type == EVENT_TRIGGER_DROP_INDEX);

	chunk_index_delete_by_name(dropped->schema, dropped->index_name, true);
}
/*
 * Dispatch a dropped-object descriptor to its type-specific handler.
 */
static void
process_ddl_sql_drop(EventTriggerDropObject *obj)
{
	if (obj->type == EVENT_TRIGGER_DROP_TABLE_CONSTRAINT)
		process_drop_table_constraint(obj);
	else if (obj->type == EVENT_TRIGGER_DROP_INDEX)
		process_drop_index(obj);
}
/*
* ProcessUtility hook for DDL commands that have not yet been processed by
* PostgreSQL.
@ -1943,26 +1920,12 @@ timescaledb_ddl_command_start(
prev_ProcessUtility(&args);
}
TS_FUNCTION_INFO_V1(timescaledb_ddl_command_end);
/*
* Event trigger hook for DDL commands that have already been handled by
* PostgreSQL (i.e., "ddl_command_end" events).
*/
Datum
timescaledb_ddl_command_end(PG_FUNCTION_ARGS)
static
void
process_ddl_event_command_end(EventTriggerData *trigdata)
{
EventTriggerData *trigdata = (EventTriggerData *) fcinfo->context;
ListCell *lc;
if (!CALLED_AS_EVENT_TRIGGER(fcinfo))
elog(ERROR, "not fired by event trigger manager");
if (!extension_is_loaded())
PG_RETURN_NULL();
Assert(strcmp("ddl_command_end", trigdata->event) == 0);
/* Inhibit collecting new commands while in the trigger */
EventTriggerInhibitCommandCollection();
@ -1980,6 +1943,32 @@ timescaledb_ddl_command_end(PG_FUNCTION_ARGS)
}
EventTriggerUndoInhibitCommandCollection();
}
TS_FUNCTION_INFO_V1(timescaledb_process_ddl_event);
/*
 * Entry point for our event triggers. Handles DDL commands that PostgreSQL
 * has already processed, for both "ddl_command_end" and "sql_drop" events,
 * dispatching on the event name carried in the trigger data.
 */
Datum
timescaledb_process_ddl_event(PG_FUNCTION_ARGS)
{
	EventTriggerData *trigdata = (EventTriggerData *) fcinfo->context;

	if (!CALLED_AS_EVENT_TRIGGER(fcinfo))
		elog(ERROR, "not fired by event trigger manager");

	/* Nothing to do unless the extension is fully loaded */
	if (!extension_is_loaded())
		PG_RETURN_NULL();

	if (strcmp("sql_drop", trigdata->event) == 0)
	{
		ListCell   *lc;

		/* Clean up metadata for every object dropped by this command */
		foreach(lc, event_trigger_dropped_objects())
			process_ddl_sql_drop(lfirst(lc));
	}
	else if (strcmp("ddl_command_end", trigdata->event) == 0)
		process_ddl_event_command_end(trigdata);

	PG_RETURN_NULL();
}

View File

@ -154,11 +154,17 @@ scanner_scan(ScannerCtx *ctx)
{
TupleDesc tuple_desc;
bool is_valid;
Scanner *scanner = &scanners[ctx->scantype];
Scanner *scanner;
InternalScannerCtx ictx = {
.sctx = ctx,
};
if (OidIsValid(ctx->index))
scanner = &scanners[ScannerTypeIndex];
else
scanner = &scanners[ScannerTypeHeap];
scanner->openheap(&ictx);
scanner->beginscan(&ictx);

View File

@ -11,7 +11,7 @@ typedef enum ScannerType
{
ScannerTypeHeap,
ScannerTypeIndex,
} ScannerType;
} ScannerType;
/* Tuple information passed on to handlers when scanning for tuples. */
typedef struct TupleInfo
@ -32,12 +32,12 @@ typedef struct TupleInfo
} TupleInfo;
typedef bool (*tuple_found_func) (TupleInfo *ti, void *data);
typedef bool (*tuple_filter_func) (TupleInfo *ti, void *data);
typedef struct ScannerCtx
{
Oid table;
Oid index;
ScannerType scantype;
ScanKey scankey;
int nkeys,
norderbys,

View File

@ -106,8 +106,6 @@ tablespace_tuple_found(TupleInfo *ti, void *data)
return true;
}
#define NO_INDEX -1
static int
tablespace_scan_internal(int indexid,
ScanKeyData *scankey,
@ -121,8 +119,7 @@ tablespace_scan_internal(int indexid,
Catalog *catalog = catalog_get();
ScannerCtx scanctx = {
.table = catalog->tables[TABLESPACE].id,
.index = (indexid == NO_INDEX) ? 0 : catalog->tables[TABLESPACE].index_ids[indexid],
.scantype = (indexid == NO_INDEX) ? ScannerTypeHeap : ScannerTypeIndex,
.index = CATALOG_INDEX(catalog, TABLESPACE, indexid),
.nkeys = nkeys,
.scankey = scankey,
.tuple_found = tuple_found,
@ -180,7 +177,7 @@ tablespace_scan_by_name(const char *tspcname, tuple_found_func tuple_found, void
BTEqualStrategyNumber, F_NAMEEQ,
DirectFunctionCall1(namein, CStringGetDatum(tspcname)));
return tablespace_scan_internal(NO_INDEX,
return tablespace_scan_internal(INVALID_INDEXID,
scankey,
nkeys,
tuple_found,
@ -433,7 +430,7 @@ tablespace_delete_from_all(const char *tspcname, Oid userid)
BTEqualStrategyNumber, F_NAMEEQ,
DirectFunctionCall1(namein, CStringGetDatum(tspcname)));
num_deleted = tablespace_scan_internal(NO_INDEX,
num_deleted = tablespace_scan_internal(INVALID_INDEXID,
scankey,
1,
tablespace_tuple_delete,

View File

@ -407,13 +407,12 @@ SELECT * FROM _timescaledb_catalog.chunk_index;
5 | _hyper_3_5_chunk_Hypertable_1_time_Device_id_idx | 3 | Hypertable_1_time_Device_id_idx
5 | _hyper_3_5_chunk_Hypertable_1_time_idx | 3 | Hypertable_1_time_idx
5 | _hyper_3_5_chunk_Hypertable_1_Device_id_time_idx | 3 | Hypertable_1_Device_id_time_idx
5 | _hyper_3_5_chunk_Hypertable_1_time_temp_c_idx | 3 | Hypertable_1_time_temp_c_idx
5 | _hyper_3_5_chunk_Unique1 | 3 | Unique1
6 | _hyper_4_6_chunk_Hypertable_1_time_Device_id_idx | 4 | Hypertable_1_time_Device_id_idx
6 | _hyper_4_6_chunk_Hypertable_1_time_idx | 4 | Hypertable_1_time_idx
6 | _hyper_4_6_chunk_Unique1 | 4 | Unique1
5 | _hyper_3_5_chunk_ind_humdity2 | 3 | ind_humdity2
(28 rows)
(27 rows)
--create column with same name as previously renamed one
ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN sensor_3 BIGINT NOT NULL DEFAULT 131;

View File

@ -63,6 +63,14 @@ SELECT * FROM _timescaledb_catalog.chunk_constraint;
5 | | 5_2_hyper_unique_with_looooooooooooooooooooooooooooooooooo_time | hyper_unique_with_looooooooooooooooooooooooooooooooooo_time_key
(5 rows)
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+-----------------------------------------------------------------+---------------+-----------------------------------------------------------------
3 | _hyper_1_3_chunk_hyper_time_idx | 1 | hyper_time_idx
4 | 4_1_hyper_unique_with_looooooooooooooooooooooooooooooooooo_time | 2 | hyper_unique_with_looooooooooooooooooooooooooooooooooo_time_key
5 | 5_2_hyper_unique_with_looooooooooooooooooooooooooooooooooo_time | 2 | hyper_unique_with_looooooooooooooooooooooooooooooooooo_time_key
(3 rows)
SELECT * FROM test.show_constraints('hyper');
Constraint | Type | Columns | Index | Expr
----------------------+------+------------+-------+----------------------------
@ -95,6 +103,13 @@ SELECT * FROM _timescaledb_catalog.chunk_constraint;
5 | 5 | constraint_5 |
(3 rows)
-- The index should also have been removed
SELECT * FROM _timescaledb_catalog.chunk_index;
chunk_id | index_name | hypertable_id | hypertable_index_name
----------+---------------------------------+---------------+-----------------------
3 | _hyper_1_3_chunk_hyper_time_idx | 1 | hyper_time_idx
(1 row)
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_2_4_chunk');
Constraint | Type | Columns | Index | Expr
-----------------------------------------------------------------+------+------------+-------+------------------------------------------------------------------------------------------
@ -371,6 +386,72 @@ INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000002, 'dev3', 11);
ERROR: insert or update on table "_hyper_4_8_chunk" violates foreign key constraint "8_22_hyper_fk_device_id_fkey"
\set ON_ERROR_STOP 1
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_4_8_chunk');
Constraint | Type | Columns | Index | Expr
------------------------------+------+-------------+--------------------------------------------+------------------------------------------------------------------------------------------
8_20_hyper_fk_pkey | p | {time} | _timescaledb_internal."8_20_hyper_fk_pkey" |
8_22_hyper_fk_device_id_fkey | f | {device_id} | devices_pkey |
constraint_8 | c | {time} | - | (("time" >= '1257987700000000000'::bigint) AND ("time" < '1257987700000000010'::bigint))
hyper_fk_sensor_1_check | c | {sensor_1} | - | (sensor_1 > (10)::numeric)
(4 rows)
SELECT * FROM _timescaledb_catalog.chunk_constraint;
chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name
----------+--------------------+------------------------------+----------------------------
3 | 3 | constraint_3 |
4 | 4 | constraint_4 |
5 | 5 | constraint_5 |
4 | | 4_10_new_name2 | new_name2
5 | | 5_11_new_name2 | new_name2
6 | 6 | constraint_6 |
6 | | 6_16_hyper_pk_pkey | hyper_pk_pkey
8 | 8 | constraint_8 |
8 | | 8_20_hyper_fk_pkey | hyper_fk_pkey
8 | | 8_22_hyper_fk_device_id_fkey | hyper_fk_device_id_fkey
(10 rows)
--test CASCADE drop behavior
DROP TABLE devices CASCADE;
NOTICE: drop cascades to 2 other objects
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_4_8_chunk');
Constraint | Type | Columns | Index | Expr
-------------------------+------+------------+--------------------------------------------+------------------------------------------------------------------------------------------
8_20_hyper_fk_pkey | p | {time} | _timescaledb_internal."8_20_hyper_fk_pkey" |
constraint_8 | c | {time} | - | (("time" >= '1257987700000000000'::bigint) AND ("time" < '1257987700000000010'::bigint))
hyper_fk_sensor_1_check | c | {sensor_1} | - | (sensor_1 > (10)::numeric)
(3 rows)
SELECT * FROM _timescaledb_catalog.chunk_constraint;
chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name
----------+--------------------+--------------------+----------------------------
3 | 3 | constraint_3 |
4 | 4 | constraint_4 |
5 | 5 | constraint_5 |
4 | | 4_10_new_name2 | new_name2
5 | | 5_11_new_name2 | new_name2
6 | 6 | constraint_6 |
6 | | 6_16_hyper_pk_pkey | hyper_pk_pkey
8 | 8 | constraint_8 |
8 | | 8_20_hyper_fk_pkey | hyper_fk_pkey
(9 rows)
--the fk went away.
INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000002, 'dev3', 11);
CREATE TABLE devices(
device_id TEXT NOT NULL,
PRIMARY KEY (device_id)
);
INSERT INTO devices VALUES ('dev2'), ('dev3');
ALTER TABLE hyper_fk ADD CONSTRAINT hyper_fk_device_id_fkey
FOREIGN KEY (device_id) REFERENCES devices(device_id);
\set ON_ERROR_STOP 0
INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000003, 'dev4', 11);
ERROR: insert or update on table "_hyper_4_8_chunk" violates foreign key constraint "8_23_hyper_fk_device_id_fkey"
\set ON_ERROR_STOP 1
--this tests that there are no extra chunk_constraints left on hyper_fk
TRUNCATE hyper_fk;
----------------------- FOREIGN KEY INTO A HYPERTABLE ------------------
--FOREIGN KEY references into a hypertable are currently broken.
--The referencing table will never find the corresponding row in the hypertable
@ -423,7 +504,7 @@ INSERT INTO hyper_ex(time, device_id,sensor_1) VALUES
\set ON_ERROR_STOP 0
INSERT INTO hyper_ex(time, device_id,sensor_1) VALUES
(1257987700000000000, 'dev2', 12);
ERROR: conflicting key value violates exclusion constraint "9_24_hyper_ex_time_device_id_excl"
ERROR: conflicting key value violates exclusion constraint "9_25_hyper_ex_time_device_id_excl"
\set ON_ERROR_STOP 1
ALTER TABLE hyper_ex DROP CONSTRAINT hyper_ex_time_device_id_excl;
--can now add
@ -436,7 +517,7 @@ ALTER TABLE hyper_ex ADD CONSTRAINT hyper_ex_time_device_id_excl
time WITH =, device_id WITH =
) WHERE (not canceled)
;
ERROR: could not create exclusion constraint "9_25_hyper_ex_time_device_id_excl"
ERROR: could not create exclusion constraint "9_26_hyper_ex_time_device_id_excl"
\set ON_ERROR_STOP 1
DELETE FROM hyper_ex WHERE sensor_1 = 12;
ALTER TABLE hyper_ex ADD CONSTRAINT hyper_ex_time_device_id_excl
@ -447,7 +528,7 @@ ALTER TABLE hyper_ex ADD CONSTRAINT hyper_ex_time_device_id_excl
\set ON_ERROR_STOP 0
INSERT INTO hyper_ex(time, device_id,sensor_1) VALUES
(1257987700000000000, 'dev2', 12);
ERROR: conflicting key value violates exclusion constraint "9_26_hyper_ex_time_device_id_excl"
ERROR: conflicting key value violates exclusion constraint "9_27_hyper_ex_time_device_id_excl"
\set ON_ERROR_STOP 1
--cannot add exclusion constraint without partition key.
CREATE TABLE hyper_ex_invalid (

View File

@ -336,13 +336,12 @@ SELECT * FROM _timescaledb_catalog.chunk_index;
1 | _hyper_1_1_chunk_Hypertable_1_time_Device_id_idx | 1 | Hypertable_1_time_Device_id_idx
1 | _hyper_1_1_chunk_Hypertable_1_time_idx | 1 | Hypertable_1_time_idx
1 | _hyper_1_1_chunk_Hypertable_1_Device_id_time_idx | 1 | Hypertable_1_Device_id_time_idx
1 | _hyper_1_1_chunk_Hypertable_1_time_temp_c_idx | 1 | Hypertable_1_time_temp_c_idx
1 | _hyper_1_1_chunk_Unique1 | 1 | Unique1
2 | _hyper_2_2_chunk_Hypertable_1_time_Device_id_idx | 2 | Hypertable_1_time_Device_id_idx
2 | _hyper_2_2_chunk_Hypertable_1_time_idx | 2 | Hypertable_1_time_idx
2 | _hyper_2_2_chunk_Unique1 | 2 | Unique1
1 | _hyper_1_1_chunk_ind_humdity2 | 1 | ind_humdity2
(9 rows)
(8 rows)
--create column with same name as previously renamed one
ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN sensor_3 BIGINT NOT NULL DEFAULT 131;

View File

@ -299,13 +299,12 @@ SELECT * FROM _timescaledb_catalog.chunk_index;
1 | _hyper_1_1_chunk_Hypertable_1_time_Device_id_idx | 1 | Hypertable_1_time_Device_id_idx
1 | _hyper_1_1_chunk_Hypertable_1_time_idx | 1 | Hypertable_1_time_idx
1 | _hyper_1_1_chunk_Hypertable_1_Device_id_time_idx | 1 | Hypertable_1_Device_id_time_idx
1 | _hyper_1_1_chunk_Hypertable_1_time_temp_c_idx | 1 | Hypertable_1_time_temp_c_idx
1 | _hyper_1_1_chunk_Unique1 | 1 | Unique1
2 | _hyper_2_2_chunk_Hypertable_1_time_Device_id_idx | 2 | Hypertable_1_time_Device_id_idx
2 | _hyper_2_2_chunk_Hypertable_1_time_idx | 2 | Hypertable_1_time_idx
2 | _hyper_2_2_chunk_Unique1 | 2 | Unique1
1 | _hyper_1_1_chunk_ind_humdity2 | 1 | ind_humdity2
(9 rows)
(8 rows)
--create column with same name as previously renamed one
ALTER TABLE PUBLIC."Hypertable_1" ADD COLUMN sensor_3 BIGINT NOT NULL DEFAULT 131;

View File

@ -49,7 +49,7 @@ SELECT count(*)
AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
count
-------
94
95
(1 row)
SELECT * FROM test.show_columns('public."two_Partitions"');
@ -235,7 +235,7 @@ SELECT count(*)
AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
count
-------
94
95
(1 row)
--main table and chunk schemas should be the same

View File

@ -58,6 +58,7 @@ INSERT INTO hyper_unique_with_looooooooooooooooooooooooooooooooooooong_name(time
-- Show constraints on main tables
SELECT * FROM _timescaledb_catalog.chunk_constraint;
SELECT * FROM _timescaledb_catalog.chunk_index;
SELECT * FROM test.show_constraints('hyper');
SELECT * FROM test.show_constraints('hyper_unique_with_looooooooooooooooooooooooooooooooooooong_name');
--should have unique constraint not just unique index
@ -66,6 +67,8 @@ SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_2_4_chunk');
ALTER TABLE hyper_unique_with_looooooooooooooooooooooooooooooooooooong_name DROP CONSTRAINT hyper_unique_with_looooooooooooooooooooooooooooooooooo_time_key;
-- The constraint should have been removed from the chunk as well
SELECT * FROM _timescaledb_catalog.chunk_constraint;
-- The index should also have been removed
SELECT * FROM _timescaledb_catalog.chunk_index;
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_2_4_chunk');
--uniqueness not enforced
@ -269,6 +272,37 @@ INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000002, 'dev3', 11);
\set ON_ERROR_STOP 1
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_4_8_chunk');
SELECT * FROM _timescaledb_catalog.chunk_constraint;
--test CASCADE drop behavior
DROP TABLE devices CASCADE;
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_4_8_chunk');
SELECT * FROM _timescaledb_catalog.chunk_constraint;
--the fk went away.
INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000002, 'dev3', 11);
CREATE TABLE devices(
device_id TEXT NOT NULL,
PRIMARY KEY (device_id)
);
INSERT INTO devices VALUES ('dev2'), ('dev3');
ALTER TABLE hyper_fk ADD CONSTRAINT hyper_fk_device_id_fkey
FOREIGN KEY (device_id) REFERENCES devices(device_id);
\set ON_ERROR_STOP 0
INSERT INTO hyper_fk(time, device_id,sensor_1) VALUES
(1257987700000000003, 'dev4', 11);
\set ON_ERROR_STOP 1
--this tests that there are no extra chunk_constraints left on hyper_fk
TRUNCATE hyper_fk;
----------------------- FOREIGN KEY INTO A HYPERTABLE ------------------