Drop FK constraints on chunk compression

Drop foreign key constraints from uncompressed chunks during
compression. This allows data deletions in FK-referenced tables to
cascade to compressed chunks. The foreign key constraints are restored
during decompression.
Ruslan Fomkin 2020-03-13 09:21:25 +01:00 committed by Erik Nordström
parent c3d1c51054
commit 16897d2238
11 changed files with 309 additions and 23 deletions
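A minimal sketch of the user-visible behavior described above, patterned on the regression test added in this commit; the devices/conditions names and values here are illustrative only, not part of the commit:

CREATE TABLE devices (device_id INT PRIMARY KEY);
CREATE TABLE conditions (
    time INT NOT NULL,
    device_id INT REFERENCES devices (device_id) ON DELETE CASCADE,
    val INT);
SELECT create_hypertable('conditions', 'time', chunk_time_interval => 10);
ALTER TABLE conditions SET (
    timescaledb.compress,
    timescaledb.compress_orderby = 'time',
    timescaledb.compress_segmentby = 'device_id');
INSERT INTO devices VALUES (1), (2);
INSERT INTO conditions VALUES (1, 1, 10), (2, 2, 20);
SELECT compress_chunk(c) FROM show_chunks('conditions') c;
-- The chunk's own FK constraint is dropped at compression time, so this delete
-- cascades into the compressed chunk instead of being blocked.
DELETE FROM devices WHERE device_id = 2;
-- Decompression recreates the FK constraint from the hypertable's definition.
SELECT decompress_chunk(c) FROM show_chunks('conditions') c;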


@@ -57,6 +57,12 @@
#include "scan_iterator.h"
#include "compression_chunk_size.h"
/* Strictly speaking there is no danger in always including it, but guarding the include makes
 * it easy to remove together with PG11_LT support. */
#if PG11_LT
#include "compat/fkeylist.h"
#endif
TS_FUNCTION_INFO_V1(ts_chunk_show_chunks);
TS_FUNCTION_INFO_V1(ts_chunk_drop_chunks);
TS_FUNCTION_INFO_V1(ts_chunks_in);
@@ -2045,6 +2051,58 @@ ts_chunk_recreate_all_constraints_for_dimension(Hyperspace *hs, int32 dimension_
chunk_scan_ctx_destroy(&chunkctx);
}
/*
 * Drops all FK constraints on a given chunk.
 * Currently it is used only for chunks that have been compressed and
 * therefore contain no data.
 */
void
ts_chunk_drop_fks(Chunk *const chunk)
{
    Relation rel;
    List *fks;
    ListCell *lc;

    Assert(!chunk->fd.dropped);

    rel = table_open(chunk->table_id, AccessShareLock);
    fks = copy_fk_list_from_cache(RelationGetFKeyListCompat(rel));
    table_close(rel, AccessShareLock);

    foreach (lc, fks)
    {
        const ForeignKeyCacheInfoCompat *const fk = lfirst_node(ForeignKeyCacheInfoCompat, lc);

        ts_chunk_constraint_delete_by_constraint_name(chunk->fd.id,
                                                      get_constraint_name(fk->conoid),
                                                      true,
                                                      true);
    }
}
/*
* Recreates all FK constraints on a chunk by using the constraints on the parent hypertable as a
* template. Currently it is used only during chunk decompression, since FK constraints are dropped
* during compression.
*/
void
ts_chunk_create_fks(Chunk *const chunk)
{
    Relation rel;
    List *fks;
    ListCell *lc;

    Assert(!chunk->fd.dropped);

    rel = table_open(chunk->hypertable_relid, AccessShareLock);
    fks = copy_fk_list_from_cache(RelationGetFKeyListCompat(rel));
    table_close(rel, AccessShareLock);

    foreach (lc, fks)
    {
        ForeignKeyCacheInfoCompat *fk = lfirst_node(ForeignKeyCacheInfoCompat, lc);
        ts_chunk_constraint_create_on_chunk(chunk, fk->conoid);
    }
}
static ScanTupleResult
chunk_tuple_update_schema_and_table(TupleInfo *ti, void *data)
{


@@ -124,6 +124,8 @@ extern bool ts_chunk_exists_relid(Oid relid);
extern TSDLLEXPORT bool ts_chunk_exists_with_compression(int32 hypertable_id);
extern void ts_chunk_recreate_all_constraints_for_dimension(Hyperspace *hs, int32 dimension_id);
extern TSDLLEXPORT void ts_chunk_drop_fks(Chunk *chunk);
extern TSDLLEXPORT void ts_chunk_create_fks(Chunk *chunk);
extern int ts_chunk_delete_by_hypertable_id(int32 hypertable_id);
extern int ts_chunk_delete_by_name(const char *schema, const char *table, DropBehavior behavior);
extern bool ts_chunk_set_name(Chunk *chunk, const char *newname);


@@ -959,4 +959,25 @@ list_qsort(const List *list, list_qsort_comparator cmp)
#endif
/*
 * ForeignKeyCacheInfo doesn't contain the constraint Oid in earlier PostgreSQL versions.
 * This is a workaround for PG 9.6 and PG 10 until support for them is dropped.
 */
#if PG11_LT
#define RelationGetFKeyListCompat(rel) ts_relation_get_fk_list(rel)
#define T_ForeignKeyCacheInfoCompat T_ForeignKeyCacheInfo

typedef struct ForeignKeyCacheInfoCompat
{
    ForeignKeyCacheInfo base;
    Oid conoid;
} ForeignKeyCacheInfoCompat;

/* No need to copy the FK list, since the custom implementation doesn't use the relcache. */
#define copy_fk_list_from_cache(l) l
#else
#define RelationGetFKeyListCompat(rel) RelationGetFKeyList(rel)
#define ForeignKeyCacheInfoCompat ForeignKeyCacheInfo
/* Copy the FK list, since the relcache entry can be invalidated. */
#define copy_fk_list_from_cache(l) copyObject(l)
#endif /* PG11_LT */
#endif /* TIMESCALEDB_COMPAT_H */


@@ -4,5 +4,13 @@ if (${PG_VERSION_MAJOR} LESS "12")
${CMAKE_CURRENT_SOURCE_DIR}/tupconvert.c
${CMAKE_CURRENT_SOURCE_DIR}/tuptable.c
)
target_sources(${PROJECT_NAME} PRIVATE ${SOURCES})
endif ()
if (${PG_VERSION_MAJOR} LESS "11")
set(SOURCES
${SOURCES}
${CMAKE_CURRENT_SOURCE_DIR}/fkeylist.c
)
endif ()
target_sources(${PROJECT_NAME} PRIVATE ${SOURCES})

src/compat/fkeylist.c (new file, 57 lines)

@@ -0,0 +1,57 @@
/*
* This file and its contents are licensed under the Apache License 2.0.
* Please see the included NOTICE for copyright information and
* LICENSE-APACHE for a copy of the license.
*/
/*
 * This file contains a replacement for the PostgreSQL function that returns the list of
 * foreign key constraints on a relation. It uses a struct that matches PG11 and PG12,
 * which contains a data member missing in PG 9.6 and PG 10.
 */
#include <postgres.h>
#include <access/genam.h>
#include <access/heapam.h>
#include <access/htup.h>
#include <access/htup_details.h>
#include <catalog/indexing.h>
#include <catalog/pg_constraint.h>
#include <utils/fmgroids.h>
#include "fkeylist.h"
List *
ts_relation_get_fk_list(Relation relation)
{
    List *result = NIL;
    Relation conrel;
    SysScanDesc conscan;
    ScanKeyData skey;
    HeapTuple htup;

    ScanKeyInit(&skey,
                Anum_pg_constraint_conrelid,
                BTEqualStrategyNumber,
                F_OIDEQ,
                ObjectIdGetDatum(RelationGetRelid(relation)));

    conrel = heap_open(ConstraintRelationId, AccessShareLock);
    conscan = systable_beginscan(conrel, ConstraintRelidTypidNameIndexId, true, NULL, 1, &skey);

    while (HeapTupleIsValid(htup = systable_getnext(conscan)))
    {
        Form_pg_constraint constraint = (Form_pg_constraint) GETSTRUCT(htup);
        ForeignKeyCacheInfoCompat *info;

        if (constraint->contype != CONSTRAINT_FOREIGN)
            continue;

        info = (ForeignKeyCacheInfoCompat *) newNode(sizeof(ForeignKeyCacheInfoCompat),
                                                     T_ForeignKeyCacheInfoCompat);
        info->conoid = HeapTupleGetOid(htup);
        result = lappend(result, info);
    }

    systable_endscan(conscan);
    heap_close(conrel, AccessShareLock);

    return result;
}

src/compat/fkeylist.h (new file, 16 lines)

@@ -0,0 +1,16 @@
/*
* This file and its contents are licensed under the Apache License 2.0.
* Please see the included NOTICE for copyright information and
* LICENSE-APACHE for a copy of the license.
*/
#ifndef TIMESCALEDB_FKEYLIST_H
#define TIMESCALEDB_FKEYLIST_H
#include "compat.h"
#if PG11_LT
extern List *ts_relation_get_fk_list(Relation relation);
#endif
#endif /* TIMESCALEDB_FKEYLIST_H */


@@ -233,6 +233,11 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
compress_ht_chunk->table_id,
colinfo_array,
htcols_listlen);
/* Drop all FK constraints on the uncompressed chunk. This is needed so that deleting
 * data in FK-referenced tables can cascade to the compressed chunk, while deleting data
 * directly on the hypertable or its chunks remains blocked.
 */
ts_chunk_drop_fks(cxt.srcht_chunk);
chunk_dml_blocker_trigger_add(cxt.srcht_chunk->table_id);
after_size = compute_chunk_size(compress_ht_chunk->table_id);
compression_chunk_size_catalog_insert(cxt.srcht_chunk->fd.id,
@@ -263,7 +268,9 @@ decompress_chunk_impl(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_
if (compressed_hypertable == NULL)
ereport(ERROR, (errcode(ERRCODE_INTERNAL_ERROR), errmsg("missing compressed hypertable")));
uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_relid, 0, true);
uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_relid,
uncompressed_hypertable->space->num_dimensions,
true);
if (uncompressed_chunk == NULL)
ereport(ERROR,
(errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE),
@@ -295,6 +302,8 @@ decompress_chunk_impl(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_
chunk_dml_trigger_drop(uncompressed_chunk->table_id);
decompress_chunk(compressed_chunk->table_id, uncompressed_chunk->table_id);
/* Recreate FK constraints, since they were dropped during compression. */
ts_chunk_create_fks(uncompressed_chunk);
ts_compression_chunk_size_delete(uncompressed_chunk->fd.id);
ts_chunk_set_compressed_chunk(uncompressed_chunk, INVALID_CHUNK_ID, true);
ts_chunk_drop(compressed_chunk, DROP_RESTRICT, -1);


@@ -963,3 +963,91 @@ FROM (SELECT x.id, x.t, x.val FROM unnest(array[(1, '2000-01-03 00:00:00+00', 2.
WHERE rescan_test.id = tmp.id AND rescan_test.t = tmp.t;
ERROR: cannot update/delete rows from chunk "_hyper_16_36_chunk" as it is compressed
\set ON_ERROR_STOP 1
-- Test FK constraint drop and recreate during compression and decompression on a chunk
CREATE TABLE meta (device_id INT PRIMARY KEY);
CREATE TABLE hyper(
time INT NOT NULL,
device_id INT REFERENCES meta(device_id) ON DELETE CASCADE ON UPDATE CASCADE,
val INT);
SELECT * FROM create_hypertable('hyper', 'time', chunk_time_interval => 10);
hypertable_id | schema_name | table_name | created
---------------+-------------+------------+---------
18 | public | hyper | t
(1 row)
ALTER TABLE hyper SET (
timescaledb.compress,
timescaledb.compress_orderby = 'time',
timescaledb.compress_segmentby = 'device_id');
NOTICE: adding index _compressed_hypertable_19_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_19 USING BTREE(device_id, _ts_meta_sequence_num)
INSERT INTO meta VALUES (1), (2), (3), (4), (5);
INSERT INTO hyper VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 5, 2);
SELECT ch1.table_name AS "CHUNK_NAME", ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_FULL_NAME"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id AND ht.table_name LIKE 'hyper'
ORDER BY ch1.id LIMIT 1 \gset
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
constraint_schema | constraint_name | table_schema | table_name | constraint_type
-----------------------+---------------------------+-----------------------+--------------------+-----------------
_timescaledb_internal | 42_6_hyper_device_id_fkey | _timescaledb_internal | _hyper_18_42_chunk | FOREIGN KEY
(1 row)
SELECT compress_chunk(:'CHUNK_FULL_NAME');
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_18_42_chunk
(1 row)
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
constraint_schema | constraint_name | table_schema | table_name | constraint_type
-------------------+-----------------+--------------+------------+-----------------
(0 rows)
-- Deleting data directly from the compressed chunk fails
\set ON_ERROR_STOP 0
DELETE FROM hyper WHERE device_id = 3;
ERROR: cannot update/delete rows from chunk "_hyper_18_42_chunk" as it is compressed
\set ON_ERROR_STOP 1
-- Deleting from the FK-referenced table cascades to the compressed chunk
SELECT * FROM hyper ORDER BY time, device_id;
time | device_id | val
------+-----------+-----
1 | 1 | 1
2 | 2 | 1
3 | 3 | 1
10 | 3 | 2
11 | 4 | 2
11 | 5 | 2
(6 rows)
DELETE FROM meta WHERE device_id = 3;
SELECT * FROM hyper ORDER BY time, device_id;
time | device_id | val
------+-----------+-----
1 | 1 | 1
2 | 2 | 1
11 | 4 | 2
11 | 5 | 2
(4 rows)
SELECT decompress_chunk(:'CHUNK_FULL_NAME');
decompress_chunk
------------------------------------------
_timescaledb_internal._hyper_18_42_chunk
(1 row)
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
constraint_schema | constraint_name | table_schema | table_name | constraint_type
-----------------------+---------------------------+-----------------------+--------------------+-----------------
_timescaledb_internal | 42_9_hyper_device_id_fkey | _timescaledb_internal | _hyper_18_42_chunk | FOREIGN KEY
(1 row)


@@ -329,23 +329,6 @@ FROM timescaledb_information.compressed_hypertable_stats;
table_constr | 2 | 1
(1 row)
--delete from foreign table, should delete from hypertable too
select device_id, d from table_constr order by device_id, d;
device_id | d
-----------+----
1000 | 1
1000 | 10
(2 rows)
delete from fortable where col = 1 or col = 10;
ERROR: cannot update/delete rows from chunk "_hyper_15_7_chunk" as it is compressed
select device_id, d from table_constr order by device_id, d;
device_id | d
-----------+----
1000 | 1
1000 | 10
(2 rows)
--github issue 1661
--disable compression after enabling it on a table that has fk constraints
CREATE TABLE table_constr2( device_id integer,


@@ -370,3 +370,51 @@ WHERE rescan_test.id = tmp.id AND rescan_test.t = tmp.t;
\set ON_ERROR_STOP 1
-- Test FK constraint drop and recreate during compression and decompression on a chunk
CREATE TABLE meta (device_id INT PRIMARY KEY);
CREATE TABLE hyper(
time INT NOT NULL,
device_id INT REFERENCES meta(device_id) ON DELETE CASCADE ON UPDATE CASCADE,
val INT);
SELECT * FROM create_hypertable('hyper', 'time', chunk_time_interval => 10);
ALTER TABLE hyper SET (
timescaledb.compress,
timescaledb.compress_orderby = 'time',
timescaledb.compress_segmentby = 'device_id');
INSERT INTO meta VALUES (1), (2), (3), (4), (5);
INSERT INTO hyper VALUES (1, 1, 1), (2, 2, 1), (3, 3, 1), (10, 3, 2), (11, 4, 2), (11, 5, 2);
SELECT ch1.table_name AS "CHUNK_NAME", ch1.schema_name|| '.' || ch1.table_name AS "CHUNK_FULL_NAME"
FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht
WHERE ch1.hypertable_id = ht.id AND ht.table_name LIKE 'hyper'
ORDER BY ch1.id LIMIT 1 \gset
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
SELECT compress_chunk(:'CHUNK_FULL_NAME');
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;
-- Deleting data directly from the compressed chunk fails
\set ON_ERROR_STOP 0
DELETE FROM hyper WHERE device_id = 3;
\set ON_ERROR_STOP 1
-- Deleting from the FK-referenced table cascades to the compressed chunk
SELECT * FROM hyper ORDER BY time, device_id;
DELETE FROM meta WHERE device_id = 3;
SELECT * FROM hyper ORDER BY time, device_id;
SELECT decompress_chunk(:'CHUNK_FULL_NAME');
SELECT constraint_schema, constraint_name, table_schema, table_name, constraint_type
FROM information_schema.table_constraints
WHERE table_name = :'CHUNK_NAME' AND constraint_type = 'FOREIGN KEY'
ORDER BY constraint_name;


@@ -200,10 +200,6 @@ select compress_chunk(:'CHUNK_NAME');
SELECT hypertable_name , total_chunks , number_compressed_chunks
FROM timescaledb_information.compressed_hypertable_stats;
--delete from foreign table, should delete from hypertable too
select device_id, d from table_constr order by device_id, d;
delete from fortable where col = 1 or col = 10;
select device_id, d from table_constr order by device_id, d;
--github issue 1661
--disable compression after enabling it on a table that has fk constraints