Remove update files for PG12

Removed the update files that were used only for PG12.
Lakshmi Narayanan Sreethar 2023-07-07 14:53:01 +05:30 committed by Lakshmi Narayanan Sreethar
parent c3a9f90fdd
commit ac33d04aa8
10 changed files with 0 additions and 1590 deletions

@@ -9,19 +9,6 @@ include(ScriptFiles)
# There still needs to be an entry here to build an update script for that
# version. Thus, for every new release, an entry should be added here.
set(MOD_FILES
updates/1.7.0--1.7.1.sql
updates/1.7.1--1.7.2.sql
updates/1.7.2--1.7.3.sql
updates/1.7.3--1.7.4.sql
updates/1.7.4--1.7.5.sql
updates/1.7.5--2.0.0-rc1.sql
updates/2.0.0-rc1--2.0.0-rc2.sql
updates/2.0.0-rc2--2.0.0-rc3.sql
updates/2.0.0-rc3--2.0.0-rc4.sql
updates/2.0.0-rc4--2.0.0.sql
updates/2.0.0--2.0.1.sql
updates/2.0.1--2.0.2.sql
updates/2.0.2--2.1.0.sql
updates/2.1.0--2.1.1.sql
updates/2.1.1--2.2.0.sql
updates/2.2.0--2.2.1.sql

@@ -1,83 +0,0 @@
-- Recreate missing dimension slices that might be missing due to a bug
-- that is fixed in this release. If the dimension slice table is broken and there are dimension
-- slices missing from the table, we will repair it by:
-- 1. Finding all chunk constraints that have missing dimension
-- slices and extract the constraint expression from the associated
-- constraint.
-- 2. Parse the constraint expression and extract the column name,
-- and upper and lower range values as text.
-- 3. Use the column type to construct the range values (UNIX
-- microseconds) from these values.
CREATE OR REPLACE FUNCTION _timescaledb_internal.time_to_internal(time_val ANYELEMENT)
RETURNS BIGINT AS '@MODULE_PATHNAME@', 'ts_time_to_internal' LANGUAGE C VOLATILE STRICT;
INSERT INTO _timescaledb_catalog.dimension_slice
WITH
-- All dimension slices that are mentioned in the chunk_constraint
-- table but are missing from the dimension_slice table. There can
-- be duplicates since several chunk constraints can refer to one
-- dimension slice.
missing_slices AS (
SELECT DISTINCT ch.hypertable_id,
di.id as dimension_id,
dimension_slice_id,
constraint_name,
di.column_type,
attname AS column_name,
pg_get_expr(conbin, conrelid) AS constraint_expr
FROM _timescaledb_catalog.chunk_constraint cc
JOIN _timescaledb_catalog.chunk ch ON cc.chunk_id = ch.id
JOIN pg_constraint ON conname = constraint_name
JOIN pg_namespace ns ON connamespace = ns.oid AND ns.nspname = ch.schema_name
JOIN pg_attribute ON attnum = conkey[1] AND attrelid = conrelid
JOIN _timescaledb_catalog.dimension di
ON ch.hypertable_id = di.hypertable_id AND di.column_name = attname
WHERE
dimension_slice_id NOT IN (SELECT id FROM _timescaledb_catalog.dimension_slice)
),
-- Unparsed range start and end for each dimension slice id that
-- is missing.
unparsed_missing_slices AS (
SELECT dimension_id,
dimension_slice_id,
constraint_name,
column_type,
column_name,
(SELECT SUBSTRING(constraint_expr, $$>=\s*'?([\d\s:+-]+)'?$$)) AS range_start,
(SELECT SUBSTRING(constraint_expr, $$<\s*'?([\d\s:+-]+)'?$$)) AS range_end
FROM missing_slices
)
SELECT dimension_slice_id,
dimension_id,
CASE
WHEN column_type IN ('smallint'::regtype, 'bigint'::regtype, 'integer'::regtype) THEN
CASE
WHEN range_start IS NULL
THEN -9223372036854775808
ELSE _timescaledb_internal.time_to_internal(range_start::bigint)
END
WHEN column_type = 'timestamptz'::regtype THEN
_timescaledb_internal.time_to_internal(range_start::timestamptz)
WHEN column_type = 'timestamp'::regtype THEN
_timescaledb_internal.time_to_internal(range_start::timestamp)
WHEN column_type = 'date'::regtype THEN
_timescaledb_internal.time_to_internal(range_start::date)
ELSE
NULL
END AS range_start,
CASE
WHEN column_type IN ('smallint'::regtype, 'bigint'::regtype, 'integer'::regtype) THEN
CASE WHEN range_end IS NULL
THEN 9223372036854775807
ELSE _timescaledb_internal.time_to_internal(range_end::bigint)
END
WHEN column_type = 'timestamptz'::regtype THEN
_timescaledb_internal.time_to_internal(range_end::timestamptz)
WHEN column_type = 'timestamp'::regtype THEN
_timescaledb_internal.time_to_internal(range_end::timestamp)
WHEN column_type = 'date'::regtype THEN
_timescaledb_internal.time_to_internal(range_end::date)
ELSE NULL
END AS range_end
FROM unparsed_missing_slices;
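-- Illustration (not part of the removed script): how the SUBSTRING patterns in
-- step 2 above recover the range bounds from a pg_get_expr() constraint
-- expression. The sample expression and the "time" column name are made up.
SELECT SUBSTRING(expr, $$>=\s*'?([\d\s:+-]+)'?$$) AS range_start,
       SUBSTRING(expr, $$<\s*'?([\d\s:+-]+)'?$$) AS range_end
FROM (VALUES ($$(("time" >= '2020-01-01 00:00:00+00'::timestamp with time zone) AND ("time" < '2020-01-08 00:00:00+00'::timestamp with time zone))$$)) AS t (expr);
-- range_start => 2020-01-01 00:00:00+00, range_end => 2020-01-08 00:00:00+00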

@@ -1,81 +0,0 @@
-- Recreate missing dimension slices that might be missing. If the
-- dimension slice table is broken and there are dimension slices
-- missing from the table, we will repair it by:
--
-- 1. Finding all chunk constraints that have missing dimension
-- slices and extract the constraint expression from the
-- associated constraint.
--
-- 2. Parse the constraint expression and extract the column name,
-- and upper and lower range values as text or, if it is a
-- partition constraint, pick the existing constraint (either
-- upper or lower end of range) and make the other end open.
--
-- 3. Use the column type to construct the range values (UNIX
-- microseconds) from these strings.
INSERT INTO _timescaledb_catalog.dimension_slice
WITH
-- All dimension slices that are mentioned in the chunk_constraint
-- table but are missing from the dimension_slice table.
missing_slices AS (
SELECT hypertable_id,
chunk_id,
dimension_slice_id,
constraint_name,
attname AS column_name,
pg_get_expr(conbin, conrelid) AS constraint_expr
FROM _timescaledb_catalog.chunk_constraint cc
JOIN _timescaledb_catalog.chunk ch ON cc.chunk_id = ch.id
JOIN pg_constraint ON conname = constraint_name
JOIN pg_namespace ns ON connamespace = ns.oid AND ns.nspname = ch.schema_name
JOIN pg_attribute ON attnum = conkey[1] AND attrelid = conrelid
WHERE
dimension_slice_id NOT IN (SELECT id FROM _timescaledb_catalog.dimension_slice)
),
-- Unparsed range start and end for each dimension slice id that
-- is missing.
unparsed_missing_slices AS (
SELECT di.id AS dimension_id,
dimension_slice_id,
constraint_name,
column_type,
column_name,
(SELECT SUBSTRING(constraint_expr, $$>=\s*'?([\w\d\s:+-]+)'?$$)) AS range_start,
(SELECT SUBSTRING(constraint_expr, $$<\s*'?([\w\d\s:+-]+)'?$$)) AS range_end
FROM missing_slices JOIN _timescaledb_catalog.dimension di USING (hypertable_id, column_name)
)
SELECT DISTINCT
dimension_slice_id,
dimension_id,
CASE
WHEN column_type = 'timestamptz'::regtype THEN
EXTRACT(EPOCH FROM range_start::timestamptz)::bigint * 1000000
WHEN column_type = 'timestamp'::regtype THEN
EXTRACT(EPOCH FROM range_start::timestamp)::bigint * 1000000
WHEN column_type = 'date'::regtype THEN
EXTRACT(EPOCH FROM range_start::date)::bigint * 1000000
ELSE
CASE
WHEN range_start IS NULL
THEN (-9223372036854775808)::bigint
ELSE range_start::bigint
END
END AS range_start,
CASE
WHEN column_type = 'timestamptz'::regtype THEN
EXTRACT(EPOCH FROM range_end::timestamptz)::bigint * 1000000
WHEN column_type = 'timestamp'::regtype THEN
EXTRACT(EPOCH FROM range_end::timestamp)::bigint * 1000000
WHEN column_type = 'date'::regtype THEN
EXTRACT(EPOCH FROM range_end::date)::bigint * 1000000
ELSE
CASE WHEN range_end IS NULL
THEN 9223372036854775807::bigint
ELSE range_end::bigint
END
END AS range_end
FROM unparsed_missing_slices;
-- set compressed_chunk_id to NULL for dropped chunks
UPDATE _timescaledb_catalog.chunk SET compressed_chunk_id = NULL WHERE dropped = true AND compressed_chunk_id IS NOT NULL;
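-- Illustration (not part of the removed script): how a parsed range bound is
-- converted to TimescaleDB's internal representation, i.e. microseconds since
-- the UNIX epoch. The timestamp below is made up.
SELECT EXTRACT(EPOCH FROM '2020-01-01 00:00:00+00'::timestamptz)::bigint * 1000000 AS range_start_internal;
-- => 1577836800000000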

File diff suppressed because it is too large.

@@ -1,38 +0,0 @@
-- For continuous aggregates: Copy ACL privileges (grants) from the
-- query view (user-facing object) to the internal objects (e.g.,
-- materialized hypertable, direct, and partial views). We want to
-- maintain the abstraction that a continuous aggregate is similar to
-- a materialized view (which is one object), so privileges on the
-- user-facing object should apply also to the internal objects that
-- implement the continuous aggregate. Having the right permissions on
-- internal objects is necessary for the watermark function used by
-- real-time aggregation since it queries the materialized hypertable
-- directly.
WITH rels_and_acl AS (
-- For each cagg, collect an array of all relations (including
-- chunks) to copy the ACL to
SELECT array_cat(ARRAY[format('%I.%I', h.schema_name, h.table_name)::regclass,
format('%I.%I', direct_view_schema, direct_view_name)::regclass,
format('%I.%I', partial_view_schema, partial_view_name)::regclass],
(SELECT array_agg(inhrelid::regclass)
FROM pg_inherits
WHERE inhparent = format('%I.%I', h.schema_name, h.table_name)::regclass)) AS relarr,
relacl AS user_view_acl
FROM _timescaledb_catalog.continuous_agg ca
LEFT JOIN pg_class cl
ON (cl.oid = format('%I.%I', user_view_schema, user_view_name)::regclass)
LEFT JOIN _timescaledb_catalog.hypertable h
ON (ca.mat_hypertable_id = h.id)
WHERE relacl IS NOT NULL
)
-- Set the ACL on all internal cagg relations, including
-- chunks. Note that we cannot use GRANT statements because
-- such statements are recorded as privileges on extension
-- objects when run in an update script. The result is that
-- the privileges will become init privileges, which will then
-- be ignored by, e.g., pg_dump.
UPDATE pg_class
SET relacl = user_view_acl
FROM rels_and_acl
WHERE oid = ANY (relarr);
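-- Illustration (not part of the removed script): an inspection query in the
-- same spirit as the UPDATE above, showing that the materialized hypertable of
-- each continuous aggregate now carries an ACL copied from its user-facing view.
SELECT format('%I.%I', user_view_schema, user_view_name) AS user_view,
       cl.relname AS mat_hypertable,
       cl.relacl
FROM _timescaledb_catalog.continuous_agg ca
JOIN _timescaledb_catalog.hypertable h ON h.id = ca.mat_hypertable_id
JOIN pg_class cl ON cl.oid = format('%I.%I', h.schema_name, h.table_name)::regclass;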

@@ -1,181 +0,0 @@
DROP FUNCTION IF EXISTS @extschema@.detach_data_node(name,regclass,boolean,boolean);
DROP FUNCTION IF EXISTS @extschema@.distributed_exec;
DROP PROCEDURE IF EXISTS @extschema@.refresh_continuous_aggregate;
DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates;
-- Rebuild hypertable invalidation log
CREATE TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log_tmp AS
SELECT hypertable_id, lowest_modified_value, greatest_modified_value
FROM _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log;
ALTER EXTENSION timescaledb
DROP TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log;
DROP TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log;
CREATE TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (
hypertable_id integer NOT NULL,
lowest_modified_value bigint NOT NULL,
greatest_modified_value bigint NOT NULL
);
INSERT INTO _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log
SELECT * FROM _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log_tmp;
DROP TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log_tmp;
SELECT pg_catalog.pg_extension_config_dump(
'_timescaledb_catalog.continuous_aggs_hypertable_invalidation_log', '');
CREATE INDEX continuous_aggs_hypertable_invalidation_log_idx ON
_timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (
hypertable_id, lowest_modified_value ASC);
GRANT SELECT ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log TO PUBLIC;
-- Rebuild materialization invalidation log
CREATE TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log_tmp AS
SELECT materialization_id, lowest_modified_value, greatest_modified_value
FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log;
ALTER EXTENSION timescaledb
DROP TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log;
DROP TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log;
CREATE TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (
materialization_id integer
REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE,
lowest_modified_value bigint NOT NULL,
greatest_modified_value bigint NOT NULL
);
INSERT INTO _timescaledb_catalog.continuous_aggs_materialization_invalidation_log
SELECT * FROM _timescaledb_catalog.continuous_aggs_materialization_invalidation_log_tmp;
DROP TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log_tmp;
SELECT pg_catalog.pg_extension_config_dump(
'_timescaledb_catalog.continuous_aggs_materialization_invalidation_log',
'');
CREATE INDEX continuous_aggs_materialization_invalidation_log_idx ON
_timescaledb_catalog.continuous_aggs_materialization_invalidation_log (
materialization_id, lowest_modified_value ASC);
GRANT SELECT ON _timescaledb_catalog.continuous_aggs_materialization_invalidation_log TO PUBLIC;
-- Suspend any running retention policies that conflict with continuous aggs
-- Note that this approach will work for both timestamp and integer time columns
DO $$
DECLARE
jobid INTEGER;
BEGIN
FOR jobid IN
SELECT c.id
FROM _timescaledb_config.bgw_job a
LEFT JOIN _timescaledb_catalog.continuous_agg b ON a.hypertable_id = b.mat_hypertable_id
INNER JOIN _timescaledb_config.bgw_job c ON c.hypertable_id = b.raw_hypertable_id
WHERE a.proc_name = 'policy_refresh_continuous_aggregate' AND c.proc_name = 'policy_retention' AND c.scheduled
AND ((a.config->'start_offset') IS NULL OR (a.config->'start_offset')::text::interval > (c.config->'drop_after')::text::interval)
LOOP
RAISE NOTICE 'suspending data retention policy with job id %.', jobid
USING DETAIL = 'The retention policy (formerly drop_chunks policy) will drop chunks while a continuous aggregate is still running on them. This will likely result in overwriting the aggregate with empty data.',
HINT = format('To restore the retention policy, with the possibility of updating aggregates with dropped data, run: SELECT alter_job(%s, scheduled=>true); Otherwise, please create a new retention policy with a larger drop_after parameter and remove the old policy with: SELECT delete_job(%s);', jobid, jobid);
UPDATE _timescaledb_config.bgw_job SET scheduled = false WHERE id = jobid;
END LOOP;
END $$;
-- Recreate missing dimension slices that might be missing. If the
-- dimension slice table is broken and there are dimension slices
-- missing from the table, we will repair it by:
--
-- 1. Finding all chunk constraints that have missing dimension
-- slices and extract the constraint expression from the
-- associated constraint.
--
-- 2. Parse the constraint expression and extract the column name,
-- and upper and lower range values as text or, if it is a
-- partition constraint, pick the existing constraint (either
-- upper or lower end of range) and make the other end open.
--
-- 3. Use the column type to construct the range values (UNIX
-- microseconds) from these strings.
INSERT INTO _timescaledb_catalog.dimension_slice
WITH
-- All dimension slices that are mentioned in the chunk_constraint
-- table but are missing from the dimension_slice table.
missing_slices AS (
SELECT hypertable_id,
chunk_id,
dimension_slice_id,
constraint_name,
attname AS column_name,
pg_get_expr(conbin, conrelid) AS constraint_expr
FROM _timescaledb_catalog.chunk_constraint cc
JOIN _timescaledb_catalog.chunk ch ON cc.chunk_id = ch.id
JOIN pg_constraint ON conname = constraint_name
JOIN pg_namespace ns ON connamespace = ns.oid AND ns.nspname = ch.schema_name
JOIN pg_attribute ON attnum = conkey[1] AND attrelid = conrelid
WHERE
dimension_slice_id NOT IN (SELECT id FROM _timescaledb_catalog.dimension_slice)
),
-- Unparsed range start and end for each dimension slice id that
-- is missing.
unparsed_missing_slices AS (
SELECT di.id AS dimension_id,
dimension_slice_id,
constraint_name,
column_type,
column_name,
(SELECT SUBSTRING(constraint_expr, $$>=\s*'?([\w\d\s:+-]+)'?$$)) AS range_start,
(SELECT SUBSTRING(constraint_expr, $$<\s*'?([\w\d\s:+-]+)'?$$)) AS range_end
FROM missing_slices JOIN _timescaledb_catalog.dimension di USING (hypertable_id, column_name)
)
SELECT DISTINCT
dimension_slice_id,
dimension_id,
CASE
WHEN column_type = 'timestamptz'::regtype THEN
EXTRACT(EPOCH FROM range_start::timestamptz)::bigint * 1000000
WHEN column_type = 'timestamp'::regtype THEN
EXTRACT(EPOCH FROM range_start::timestamp)::bigint * 1000000
WHEN column_type = 'date'::regtype THEN
EXTRACT(EPOCH FROM range_start::date)::bigint * 1000000
ELSE
CASE
WHEN range_start IS NULL
THEN (-9223372036854775808)::bigint
ELSE range_start::bigint
END
END AS range_start,
CASE
WHEN column_type = 'timestamptz'::regtype THEN
EXTRACT(EPOCH FROM range_end::timestamptz)::bigint * 1000000
WHEN column_type = 'timestamp'::regtype THEN
EXTRACT(EPOCH FROM range_end::timestamp)::bigint * 1000000
WHEN column_type = 'date'::regtype THEN
EXTRACT(EPOCH FROM range_end::date)::bigint * 1000000
ELSE
CASE WHEN range_end IS NULL
THEN 9223372036854775807::bigint
ELSE range_end::bigint
END
END AS range_end
FROM unparsed_missing_slices;
CREATE FUNCTION @extschema@.detach_data_node(
node_name NAME,
hypertable REGCLASS = NULL,
if_attached BOOLEAN = FALSE,
force BOOLEAN = FALSE,
repartition BOOLEAN = TRUE
) RETURNS INTEGER
AS '@MODULE_PATHNAME@', 'ts_data_node_detach' LANGUAGE C VOLATILE;
CREATE OR REPLACE PROCEDURE @extschema@.distributed_exec(
query TEXT,
node_list name[] = NULL,
transactional BOOLEAN = TRUE)
AS '@MODULE_PATHNAME@', 'ts_distributed_exec' LANGUAGE C;
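-- Illustration (not part of the removed script): re-enabling a retention
-- policy that the DO block above suspended, as suggested in the NOTICE hint.
-- The job id 1000 is made up.
SELECT alter_job(1000, scheduled => true);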

@@ -1,13 +0,0 @@
DROP FUNCTION IF EXISTS @extschema@.add_data_node;
CREATE FUNCTION @extschema@.add_data_node(
node_name NAME,
host TEXT,
database NAME = NULL,
port INTEGER = NULL,
if_not_exists BOOLEAN = FALSE,
bootstrap BOOLEAN = TRUE,
password TEXT = NULL
) RETURNS TABLE(node_name NAME, host TEXT, port INTEGER, database NAME,
node_created BOOL, database_created BOOL, extension_created BOOL)
AS '@MODULE_PATHNAME@', 'ts_data_node_add' LANGUAGE C VOLATILE;
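-- Illustration (not part of the removed script): calling the recreated
-- function with its new signature. The node name, host and flag values are
-- made up.
SELECT * FROM add_data_node('dn1', host => 'dn1.example.com', if_not_exists => true, bootstrap => true);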

@@ -1,129 +0,0 @@
DROP VIEW IF EXISTS timescaledb_information.continuous_aggregates;
DROP VIEW IF EXISTS timescaledb_information.job_stats;
DROP FUNCTION IF EXISTS _timescaledb_internal.get_git_commit;
-- Begin Modify hypertable table
-- we make a copy of the data, remove dependencies, drop the table
DROP VIEW IF EXISTS timescaledb_information.hypertables;
DROP VIEW IF EXISTS timescaledb_information.chunks;
DROP VIEW IF EXISTS timescaledb_information.dimensions;
DROP VIEW IF EXISTS timescaledb_information.jobs;
DROP VIEW IF EXISTS timescaledb_information.compression_settings;
DROP VIEW IF EXISTS _timescaledb_internal.compressed_chunk_stats;
DROP VIEW IF EXISTS _timescaledb_internal.hypertable_chunk_local_size;
DROP FUNCTION IF EXISTS _timescaledb_internal.hypertable_from_main_table;
CREATE TABLE _timescaledb_internal.hypertable_tmp
AS SELECT * from _timescaledb_catalog.hypertable;
--drop foreign keys on hypertable
ALTER TABLE _timescaledb_catalog.hypertable DROP CONSTRAINT hypertable_compressed_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.hypertable_data_node DROP CONSTRAINT hypertable_data_node_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.tablespace DROP CONSTRAINT tablespace_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.dimension DROP CONSTRAINT dimension_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.chunk DROP CONSTRAINT chunk_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.chunk_index DROP CONSTRAINT chunk_index_hypertable_id_fkey;
ALTER TABLE _timescaledb_config.bgw_job DROP CONSTRAINT bgw_job_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.continuous_agg DROP CONSTRAINT continuous_agg_mat_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.continuous_agg DROP CONSTRAINT continuous_agg_raw_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey;
ALTER TABLE _timescaledb_catalog.hypertable_compression DROP CONSTRAINT hypertable_compression_hypertable_id_fkey;
CREATE TABLE _timescaledb_internal.tmp_hypertable_seq_value AS
SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq;
ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable;
ALTER EXTENSION timescaledb DROP SEQUENCE _timescaledb_catalog.hypertable_id_seq;
DROP TABLE _timescaledb_catalog.hypertable;
CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1;
-- now create table without self referential foreign key
CREATE TABLE _timescaledb_catalog.hypertable(
id INTEGER PRIMARY KEY DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'),
schema_name name NOT NULL CHECK (schema_name != '_timescaledb_catalog'),
table_name name NOT NULL,
associated_schema_name name NOT NULL,
associated_table_prefix name NOT NULL,
num_dimensions smallint NOT NULL,
chunk_sizing_func_schema name NOT NULL,
chunk_sizing_func_name name NOT NULL,
chunk_target_size bigint NOT NULL CHECK (chunk_target_size >= 0), -- size in bytes
compression_state smallint NOT NULL DEFAULT 0,
compressed_hypertable_id integer,
replication_factor smallint NULL,
UNIQUE (associated_schema_name, associated_table_prefix),
CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name),
-- internal compressed hypertables have compression state = 2
CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2),
CONSTRAINT hypertable_compress_check CHECK ( (compression_state = 0 OR compression_state = 1 ) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)),
-- replication_factor NULL: regular hypertable
-- replication_factor > 0: distributed hypertable on access node
-- replication_factor -1: distributed hypertable on data node, which is part of a larger table
CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1)
);
ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', '');
SELECT setval('_timescaledb_catalog.hypertable_id_seq', last_value, is_called) FROM _timescaledb_internal.tmp_hypertable_seq_value;
INSERT INTO _timescaledb_catalog.hypertable
( id, schema_name, table_name, associated_schema_name, associated_table_prefix,
num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name,
chunk_target_size, compression_state, compressed_hypertable_id,
replication_factor)
SELECT id, schema_name, table_name, associated_schema_name, associated_table_prefix,
num_dimensions, chunk_sizing_func_schema, chunk_sizing_func_name,
chunk_target_size,
CASE WHEN compressed is FALSE AND compressed_hypertable_id IS NOT NULL THEN 1
WHEN compressed is TRUE THEN 2
ELSE 0
END,
compressed_hypertable_id,
replication_factor
FROM _timescaledb_internal.hypertable_tmp;
-- add self referential foreign key
ALTER TABLE _timescaledb_catalog.hypertable ADD CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY ( compressed_hypertable_id )
REFERENCES _timescaledb_catalog.hypertable( id );
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
--cleanup
DROP TABLE _timescaledb_internal.hypertable_tmp;
DROP TABLE _timescaledb_internal.tmp_hypertable_seq_value;
-- add all the other foreign keys
ALTER TABLE _timescaledb_catalog.hypertable_data_node
ADD CONSTRAINT hypertable_data_node_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id );
ALTER TABLE _timescaledb_catalog.tablespace ADD CONSTRAINT tablespace_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.dimension ADD CONSTRAINT dimension_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.chunk ADD CONSTRAINT chunk_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id );
ALTER TABLE _timescaledb_catalog.chunk_index ADD CONSTRAINT chunk_index_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_config.bgw_job ADD CONSTRAINT bgw_job_hypertable_id_fkey
FOREIGN KEY ( hypertable_id ) REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.continuous_agg ADD CONSTRAINT
continuous_agg_mat_hypertable_id_fkey FOREIGN KEY ( mat_hypertable_id )
REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.continuous_agg ADD CONSTRAINT
continuous_agg_raw_hypertable_id_fkey FOREIGN KEY ( raw_hypertable_id )
REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold
ADD CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey
FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
ALTER TABLE _timescaledb_catalog.hypertable_compression ADD CONSTRAINT
hypertable_compression_hypertable_id_fkey FOREIGN KEY ( hypertable_id )
REFERENCES _timescaledb_catalog.hypertable( id )
ON DELETE CASCADE;
GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC;
GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC;
--End Modify hypertable table
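-- Illustration (not part of the removed script): how the CASE expression in
-- the INSERT above maps the old boolean "compressed" flag plus
-- compressed_hypertable_id onto the new compression_state values.
SELECT compressed, compressed_hypertable_id,
       CASE WHEN compressed IS FALSE AND compressed_hypertable_id IS NOT NULL THEN 1
            WHEN compressed IS TRUE THEN 2
            ELSE 0
       END AS compression_state
FROM (VALUES (false, NULL::integer), (false, 2), (true, NULL::integer)) AS t (compressed, compressed_hypertable_id);
-- => 0 (regular), 1 (has a compressed hypertable), 2 (internal compressed hypertable)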

@@ -1,3 +0,0 @@
ALTER TABLE _timescaledb_catalog.hypertable
DROP CONSTRAINT hypertable_replication_factor_check,
ADD CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1);
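-- Illustration (not part of the removed script): values accepted by the
-- recreated check constraint. NULL (a regular hypertable) passes because a
-- CHECK that evaluates to unknown is not a violation.
SELECT x AS replication_factor, (x > 0 OR x = -1) AS check_result
FROM (VALUES (NULL::smallint), (2::smallint), (-1::smallint), (0::smallint)) AS t (x);
-- => NULL (passes), true, true, false (rejected)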

@@ -1,2 +0,0 @@
-- set compressed_chunk_id to NULL for dropped chunks
UPDATE _timescaledb_catalog.chunk SET compressed_chunk_id = NULL WHERE dropped = true AND compressed_chunk_id IS NOT NULL;