Mats Kindahl 92b6c03e43 Remove cascade option from drop_chunks
This commit removes the `cascade` option from the functions
`drop_chunks` and `add_drop_chunks_policy`, which now never cascade
drops to dependent objects. The tests are updated accordingly, with
verbosity turned up so that the dependent objects are printed in the
error details.
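
For illustration, a retention call now simply looks like this (the hypertable name is hypothetical):

    SELECT drop_chunks(INTERVAL '30 days', 'conditions');

If other objects still depend on the chunks, the call errors out and the dependent objects are listed in the error details instead of being dropped.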
2020-06-02 16:08:51 +02:00

-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
--NOTICE: UPGRADE-SCRIPT-NEEDED contents in this file are not auto-upgraded.
-- This file contains table definitions for various abstractions and data
-- structures for representing hypertables and lower-level concepts.
-- Hypertable
-- ==========
--
-- The hypertable is an abstraction that represents a table that is
-- partitioned in N dimensions, where each dimension maps to a column
-- in the table. A dimension can either be 'open' or 'closed', which
-- reflects the scheme that divides the dimension's keyspace into
-- "slices".
--
-- Conceptually, a partition, called a "chunk", is a hypercube in
-- the N-dimensional space. A chunk stores a subset of the
-- hypertable's tuples on disk in its own distinct table. The slices
-- that span the chunk's hypercube each correspond to a constraint on
-- the chunk's table, enabling constraint exclusion during queries on
-- the hypertable's data.
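--
-- For illustration only (names and values below are hypothetical): a chunk
-- of a hypertable partitioned on a "time" column and a hashed "device"
-- column might carry table constraints such as
--
--   CHECK ("time" >= '2020-01-01 00:00:00+00' AND "time" < '2020-01-08 00:00:00+00')
--   CHECK (_timescaledb_internal.get_partition_hash(device) < 536870912)
--
-- allowing the planner to skip the chunk whenever a query's WHERE clause
-- cannot overlap these ranges.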
--
--
-- Open dimensions
------------------
-- An open dimension does on-demand slicing, creating a new slice
-- based on a configurable interval whenever a tuple falls outside the
-- existing slices. Open dimensions fit well with columns whose values
-- increase monotonically, such as time columns.
--
-- Closed dimensions
--------------------
-- A closed dimension completely divides its keyspace into a
-- configurable number of slices. The number of slices can be
-- reconfigured, but the new partitioning only affects newly created
-- chunks.
--
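-- As a minimal usage sketch (table and column names are hypothetical), a
-- hypertable with one open and one closed dimension is typically declared
-- through the user-facing API:
--
--   CREATE TABLE conditions (time TIMESTAMPTZ NOT NULL, device INT, temp FLOAT);
--   SELECT create_hypertable('conditions', 'time',              -- open dimension
--                            partitioning_column => 'device',   -- closed dimension
--                            number_partitions => 4);
--
-- The metadata describing the result is stored in the catalog tables below.
--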
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable (
  id SERIAL PRIMARY KEY,
  schema_name NAME NOT NULL CHECK (schema_name != '_timescaledb_catalog'),
  table_name NAME NOT NULL,
  associated_schema_name NAME NOT NULL,
  associated_table_prefix NAME NOT NULL,
  num_dimensions SMALLINT NOT NULL,
  chunk_sizing_func_schema NAME NOT NULL,
  chunk_sizing_func_name NAME NOT NULL,
  chunk_target_size BIGINT NOT NULL CHECK (chunk_target_size >= 0), -- size in bytes
  compressed BOOLEAN NOT NULL DEFAULT false,
  compressed_hypertable_id INTEGER REFERENCES _timescaledb_catalog.hypertable(id),
  replication_factor SMALLINT NULL CHECK (replication_factor > 0),
  UNIQUE (id, schema_name),
  UNIQUE (schema_name, table_name),
  UNIQUE (associated_schema_name, associated_table_prefix),
  CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compressed = true),
  CONSTRAINT hypertable_compress_check CHECK (compressed = false OR (compressed = true AND compressed_hypertable_id IS NULL))
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.hypertable','id'), '');
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable_data_node (
  hypertable_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.hypertable(id),
  node_hypertable_id INTEGER NULL,
  node_name NAME NOT NULL,
  block_chunks BOOLEAN NOT NULL,
  UNIQUE (node_hypertable_id, node_name),
  UNIQUE (hypertable_id, node_name)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_data_node', '');
-- The tablespace table maps tablespaces to hypertables.
-- This allows spreading a hypertable's chunks across multiple disks.
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.tablespace (
  id SERIAL PRIMARY KEY,
  hypertable_id INT NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  tablespace_name NAME NOT NULL,
  UNIQUE (hypertable_id, tablespace_name)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.tablespace', '');
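-- A hedged usage sketch (tablespace and hypertable names are hypothetical):
--
--   SELECT attach_tablespace('disk2', 'conditions');
--
-- adds a row to this table, after which new chunks of 'conditions' may be
-- created in any of the attached tablespaces.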
-- A dimension represents an axis along which data is partitioned.
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.dimension (
  id SERIAL NOT NULL PRIMARY KEY,
  hypertable_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  column_name NAME NOT NULL,
  column_type REGTYPE NOT NULL,
  aligned BOOLEAN NOT NULL,
  -- closed dimensions
  num_slices SMALLINT NULL,
  partitioning_func_schema NAME NULL,
  partitioning_func NAME NULL,
  -- open dimensions (e.g., time)
  interval_length BIGINT NULL CHECK (interval_length IS NULL OR interval_length > 0),
  integer_now_func_schema NAME NULL,
  integer_now_func NAME NULL,
  CHECK (
    (partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR
    (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)
  ),
  CHECK (
    (num_slices IS NULL AND interval_length IS NOT NULL) OR
    (num_slices IS NOT NULL AND interval_length IS NULL)
  ),
  CHECK (
    (integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR
    (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)
  ),
  UNIQUE (hypertable_id, column_name)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension','id'), '');
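-- The two dimension types can be told apart from the catalog itself; an
-- illustrative query:
--
--   SELECT hypertable_id, column_name,
--          CASE WHEN interval_length IS NOT NULL THEN 'open' ELSE 'closed' END AS kind
--     FROM _timescaledb_catalog.dimension;
--
-- Open dimensions have interval_length set and num_slices NULL; closed
-- dimensions have num_slices set and interval_length NULL (enforced by the
-- CHECK constraints above).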
-- A dimension slice defines a keyspace range along a dimension axis.
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.dimension_slice (
  id SERIAL NOT NULL PRIMARY KEY,
  dimension_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE,
  range_start BIGINT NOT NULL,
  range_end BIGINT NOT NULL,
  CHECK (range_start <= range_end),
  UNIQUE (dimension_id, range_start, range_end)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_slice', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension_slice','id'), '');
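-- For a time dimension with a 1-day chunk interval, a slice is stored as two
-- absolute values in the dimension's internal unit (microseconds for
-- timestamp types), e.g. (illustratively) range_start = 1577836800000000 and
-- range_end = 1577923200000000, so that range_end - range_start = 86400000000
-- (one day in microseconds). Closed (hash-partitioned) dimensions instead
-- slice the hash keyspace.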
-- A chunk is a partition (hypercube) in an N-dimensional
-- hyperspace. Each chunk is associated with N constraints that define
-- the chunk's hypercube. Tuples that fall within the chunk's
-- hypercube are stored in the chunk's data table, as given by
-- 'schema_name' and 'table_name'.
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk (
  id SERIAL NOT NULL PRIMARY KEY,
  hypertable_id INT NOT NULL REFERENCES _timescaledb_catalog.hypertable(id),
  schema_name NAME NOT NULL,
  table_name NAME NOT NULL,
  compressed_chunk_id INTEGER REFERENCES _timescaledb_catalog.chunk(id),
  dropped BOOLEAN NOT NULL DEFAULT false,
  UNIQUE (schema_name, table_name)
);
CREATE INDEX IF NOT EXISTS chunk_hypertable_id_idx
ON _timescaledb_catalog.chunk(hypertable_id);
CREATE INDEX IF NOT EXISTS chunk_compressed_chunk_id_idx
ON _timescaledb_catalog.chunk(compressed_chunk_id);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.chunk','id'), '');
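-- The hypercube of a chunk can be reconstructed from the catalog; an
-- illustrative query joining through chunk_constraint (defined below):
--
--   SELECT c.schema_name, c.table_name, d.column_name, ds.range_start, ds.range_end
--     FROM _timescaledb_catalog.chunk c
--     JOIN _timescaledb_catalog.chunk_constraint cc ON cc.chunk_id = c.id
--     JOIN _timescaledb_catalog.dimension_slice ds ON ds.id = cc.dimension_slice_id
--     JOIN _timescaledb_catalog.dimension d ON d.id = ds.dimension_id;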
-- A chunk constraint maps a dimension slice to a chunk. Each
-- constraint associated with a chunk will also be a table constraint
-- on the chunk's data table.
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk_constraint (
  chunk_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.chunk(id),
  dimension_slice_id INTEGER NULL REFERENCES _timescaledb_catalog.dimension_slice(id),
  constraint_name NAME NOT NULL,
  hypertable_constraint_name NAME NULL,
  UNIQUE (chunk_id, constraint_name)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_constraint', '');
CREATE INDEX IF NOT EXISTS chunk_constraint_chunk_id_dimension_slice_id_idx
ON _timescaledb_catalog.chunk_constraint(chunk_id, dimension_slice_id);
CREATE SEQUENCE IF NOT EXISTS _timescaledb_catalog.chunk_constraint_name;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_constraint_name', '');
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk_index (
  chunk_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE,
  index_name NAME NOT NULL,
  hypertable_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  hypertable_index_name NAME NOT NULL,
  UNIQUE (chunk_id, index_name)
);
CREATE INDEX IF NOT EXISTS chunk_index_hypertable_id_hypertable_index_name_idx
ON _timescaledb_catalog.chunk_index(hypertable_id, hypertable_index_name);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_index', '');
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk_data_node (
  chunk_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.chunk(id),
  node_chunk_id INTEGER NOT NULL,
  node_name NAME NOT NULL,
  UNIQUE (node_chunk_id, node_name),
  UNIQUE (chunk_id, node_name)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_data_node', '');
-- Default jobs are given the id space [1,1000). User-installed jobs and any jobs created inside
-- tests are given the id space [1000, INT_MAX). That way, jobs that are always default-installed
-- by other .sql scripts are excluded from pg_dump, which avoids insertion conflicts during
-- pg_restore.
CREATE SEQUENCE IF NOT EXISTS _timescaledb_config.bgw_job_id_seq MINVALUE 1000;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job_id_seq', '');
CREATE TABLE IF NOT EXISTS _timescaledb_config.bgw_job (
  id INTEGER PRIMARY KEY DEFAULT nextval('_timescaledb_config.bgw_job_id_seq'),
  application_name NAME NOT NULL,
  job_type NAME NOT NULL,
  schedule_interval INTERVAL NOT NULL,
  max_runtime INTERVAL NOT NULL,
  max_retries INT NOT NULL,
  retry_period INTERVAL NOT NULL,
  CONSTRAINT valid_job_type CHECK (job_type IN ('telemetry_and_version_check_if_enabled', 'reorder', 'drop_chunks', 'continuous_aggregate', 'compress_chunks'))
);
ALTER SEQUENCE _timescaledb_config.bgw_job_id_seq OWNED BY _timescaledb_config.bgw_job.id;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job', 'WHERE id >= 1000');
CREATE TABLE IF NOT EXISTS _timescaledb_internal.bgw_job_stat (
  job_id INTEGER PRIMARY KEY REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE,
  last_start TIMESTAMPTZ NOT NULL DEFAULT NOW(),
  last_finish TIMESTAMPTZ NOT NULL,
  next_start TIMESTAMPTZ NOT NULL,
  last_successful_finish TIMESTAMPTZ NOT NULL,
  last_run_success BOOL NOT NULL,
  total_runs BIGINT NOT NULL,
  total_duration INTERVAL NOT NULL,
  total_successes BIGINT NOT NULL,
  total_failures BIGINT NOT NULL,
  total_crashes BIGINT NOT NULL,
  consecutive_failures INT NOT NULL,
  consecutive_crashes INT NOT NULL
);
-- The job_stat table is deliberately not dumped by pg_dump because its
-- statistics are not meaningful across instances.
-- Now we define the argument tables for the available BGW policies.
CREATE TABLE IF NOT EXISTS _timescaledb_config.bgw_policy_reorder (
  job_id INTEGER PRIMARY KEY REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE,
  hypertable_id INTEGER UNIQUE NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  hypertable_index_name NAME NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_policy_reorder', '');
CREATE TABLE IF NOT EXISTS _timescaledb_config.bgw_policy_drop_chunks (
  job_id INTEGER PRIMARY KEY REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE,
  hypertable_id INTEGER UNIQUE NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  older_than _timescaledb_catalog.ts_interval NOT NULL,
  cascade_to_materializations BOOLEAN,
  CONSTRAINT valid_older_than CHECK (_timescaledb_internal.valid_ts_interval(older_than))
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_policy_drop_chunks', '');
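-- Rows in this table are normally created through the policy API, e.g.
-- (hypothetical hypertable name):
--
--   SELECT add_drop_chunks_policy('conditions', INTERVAL '30 days');
--
-- Note that, as of the commit above, neither drop_chunks nor the policy
-- accepts a cascade option for dependent objects; only
-- cascade_to_materializations remains configurable here.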
----- End BGW policy table definitions
-- Now we define a special stats table for each job/chunk pair. This will be used by the scheduler
-- to determine whether to run a specific job on a specific chunk.
CREATE TABLE IF NOT EXISTS _timescaledb_internal.bgw_policy_chunk_stats (
  job_id INTEGER NOT NULL REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE,
  chunk_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE,
  num_times_job_run INTEGER,
  last_time_job_run TIMESTAMPTZ,
  UNIQUE (job_id, chunk_id)
);
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.metadata (
  key NAME NOT NULL PRIMARY KEY,
  value TEXT NOT NULL,
  include_in_telemetry BOOLEAN NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$WHERE key='exported_uuid'$$);
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.continuous_agg (
  mat_hypertable_id INTEGER PRIMARY KEY REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  raw_hypertable_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  user_view_schema NAME NOT NULL,
  user_view_name NAME NOT NULL,
  partial_view_schema NAME NOT NULL,
  partial_view_name NAME NOT NULL,
  bucket_width BIGINT NOT NULL,
  job_id INTEGER UNIQUE NOT NULL REFERENCES _timescaledb_config.bgw_job(id) ON DELETE RESTRICT,
  refresh_lag BIGINT NOT NULL,
  direct_view_schema NAME NOT NULL,
  direct_view_name NAME NOT NULL,
  max_interval_per_job BIGINT NOT NULL,
  ignore_invalidation_older_than BIGINT NOT NULL DEFAULT BIGINT '9223372036854775807',
  materialized_only BOOL NOT NULL DEFAULT false,
  UNIQUE (user_view_schema, user_view_name),
  UNIQUE (partial_view_schema, partial_view_name)
);
CREATE INDEX IF NOT EXISTS continuous_agg_raw_hypertable_id_idx
ON _timescaledb_catalog.continuous_agg(raw_hypertable_id);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', '');
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.continuous_aggs_invalidation_threshold (
  hypertable_id INTEGER PRIMARY KEY REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  watermark BIGINT NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_invalidation_threshold', '');
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.continuous_aggs_completed_threshold (
  materialization_id INTEGER PRIMARY KEY
    REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id)
    ON DELETE CASCADE,
  watermark BIGINT NOT NULL -- exclusive (everything up to but not including watermark is done)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_completed_threshold', '');
-- this does not have an FK on the materialization table since INSERTs to this
-- table are performance critical
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (
  hypertable_id INTEGER NOT NULL,
  modification_time BIGINT NOT NULL, -- the now() time of the txn that modified the raw table
  lowest_modified_value BIGINT NOT NULL,
  greatest_modified_value BIGINT NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_hypertable_invalidation_log', '');
CREATE INDEX continuous_aggs_hypertable_invalidation_log_idx
ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (hypertable_id, lowest_modified_value ASC);
-- Per-continuous-aggregate copy of the invalidation log
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (
  materialization_id INTEGER
    REFERENCES _timescaledb_catalog.continuous_agg(mat_hypertable_id)
    ON DELETE CASCADE,
  modification_time BIGINT NOT NULL, -- the now() time of the txn that modified the raw table
  lowest_modified_value BIGINT NOT NULL,
  greatest_modified_value BIGINT NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_materialization_invalidation_log', '');
CREATE INDEX continuous_aggs_materialization_invalidation_log_idx
ON _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (materialization_id, lowest_modified_value ASC);
/* the source of this data is the enum from the source code that lists
* the algorithms. This table is NOT dumped.
*/
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.compression_algorithm (
  id SMALLINT PRIMARY KEY,
  version SMALLINT NOT NULL,
  name NAME NOT NULL,
  description TEXT
);
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable_compression (
  hypertable_id INTEGER REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  attname NAME NOT NULL,
  compression_algorithm_id SMALLINT REFERENCES _timescaledb_catalog.compression_algorithm(id),
  segmentby_column_index SMALLINT,
  orderby_column_index SMALLINT,
  orderby_asc BOOLEAN,
  orderby_nullsfirst BOOLEAN,
  PRIMARY KEY (hypertable_id, attname),
  UNIQUE (hypertable_id, segmentby_column_index),
  UNIQUE (hypertable_id, orderby_column_index)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_compression', '');
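-- Rows here mirror the per-column settings given when compression is enabled
-- on a hypertable, e.g. (hypothetical table and column names):
--
--   ALTER TABLE conditions SET (timescaledb.compress,
--                               timescaledb.compress_segmentby = 'device',
--                               timescaledb.compress_orderby = 'time DESC');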
CREATE TABLE IF NOT EXISTS _timescaledb_catalog.compression_chunk_size (
  chunk_id INTEGER REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE,
  compressed_chunk_id INTEGER REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE,
  uncompressed_heap_size BIGINT NOT NULL,
  uncompressed_toast_size BIGINT NOT NULL,
  uncompressed_index_size BIGINT NOT NULL,
  compressed_heap_size BIGINT NOT NULL,
  compressed_toast_size BIGINT NOT NULL,
  compressed_index_size BIGINT NOT NULL,
  PRIMARY KEY (chunk_id, compressed_chunk_id)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
CREATE TABLE IF NOT EXISTS _timescaledb_config.bgw_policy_compress_chunks (
  job_id INTEGER PRIMARY KEY REFERENCES _timescaledb_config.bgw_job(id) ON DELETE CASCADE,
  hypertable_id INTEGER UNIQUE NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
  older_than _timescaledb_catalog.ts_interval NOT NULL,
  CONSTRAINT valid_older_than CHECK (_timescaledb_internal.valid_ts_interval(older_than))
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_policy_compress_chunks', '');
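-- Populated through the policy API, e.g. (hypothetical hypertable name):
--
--   SELECT add_compress_chunks_policy('conditions', INTERVAL '7 days');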
-- This table stores commit decisions for 2PC remote transactions. Abort decisions are never stored.
-- If a PREPARE TRANSACTION fails on any data node, the entire
-- frontend transaction is rolled back and no rows are stored.
-- The frontend_transaction_id represents the entire distributed transaction,
-- while each data node has its own unique remote_transaction_id.
CREATE TABLE _timescaledb_catalog.remote_txn (
  data_node_name NAME, -- this is really only here to allow cleanup on a per-node basis
  remote_transaction_id TEXT CHECK (remote_transaction_id::rxid IS NOT NULL),
  PRIMARY KEY (remote_transaction_id)
);
CREATE INDEX IF NOT EXISTS remote_txn_data_node_name_idx
ON _timescaledb_catalog.remote_txn(data_node_name);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.remote_txn', '');
-- Set table permissions
-- We need to grant SELECT to PUBLIC for all tables even those not
-- marked as being dumped because pg_dump will try to access all
-- tables initially to detect inheritance chains and then decide
-- which objects actually need to be dumped.
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_catalog TO PUBLIC;
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_config TO PUBLIC;
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_internal TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_catalog TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_config TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_internal TO PUBLIC;