Fabrízio de Royes Mello 38fcd1b76b Improve Realtime Continuous Aggregate performance
When calling the `cagg_watermark` function to get the watermark of a
Continuous Aggregate, we execute a `SELECT MAX(time_dimension)` query
against the underlying materialization hypertable.

The problem is that a `SELECT MAX(time_dimension)` query can be
expensive because it scans all hypertable chunks, increasing the
planning time for Realtime Continuous Aggregates.

Improve this by creating a new catalog table that serves as a cache for
the current Continuous Aggregate watermark, updated in the following
situations (see the sketch after this list):
- Create CAgg: store the minimum value of the hypertable time dimension
  data type;
- Refresh CAgg: store the last value of the time dimension materialized
  in the underlying materialization hypertable (or the minimum value of
  the materialization hypertable time dimension data type if no data
  has been materialized);
- Drop CAgg chunks: same as Refresh CAgg.
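
A minimal sketch of the difference (illustrative only; the table alias
and id value are hypothetical). Before, answering `cagg_watermark`
required something like

    SELECT max(time_dimension) FROM materialization_hypertable;

which scans every chunk. With the cache it becomes a primary-key lookup:

    SELECT watermark
    FROM _timescaledb_catalog.continuous_aggs_watermark
    WHERE mat_hypertable_id = 42;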

Closes #4699, #5307
2023-03-22 16:35:23 -03:00

-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
--NOTICE: UPGRADE-SCRIPT-NEEDED contents in this file are not auto-upgraded.
-- This file contains table definitions for various abstractions and data
-- structures for representing hypertables and lower-level concepts.
-- Hypertable
-- ==========
--
-- The hypertable is an abstraction that represents a table that is
-- partitioned in N dimensions, where each dimension maps to a column
-- in the table. A dimension can either be 'open' or 'closed', which
-- reflects the scheme that divides the dimension's keyspace into
-- "slices".
--
-- Conceptually, a partition, called a "chunk", is a hypercube in
-- the N-dimensional space. A chunk stores a subset of the
-- hypertable's tuples on disk in its own distinct table. The slices
-- that span the chunk's hypercube each correspond to a constraint on
-- the chunk's table, enabling constraint exclusion during queries on
-- the hypertable's data.
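--
-- For illustration (hypothetical column and values), a chunk over a
-- single time dimension might carry a constraint such as
--
--   CHECK ("time" >= '2023-01-01' AND "time" < '2023-01-08')
--
-- allowing the planner to skip the chunk for queries outside that range.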
--
--
-- Open dimensions
------------------
-- An open dimension does on-demand slicing, creating a new slice
-- based on a configurable interval whenever a tuple falls outside the
-- existing slices. Open dimensions fit well with columns that are
-- incrementally increasing, such as time-based ones.
--
-- Closed dimensions
--------------------
-- A closed dimension completely divides its keyspace into a
-- configurable number of slices. The number of slices can be
-- reconfigured, but the new partitioning only affects newly created
-- chunks.
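--
-- For example (illustrative only; assumes a table "conditions" with
-- "time" and "device" columns), a hypertable with one open time
-- dimension and one closed space dimension of four slices could be
-- created with:
--
--   SELECT create_hypertable('conditions', 'time', 'device', 4);
--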
-- The unique constraint is table_name + schema_name. The ordering is
-- important as we want index access when we filter by table_name.
CREATE SEQUENCE _timescaledb_catalog.hypertable_id_seq MINVALUE 1;
CREATE TABLE _timescaledb_catalog.hypertable (
  id INTEGER NOT NULL DEFAULT nextval('_timescaledb_catalog.hypertable_id_seq'),
  schema_name name NOT NULL,
  table_name name NOT NULL,
  associated_schema_name name NOT NULL,
  associated_table_prefix name NOT NULL,
  num_dimensions smallint NOT NULL,
  chunk_sizing_func_schema name NOT NULL,
  chunk_sizing_func_name name NOT NULL,
  chunk_target_size bigint NOT NULL, -- size in bytes
  compression_state smallint NOT NULL DEFAULT 0,
  compressed_hypertable_id integer,
  replication_factor smallint NULL,
  -- table constraints
  CONSTRAINT hypertable_pkey PRIMARY KEY (id),
  CONSTRAINT hypertable_associated_schema_name_associated_table_prefix_key UNIQUE (associated_schema_name, associated_table_prefix),
  CONSTRAINT hypertable_table_name_schema_name_key UNIQUE (table_name, schema_name),
  CONSTRAINT hypertable_schema_name_check CHECK (schema_name != '_timescaledb_catalog'),
  -- internal compressed hypertables have compression state = 2
  CONSTRAINT hypertable_dim_compress_check CHECK (num_dimensions > 0 OR compression_state = 2),
  CONSTRAINT hypertable_chunk_target_size_check CHECK (chunk_target_size >= 0),
  CONSTRAINT hypertable_compress_check CHECK ((compression_state = 0 OR compression_state = 1) OR (compression_state = 2 AND compressed_hypertable_id IS NULL)),
  -- replication_factor NULL: regular hypertable
  -- replication_factor > 0: distributed hypertable on access node
  -- replication_factor = -1: distributed hypertable on data node, which is part of a larger table
  CONSTRAINT hypertable_replication_factor_check CHECK (replication_factor > 0 OR replication_factor = -1),
  CONSTRAINT hypertable_compressed_hypertable_id_fkey FOREIGN KEY (compressed_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id)
);
ALTER SEQUENCE _timescaledb_catalog.hypertable_id_seq OWNED BY _timescaledb_catalog.hypertable.id;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_id_seq', '');
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
CREATE TABLE _timescaledb_catalog.hypertable_data_node (
  hypertable_id integer NOT NULL,
  node_hypertable_id integer NULL,
  node_name name NOT NULL,
  block_chunks boolean NOT NULL,
  -- table constraints
  CONSTRAINT hypertable_data_node_hypertable_id_node_name_key UNIQUE (hypertable_id, node_name),
  CONSTRAINT hypertable_data_node_node_hypertable_id_node_name_key UNIQUE (node_hypertable_id, node_name),
  CONSTRAINT hypertable_data_node_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_data_node', '');
-- The tablespace table maps tablespaces to hypertables.
-- This allows spreading a hypertable's chunks across multiple disks.
CREATE TABLE _timescaledb_catalog.tablespace (
  id serial NOT NULL,
  hypertable_id int NOT NULL,
  tablespace_name name NOT NULL,
  -- table constraints
  CONSTRAINT tablespace_pkey PRIMARY KEY (id),
  CONSTRAINT tablespace_hypertable_id_tablespace_name_key UNIQUE (hypertable_id, tablespace_name),
  CONSTRAINT tablespace_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.tablespace', '');
-- A dimension represents an axis along which data is partitioned.
CREATE TABLE _timescaledb_catalog.dimension (
  id serial NOT NULL,
  hypertable_id integer NOT NULL,
  column_name name NOT NULL,
  column_type REGTYPE NOT NULL,
  aligned boolean NOT NULL,
  -- closed dimensions
  num_slices smallint NULL,
  partitioning_func_schema name NULL,
  partitioning_func name NULL,
  -- open dimensions (e.g., time)
  interval_length bigint NULL,
  -- compress interval is used by rollup procedure during compression
  -- in order to merge multiple chunks into a single one
  compress_interval_length bigint NULL,
  integer_now_func_schema name NULL,
  integer_now_func name NULL,
  -- table constraints
  CONSTRAINT dimension_pkey PRIMARY KEY (id),
  CONSTRAINT dimension_hypertable_id_column_name_key UNIQUE (hypertable_id, column_name),
  CONSTRAINT dimension_check CHECK ((partitioning_func_schema IS NULL AND partitioning_func IS NULL) OR (partitioning_func_schema IS NOT NULL AND partitioning_func IS NOT NULL)),
  CONSTRAINT dimension_check1 CHECK ((num_slices IS NULL AND interval_length IS NOT NULL) OR (num_slices IS NOT NULL AND interval_length IS NULL)),
  CONSTRAINT dimension_check2 CHECK ((integer_now_func_schema IS NULL AND integer_now_func IS NULL) OR (integer_now_func_schema IS NOT NULL AND integer_now_func IS NOT NULL)),
  CONSTRAINT dimension_interval_length_check CHECK (interval_length IS NULL OR interval_length > 0),
  CONSTRAINT dimension_compress_interval_length_check CHECK (compress_interval_length IS NULL OR compress_interval_length > 0),
  CONSTRAINT dimension_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension', 'id'), '');
-- A dimension partition represents the current division of a (space)
-- dimension into partitions, and the mapping of those partitions to
-- data nodes. When a chunk is created, it will use the partition and
-- data nodes information in this table to decide range and node
-- placement of the chunk.
--
-- Normally, only closed/space dimensions are pre-partitioned and
-- present in this table. The dimension stretches from -INF to +INF
-- and the range_start value for a partition represents where the
-- partition starts, stretching to the start of the next partition
-- (non-inclusive). There is no range_end since it is implicit by the
-- start of the next partition and thus uses less space. Having no end
-- also makes it easier to split partitions by inserting a new row
-- instead of potentially updating multiple rows.
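--
-- For illustration (made-up values), a space dimension with id 7 split
-- into three partitions could be stored as the rows
--
--   (dimension_id, range_start) = (7, -9223372036854775808)
--   (dimension_id, range_start) = (7, 0)
--   (dimension_id, range_start) = (7, 1073741824)
--
-- where each partition implicitly ends where the next one starts.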
CREATE TABLE _timescaledb_catalog.dimension_partition (
  dimension_id integer NOT NULL REFERENCES _timescaledb_catalog.dimension (id) ON DELETE CASCADE,
  range_start bigint NOT NULL,
  data_nodes name[] NULL,
  UNIQUE (dimension_id, range_start)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_partition', '');
-- A dimension slice defines a keyspace range along a dimension
-- axis. A chunk references a slice in each of its dimensions, forming
-- a hypercube.
CREATE TABLE _timescaledb_catalog.dimension_slice (
  id serial NOT NULL,
  dimension_id integer NOT NULL,
  range_start bigint NOT NULL,
  range_end bigint NOT NULL,
  -- table constraints
  CONSTRAINT dimension_slice_pkey PRIMARY KEY (id),
  CONSTRAINT dimension_slice_dimension_id_range_start_range_end_key UNIQUE (dimension_id, range_start, range_end),
  CONSTRAINT dimension_slice_check CHECK (range_start <= range_end),
  CONSTRAINT dimension_slice_dimension_id_fkey FOREIGN KEY (dimension_id) REFERENCES _timescaledb_catalog.dimension (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_slice', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension_slice', 'id'), '');
-- A chunk is a partition (hypercube) in an N-dimensional
-- hyperspace. Each chunk is associated with N constraints that define
-- the chunk's hypercube. Tuples that fall within the chunk's
-- hypercube are stored in the chunk's data table, as given by
-- 'schema_name' and 'table_name'.
CREATE SEQUENCE _timescaledb_catalog.chunk_id_seq MINVALUE 1;
CREATE TABLE _timescaledb_catalog.chunk (
  id integer NOT NULL DEFAULT nextval('_timescaledb_catalog.chunk_id_seq'),
  hypertable_id int NOT NULL,
  schema_name name NOT NULL,
  table_name name NOT NULL,
  compressed_chunk_id integer,
  dropped boolean NOT NULL DEFAULT FALSE,
  status integer NOT NULL DEFAULT 0,
  osm_chunk boolean NOT NULL DEFAULT FALSE,
  -- table constraints
  CONSTRAINT chunk_pkey PRIMARY KEY (id),
  CONSTRAINT chunk_schema_name_table_name_key UNIQUE (schema_name, table_name),
  CONSTRAINT chunk_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id),
  CONSTRAINT chunk_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id)
);
ALTER SEQUENCE _timescaledb_catalog.chunk_id_seq OWNED BY _timescaledb_catalog.chunk.id;
CREATE INDEX chunk_hypertable_id_idx ON _timescaledb_catalog.chunk (hypertable_id);
CREATE INDEX chunk_compressed_chunk_id_idx ON _timescaledb_catalog.chunk (compressed_chunk_id);
--we could use a partial index (where osm_chunk is true). However, the catalog code
--does not work with partial/functional indexes. So we instead have a full index here.
--Another option would be to use the status field to identify an OSM chunk. However, bit
--operations only work on the varbit datatype and not the integer datatype.
CREATE INDEX chunk_osm_chunk_idx ON _timescaledb_catalog.chunk (osm_chunk, hypertable_id);
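-- For reference, the rejected partial-index form would have been
-- something like:
--   CREATE INDEX ... ON _timescaledb_catalog.chunk (hypertable_id) WHERE osm_chunk;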
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', '');
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_id_seq', '');
-- A chunk constraint maps a dimension slice to a chunk. Each
-- constraint associated with a chunk will also be a table constraint
-- on the chunk's data table.
CREATE TABLE _timescaledb_catalog.chunk_constraint (
  chunk_id integer NOT NULL,
  dimension_slice_id integer NULL,
  constraint_name name NOT NULL,
  hypertable_constraint_name name NULL,
  -- table constraints
  CONSTRAINT chunk_constraint_chunk_id_constraint_name_key UNIQUE (chunk_id, constraint_name),
  CONSTRAINT chunk_constraint_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id),
  CONSTRAINT chunk_constraint_dimension_slice_id_fkey FOREIGN KEY (dimension_slice_id) REFERENCES _timescaledb_catalog.dimension_slice (id)
);
CREATE INDEX chunk_constraint_dimension_slice_id_idx ON _timescaledb_catalog.chunk_constraint (dimension_slice_id);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_constraint', '');
CREATE SEQUENCE _timescaledb_catalog.chunk_constraint_name;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_constraint_name', '');
CREATE TABLE _timescaledb_catalog.chunk_index (
  chunk_id integer NOT NULL,
  index_name name NOT NULL,
  hypertable_id integer NOT NULL,
  hypertable_index_name name NOT NULL,
  -- table constraints
  CONSTRAINT chunk_index_chunk_id_index_name_key UNIQUE (chunk_id, index_name),
  CONSTRAINT chunk_index_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
  CONSTRAINT chunk_index_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
CREATE INDEX chunk_index_hypertable_id_hypertable_index_name_idx ON _timescaledb_catalog.chunk_index (hypertable_id, hypertable_index_name);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_index', '');
CREATE TABLE _timescaledb_catalog.chunk_data_node (
  chunk_id integer NOT NULL,
  node_chunk_id integer NOT NULL,
  node_name name NOT NULL,
  -- table constraints
  CONSTRAINT chunk_data_node_chunk_id_node_name_key UNIQUE (chunk_id, node_name),
  CONSTRAINT chunk_data_node_node_chunk_id_node_name_key UNIQUE (node_chunk_id, node_name),
  CONSTRAINT chunk_data_node_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id)
);
CREATE INDEX chunk_data_node_node_name_idx ON _timescaledb_catalog.chunk_data_node (node_name);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk_data_node', '');
-- Default jobs are given the id space [1,1000). User-installed jobs and any jobs created inside tests
-- are given the id space [1000, INT_MAX). That way, we do not pg_dump jobs that are default-installed
-- by other .sql scripts, which avoids insertion conflicts during pg_restore.
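-- (The dump filter "WHERE id >= 1000" on the bgw_job table below
-- implements this.)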
CREATE SEQUENCE _timescaledb_config.bgw_job_id_seq MINVALUE 1000;
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job_id_seq', '');
-- We put columns that can be null or have variable length
-- last. This allows us to read the important fields above in the
-- scheduler without materializing the fields below, which the
-- scheduler does not need.
CREATE TABLE _timescaledb_config.bgw_job (
  id integer NOT NULL DEFAULT nextval('_timescaledb_config.bgw_job_id_seq'),
  application_name name NOT NULL,
  schedule_interval interval NOT NULL,
  max_runtime interval NOT NULL,
  max_retries integer NOT NULL,
  retry_period interval NOT NULL,
  proc_schema name NOT NULL,
  proc_name name NOT NULL,
  owner name NOT NULL DEFAULT CURRENT_ROLE,
  scheduled bool NOT NULL DEFAULT TRUE,
  fixed_schedule bool NOT NULL DEFAULT TRUE,
  initial_start timestamptz,
  hypertable_id integer,
  config jsonb,
  check_schema name,
  check_name name,
  timezone text,
  -- table constraints
  CONSTRAINT bgw_job_pkey PRIMARY KEY (id),
  CONSTRAINT bgw_job_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
ALTER SEQUENCE _timescaledb_config.bgw_job_id_seq OWNED BY _timescaledb_config.bgw_job.id;
CREATE INDEX bgw_job_proc_hypertable_id_idx ON _timescaledb_config.bgw_job (proc_schema, proc_name, hypertable_id);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_config.bgw_job', 'WHERE id >= 1000');
CREATE TABLE _timescaledb_internal.bgw_job_stat (
  job_id integer NOT NULL,
  last_start timestamptz NOT NULL DEFAULT NOW(),
  last_finish timestamptz NOT NULL,
  next_start timestamptz NOT NULL,
  last_successful_finish timestamptz NOT NULL,
  last_run_success bool NOT NULL,
  total_runs bigint NOT NULL,
  total_duration interval NOT NULL,
  total_duration_failures interval NOT NULL,
  total_successes bigint NOT NULL,
  total_failures bigint NOT NULL,
  total_crashes bigint NOT NULL,
  consecutive_failures int NOT NULL,
  consecutive_crashes int NOT NULL,
  flags int NOT NULL DEFAULT 0,
  -- table constraints
  CONSTRAINT bgw_job_stat_pkey PRIMARY KEY (job_id),
  CONSTRAINT bgw_job_stat_job_id_fkey FOREIGN KEY (job_id) REFERENCES _timescaledb_config.bgw_job (id) ON DELETE CASCADE
);
--The job_stat table is not dumped by pg_dump on purpose because
--the statistics probably aren't very meaningful across instances.
-- Now we define a special stats table for each job/chunk pair. This will be used by the scheduler
-- to determine whether to run a specific job on a specific chunk.
CREATE TABLE _timescaledb_internal.bgw_policy_chunk_stats (
  job_id integer NOT NULL,
  chunk_id integer NOT NULL,
  num_times_job_run integer,
  last_time_job_run timestamptz,
  -- table constraints
  CONSTRAINT bgw_policy_chunk_stats_job_id_chunk_id_key UNIQUE (job_id, chunk_id),
  CONSTRAINT bgw_policy_chunk_stats_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
  CONSTRAINT bgw_policy_chunk_stats_job_id_fkey FOREIGN KEY (job_id) REFERENCES _timescaledb_config.bgw_job (id) ON DELETE CASCADE
);
CREATE TABLE _timescaledb_catalog.metadata (
  key NAME NOT NULL,
  value text NOT NULL,
  include_in_telemetry boolean NOT NULL,
  -- table constraints
  CONSTRAINT metadata_pkey PRIMARY KEY (key)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.metadata', $$
WHERE KEY = 'exported_uuid' $$);
CREATE TABLE _timescaledb_catalog.continuous_agg (
  mat_hypertable_id integer NOT NULL,
  raw_hypertable_id integer NOT NULL,
  parent_mat_hypertable_id integer,
  user_view_schema name NOT NULL,
  user_view_name name NOT NULL,
  partial_view_schema name NOT NULL,
  partial_view_name name NOT NULL,
  bucket_width bigint NOT NULL,
  direct_view_schema name NOT NULL,
  direct_view_name name NOT NULL,
  materialized_only bool NOT NULL DEFAULT FALSE,
  finalized bool NOT NULL DEFAULT TRUE,
  -- table constraints
  CONSTRAINT continuous_agg_pkey PRIMARY KEY (mat_hypertable_id),
  CONSTRAINT continuous_agg_partial_view_schema_partial_view_name_key UNIQUE (partial_view_schema, partial_view_name),
  CONSTRAINT continuous_agg_user_view_schema_user_view_name_key UNIQUE (user_view_schema, user_view_name),
  CONSTRAINT continuous_agg_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE,
  CONSTRAINT continuous_agg_raw_hypertable_id_fkey FOREIGN KEY (raw_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE,
  CONSTRAINT continuous_agg_parent_mat_hypertable_id_fkey FOREIGN KEY (parent_mat_hypertable_id)
    REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE
);
CREATE INDEX continuous_agg_raw_hypertable_id_idx ON _timescaledb_catalog.continuous_agg (raw_hypertable_id);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg', '');
-- See the comments for ContinuousAggsBucketFunction structure.
CREATE TABLE _timescaledb_catalog.continuous_aggs_bucket_function (
  mat_hypertable_id integer NOT NULL,
  -- TRUE if the function is from the "timescaledb_experimental" schema, FALSE otherwise
  experimental bool NOT NULL,
  -- Name of the bucketing function, e.g. "time_bucket" or "time_bucket_ng"
  name text NOT NULL,
  -- `bucket_width` argument of the function, e.g. "1 month"
  bucket_width text NOT NULL,
  -- `origin` argument of the function provided by the user
  origin text NOT NULL,
  -- `timezone` argument of the function provided by the user
  timezone text NOT NULL,
  -- table constraints
  CONSTRAINT continuous_aggs_bucket_function_pkey PRIMARY KEY (mat_hypertable_id),
  CONSTRAINT continuous_aggs_bucket_function_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_bucket_function', '');
CREATE TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold (
  hypertable_id integer NOT NULL,
  watermark bigint NOT NULL,
  -- table constraints
  CONSTRAINT continuous_aggs_invalidation_threshold_pkey PRIMARY KEY (hypertable_id),
  CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_invalidation_threshold', '');
CREATE TABLE _timescaledb_catalog.continuous_aggs_watermark (
  mat_hypertable_id integer NOT NULL,
  watermark bigint NOT NULL,
  -- table constraints
  CONSTRAINT continuous_aggs_watermark_pkey PRIMARY KEY (mat_hypertable_id),
  CONSTRAINT continuous_aggs_watermark_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_watermark', '');
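-- Note: the watermark is stored in the internal bigint time
-- representation (for timestamp-like dimensions, microseconds since
-- the PostgreSQL epoch), the same encoding used by the invalidation
-- threshold above.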
-- this does not have an FK on the materialization table since INSERTs to this
-- table are performance critical
CREATE TABLE _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (
  hypertable_id integer NOT NULL,
  lowest_modified_value bigint NOT NULL,
  greatest_modified_value bigint NOT NULL
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_hypertable_invalidation_log', '');
CREATE INDEX continuous_aggs_hypertable_invalidation_log_idx ON _timescaledb_catalog.continuous_aggs_hypertable_invalidation_log (hypertable_id, lowest_modified_value ASC);
-- Per-CAgg copy of the invalidation log
CREATE TABLE _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (
  materialization_id integer,
  lowest_modified_value bigint NOT NULL,
  greatest_modified_value bigint NOT NULL,
  -- table constraints
  CONSTRAINT continuous_aggs_materialization_invalid_materialization_id_fkey FOREIGN KEY (materialization_id) REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_aggs_materialization_invalidation_log', '');
CREATE INDEX continuous_aggs_materialization_invalidation_log_idx ON _timescaledb_catalog.continuous_aggs_materialization_invalidation_log (materialization_id, lowest_modified_value ASC);
/* the source of this data is the enum from the source code that lists
* the algorithms. This table is NOT dumped.
*/
CREATE TABLE _timescaledb_catalog.compression_algorithm (
  id smallint NOT NULL,
  version smallint NOT NULL,
  name name NOT NULL,
  description text,
  -- table constraints
  CONSTRAINT compression_algorithm_pkey PRIMARY KEY (id)
);
CREATE TABLE _timescaledb_catalog.hypertable_compression (
  hypertable_id integer NOT NULL,
  attname name NOT NULL,
  compression_algorithm_id smallint,
  segmentby_column_index smallint,
  orderby_column_index smallint,
  orderby_asc boolean,
  orderby_nullsfirst boolean,
  -- table constraints
  CONSTRAINT hypertable_compression_pkey PRIMARY KEY (hypertable_id, attname),
  CONSTRAINT hypertable_compression_hypertable_id_orderby_column_index_key UNIQUE (hypertable_id, orderby_column_index),
  CONSTRAINT hypertable_compression_hypertable_id_segmentby_column_index_key UNIQUE (hypertable_id, segmentby_column_index),
  CONSTRAINT hypertable_compression_compression_algorithm_id_fkey FOREIGN KEY (compression_algorithm_id) REFERENCES _timescaledb_catalog.compression_algorithm (id),
  CONSTRAINT hypertable_compression_hypertable_id_fkey FOREIGN KEY (hypertable_id) REFERENCES _timescaledb_catalog.hypertable (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable_compression', '');
CREATE TABLE _timescaledb_catalog.compression_chunk_size (
  chunk_id integer NOT NULL,
  compressed_chunk_id integer NOT NULL,
  uncompressed_heap_size bigint NOT NULL,
  uncompressed_toast_size bigint NOT NULL,
  uncompressed_index_size bigint NOT NULL,
  compressed_heap_size bigint NOT NULL,
  compressed_toast_size bigint NOT NULL,
  compressed_index_size bigint NOT NULL,
  numrows_pre_compression bigint,
  numrows_post_compression bigint,
  -- table constraints
  CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
  CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
  CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
--This stores commit decisions for 2pc remote txns. Abort decisions are never stored.
--If a PREPARE TRANSACTION fails for any data node then the entire
--frontend transaction will be rolled back and no rows will be stored.
--The frontend transaction id represents the entire distributed transaction;
--each data node will have a unique remote_transaction_id.
CREATE TABLE _timescaledb_catalog.remote_txn (
  data_node_name name, -- this exists only to allow us to clean up state on a per-node basis
  remote_transaction_id text NOT NULL,
  -- table constraints
  CONSTRAINT remote_txn_pkey PRIMARY KEY (remote_transaction_id),
  CONSTRAINT remote_txn_remote_transaction_id_check CHECK (remote_transaction_id::@extschema@.rxid IS NOT NULL)
);
CREATE INDEX remote_txn_data_node_name_idx ON _timescaledb_catalog.remote_txn (data_node_name);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.remote_txn', '');
-- This table stores information about the stage that has been completed of a
-- chunk move/copy activity
--
-- A cleanup activity can query and check if the backend is running. If the
-- backend has exited then we can commence cleanup. The cleanup
-- activity can also do a diff with the "time_start" value to ascertain if
-- the entire end-to-end activity has been running for too long.
--
-- We also track the end time of every stage. A diff with the current time
-- will give us an idea about how long the current stage has been running
--
-- The entry for a chunk move/copy activity is deleted on successful completion
--
-- We don't want to pg_dump this table's contents. A node restored from such
-- a dump could be part of a totally different multi-node setup, so carrying
-- over earlier chunk copy/move operations would make no sense.
--
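-- For illustration, a cleanup activity could flag long-running
-- operations with a query such as:
--
--   SELECT operation_id, completed_stage
--   FROM _timescaledb_catalog.chunk_copy_operation
--   WHERE pg_catalog.now() - time_start > interval '1 hour';
--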
CREATE SEQUENCE _timescaledb_catalog.chunk_copy_operation_id_seq MINVALUE 1;
CREATE TABLE _timescaledb_catalog.chunk_copy_operation (
  operation_id name NOT NULL, -- the publisher/subscriber identifier used
  backend_pid integer NOT NULL, -- the pid of the backend running this activity
  completed_stage name NOT NULL, -- the completed stage/step
  time_start timestamptz NOT NULL DEFAULT NOW(), -- start time of the activity
  chunk_id integer NOT NULL,
  compress_chunk_name name NOT NULL,
  source_node_name name NOT NULL,
  dest_node_name name NOT NULL,
  delete_on_source_node bool NOT NULL, -- TRUE for a move, FALSE for a copy
  -- table constraints
  CONSTRAINT chunk_copy_operation_pkey PRIMARY KEY (operation_id),
  CONSTRAINT chunk_copy_operation_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
);
CREATE TABLE _timescaledb_catalog.continuous_agg_migrate_plan (
  mat_hypertable_id integer NOT NULL,
  start_ts TIMESTAMPTZ NOT NULL DEFAULT pg_catalog.now(),
  end_ts TIMESTAMPTZ,
  -- table constraints
  CONSTRAINT continuous_agg_migrate_plan_pkey PRIMARY KEY (mat_hypertable_id),
  CONSTRAINT continuous_agg_migrate_plan_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.continuous_agg (mat_hypertable_id)
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg_migrate_plan', '');
CREATE TABLE _timescaledb_catalog.continuous_agg_migrate_plan_step (
  mat_hypertable_id integer NOT NULL,
  step_id serial NOT NULL,
  status TEXT NOT NULL DEFAULT 'NOT STARTED', -- NOT STARTED, STARTED, FINISHED, CANCELED
  start_ts TIMESTAMPTZ,
  end_ts TIMESTAMPTZ,
  type TEXT NOT NULL,
  config JSONB,
  -- table constraints
  CONSTRAINT continuous_agg_migrate_plan_step_pkey PRIMARY KEY (mat_hypertable_id, step_id),
  CONSTRAINT continuous_agg_migrate_plan_step_mat_hypertable_id_fkey FOREIGN KEY (mat_hypertable_id) REFERENCES _timescaledb_catalog.continuous_agg_migrate_plan (mat_hypertable_id) ON DELETE CASCADE,
  CONSTRAINT continuous_agg_migrate_plan_step_check CHECK (start_ts <= end_ts),
  CONSTRAINT continuous_agg_migrate_plan_step_check2 CHECK (type IN ('CREATE NEW CAGG', 'DISABLE POLICIES', 'COPY POLICIES', 'ENABLE POLICIES', 'SAVE WATERMARK', 'REFRESH NEW CAGG', 'COPY DATA', 'OVERRIDE CAGG', 'DROP OLD CAGG'))
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.continuous_agg_migrate_plan_step', '');
SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.continuous_agg_migrate_plan_step', 'step_id'), '');
CREATE TABLE _timescaledb_internal.job_errors (
  job_id integer NOT NULL,
  pid integer,
  start_time timestamptz,
  finish_time timestamptz,
  error_data jsonb
);
SELECT pg_catalog.pg_extension_config_dump('_timescaledb_internal.job_errors', '');
-- Set table permissions
-- We need to grant SELECT to PUBLIC for all tables even those not
-- marked as being dumped because pg_dump will try to access all
-- tables initially to detect inheritance chains and then decide
-- which objects actually need to be dumped.
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_catalog TO PUBLIC;
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_config TO PUBLIC;
GRANT SELECT ON ALL TABLES IN SCHEMA _timescaledb_internal TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_catalog TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_config TO PUBLIC;
GRANT SELECT ON ALL SEQUENCES IN SCHEMA _timescaledb_internal TO PUBLIC;
-- We want to restrict access to the job errors to only work through
-- the job_errors view.
REVOKE ALL ON _timescaledb_internal.job_errors FROM PUBLIC;