mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-15 18:13:18 +08:00
This patch allows setting chunk_time_interval when creating a continuous agg and allows changing it with ALTER MATERIALIZED VIEW. Previously you had to create the cagg with `WITH NO DATA` and then call `set_chunk_time_interval` followed by manually refreshing.
2163 lines
86 KiB
Plaintext
2163 lines
86 KiB
Plaintext
-- This file and its contents are licensed under the Timescale License.
|
|
-- Please see the included NOTICE for copyright information and
|
|
-- LICENSE-TIMESCALE for a copy of the license.
|
|
-- Set this variable to avoid using a hard-coded path each time query
|
|
-- results are compared
|
|
\set QUERY_RESULT_TEST_EQUAL_RELPATH '../../../../test/sql/include/query_result_test_equal.sql'
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
--DDL commands on continuous aggregates
|
|
CREATE TABLE conditions (
|
|
timec TIMESTAMPTZ NOT NULL,
|
|
location TEXT NOT NULL,
|
|
temperature integer NULL,
|
|
humidity DOUBLE PRECISION NULL,
|
|
timemeasure TIMESTAMPTZ,
|
|
timeinterval INTERVAL
|
|
);
|
|
SELECT table_name FROM create_hypertable('conditions', 'timec');
|
|
table_name
|
|
------------
|
|
conditions
|
|
(1 row)
|
|
|
|
-- schema tests
|
|
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
|
|
SET timezone TO 'UTC+8';
|
|
-- drop if the tablespace1 and/or tablespace2 exists
|
|
SET client_min_messages TO error;
|
|
DROP TABLESPACE IF EXISTS tablespace1;
|
|
DROP TABLESPACE IF EXISTS tablespace2;
|
|
RESET client_min_messages;
|
|
CREATE TABLESPACE tablespace1 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE1_PATH;
|
|
CREATE TABLESPACE tablespace2 OWNER :ROLE_DEFAULT_PERM_USER LOCATION :TEST_TABLESPACE2_PATH;
|
|
CREATE SCHEMA rename_schema;
|
|
GRANT ALL ON SCHEMA rename_schema TO :ROLE_DEFAULT_PERM_USER;
|
|
CREATE SCHEMA test_schema AUTHORIZATION :ROLE_DEFAULT_PERM_USER;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
CREATE TABLE foo(time TIMESTAMPTZ NOT NULL, data INTEGER);
|
|
SELECT create_hypertable('foo', 'time');
|
|
create_hypertable
|
|
-------------------
|
|
(2,public,foo,t)
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW rename_test_old
|
|
WITH ( timescaledb.continuous, timescaledb.materialized_only=true)
|
|
AS SELECT time_bucket('1week', time), COUNT(data)
|
|
FROM foo
|
|
GROUP BY 1 WITH NO DATA;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+-----------------+-----------------------+-------------------
|
|
public | rename_test_old | _timescaledb_internal | _partial_view_3
|
|
(1 row)
|
|
|
|
ALTER TABLE rename_test_old RENAME TO rename_test;
|
|
ALTER TABLE rename_test SET SCHEMA test_schema;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+-----------------------+-------------------
|
|
test_schema | rename_test | _timescaledb_internal | _partial_view_3
|
|
(1 row)
|
|
|
|
ALTER MATERIALIZED VIEW test_schema.rename_test SET SCHEMA rename_schema;
|
|
DROP SCHEMA test_schema;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+-----------------------+-------------------
|
|
rename_schema | rename_test | _timescaledb_internal | _partial_view_3
|
|
(1 row)
|
|
|
|
SELECT ca.raw_hypertable_id as "RAW_HYPERTABLE_ID",
|
|
h.schema_name AS "MAT_SCHEMA_NAME",
|
|
h.table_name AS "MAT_TABLE_NAME",
|
|
partial_view_name as "PART_VIEW_NAME",
|
|
partial_view_schema as "PART_VIEW_SCHEMA",
|
|
direct_view_name as "DIR_VIEW_NAME",
|
|
direct_view_schema as "DIR_VIEW_SCHEMA"
|
|
FROM _timescaledb_catalog.continuous_agg ca
|
|
INNER JOIN _timescaledb_catalog.hypertable h ON(h.id = ca.mat_hypertable_id)
|
|
WHERE user_view_name = 'rename_test'
|
|
\gset
|
|
RESET ROLE;
|
|
SELECT current_user;
|
|
current_user
|
|
--------------------
|
|
cluster_super_user
|
|
(1 row)
|
|
|
|
ALTER VIEW :"PART_VIEW_SCHEMA".:"PART_VIEW_NAME" SET SCHEMA public;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+---------------------+-------------------
|
|
rename_schema | rename_test | public | _partial_view_3
|
|
(1 row)
|
|
|
|
--alter direct view schema
|
|
SELECT user_view_schema, user_view_name, direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | direct_view_schema | direct_view_name
|
|
------------------+----------------+-----------------------+------------------
|
|
rename_schema | rename_test | _timescaledb_internal | _direct_view_3
|
|
(1 row)
|
|
|
|
RESET ROLE;
|
|
SELECT current_user;
|
|
current_user
|
|
--------------------
|
|
cluster_super_user
|
|
(1 row)
|
|
|
|
ALTER VIEW :"DIR_VIEW_SCHEMA".:"DIR_VIEW_NAME" SET SCHEMA public;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
|
|
direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name
|
|
------------------+----------------+---------------------+-------------------+--------------------+------------------
|
|
rename_schema | rename_test | public | _partial_view_3 | public | _direct_view_3
|
|
(1 row)
|
|
|
|
RESET ROLE;
|
|
SELECT current_user;
|
|
current_user
|
|
--------------------
|
|
cluster_super_user
|
|
(1 row)
|
|
|
|
ALTER SCHEMA rename_schema RENAME TO new_name_schema;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
|
|
direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name
|
|
------------------+----------------+---------------------+-------------------+--------------------+------------------
|
|
new_name_schema | rename_test | public | _partial_view_3 | public | _direct_view_3
|
|
(1 row)
|
|
|
|
ALTER VIEW :"PART_VIEW_NAME" SET SCHEMA new_name_schema;
|
|
ALTER VIEW :"DIR_VIEW_NAME" SET SCHEMA new_name_schema;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
|
|
direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name
|
|
------------------+----------------+---------------------+-------------------+--------------------+------------------
|
|
new_name_schema | rename_test | new_name_schema | _partial_view_3 | new_name_schema | _direct_view_3
|
|
(1 row)
|
|
|
|
RESET ROLE;
|
|
SELECT current_user;
|
|
current_user
|
|
--------------------
|
|
cluster_super_user
|
|
(1 row)
|
|
|
|
ALTER SCHEMA new_name_schema RENAME TO foo_name_schema;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+---------------------+-------------------
|
|
foo_name_schema | rename_test | foo_name_schema | _partial_view_3
|
|
(1 row)
|
|
|
|
ALTER MATERIALIZED VIEW foo_name_schema.rename_test SET SCHEMA public;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+---------------------+-------------------
|
|
public | rename_test | foo_name_schema | _partial_view_3
|
|
(1 row)
|
|
|
|
RESET ROLE;
|
|
SELECT current_user;
|
|
current_user
|
|
--------------------
|
|
cluster_super_user
|
|
(1 row)
|
|
|
|
ALTER SCHEMA foo_name_schema RENAME TO rename_schema;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SET client_min_messages TO NOTICE;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+----------------+---------------------+-------------------
|
|
public | rename_test | rename_schema | _partial_view_3
|
|
(1 row)
|
|
|
|
ALTER MATERIALIZED VIEW rename_test RENAME TO rename_c_aggregate;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name
|
|
------------------+--------------------+---------------------+-------------------
|
|
public | rename_c_aggregate | rename_schema | _partial_view_3
|
|
(1 row)
|
|
|
|
SELECT * FROM rename_c_aggregate;
|
|
time_bucket | count
|
|
-------------+-------
|
|
(0 rows)
|
|
|
|
ALTER VIEW rename_schema.:"PART_VIEW_NAME" RENAME TO partial_view;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
|
|
direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name
|
|
------------------+--------------------+---------------------+-------------------+--------------------+------------------
|
|
public | rename_c_aggregate | rename_schema | partial_view | rename_schema | _direct_view_3
|
|
(1 row)
|
|
|
|
--rename direct view
|
|
ALTER VIEW rename_schema.:"DIR_VIEW_NAME" RENAME TO direct_view;
|
|
SELECT user_view_schema, user_view_name, partial_view_schema, partial_view_name,
|
|
direct_view_schema, direct_view_name
|
|
FROM _timescaledb_catalog.continuous_agg;
|
|
user_view_schema | user_view_name | partial_view_schema | partial_view_name | direct_view_schema | direct_view_name
|
|
------------------+--------------------+---------------------+-------------------+--------------------+------------------
|
|
public | rename_c_aggregate | rename_schema | partial_view | rename_schema | direct_view
|
|
(1 row)
|
|
|
|
-- drop_chunks tests
|
|
DROP TABLE conditions CASCADE;
|
|
DROP TABLE foo CASCADE;
|
|
NOTICE: drop cascades to 2 other objects
|
|
CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
|
|
SELECT hypertable_id AS drop_chunks_table_id
|
|
FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset
|
|
CREATE OR REPLACE FUNCTION integer_now_test() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$;
|
|
SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test');
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW drop_chunks_view
|
|
WITH (
|
|
timescaledb.continuous,
|
|
timescaledb.materialized_only=true
|
|
)
|
|
AS SELECT time_bucket('5', time), COUNT(data)
|
|
FROM drop_chunks_table
|
|
GROUP BY 1 WITH NO DATA;
|
|
SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table,
|
|
schema_name AS drop_chunks_mat_schema,
|
|
table_name AS drop_chunks_mat_table_name
|
|
FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
|
|
WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_id
|
|
AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
|
|
-- create 3 chunks, with 3 time bucket
|
|
INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 29) AS i;
|
|
-- Only refresh up to bucket 15 initially. Matches the old refresh
|
|
-- behavior that didn't materialize everything
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15);
|
|
SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
|
|
count
|
|
-------
|
|
3
|
|
(1 row)
|
|
|
|
SELECT count(c) FROM show_chunks('drop_chunks_view') AS c;
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT * FROM drop_chunks_view ORDER BY 1;
|
|
time_bucket | count
|
|
-------------+-------
|
|
0 | 5
|
|
5 | 5
|
|
10 | 5
|
|
(3 rows)
|
|
|
|
-- cannot drop directly from the materialization table without specifying
|
|
-- cont. aggregate view name explicitly
|
|
\set ON_ERROR_STOP 0
|
|
SELECT drop_chunks(:'drop_chunks_mat_table',
|
|
newer_than => -20,
|
|
verbose => true);
|
|
ERROR: operation not supported on materialized hypertable
|
|
\set ON_ERROR_STOP 1
|
|
SELECT count(c) FROM show_chunks('drop_chunks_table') AS c;
|
|
count
|
|
-------
|
|
3
|
|
(1 row)
|
|
|
|
SELECT count(c) FROM show_chunks('drop_chunks_view') AS c;
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT * FROM drop_chunks_view ORDER BY 1;
|
|
time_bucket | count
|
|
-------------+-------
|
|
0 | 5
|
|
5 | 5
|
|
10 | 5
|
|
(3 rows)
|
|
|
|
-- drop chunks when the chunksize and time_bucket aren't aligned
|
|
DROP TABLE drop_chunks_table CASCADE;
|
|
NOTICE: drop cascades to 2 other objects
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_5_4_chunk
|
|
CREATE TABLE drop_chunks_table_u(time BIGINT NOT NULL, data INTEGER);
|
|
SELECT hypertable_id AS drop_chunks_table_u_id
|
|
FROM create_hypertable('drop_chunks_table_u', 'time', chunk_time_interval => 7) \gset
|
|
CREATE OR REPLACE FUNCTION integer_now_test1() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table_u $$;
|
|
SELECT set_integer_now_func('drop_chunks_table_u', 'integer_now_test1');
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW drop_chunks_view
|
|
WITH (
|
|
timescaledb.continuous,
|
|
timescaledb.materialized_only=true
|
|
)
|
|
AS SELECT time_bucket('3', time), COUNT(data)
|
|
FROM drop_chunks_table_u
|
|
GROUP BY 1 WITH NO DATA;
|
|
SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_table_u,
|
|
schema_name AS drop_chunks_mat_schema,
|
|
table_name AS drop_chunks_mat_table_u_name
|
|
FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
|
|
WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_u_id
|
|
AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
|
|
-- create 3 chunks, with 3 time bucket
|
|
INSERT INTO drop_chunks_table_u SELECT i, i FROM generate_series(0, 21) AS i;
|
|
-- Refresh up to bucket 15 to match old materializer behavior
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 0, 15);
|
|
SELECT count(c) FROM show_chunks('drop_chunks_table_u') AS c;
|
|
count
|
|
-------
|
|
4
|
|
(1 row)
|
|
|
|
SELECT count(c) FROM show_chunks('drop_chunks_view') AS c;
|
|
count
|
|
-------
|
|
1
|
|
(1 row)
|
|
|
|
SELECT * FROM drop_chunks_view ORDER BY 1;
|
|
time_bucket | count
|
|
-------------+-------
|
|
0 | 3
|
|
3 | 3
|
|
6 | 3
|
|
9 | 3
|
|
12 | 3
|
|
(5 rows)
|
|
|
|
-- TRUNCATE test
|
|
-- Can truncate regular hypertables that have caggs
|
|
TRUNCATE drop_chunks_table_u;
|
|
\set ON_ERROR_STOP 0
|
|
-- Can't truncate materialized hypertables directly
|
|
TRUNCATE :drop_chunks_mat_table_u;
|
|
ERROR: cannot TRUNCATE a hypertable underlying a continuous aggregate
|
|
\set ON_ERROR_STOP 1
|
|
-- Check that we don't interfere with TRUNCATE of normal table and
|
|
-- partitioned table
|
|
CREATE TABLE truncate (value int);
|
|
INSERT INTO truncate VALUES (1), (2);
|
|
TRUNCATE truncate;
|
|
SELECT * FROM truncate;
|
|
value
|
|
-------
|
|
(0 rows)
|
|
|
|
CREATE TABLE truncate_partitioned (value int)
|
|
PARTITION BY RANGE(value);
|
|
CREATE TABLE truncate_p1 PARTITION OF truncate_partitioned
|
|
FOR VALUES FROM (1) TO (3);
|
|
INSERT INTO truncate_partitioned VALUES (1), (2);
|
|
TRUNCATE truncate_partitioned;
|
|
SELECT * FROM truncate_partitioned;
|
|
value
|
|
-------
|
|
(0 rows)
|
|
|
|
-- ALTER TABLE tests
|
|
\set ON_ERROR_STOP 0
|
|
-- test a variety of ALTER TABLE statements
|
|
ALTER TABLE :drop_chunks_mat_table_u RENAME time_bucket TO bad_name;
|
|
ERROR: renaming columns on materialization tables is not supported
|
|
ALTER TABLE :drop_chunks_mat_table_u ADD UNIQUE(time_bucket);
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u SET UNLOGGED;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u ENABLE ROW LEVEL SECURITY;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u ADD COLUMN fizzle INTEGER;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u DROP COLUMN time_bucket;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket DROP NOT NULL;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET DEFAULT 1;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u ALTER COLUMN time_bucket SET STORAGE EXTERNAL;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u DISABLE TRIGGER ALL;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u SET TABLESPACE foo;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u NOT OF;
|
|
ERROR: operation not supported on materialization tables
|
|
ALTER TABLE :drop_chunks_mat_table_u OWNER TO CURRENT_USER;
|
|
ERROR: operation not supported on materialization tables
|
|
\set ON_ERROR_STOP 1
|
|
ALTER TABLE :drop_chunks_mat_table_u SET SCHEMA public;
|
|
ALTER TABLE :drop_chunks_mat_table_u_name RENAME TO new_name;
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
SET client_min_messages TO NOTICE;
|
|
SELECT * FROM new_name;
|
|
time_bucket | count
|
|
-------------+-------
|
|
0 | 3
|
|
3 | 3
|
|
6 | 3
|
|
9 | 3
|
|
12 | 3
|
|
(5 rows)
|
|
|
|
SELECT * FROM drop_chunks_view ORDER BY 1;
|
|
time_bucket | count
|
|
-------------+-------
|
|
0 | 3
|
|
3 | 3
|
|
6 | 3
|
|
9 | 3
|
|
12 | 3
|
|
(5 rows)
|
|
|
|
\set ON_ERROR_STOP 0
|
|
-- no continuous aggregates on a continuous aggregate materialization table
|
|
CREATE MATERIALIZED VIEW new_name_view
|
|
WITH (
|
|
timescaledb.continuous,
|
|
timescaledb.materialized_only=true
|
|
)
|
|
AS SELECT time_bucket('6', time_bucket), COUNT("count")
|
|
FROM new_name
|
|
GROUP BY 1 WITH NO DATA;
|
|
ERROR: hypertable is a continuous aggregate materialization table
|
|
\set ON_ERROR_STOP 1
|
|
CREATE TABLE metrics(time timestamptz NOT NULL, device_id int, v1 float, v2 float);
|
|
SELECT create_hypertable('metrics','time');
|
|
create_hypertable
|
|
----------------------
|
|
(8,public,metrics,t)
|
|
(1 row)
|
|
|
|
INSERT INTO metrics SELECT generate_series('2000-01-01'::timestamptz,'2000-01-10','1m'),1,0.25,0.75;
|
|
-- check expressions in view definition
|
|
CREATE MATERIALIZED VIEW cagg_expr
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=true)
|
|
AS
|
|
SELECT
|
|
time_bucket('1d', time) AS time,
|
|
'Const'::text AS Const,
|
|
4.3::numeric AS "numeric",
|
|
first(metrics,time),
|
|
CASE WHEN true THEN 'foo' ELSE 'bar' END,
|
|
COALESCE(NULL,'coalesce'),
|
|
avg(v1) + avg(v2) AS avg1,
|
|
avg(v1+v2) AS avg2
|
|
FROM metrics
|
|
GROUP BY 1 WITH NO DATA;
|
|
CALL refresh_continuous_aggregate('cagg_expr', NULL, NULL);
|
|
SELECT * FROM cagg_expr ORDER BY time LIMIT 5;
|
|
time | const | numeric | first | case | coalesce | avg1 | avg2
|
|
------------------------------+-------+---------+----------------------------------------------+------+----------+------+------
|
|
Fri Dec 31 16:00:00 1999 UTC | Const | 4.3 | ("Sat Jan 01 00:00:00 2000 UTC",1,0.25,0.75) | foo | coalesce | 1 | 1
|
|
Sat Jan 01 16:00:00 2000 UTC | Const | 4.3 | ("Sat Jan 01 16:00:00 2000 UTC",1,0.25,0.75) | foo | coalesce | 1 | 1
|
|
Sun Jan 02 16:00:00 2000 UTC | Const | 4.3 | ("Sun Jan 02 16:00:00 2000 UTC",1,0.25,0.75) | foo | coalesce | 1 | 1
|
|
Mon Jan 03 16:00:00 2000 UTC | Const | 4.3 | ("Mon Jan 03 16:00:00 2000 UTC",1,0.25,0.75) | foo | coalesce | 1 | 1
|
|
Tue Jan 04 16:00:00 2000 UTC | Const | 4.3 | ("Tue Jan 04 16:00:00 2000 UTC",1,0.25,0.75) | foo | coalesce | 1 | 1
|
|
(5 rows)
|
|
|
|
--test materialization of invalidation before drop
|
|
DROP TABLE IF EXISTS drop_chunks_table CASCADE;
|
|
NOTICE: table "drop_chunks_table" does not exist, skipping
|
|
DROP TABLE IF EXISTS drop_chunks_table_u CASCADE;
|
|
NOTICE: drop cascades to 2 other objects
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_7_9_chunk
|
|
CREATE TABLE drop_chunks_table(time BIGINT NOT NULL, data INTEGER);
|
|
SELECT hypertable_id AS drop_chunks_table_nid
|
|
FROM create_hypertable('drop_chunks_table', 'time', chunk_time_interval => 10) \gset
|
|
CREATE OR REPLACE FUNCTION integer_now_test2() returns bigint LANGUAGE SQL STABLE as $$ SELECT coalesce(max(time), bigint '0') FROM drop_chunks_table $$;
|
|
SELECT set_integer_now_func('drop_chunks_table', 'integer_now_test2');
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW drop_chunks_view
|
|
WITH (
|
|
timescaledb.continuous,
|
|
timescaledb.materialized_only=true
|
|
)
|
|
AS SELECT time_bucket('5', time), max(data)
|
|
FROM drop_chunks_table
|
|
GROUP BY 1 WITH NO DATA;
|
|
INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i;
|
|
--dropping chunks will process the invalidations
|
|
SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9));
|
|
drop_chunks
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_10_13_chunk
|
|
(1 row)
|
|
|
|
SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
|
|
time | data
|
|
------+------
|
|
10 | 10
|
|
(1 row)
|
|
|
|
INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(20, 35) AS i;
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 10, 40);
|
|
--this will be seen after the drop its within the invalidation window and will be dropped
|
|
INSERT INTO drop_chunks_table VALUES (26, 100);
|
|
--this will not be processed by the drop since chunk 30-39 is not dropped but will be seen after refresh
|
|
--shows that the drop doesn't do more work than necessary
|
|
INSERT INTO drop_chunks_table VALUES (31, 200);
|
|
--move the time up to 39
|
|
INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(35, 39) AS i;
|
|
--the chunks and ranges we have thus far
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table';
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_10_14_chunk | 10 | 20
|
|
_hyper_10_15_chunk | 20 | 30
|
|
_hyper_10_16_chunk | 30 | 40
|
|
(3 rows)
|
|
|
|
--the invalidation on 25 not yet seen
|
|
SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC;
|
|
time_bucket | max
|
|
-------------+-----
|
|
35 | 35
|
|
30 | 34
|
|
25 | 29
|
|
20 | 24
|
|
15 | 19
|
|
10 | 14
|
|
(6 rows)
|
|
|
|
--refresh to process the invalidations and then drop
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', NULL, (integer_now_test2()-9));
|
|
SELECT drop_chunks('drop_chunks_table', older_than => (integer_now_test2()-9));
|
|
drop_chunks
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_10_14_chunk
|
|
_timescaledb_internal._hyper_10_15_chunk
|
|
(2 rows)
|
|
|
|
--new values on 25 now seen in view
|
|
SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC;
|
|
time_bucket | max
|
|
-------------+-----
|
|
35 | 35
|
|
30 | 34
|
|
25 | 100
|
|
20 | 24
|
|
15 | 19
|
|
10 | 14
|
|
(6 rows)
|
|
|
|
--earliest datapoint now in table
|
|
SELECT * FROM drop_chunks_table ORDER BY time ASC limit 1;
|
|
time | data
|
|
------+------
|
|
30 | 30
|
|
(1 row)
|
|
|
|
--chunks are removed
|
|
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk WHERE dropped;
|
|
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
|
|
----+---------------+-------------+------------+---------------------+---------+--------+-----------
|
|
(0 rows)
|
|
|
|
--still see data in the view
|
|
SELECT * FROM drop_chunks_view WHERE time_bucket < (integer_now_test2()-9) ORDER BY time_bucket DESC;
|
|
time_bucket | max
|
|
-------------+-----
|
|
25 | 100
|
|
20 | 24
|
|
15 | 19
|
|
10 | 14
|
|
(4 rows)
|
|
|
|
--no data but covers dropped chunks
|
|
SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC;
|
|
time | data
|
|
------+------
|
|
(0 rows)
|
|
|
|
--recreate the dropped chunk
|
|
INSERT INTO drop_chunks_table SELECT i, i FROM generate_series(0, 20) AS i;
|
|
--see data from recreated region
|
|
SELECT * FROM drop_chunks_table WHERE time < (integer_now_test2()-9) ORDER BY time DESC;
|
|
time | data
|
|
------+------
|
|
20 | 20
|
|
19 | 19
|
|
18 | 18
|
|
17 | 17
|
|
16 | 16
|
|
15 | 15
|
|
14 | 14
|
|
13 | 13
|
|
12 | 12
|
|
11 | 11
|
|
10 | 10
|
|
9 | 9
|
|
8 | 8
|
|
7 | 7
|
|
6 | 6
|
|
5 | 5
|
|
4 | 4
|
|
3 | 3
|
|
2 | 2
|
|
1 | 1
|
|
0 | 0
|
|
(21 rows)
|
|
|
|
--should show chunk with old name and old ranges
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table'
|
|
ORDER BY range_start_integer;
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_10_18_chunk | 0 | 10
|
|
_hyper_10_19_chunk | 10 | 20
|
|
_hyper_10_20_chunk | 20 | 30
|
|
_hyper_10_16_chunk | 30 | 40
|
|
(4 rows)
|
|
|
|
--We dropped everything up to the bucket starting at 30 and then
|
|
--inserted new data up to and including time 20. Therefore, the
|
|
--dropped data should stay the same as long as we only refresh
|
|
--buckets that have non-dropped data.
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 30, 40);
|
|
SELECT * FROM drop_chunks_view ORDER BY time_bucket DESC;
|
|
time_bucket | max
|
|
-------------+-----
|
|
35 | 39
|
|
30 | 200
|
|
25 | 100
|
|
20 | 24
|
|
15 | 19
|
|
10 | 14
|
|
(6 rows)
|
|
|
|
SELECT format('%I.%I', schema_name, table_name) AS drop_chunks_mat_tablen,
|
|
schema_name AS drop_chunks_mat_schema,
|
|
table_name AS drop_chunks_mat_table_name
|
|
FROM _timescaledb_catalog.hypertable, _timescaledb_catalog.continuous_agg
|
|
WHERE _timescaledb_catalog.continuous_agg.raw_hypertable_id = :drop_chunks_table_nid
|
|
AND _timescaledb_catalog.hypertable.id = _timescaledb_catalog.continuous_agg.mat_hypertable_id \gset
|
|
-- TEST drop chunks from continuous aggregates by specifying view name
|
|
SELECT drop_chunks('drop_chunks_view',
|
|
newer_than => -20,
|
|
verbose => true);
|
|
INFO: dropping chunk _timescaledb_internal._hyper_11_17_chunk
|
|
drop_chunks
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_11_17_chunk
|
|
(1 row)
|
|
|
|
-- Test that we cannot drop chunks when specifying materialized
|
|
-- hypertable
|
|
INSERT INTO drop_chunks_table SELECT generate_series(45, 55), 500;
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 45, 55);
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = :'drop_chunks_mat_table_name' ORDER BY range_start_integer;
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_11_23_chunk | 0 | 100
|
|
(1 row)
|
|
|
|
\set ON_ERROR_STOP 0
|
|
\set VERBOSITY default
|
|
SELECT drop_chunks(:'drop_chunks_mat_tablen', older_than => 60);
|
|
ERROR: operation not supported on materialized hypertable
|
|
DETAIL: Hypertable "_materialized_hypertable_11" is a materialized hypertable.
|
|
HINT: Try the operation on the continuous aggregate instead.
|
|
\set VERBOSITY terse
|
|
\set ON_ERROR_STOP 1
|
|
-----------------------------------------------------------------
|
|
-- Test that refresh_continuous_aggregate on chunk will refresh,
|
|
-- but only in the regions covered by the show chunks.
|
|
-----------------------------------------------------------------
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table'
|
|
ORDER BY 2,3;
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_10_18_chunk | 0 | 10
|
|
_hyper_10_19_chunk | 10 | 20
|
|
_hyper_10_20_chunk | 20 | 30
|
|
_hyper_10_16_chunk | 30 | 40
|
|
_hyper_10_21_chunk | 40 | 50
|
|
_hyper_10_22_chunk | 50 | 60
|
|
(6 rows)
|
|
|
|
-- Pick the second chunk as the one to drop
|
|
WITH numbered_chunks AS (
|
|
SELECT row_number() OVER (ORDER BY range_start_integer), chunk_schema, chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table'
|
|
ORDER BY 1
|
|
)
|
|
SELECT format('%I.%I', chunk_schema, chunk_name) AS chunk_to_drop, range_start_integer, range_end_integer
|
|
FROM numbered_chunks
|
|
WHERE row_number = 2 \gset
|
|
-- There's data in the table for the chunk/range we will drop
|
|
SELECT * FROM drop_chunks_table
|
|
WHERE time >= :range_start_integer
|
|
AND time < :range_end_integer
|
|
ORDER BY 1;
|
|
time | data
|
|
------+------
|
|
10 | 10
|
|
11 | 11
|
|
12 | 12
|
|
13 | 13
|
|
14 | 14
|
|
15 | 15
|
|
16 | 16
|
|
17 | 17
|
|
18 | 18
|
|
19 | 19
|
|
(10 rows)
|
|
|
|
-- Make sure there is also data in the continuous aggregate
|
|
-- CARE:
|
|
-- Note that this behaviour of dropping the materialization table chunks and expecting a refresh
|
|
-- that overlaps that time range to NOT update those chunks is undefined.
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 0, 50);
|
|
SELECT * FROM drop_chunks_view
|
|
ORDER BY 1;
|
|
time_bucket | max
|
|
-------------+-----
|
|
0 | 4
|
|
5 | 9
|
|
10 | 14
|
|
15 | 19
|
|
20 | 20
|
|
45 | 500
|
|
50 | 500
|
|
(7 rows)
|
|
|
|
-- Drop the second chunk, to leave a gap in the data
|
|
DROP TABLE :chunk_to_drop;
|
|
-- Verify that the second chunk is dropped
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table'
|
|
ORDER BY 2,3;
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_10_18_chunk | 0 | 10
|
|
_hyper_10_20_chunk | 20 | 30
|
|
_hyper_10_16_chunk | 30 | 40
|
|
_hyper_10_21_chunk | 40 | 50
|
|
_hyper_10_22_chunk | 50 | 60
|
|
(5 rows)
|
|
|
|
-- Data is no longer in the table but still in the view
|
|
SELECT * FROM drop_chunks_table
|
|
WHERE time >= :range_start_integer
|
|
AND time < :range_end_integer
|
|
ORDER BY 1;
|
|
time | data
|
|
------+------
|
|
(0 rows)
|
|
|
|
SELECT * FROM drop_chunks_view
|
|
WHERE time_bucket >= :range_start_integer
|
|
AND time_bucket < :range_end_integer
|
|
ORDER BY 1;
|
|
time_bucket | max
|
|
-------------+-----
|
|
10 | 14
|
|
15 | 19
|
|
(2 rows)
|
|
|
|
-- Insert a large value in one of the chunks that will be dropped
|
|
INSERT INTO drop_chunks_table VALUES (:range_start_integer-1, 100);
|
|
-- Now refresh and drop the two adjecent chunks
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', NULL, 30);
|
|
SELECT drop_chunks('drop_chunks_table', older_than=>30);
|
|
drop_chunks
|
|
------------------------------------------
|
|
_timescaledb_internal._hyper_10_18_chunk
|
|
_timescaledb_internal._hyper_10_20_chunk
|
|
(2 rows)
|
|
|
|
-- Verify that the chunks are dropped
|
|
SELECT chunk_name, range_start_integer, range_end_integer
|
|
FROM timescaledb_information.chunks
|
|
WHERE hypertable_name = 'drop_chunks_table'
|
|
ORDER BY 2,3;
|
|
chunk_name | range_start_integer | range_end_integer
|
|
--------------------+---------------------+-------------------
|
|
_hyper_10_16_chunk | 30 | 40
|
|
_hyper_10_21_chunk | 40 | 50
|
|
_hyper_10_22_chunk | 50 | 60
|
|
(3 rows)
|
|
|
|
-- The continuous aggregate should be refreshed in the regions covered
|
|
-- by the dropped chunks, but not in the "gap" region, i.e., the
|
|
-- region of the chunk that was dropped via DROP TABLE.
|
|
SELECT * FROM drop_chunks_view
|
|
ORDER BY 1;
|
|
time_bucket | max
|
|
-------------+-----
|
|
0 | 4
|
|
5 | 100
|
|
20 | 20
|
|
45 | 500
|
|
50 | 500
|
|
(5 rows)
|
|
|
|
-- Now refresh in the region of the first two dropped chunks
|
|
CALL refresh_continuous_aggregate('drop_chunks_view', 0, :range_end_integer);
|
|
-- Aggregate data in the refreshed range should no longer exist since
|
|
-- the underlying data was dropped.
|
|
SELECT * FROM drop_chunks_view
|
|
ORDER BY 1;
|
|
time_bucket | max
|
|
-------------+-----
|
|
20 | 20
|
|
45 | 500
|
|
50 | 500
|
|
(3 rows)
|
|
|
|
--------------------------------------------------------------------
|
|
-- Check that we can create a materialized table in a tablespace. We
|
|
-- create one with tablespace and one without and compare them.
|
|
CREATE VIEW cagg_info AS
|
|
WITH
|
|
caggs AS (
|
|
SELECT format('%I.%I', user_view_schema, user_view_name)::regclass AS user_view,
|
|
format('%I.%I', direct_view_schema, direct_view_name)::regclass AS direct_view,
|
|
format('%I.%I', partial_view_schema, partial_view_name)::regclass AS partial_view,
|
|
format('%I.%I', ht.schema_name, ht.table_name)::regclass AS mat_relid
|
|
FROM _timescaledb_catalog.hypertable ht,
|
|
_timescaledb_catalog.continuous_agg cagg
|
|
WHERE ht.id = cagg.mat_hypertable_id
|
|
)
|
|
SELECT user_view,
|
|
pg_get_userbyid(relowner) AS user_view_owner,
|
|
relname AS mat_table,
|
|
(SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = mat_relid) AS mat_table_owner,
|
|
direct_view,
|
|
(SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = direct_view) AS direct_view_owner,
|
|
partial_view,
|
|
(SELECT pg_get_userbyid(relowner) FROM pg_class WHERE oid = partial_view) AS partial_view_owner,
|
|
(SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace
|
|
FROM pg_class JOIN caggs ON pg_class.oid = caggs.mat_relid;
|
|
GRANT SELECT ON cagg_info TO PUBLIC;
|
|
CREATE VIEW chunk_info AS
|
|
SELECT ht.schema_name, ht.table_name, relname AS chunk_name,
|
|
(SELECT spcname FROM pg_tablespace WHERE oid = reltablespace) AS tablespace
|
|
FROM pg_class c,
|
|
_timescaledb_catalog.hypertable ht,
|
|
_timescaledb_catalog.chunk ch
|
|
WHERE ch.table_name = c.relname AND ht.id = ch.hypertable_id;
|
|
CREATE TABLE whatever(time BIGINT NOT NULL, data INTEGER);
|
|
SELECT hypertable_id AS whatever_nid
|
|
FROM create_hypertable('whatever', 'time', chunk_time_interval => 10)
|
|
\gset
|
|
SELECT set_integer_now_func('whatever', 'integer_now_test');
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW whatever_view_1
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
|
SELECT time_bucket('5', time), COUNT(data)
|
|
FROM whatever GROUP BY 1 WITH NO DATA;
|
|
CREATE MATERIALIZED VIEW whatever_view_2
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=true)
|
|
TABLESPACE tablespace1 AS
|
|
SELECT time_bucket('5', time), COUNT(data)
|
|
FROM whatever GROUP BY 1 WITH NO DATA;
|
|
INSERT INTO whatever SELECT i, i FROM generate_series(0, 29) AS i;
|
|
CALL refresh_continuous_aggregate('whatever_view_1', NULL, NULL);
|
|
CALL refresh_continuous_aggregate('whatever_view_2', NULL, NULL);
|
|
SELECT user_view,
|
|
mat_table,
|
|
cagg_info.tablespace AS mat_tablespace,
|
|
chunk_name,
|
|
chunk_info.tablespace AS chunk_tablespace
|
|
FROM cagg_info, chunk_info
|
|
WHERE mat_table::text = table_name
|
|
AND user_view::text LIKE 'whatever_view%';
|
|
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
|
|
-----------------+-----------------------------+----------------+--------------------+------------------
|
|
whatever_view_1 | _materialized_hypertable_13 | | _hyper_13_27_chunk |
|
|
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
|
|
(2 rows)
|
|
|
|
ALTER MATERIALIZED VIEW whatever_view_1 SET TABLESPACE tablespace2;
|
|
SELECT user_view,
|
|
mat_table,
|
|
cagg_info.tablespace AS mat_tablespace,
|
|
chunk_name,
|
|
chunk_info.tablespace AS chunk_tablespace
|
|
FROM cagg_info, chunk_info
|
|
WHERE mat_table::text = table_name
|
|
AND user_view::text LIKE 'whatever_view%';
|
|
user_view | mat_table | mat_tablespace | chunk_name | chunk_tablespace
|
|
-----------------+-----------------------------+----------------+--------------------+------------------
|
|
whatever_view_1 | _materialized_hypertable_13 | tablespace2 | _hyper_13_27_chunk | tablespace2
|
|
whatever_view_2 | _materialized_hypertable_14 | tablespace1 | _hyper_14_28_chunk | tablespace1
|
|
(2 rows)
|
|
|
|
DROP MATERIALIZED VIEW whatever_view_1;
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_13_27_chunk
|
|
DROP MATERIALIZED VIEW whatever_view_2;
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_14_28_chunk
|
|
-- test bucket width expressions on integer hypertables
|
|
CREATE TABLE metrics_int2 (
|
|
time int2 NOT NULL,
|
|
device_id int,
|
|
v1 float,
|
|
v2 float
|
|
);
|
|
CREATE TABLE metrics_int4 (
|
|
time int4 NOT NULL,
|
|
device_id int,
|
|
v1 float,
|
|
v2 float
|
|
);
|
|
CREATE TABLE metrics_int8 (
|
|
time int8 NOT NULL,
|
|
device_id int,
|
|
v1 float,
|
|
v2 float
|
|
);
|
|
SELECT create_hypertable (('metrics_' || dt)::regclass, 'time', chunk_time_interval => 10)
|
|
FROM (
|
|
VALUES ('int2'),
|
|
('int4'),
|
|
('int8')) v (dt);
|
|
create_hypertable
|
|
----------------------------
|
|
(15,public,metrics_int2,t)
|
|
(16,public,metrics_int4,t)
|
|
(17,public,metrics_int8,t)
|
|
(3 rows)
|
|
|
|
CREATE OR REPLACE FUNCTION int2_now ()
|
|
RETURNS int2
|
|
LANGUAGE SQL
|
|
STABLE
|
|
AS $$
|
|
SELECT 10::int2
|
|
$$;
|
|
CREATE OR REPLACE FUNCTION int4_now ()
|
|
RETURNS int4
|
|
LANGUAGE SQL
|
|
STABLE
|
|
AS $$
|
|
SELECT 10::int4
|
|
$$;
|
|
CREATE OR REPLACE FUNCTION int8_now ()
|
|
RETURNS int8
|
|
LANGUAGE SQL
|
|
STABLE
|
|
AS $$
|
|
SELECT 10::int8
|
|
$$;
|
|
SELECT set_integer_now_func (('metrics_' || dt)::regclass, (dt || '_now')::regproc)
|
|
FROM (
|
|
VALUES ('int2'),
|
|
('int4'),
|
|
('int8')) v (dt);
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
|
|
|
|
(3 rows)
|
|
|
|
-- width expression for int2 hypertables
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1::smallint, time)
|
|
FROM metrics_int2
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1::smallint + 2::smallint, time)
|
|
FROM metrics_int2
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
-- width expression for int4 hypertables
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1, time)
|
|
FROM metrics_int4
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1 + 2, time)
|
|
FROM metrics_int4
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
-- width expression for int8 hypertables
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1, time)
|
|
FROM metrics_int8
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1 + 2, time)
|
|
FROM metrics_int8
|
|
GROUP BY 1;
|
|
NOTICE: continuous aggregate "width_expr" is already up-to-date
|
|
DROP MATERIALIZED VIEW width_expr;
|
|
\set ON_ERROR_STOP 0
|
|
-- non-immutable expresions should be rejected
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(extract(year FROM now())::smallint, time)
|
|
FROM metrics_int2
|
|
GROUP BY 1;
|
|
ERROR: only immutable expressions allowed in time bucket function
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(extract(year FROM now())::int, time)
|
|
FROM metrics_int4
|
|
GROUP BY 1;
|
|
ERROR: only immutable expressions allowed in time bucket function
|
|
CREATE MATERIALIZED VIEW width_expr WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(extract(year FROM now())::int, time)
|
|
FROM metrics_int8
|
|
GROUP BY 1;
|
|
ERROR: only immutable expressions allowed in time bucket function
|
|
\set ON_ERROR_STOP 1
|
|
-- Test various ALTER MATERIALIZED VIEW statements.
|
|
SET ROLE :ROLE_DEFAULT_PERM_USER;
|
|
CREATE MATERIALIZED VIEW owner_check WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
|
SELECT time_bucket(1 + 2, time)
|
|
FROM metrics_int8
|
|
GROUP BY 1
|
|
WITH NO DATA;
|
|
\x on
|
|
SELECT * FROM cagg_info WHERE user_view::text = 'owner_check';
|
|
-[ RECORD 1 ]------+---------------------------------------
|
|
user_view | owner_check
|
|
user_view_owner | default_perm_user
|
|
mat_table | _materialized_hypertable_24
|
|
mat_table_owner | default_perm_user
|
|
direct_view | _timescaledb_internal._direct_view_24
|
|
direct_view_owner | default_perm_user
|
|
partial_view | _timescaledb_internal._partial_view_24
|
|
partial_view_owner | default_perm_user
|
|
tablespace |
|
|
|
|
\x off
|
|
-- This should not work since the target user has the wrong role, but
|
|
-- we test that the normal checks are done when changing the owner.
|
|
\set ON_ERROR_STOP 0
|
|
ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1;
|
|
ERROR: must be able to SET ROLE "test_role_1"
|
|
\set ON_ERROR_STOP 1
|
|
-- Superuser can always change owner
|
|
SET ROLE :ROLE_CLUSTER_SUPERUSER;
|
|
ALTER MATERIALIZED VIEW owner_check OWNER TO :ROLE_1;
|
|
\x on
|
|
SELECT * FROM cagg_info WHERE user_view::text = 'owner_check';
|
|
-[ RECORD 1 ]------+---------------------------------------
|
|
user_view | owner_check
|
|
user_view_owner | test_role_1
|
|
mat_table | _materialized_hypertable_24
|
|
mat_table_owner | test_role_1
|
|
direct_view | _timescaledb_internal._direct_view_24
|
|
direct_view_owner | test_role_1
|
|
partial_view | _timescaledb_internal._partial_view_24
|
|
partial_view_owner | test_role_1
|
|
tablespace |
|
|
|
|
\x off
|
|
--
|
|
-- Test drop continuous aggregate cases
|
|
--
|
|
-- Issue: #2608
|
|
--
|
|
CREATE OR REPLACE FUNCTION test_int_now()
|
|
RETURNS INT LANGUAGE SQL STABLE AS
|
|
$BODY$
|
|
SELECT 50;
|
|
$BODY$;
|
|
CREATE TABLE conditionsnm(time_int INT NOT NULL, device INT, value FLOAT);
|
|
SELECT create_hypertable('conditionsnm', 'time_int', chunk_time_interval => 10);
|
|
create_hypertable
|
|
----------------------------
|
|
(25,public,conditionsnm,t)
|
|
(1 row)
|
|
|
|
SELECT set_integer_now_func('conditionsnm', 'test_int_now');
|
|
set_integer_now_func
|
|
----------------------
|
|
|
|
(1 row)
|
|
|
|
INSERT INTO conditionsnm
|
|
SELECT time_val, time_val % 4, 3.14 FROM generate_series(0,100,1) AS time_val;
|
|
-- Case 1: DROP
|
|
CREATE MATERIALIZED VIEW conditionsnm_4
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
|
|
AS
|
|
SELECT time_bucket(7, time_int) as bucket,
|
|
SUM(value), COUNT(value)
|
|
FROM conditionsnm GROUP BY bucket WITH DATA;
|
|
NOTICE: refreshing continuous aggregate "conditionsnm_4"
|
|
DROP materialized view conditionsnm_4;
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_26_40_chunk
|
|
-- Case 2: DROP CASCADE should have similar behaviour as DROP
|
|
CREATE MATERIALIZED VIEW conditionsnm_4
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
|
|
AS
|
|
SELECT time_bucket(7, time_int) as bucket,
|
|
SUM(value), COUNT(value)
|
|
FROM conditionsnm GROUP BY bucket WITH DATA;
|
|
NOTICE: refreshing continuous aggregate "conditionsnm_4"
|
|
DROP materialized view conditionsnm_4 CASCADE;
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_27_41_chunk
|
|
-- Case 3: require CASCADE in case of dependent object
|
|
CREATE MATERIALIZED VIEW conditionsnm_4
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only = TRUE)
|
|
AS
|
|
SELECT time_bucket(7, time_int) as bucket,
|
|
SUM(value), COUNT(value)
|
|
FROM conditionsnm GROUP BY bucket WITH DATA;
|
|
NOTICE: refreshing continuous aggregate "conditionsnm_4"
|
|
CREATE VIEW see_cagg as select * from conditionsnm_4;
|
|
\set ON_ERROR_STOP 0
|
|
DROP MATERIALIZED VIEW conditionsnm_4;
|
|
ERROR: cannot drop view conditionsnm_4 because other objects depend on it
|
|
\set ON_ERROR_STOP 1
|
|
-- Case 4: DROP CASCADE with dependency
|
|
DROP MATERIALIZED VIEW conditionsnm_4 CASCADE;
|
|
NOTICE: drop cascades to view see_cagg
|
|
NOTICE: drop cascades to table _timescaledb_internal._hyper_28_42_chunk
|
|
-- Test DROP SCHEMA CASCADE with continuous aggregates
|
|
--
|
|
-- Issue: #2350
|
|
--
|
|
-- Case 1: DROP SCHEMA CASCADE
|
|
CREATE SCHEMA test_schema;
|
|
CREATE TABLE test_schema.telemetry_raw (
|
|
ts TIMESTAMP WITH TIME ZONE NOT NULL,
|
|
value DOUBLE PRECISION
|
|
);
|
|
SELECT create_hypertable('test_schema.telemetry_raw', 'ts');
|
|
create_hypertable
|
|
----------------------------------
|
|
(29,test_schema,telemetry_raw,t)
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW test_schema.telemetry_1s
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=false)
|
|
AS
|
|
SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s,
|
|
avg(value)
|
|
FROM test_schema.telemetry_raw
|
|
GROUP BY ts_1s WITH NO DATA;
|
|
SELECT ca.raw_hypertable_id,
|
|
h.schema_name,
|
|
h.table_name AS "MAT_TABLE_NAME",
|
|
partial_view_name as "PART_VIEW_NAME",
|
|
partial_view_schema
|
|
FROM _timescaledb_catalog.continuous_agg ca
|
|
INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
|
|
WHERE user_view_name = 'telemetry_1s';
|
|
raw_hypertable_id | schema_name | MAT_TABLE_NAME | PART_VIEW_NAME | partial_view_schema
|
|
-------------------+-----------------------+-----------------------------+------------------+-----------------------
|
|
29 | _timescaledb_internal | _materialized_hypertable_30 | _partial_view_30 | _timescaledb_internal
|
|
(1 row)
|
|
|
|
\gset
|
|
DROP SCHEMA test_schema CASCADE;
|
|
NOTICE: drop cascades to 4 other objects
|
|
SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = 'telemetry_1s';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
-- Case 2: DROP SCHEMA CASCADE with multiple caggs
|
|
CREATE SCHEMA test_schema;
|
|
CREATE TABLE test_schema.telemetry_raw (
|
|
ts TIMESTAMP WITH TIME ZONE NOT NULL,
|
|
value DOUBLE PRECISION
|
|
);
|
|
SELECT create_hypertable('test_schema.telemetry_raw', 'ts');
|
|
create_hypertable
|
|
----------------------------------
|
|
(31,test_schema,telemetry_raw,t)
|
|
(1 row)
|
|
|
|
CREATE MATERIALIZED VIEW test_schema.cagg1
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=false)
|
|
AS
|
|
SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s,
|
|
avg(value)
|
|
FROM test_schema.telemetry_raw
|
|
GROUP BY ts_1s WITH NO DATA;
|
|
CREATE MATERIALIZED VIEW test_schema.cagg2
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only=false)
|
|
AS
|
|
SELECT time_bucket(INTERVAL '1s', ts) AS ts_1s,
|
|
avg(value)
|
|
FROM test_schema.telemetry_raw
|
|
GROUP BY ts_1s WITH NO DATA;
|
|
SELECT ca.raw_hypertable_id,
|
|
h.schema_name,
|
|
h.table_name AS "MAT_TABLE_NAME1",
|
|
partial_view_name as "PART_VIEW_NAME1",
|
|
partial_view_schema
|
|
FROM _timescaledb_catalog.continuous_agg ca
|
|
INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
|
|
WHERE user_view_name = 'cagg1';
|
|
raw_hypertable_id | schema_name | MAT_TABLE_NAME1 | PART_VIEW_NAME1 | partial_view_schema
|
|
-------------------+-----------------------+-----------------------------+------------------+-----------------------
|
|
31 | _timescaledb_internal | _materialized_hypertable_32 | _partial_view_32 | _timescaledb_internal
|
|
(1 row)
|
|
|
|
\gset
|
|
SELECT ca.raw_hypertable_id,
|
|
h.schema_name,
|
|
h.table_name AS "MAT_TABLE_NAME2",
|
|
partial_view_name as "PART_VIEW_NAME2",
|
|
partial_view_schema
|
|
FROM _timescaledb_catalog.continuous_agg ca
|
|
INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
|
|
WHERE user_view_name = 'cagg2';
|
|
raw_hypertable_id | schema_name | MAT_TABLE_NAME2 | PART_VIEW_NAME2 | partial_view_schema
|
|
-------------------+-----------------------+-----------------------------+------------------+-----------------------
|
|
31 | _timescaledb_internal | _materialized_hypertable_33 | _partial_view_33 | _timescaledb_internal
|
|
(1 row)
|
|
|
|
\gset
|
|
DROP SCHEMA test_schema CASCADE;
|
|
NOTICE: drop cascades to 7 other objects
|
|
SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME1';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME1';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = 'cagg1';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = :'MAT_TABLE_NAME2';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = :'PART_VIEW_NAME2';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_class WHERE relname = 'cagg2';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
SELECT count(*) FROM pg_namespace WHERE nspname = 'test_schema';
|
|
count
|
|
-------
|
|
0
|
|
(1 row)
|
|
|
|
DROP TABLESPACE tablespace1;
|
|
DROP TABLESPACE tablespace2;
|
|
-- Check that we can rename a column of a materialized view and still
|
|
-- rebuild it after (#3051, #3405)
|
|
CREATE TABLE conditions (
|
|
time TIMESTAMPTZ NOT NULL,
|
|
location TEXT NOT NULL,
|
|
temperature DOUBLE PRECISION NULL
|
|
);
|
|
SELECT create_hypertable('conditions', 'time');
|
|
create_hypertable
|
|
--------------------------
|
|
(34,public,conditions,t)
|
|
(1 row)
|
|
|
|
INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55);
|
|
INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'por', 100);
|
|
INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65);
|
|
INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65);
|
|
INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45);
|
|
INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55);
|
|
INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65);
|
|
INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75);
|
|
INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85);
|
|
INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10);
|
|
INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20);
|
|
CREATE MATERIALIZED VIEW conditions_daily
|
|
WITH (timescaledb.continuous, timescaledb.materialized_only = false) AS
|
|
SELECT location,
|
|
time_bucket(INTERVAL '1 day', time) AS bucket,
|
|
AVG(temperature)
|
|
FROM conditions
|
|
GROUP BY location, bucket
|
|
WITH NO DATA;
|
|
SELECT format('%I.%I', '_timescaledb_internal', h.table_name) AS "MAT_TABLE_NAME",
|
|
format('%I.%I', '_timescaledb_internal', partial_view_name) AS "PART_VIEW_NAME",
|
|
format('%I.%I', '_timescaledb_internal', direct_view_name) AS "DIRECT_VIEW_NAME"
|
|
FROM _timescaledb_catalog.continuous_agg ca
|
|
INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
|
|
WHERE user_view_name = 'conditions_daily'
|
|
\gset
|
|
-- Show both the columns and the view definitions to see that
|
|
-- references are correct in the view as well.
|
|
SELECT * FROM test.show_columns('conditions_daily');
|
|
Column | Type | NotNull
|
|
----------+--------------------------+---------
|
|
location | text | f
|
|
bucket | timestamp with time zone | f
|
|
avg | double precision | f
|
|
(3 rows)
|
|
|
|
SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME');
|
|
Column | Type | NotNull
|
|
----------+--------------------------+---------
|
|
location | text | f
|
|
bucket | timestamp with time zone | f
|
|
avg | double precision | f
|
|
(3 rows)
|
|
|
|
SELECT * FROM test.show_columns(:'PART_VIEW_NAME');
|
|
Column | Type | NotNull
|
|
----------+--------------------------+---------
|
|
location | text | f
|
|
bucket | timestamp with time zone | f
|
|
avg | double precision | f
|
|
(3 rows)
|
|
|
|
SELECT * FROM test.show_columns(:'MAT_TABLE_NAME');
|
|
Column | Type | NotNull
|
|
----------+--------------------------+---------
|
|
location | text | f
|
|
bucket | timestamp with time zone | t
|
|
avg | double precision | f
|
|
(3 rows)
|
|
|
|
ALTER MATERIALIZED VIEW conditions_daily RENAME COLUMN bucket to "time";
-- Show both the columns and the view definitions to see that
-- references are correct in the view as well.
SELECT * FROM test.show_columns('conditions_daily');
Column | Type | NotNull
----------+--------------------------+---------
location | text | f
time | timestamp with time zone | f
avg | double precision | f
(3 rows)

SELECT * FROM test.show_columns(:'DIRECT_VIEW_NAME');
Column | Type | NotNull
----------+--------------------------+---------
location | text | f
time | timestamp with time zone | f
avg | double precision | f
(3 rows)

SELECT * FROM test.show_columns(:'PART_VIEW_NAME');
Column | Type | NotNull
----------+--------------------------+---------
location | text | f
time | timestamp with time zone | f
avg | double precision | f
(3 rows)

SELECT * FROM test.show_columns(:'MAT_TABLE_NAME');
Column | Type | NotNull
----------+--------------------------+---------
location | text | f
time | timestamp with time zone | t
avg | double precision | f
(3 rows)

-- This will rebuild the materialized view and should succeed.
ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only = false);
-- Refresh the continuous aggregate to check that it works after the
-- rename.
\set VERBOSITY verbose
CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
\set VERBOSITY terse
--
-- Indexes on continuous aggregate
--
\set ON_ERROR_STOP 0
-- unique indexes are not supported
CREATE UNIQUE INDEX index_unique_error ON conditions_daily ("time", location);
ERROR: continuous aggregates do not support UNIQUE indexes
-- concurrent index creation is not supported
CREATE INDEX CONCURRENTLY index_concurrently_avg ON conditions_daily (avg);
ERROR: hypertables do not support concurrent index creation
\set ON_ERROR_STOP 1
CREATE INDEX index_avg ON conditions_daily (avg);
CREATE INDEX index_avg_only ON ONLY conditions_daily (avg);
CREATE INDEX index_avg_include ON conditions_daily (avg) INCLUDE (location);
CREATE INDEX index_avg_expr ON conditions_daily ((avg + 1));
CREATE INDEX index_avg_location_sfo ON conditions_daily (avg) WHERE location = 'SFO';
CREATE INDEX index_avg_expr_location_sfo ON conditions_daily ((avg + 2)) WHERE location = 'SFO';
SELECT * FROM test.show_indexespred(:'MAT_TABLE_NAME');
Index | Columns | Expr | Pred | Unique | Primary | Exclusion | Tablespace
-----------------------------------------------------------------------+-------------------+---------------------------+------------------------+--------+---------+-----------+------------
_timescaledb_internal._materialized_hypertable_35_bucket_idx | {bucket} | | | f | f | f |
_timescaledb_internal._materialized_hypertable_35_location_bucket_idx | {location,bucket} | | | f | f | f |
_timescaledb_internal.index_avg | {avg} | | | f | f | f |
_timescaledb_internal.index_avg_expr | {expr} | avg + 1::double precision | | f | f | f |
_timescaledb_internal.index_avg_expr_location_sfo | {expr} | avg + 2::double precision | location = 'SFO'::text | f | f | f |
_timescaledb_internal.index_avg_include | {avg,location} | | | f | f | f |
_timescaledb_internal.index_avg_location_sfo | {avg} | | location = 'SFO'::text | f | f | f |
_timescaledb_internal.index_avg_only | {avg} | | | f | f | f |
(8 rows)

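-- For reference (not part of the original test): the same index list could be
-- fetched straight from pg_indexes on the materialization hypertable, e.g.
--   SELECT indexname FROM pg_indexes WHERE tablename = :'MAT_TABLE_NAME';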
-- #3696 assertion failure when referencing columns not present in result
CREATE TABLE i3696(time timestamptz NOT NULL, search_query text, cnt integer, cnt2 integer);
SELECT table_name FROM create_hypertable('i3696','time');
table_name
------------
i3696
(1 row)

CREATE MATERIALIZED VIEW i3696_cagg1 WITH (timescaledb.continuous, timescaledb.materialized_only=false)
AS
SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket
FROM i3696 GROUP BY cnt +cnt2 , bucket, search_query;
NOTICE: continuous aggregate "i3696_cagg1" is already up-to-date
ALTER MATERIALIZED VIEW i3696_cagg1 SET (timescaledb.materialized_only = 'true');
CREATE MATERIALIZED VIEW i3696_cagg2 WITH (timescaledb.continuous, timescaledb.materialized_only=false)
AS
SELECT search_query,count(search_query) as count, sum(cnt), time_bucket(INTERVAL '1 minute', time) AS bucket
FROM i3696 GROUP BY cnt + cnt2, bucket, search_query
HAVING cnt + cnt2 + sum(cnt) > 2 or count(cnt2) > 10;
NOTICE: continuous aggregate "i3696_cagg2" is already up-to-date
ALTER MATERIALIZED VIEW i3696_cagg2 SET (timescaledb.materialized_only = 'true');
--TEST with multiple settings on continuous aggregates --
-- test for materialized_only + compress combinations (real time aggs enabled initially)
CREATE TABLE test_setting(time timestamptz not null, val numeric);
SELECT create_hypertable('test_setting', 'time');
create_hypertable
----------------------------
(39,public,test_setting,t)
(1 row)

CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only=false)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: continuous aggregate "test_setting_cagg" is already up-to-date
INSERT INTO test_setting
SELECT generate_series( '2020-01-10 8:00'::timestamp, '2020-01-30 10:00+00'::timestamptz, '1 day'::interval), 10.0;
CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

--this row is not in the materialized result ---
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to ""
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | t | t
(1 row)

--real time aggs are off now, should return 20 --
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

--now set it back to false --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_40" is set to ""
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | t | f
(1 row)

--count should return additional data since we have real time aggs on
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
21
(1 row)

ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false');
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | f | t
(1 row)

--real time aggs are off now, should return 20 --
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false');
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | f | f
(1 row)

--count should return additional data since we have real time aggs on
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
21
(1 row)

DELETE FROM test_setting WHERE val = 20;
--TEST with multiple settings on continuous aggregates with real time aggregates turned off initially --
-- test for materialized_only + compress combinations (real time aggs disabled initially)
DROP MATERIALIZED VIEW test_setting_cagg;
NOTICE: drop cascades to table _timescaledb_internal._hyper_40_50_chunk
CREATE MATERIALIZED VIEW test_setting_cagg with (timescaledb.continuous, timescaledb.materialized_only = true)
AS SELECT time_bucket('1h',time), avg(val), count(*) FROM test_setting GROUP BY 1;
NOTICE: refreshing continuous aggregate "test_setting_cagg"
CALL refresh_continuous_aggregate('test_setting_cagg', NULL, '2020-05-30 10:00+00'::timestamptz);
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

--this row is not in the materialized result ---
INSERT INTO test_setting VALUES( '2020-11-01', 20);
--try out 2 settings here --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to ""
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | t | f
(1 row)

--count should return additional data since we have real time aggs on
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
21
(1 row)

--now set it back to true --
ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='true');
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_42" is set to ""
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | t | t
(1 row)

--real time aggs are off now, should return 20 --
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'false', timescaledb.compress='false');
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | f | f
(1 row)

--count should return additional data since we have real time aggs on
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
21
(1 row)

ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true', timescaledb.compress='false');
SELECT view_name, compression_enabled, materialized_only
FROM timescaledb_information.continuous_aggregates
where view_name = 'test_setting_cagg';
view_name | compression_enabled | materialized_only
-------------------+---------------------+-------------------
test_setting_cagg | f | t
(1 row)

--real time aggs are off now, should return 20 --
SELECT count(*) from test_setting_cagg ORDER BY 1;
count
-------
20
(1 row)

-- END TEST with multiple settings
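-- Note (illustrative only, not part of the original test): the two options can
-- also be toggled one at a time, e.g.
--   ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.materialized_only = 'true');
--   ALTER MATERIALIZED VIEW test_setting_cagg SET (timescaledb.compress = 'false');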
-- Test View Target Entries that contain both aggrefs and Vars in the same expression
CREATE TABLE transactions
(
"time" timestamp with time zone NOT NULL,
dummy1 integer,
dummy2 integer,
dummy3 integer,
dummy4 integer,
dummy5 integer,
amount integer,
fiat_value integer
);
SELECT create_hypertable('transactions', 'time');
create_hypertable
----------------------------
(44,public,transactions,t)
(1 row)

INSERT INTO transactions VALUES ( '2018-01-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10);
INSERT INTO transactions VALUES ( '2018-01-02 09:30:00-08', 0, 0, 0, 0, 0, -1, 10);
INSERT INTO transactions VALUES ( '2018-01-02 09:20:00-08', 0, 0, 0, 0, 0, -1, 10);
INSERT INTO transactions VALUES ( '2018-01-02 09:10:00-08', 0, 0, 0, 0, 0, -1, 10);
INSERT INTO transactions VALUES ( '2018-11-01 09:20:00-08', 0, 0, 0, 0, 0, 1, 10);
INSERT INTO transactions VALUES ( '2018-11-01 10:40:00-08', 0, 0, 0, 0, 0, 1, 10);
INSERT INTO transactions VALUES ( '2018-11-01 11:50:00-08', 0, 0, 0, 0, 0, 1, 10);
INSERT INTO transactions VALUES ( '2018-11-01 12:10:00-08', 0, 0, 0, 0, 0, -1, 10);
INSERT INTO transactions VALUES ( '2018-11-01 13:10:00-08', 0, 0, 0, 0, 0, -1, 10);
INSERT INTO transactions VALUES ( '2018-11-02 09:20:00-08', 0, 0, 0, 0, 0, 1, 10);
INSERT INTO transactions VALUES ( '2018-11-02 10:30:00-08', 0, 0, 0, 0, 0, -1, 10);
CREATE materialized view cashflows(
bucket,
amount,
cashflow,
cashflow2
) WITH (
timescaledb.continuous,
timescaledb.materialized_only = true
) AS
SELECT time_bucket ('1 day', time) AS bucket,
amount,
CASE
WHEN amount < 0 THEN (0 - sum(fiat_value))
ELSE sum(fiat_value)
END AS cashflow,
amount + sum(fiat_value)
FROM transactions
GROUP BY bucket, amount;
NOTICE: refreshing continuous aggregate "cashflows"
SELECT h.table_name AS "MAT_TABLE_NAME",
partial_view_name AS "PART_VIEW_NAME",
direct_view_name AS "DIRECT_VIEW_NAME"
FROM _timescaledb_catalog.continuous_agg ca
INNER JOIN _timescaledb_catalog.hypertable h ON (h.id = ca.mat_hypertable_id)
WHERE user_view_name = 'cashflows'
\gset
-- Show both the columns and the view definitions to see that
-- references are correct in the view as well.
\d+ "_timescaledb_internal".:"DIRECT_VIEW_NAME"
View "_timescaledb_internal._direct_view_45"
Column | Type | Collation | Nullable | Default | Storage | Description
-----------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
amount | integer | | | | plain |
cashflow | bigint | | | | plain |
cashflow2 | bigint | | | | plain |
View definition:
SELECT time_bucket('@ 1 day'::interval, "time") AS bucket,
amount,
CASE
WHEN amount < 0 THEN 0 - sum(fiat_value)
ELSE sum(fiat_value)
END AS cashflow,
amount + sum(fiat_value) AS cashflow2
FROM transactions
GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount;

\d+ "_timescaledb_internal".:"PART_VIEW_NAME"
View "_timescaledb_internal._partial_view_45"
Column | Type | Collation | Nullable | Default | Storage | Description
-----------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
amount | integer | | | | plain |
cashflow | bigint | | | | plain |
cashflow2 | bigint | | | | plain |
View definition:
SELECT time_bucket('@ 1 day'::interval, "time") AS bucket,
amount,
CASE
WHEN amount < 0 THEN 0 - sum(fiat_value)
ELSE sum(fiat_value)
END AS cashflow,
amount + sum(fiat_value) AS cashflow2
FROM transactions
GROUP BY (time_bucket('@ 1 day'::interval, "time")), amount;

\d+ "_timescaledb_internal".:"MAT_TABLE_NAME"
Table "_timescaledb_internal._materialized_hypertable_45"
Column | Type | Collation | Nullable | Default | Storage | Stats target | Description
-----------+--------------------------+-----------+----------+---------+---------+--------------+-------------
bucket | timestamp with time zone | | not null | | plain | |
amount | integer | | | | plain | |
cashflow | bigint | | | | plain | |
cashflow2 | bigint | | | | plain | |
Indexes:
"_materialized_hypertable_45_amount_bucket_idx" btree (amount, bucket DESC)
"_materialized_hypertable_45_bucket_idx" btree (bucket DESC)
Triggers:
ts_insert_blocker BEFORE INSERT ON _timescaledb_internal._materialized_hypertable_45 FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
Child tables: _timescaledb_internal._hyper_45_55_chunk,
_timescaledb_internal._hyper_45_56_chunk

\d+ 'cashflows'
View "public.cashflows"
Column | Type | Collation | Nullable | Default | Storage | Description
-----------+--------------------------+-----------+----------+---------+---------+-------------
bucket | timestamp with time zone | | | | plain |
amount | integer | | | | plain |
cashflow | bigint | | | | plain |
cashflow2 | bigint | | | | plain |
View definition:
SELECT bucket,
amount,
cashflow,
cashflow2
FROM _timescaledb_internal._materialized_hypertable_45;

SELECT * FROM cashflows ORDER BY cashflows;
bucket | amount | cashflow | cashflow2
------------------------------+--------+----------+-----------
Sun Dec 31 16:00:00 2017 UTC | 1 | 10 | 11
Mon Jan 01 16:00:00 2018 UTC | -1 | -30 | 29
Wed Oct 31 16:00:00 2018 UTC | -1 | -20 | 19
Wed Oct 31 16:00:00 2018 UTC | 1 | 30 | 31
Thu Nov 01 16:00:00 2018 UTC | -1 | -10 | 9
Thu Nov 01 16:00:00 2018 UTC | 1 | 10 | 11
(6 rows)

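-- Note (not part of the original test): the cashflow2 target entry,
-- amount + sum(fiat_value), is the expression that mixes a plain column
-- reference with an aggregate in a single target entry, which is what this
-- test exercises.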
-- test cagg creation with named arguments in time_bucket
-- note that positional arguments cannot follow named arguments
-- 1. test named origin
-- 2. test named timezone
-- 3. test named ts
-- 4. test named bucket width
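-- For illustration only (not executed here): a call that puts a positional
-- argument after a named one, e.g.
--   time_bucket(bucket_width => '1h', time)
-- would be rejected by the PostgreSQL parser.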
-- named origin
CREATE MATERIALIZED VIEW cagg_named_origin WITH
(timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket('1h', time, 'UTC', origin => '2001-01-03 01:23:45') AS bucket,
avg(amount) as avg_amount
FROM transactions GROUP BY 1 WITH NO DATA;
-- named timezone
CREATE MATERIALIZED VIEW cagg_named_tz_origin WITH
(timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket('1h', time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket,
avg(amount) as avg_amount
FROM transactions GROUP BY 1 WITH NO DATA;
-- named ts
CREATE MATERIALIZED VIEW cagg_named_ts_tz_origin WITH
(timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket('1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket,
avg(amount) as avg_amount
FROM transactions GROUP BY 1 WITH NO DATA;
-- named bucket width
CREATE MATERIALIZED VIEW cagg_named_all WITH
(timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket(bucket_width => '1h', ts => time, timezone => 'UTC', origin => '2001-01-03 01:23:45') AS bucket,
avg(amount) as avg_amount
FROM transactions GROUP BY 1 WITH NO DATA;
-- Refreshing from the beginning (NULL) of a CAGG with variable time bucket and
-- using an INTERVAL for the end timestamp (issue #5534)
CREATE MATERIALIZED VIEW transactions_montly
WITH (timescaledb.continuous, timescaledb.materialized_only = true) AS
SELECT time_bucket(INTERVAL '1 month', time) AS bucket,
SUM(fiat_value),
MAX(fiat_value),
MIN(fiat_value)
FROM transactions
GROUP BY 1
WITH NO DATA;
-- No rows
SELECT * FROM transactions_montly ORDER BY bucket;
bucket | sum | max | min
--------+-----+-----+-----
(0 rows)

-- Refresh from the beginning of the CAGG for 1 month
CALL refresh_continuous_aggregate('transactions_montly', NULL, INTERVAL '1 month');
SELECT * FROM transactions_montly ORDER BY bucket;
bucket | sum | max | min
------------------------------+-----+-----+-----
Sun Dec 31 16:00:00 2017 UTC | 40 | 10 | 10
Wed Oct 31 16:00:00 2018 UTC | 70 | 10 | 10
(2 rows)

TRUNCATE transactions_montly;
-- Partially refresh the CAGG from the beginning to a specific timestamp
CALL refresh_continuous_aggregate('transactions_montly', NULL, '2018-11-01 11:50:00-08'::timestamptz);
SELECT * FROM transactions_montly ORDER BY bucket;
bucket | sum | max | min
------------------------------+-----+-----+-----
Sun Dec 31 16:00:00 2017 UTC | 40 | 10 | 10
(1 row)

-- Full refresh of the CAGG
CALL refresh_continuous_aggregate('transactions_montly', NULL, NULL);
SELECT * FROM transactions_montly ORDER BY bucket;
bucket | sum | max | min
------------------------------+-----+-----+-----
Sun Dec 31 16:00:00 2017 UTC | 40 | 10 | 10
Wed Oct 31 16:00:00 2018 UTC | 70 | 10 | 10
(2 rows)

-- Check set_chunk_time_interval on continuous aggregate
CREATE MATERIALIZED VIEW cagg_set_chunk_time_interval
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT time_bucket(INTERVAL '1 month', time) AS bucket,
SUM(fiat_value),
MAX(fiat_value),
MIN(fiat_value)
FROM transactions
GROUP BY 1
WITH NO DATA;
SELECT set_chunk_time_interval('cagg_set_chunk_time_interval', chunk_time_interval => interval '1 month');
set_chunk_time_interval
-------------------------

(1 row)

CALL refresh_continuous_aggregate('cagg_set_chunk_time_interval', NULL, NULL);
SELECT _timescaledb_functions.to_interval(d.interval_length) = interval '1 month'
FROM _timescaledb_catalog.dimension d
RIGHT JOIN _timescaledb_catalog.continuous_agg ca ON ca.user_view_name = 'cagg_set_chunk_time_interval'
WHERE d.hypertable_id = ca.mat_hypertable_id;
?column?
----------
t
(1 row)

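-- For comparison (illustrative only): now that chunk_time_interval is a CAgg
-- option, the same interval could also be set without the helper function, e.g.
--   ALTER MATERIALIZED VIEW cagg_set_chunk_time_interval
--     SET (timescaledb.chunk_time_interval = '1 month');
-- as exercised at the end of this file.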
-- Since #6077 CAggs are materialized only by default
DROP TABLE conditions CASCADE;
NOTICE: drop cascades to 3 other objects
NOTICE: drop cascades to 2 other objects
CREATE TABLE conditions (
time TIMESTAMPTZ NOT NULL,
location TEXT NOT NULL,
temperature DOUBLE PRECISION NULL
);
SELECT create_hypertable('conditions', 'time');
create_hypertable
--------------------------
(52,public,conditions,t)
(1 row)

INSERT INTO conditions VALUES ( '2018-01-01 09:20:00-08', 'SFO', 55);
INSERT INTO conditions VALUES ( '2018-01-02 09:30:00-08', 'POR', 100);
INSERT INTO conditions VALUES ( '2018-01-02 09:20:00-08', 'SFO', 65);
INSERT INTO conditions VALUES ( '2018-01-02 09:10:00-08', 'NYC', 65);
INSERT INTO conditions VALUES ( '2018-11-01 09:20:00-08', 'NYC', 45);
INSERT INTO conditions VALUES ( '2018-11-01 10:40:00-08', 'NYC', 55);
INSERT INTO conditions VALUES ( '2018-11-01 11:50:00-08', 'NYC', 65);
INSERT INTO conditions VALUES ( '2018-11-01 12:10:00-08', 'NYC', 75);
INSERT INTO conditions VALUES ( '2018-11-01 13:10:00-08', 'NYC', 85);
INSERT INTO conditions VALUES ( '2018-11-02 09:20:00-08', 'NYC', 10);
INSERT INTO conditions VALUES ( '2018-11-02 10:30:00-08', 'NYC', 20);
CREATE MATERIALIZED VIEW conditions_daily
WITH (timescaledb.continuous) AS
SELECT location,
time_bucket(INTERVAL '1 day', time) AS bucket,
AVG(temperature)
FROM conditions
GROUP BY location, bucket
WITH NO DATA;
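-- Note (not part of the original test): since #6077 omitting the option is
-- equivalent to spelling it out explicitly, e.g.
--   CREATE MATERIALIZED VIEW conditions_daily
--   WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS ...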
\d+ conditions_daily
View "public.conditions_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
----------+--------------------------+-----------+----------+---------+----------+-------------
location | text | | | | extended |
bucket | timestamp with time zone | | | | plain |
avg | double precision | | | | plain |
View definition:
SELECT location,
bucket,
avg
FROM _timescaledb_internal._materialized_hypertable_53;

-- Should return NO ROWS
SELECT * FROM conditions_daily ORDER BY bucket, location;
location | bucket | avg
----------+--------+-----
(0 rows)

ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=false);
\d+ conditions_daily
View "public.conditions_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
----------+--------------------------+-----------+----------+---------+----------+-------------
location | text | | | | extended |
bucket | timestamp with time zone | | | | plain |
avg | double precision | | | | plain |
View definition:
SELECT _materialized_hypertable_53.location,
_materialized_hypertable_53.bucket,
_materialized_hypertable_53.avg
FROM _timescaledb_internal._materialized_hypertable_53
WHERE _materialized_hypertable_53.bucket < COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(53)), '-infinity'::timestamp with time zone)
UNION ALL
SELECT conditions.location,
time_bucket('@ 1 day'::interval, conditions."time") AS bucket,
avg(conditions.temperature) AS avg
FROM conditions
WHERE conditions."time" >= COALESCE(_timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(53)), '-infinity'::timestamp with time zone)
GROUP BY conditions.location, (time_bucket('@ 1 day'::interval, conditions."time"));

-- Should return ROWS because it is now realtime
SELECT * FROM conditions_daily ORDER BY bucket, location;
location | bucket | avg
----------+------------------------------+-----
SFO | Sun Dec 31 16:00:00 2017 UTC | 55
NYC | Mon Jan 01 16:00:00 2018 UTC | 65
POR | Mon Jan 01 16:00:00 2018 UTC | 100
SFO | Mon Jan 01 16:00:00 2018 UTC | 65
NYC | Wed Oct 31 16:00:00 2018 UTC | 65
NYC | Thu Nov 01 16:00:00 2018 UTC | 15
(6 rows)

-- Should still return ROWS after switching back to materialized-only and refreshing
ALTER MATERIALIZED VIEW conditions_daily SET (timescaledb.materialized_only=true);
\d+ conditions_daily
View "public.conditions_daily"
Column | Type | Collation | Nullable | Default | Storage | Description
----------+--------------------------+-----------+----------+---------+----------+-------------
location | text | | | | extended |
bucket | timestamp with time zone | | | | plain |
avg | double precision | | | | plain |
View definition:
SELECT location,
bucket,
avg
FROM _timescaledb_internal._materialized_hypertable_53;

CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
SELECT * FROM conditions_daily ORDER BY bucket, location;
location | bucket | avg
----------+------------------------------+-----
SFO | Sun Dec 31 16:00:00 2017 UTC | 55
NYC | Mon Jan 01 16:00:00 2018 UTC | 65
POR | Mon Jan 01 16:00:00 2018 UTC | 100
SFO | Mon Jan 01 16:00:00 2018 UTC | 65
NYC | Wed Oct 31 16:00:00 2018 UTC | 65
NYC | Thu Nov 01 16:00:00 2018 UTC | 15
(6 rows)

-- Test TRUNCATE over a Realtime CAgg
DROP MATERIALIZED VIEW conditions_daily;
NOTICE: drop cascades to 2 other objects
CREATE MATERIALIZED VIEW conditions_daily
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
SELECT location,
time_bucket(INTERVAL '1 day', time) AS bucket,
AVG(temperature)
FROM conditions
GROUP BY location, bucket
WITH NO DATA;
SELECT mat_hypertable_id FROM _timescaledb_catalog.continuous_agg WHERE user_view_name = 'conditions_daily' \gset
-- Check the current watermark for an empty CAgg
SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_hypertable_id)) AS watermark_empty_cagg;
watermark_empty_cagg
---------------------------------
Sun Nov 23 16:00:00 4714 UTC BC
(1 row)

-- Refresh the CAGG
CALL refresh_continuous_aggregate('conditions_daily', NULL, NULL);
-- Check the watermark after the refresh and before truncating the CAgg
SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_hypertable_id)) AS watermark_before;
watermark_before
------------------------------
Fri Nov 02 16:00:00 2018 UTC
(1 row)

-- Chunks exist before truncating the cagg (> 0)
SELECT count(*) FROM show_chunks('conditions_daily');
count
-------
2
(1 row)

-- Truncate the given CAgg; it should reset the watermark to the empty state
TRUNCATE conditions_daily;
-- No chunks remain after truncating the cagg (= 0)
SELECT count(*) FROM show_chunks('conditions_daily');
count
-------
0
(1 row)

-- Watermark should be reset
SELECT _timescaledb_functions.to_timestamp(_timescaledb_functions.cagg_watermark(:mat_hypertable_id)) AS watermark_after;
watermark_after
---------------------------------
Sun Nov 23 16:00:00 4714 UTC BC
(1 row)

-- Should return ROWS because the watermark was reset by the TRUNCATE
SELECT * FROM conditions_daily ORDER BY bucket, location;
location | bucket | avg
----------+------------------------------+-----
SFO | Sun Dec 31 16:00:00 2017 UTC | 55
NYC | Mon Jan 01 16:00:00 2018 UTC | 65
POR | Mon Jan 01 16:00:00 2018 UTC | 100
SFO | Mon Jan 01 16:00:00 2018 UTC | 65
NYC | Wed Oct 31 16:00:00 2018 UTC | 65
NYC | Thu Nov 01 16:00:00 2018 UTC | 15
(6 rows)

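-- For reference (not part of the original test): the raw watermark value, i.e.
-- without the timestamp conversion, could be inspected with
--   SELECT _timescaledb_functions.cagg_watermark(:mat_hypertable_id);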
-- check compression settings are cleaned up when deleting a cagg with compression
CREATE TABLE cagg_cleanup(time timestamptz not null);
SELECT table_name FROM create_hypertable('cagg_cleanup','time');
table_name
--------------
cagg_cleanup
(1 row)

INSERT INTO cagg_cleanup SELECT '2020-01-01';
CREATE MATERIALIZED VIEW cagg1 WITH (timescaledb.continuous) AS SELECT time_bucket('1h',time) FROM cagg_cleanup GROUP BY 1;
NOTICE: refreshing continuous aggregate "cagg1"
ALTER MATERIALIZED VIEW cagg1 SET (timescaledb.compress);
NOTICE: defaulting compress_orderby to time_bucket
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "_materialized_hypertable_56" is set to ""
SELECT count(compress_chunk(ch)) FROM show_chunks('cagg1') ch;
count
-------
1
(1 row)

DROP MATERIALIZED VIEW cagg1;
NOTICE: drop cascades to table _timescaledb_internal._hyper_56_70_chunk
SELECT * FROM _timescaledb_catalog.compression_settings;
relid | compress_relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
-------+----------------+-----------+---------+--------------+--------------------
(0 rows)

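-- For reference (not part of the original test): before the DROP the catalog
-- still held the settings for the compressed materialization hypertable; a
-- quick way to verify that would have been, e.g.
--   SELECT count(*) > 0 FROM _timescaledb_catalog.compression_settings;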
-- test WITH namespace alias
CREATE TABLE with_alias(time timestamptz not null);
CREATE MATERIALIZED VIEW cagg_alias
WITH (tsdb.continuous, tsdb.materialized_only=false) AS
SELECT time_bucket(INTERVAL '1 day', time) FROM conditions GROUP BY 1 WITH NO DATA;
ALTER MATERIALIZED VIEW cagg_alias SET (timescaledb.materialized_only=false);
ALTER MATERIALIZED VIEW cagg_alias SET (tsdb.materialized_only=false);
DROP MATERIALIZED VIEW cagg_alias;
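-- Note (illustrative only): tsdb is shorthand for the timescaledb option
-- namespace, so the view above could equivalently be created with
--   CREATE MATERIALIZED VIEW cagg_alias
--   WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
--   SELECT time_bucket(INTERVAL '1 day', time) FROM conditions GROUP BY 1 WITH NO DATA;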
-- test SET chunk_time_interval
CREATE MATERIALIZED VIEW cagg_set
WITH (tsdb.continuous, tsdb.chunk_time_interval='1day') AS
SELECT time_bucket(INTERVAL '1 day', time) AS cagg_interval_setter FROM conditions GROUP BY 1 WITH NO DATA;
SELECT column_name, time_interval FROM timescaledb_information.dimensions WHERE column_name='cagg_interval_setter';
column_name | time_interval
----------------------+---------------
cagg_interval_setter | @ 1 day
(1 row)

ALTER MATERIALIZED VIEW cagg_set SET (tsdb.chunk_time_interval='23 day');
SELECT column_name, time_interval FROM timescaledb_information.dimensions WHERE column_name='cagg_interval_setter';
column_name | time_interval
----------------------+---------------
cagg_interval_setter | @ 23 days
(1 row)

ALTER MATERIALIZED VIEW cagg_set SET (tsdb.chunk_time_interval='6 month');
SELECT column_name, time_interval FROM timescaledb_information.dimensions WHERE column_name='cagg_interval_setter';
column_name | time_interval
----------------------+---------------
cagg_interval_setter | @ 180 days
(1 row)

ALTER MATERIALIZED VIEW cagg_set SET (tsdb.chunk_time_interval='1 year');
SELECT column_name, time_interval FROM timescaledb_information.dimensions WHERE column_name='cagg_interval_setter';
column_name | time_interval
----------------------+---------------
cagg_interval_setter | @ 360 days
(1 row)
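-- Note (not part of the original test): the chunk interval is stored as a fixed
-- length, so month-based inputs are normalized to 30-day months; that is why
-- '6 month' is reported as @ 180 days and '1 year' as 12 * 30 days = @ 360 days
-- above.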