From 06eca172bda323779ba2105814507494e4d81303 Mon Sep 17 00:00:00 2001 From: Lakshmi Narayanan Sreethar Date: Tue, 10 Jan 2023 15:22:48 +0530 Subject: [PATCH] Fix telemetry_stats test failure in PG15 The telemetry_stats testcase uses random() with seed(1) to generate the column values on which the hypertable is partitioned. The Postgres commit postgres/postgres@3804539e48 updates the random() implementation to use a better algorithm causing the test to generate a different set of rows in PG15. Due to this the test failed in PG15 as the distribution stats of the tuples have now changed. Fixed that by creating separate test outputs for PG15 and other releases. Fixes #5037 --- .github/gh_matrix_builder.py | 3 +- .../workflows/linux-32bit-build-and-test.yaml | 2 +- .../workflows/sanitizer-build-and-test.yaml | 3 +- .github/workflows/windows-build-and-test.yaml | 2 +- ...metry_stats.out => telemetry_stats-12.out} | 0 tsl/test/expected/telemetry_stats-13.out | 1194 +++++++++++++++++ tsl/test/expected/telemetry_stats-14.out | 1194 +++++++++++++++++ tsl/test/expected/telemetry_stats-15.out | 1194 +++++++++++++++++ tsl/test/sql/.gitignore | 1 + tsl/test/sql/CMakeLists.txt | 6 +- ...metry_stats.sql => telemetry_stats.sql.in} | 0 11 files changed, 3590 insertions(+), 9 deletions(-) rename tsl/test/expected/{telemetry_stats.out => telemetry_stats-12.out} (100%) create mode 100644 tsl/test/expected/telemetry_stats-13.out create mode 100644 tsl/test/expected/telemetry_stats-14.out create mode 100644 tsl/test/expected/telemetry_stats-15.out rename tsl/test/sql/{telemetry_stats.sql => telemetry_stats.sql.in} (100%) diff --git a/.github/gh_matrix_builder.py b/.github/gh_matrix_builder.py index e97879bab..349d3a0d8 100755 --- a/.github/gh_matrix_builder.py +++ b/.github/gh_matrix_builder.py @@ -138,9 +138,8 @@ def macos_config(overrides): # common installcheck_args for all pg15 tests # dist_move_chunk is skipped due to #4972 -# telemetry_stats is ignored due to #5037 # 
partialize_finalize is ignored due to #4937 -pg15_installcheck_args = "SKIPS='dist_move_chunk' IGNORES='telemetry_stats partialize_finalize'" +pg15_installcheck_args = "SKIPS='dist_move_chunk' IGNORES='partialize_finalize'" # always test debug build on latest of all supported pg versions m["include"].append(build_debug_config({"pg": PG12_LATEST})) diff --git a/.github/workflows/linux-32bit-build-and-test.yaml b/.github/workflows/linux-32bit-build-and-test.yaml index e2a8c44cd..13a6f67ae 100644 --- a/.github/workflows/linux-32bit-build-and-test.yaml +++ b/.github/workflows/linux-32bit-build-and-test.yaml @@ -37,7 +37,7 @@ jobs: include: - pg: ${{ fromJson(needs.config.outputs.pg15_latest) }} skips_version: dist_move_chunk - ignores_version: telemetry_stats partialize_finalize + ignores_version: partialize_finalize steps: diff --git a/.github/workflows/sanitizer-build-and-test.yaml b/.github/workflows/sanitizer-build-and-test.yaml index 34c9aa6ca..bb2ce0859 100644 --- a/.github/workflows/sanitizer-build-and-test.yaml +++ b/.github/workflows/sanitizer-build-and-test.yaml @@ -64,9 +64,8 @@ jobs: pg: ${{ fromJson(needs.config.outputs.pg_latest) }} include: - pg: ${{ fromJson(needs.config.outputs.pg15_latest) }} - # telemetry_stats is ignored in PG15 due to #5037 # partialize_finalize is ignored in PG15 due to #4937 - ignores_version: telemetry_stats partialize_finalize + ignores_version: partialize_finalize skips_version: 002_replication_telemetry 003_connections_privs 004_multinode_rdwr_1pc steps: - name: Install Linux Dependencies diff --git a/.github/workflows/windows-build-and-test.yaml b/.github/workflows/windows-build-and-test.yaml index 8ca5f3df1..d7486e2bc 100644 --- a/.github/workflows/windows-build-and-test.yaml +++ b/.github/workflows/windows-build-and-test.yaml @@ -59,7 +59,7 @@ jobs: - pg: 15 pkg_version: 15.0.1 # hardcoded due to issues with PG15.1 on chocolatey tsl_skips_version: dist_partial_agg-15 dist_grant-15 - tsl_ignores_version: telemetry_stats 
partialize_finalize + tsl_ignores_version: partialize_finalize env: # PostgreSQL configuration PGPORT: 55432 diff --git a/tsl/test/expected/telemetry_stats.out b/tsl/test/expected/telemetry_stats-12.out similarity index 100% rename from tsl/test/expected/telemetry_stats.out rename to tsl/test/expected/telemetry_stats-12.out diff --git a/tsl/test/expected/telemetry_stats-13.out b/tsl/test/expected/telemetry_stats-13.out new file mode 100644 index 000000000..a68498af3 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-13.out @@ -0,0 +1,1194 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. 
+CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE 
MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + 
"uncompressed_indexes_size": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT 
count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0 + + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + 
"num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + 
compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + 
"compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Add distributed hypertables +\set DN_DBNAME_1 :TEST_DBNAME _1 +\set DN_DBNAME_2 :TEST_DBNAME _2 +-- Not an access node or data node +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM 
telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + | "none" +(1 row) + +-- Become an access node by adding a data node +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_1 | db_telemetry_stats_1 | t | t | t +(1 row) + +-- Telemetry should show one data node and "acces node" status +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + 1 | "access node" +(1 row) + +-- See telemetry report from a data node +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT test.remote_exec(NULL, $$ + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +num_data_nodes|distributed_member +--------------+------------------ + |"data node" +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_2 | db_telemetry_stats_2 | t | t | t +(1 row) + +CREATE TABLE disthyper (LIKE normal); +SELECT create_distributed_hypertable('disthyper', 'time', 'device'); + create_distributed_hypertable +------------------------------- + (6,public,disthyper,t) +(1 row) + +-- Show distributed hypertables stats with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + 
+ "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- No datanode-related stats on the access node +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn +FROM relations; + distributed_hypertables_dn +----------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0,+ + "num_compressed_hypertables": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + } +(1 row) + +-- Insert data into the distributed hypertable +INSERT INTO disthyper +SELECT * FROM normal; +-- Update telemetry stats and show output on access node and data +-- nodes. Note that the access node doesn't store data so shows +-- zero. It should have stats from ANALYZE, though, like +-- num_reltuples. 
+ANALYZE disthyper; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + 
"num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 357 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + "num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 340 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Add compression +ALTER TABLE disthyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------------- + _timescaledb_internal._dist_hyper_6_19_chunk + _timescaledb_internal._dist_hyper_6_20_chunk + _timescaledb_internal._dist_hyper_6_21_chunk + 
_timescaledb_internal._dist_hyper_6_22_chunk +(4 rows) + +ANALYZE disthyper; +-- Update telemetry stats and show updated compression stats +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, 
+ "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 72, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 357 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 44, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 340 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Create a replicated distributed hypertable and show replication stats +CREATE TABLE disthyper_repl (LIKE normal); +SELECT create_distributed_hypertable('disthyper_repl', 
'time', 'device', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (7,public,disthyper_repl,t) +(1 row) + +INSERT INTO disthyper_repl +SELECT * FROM normal; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 36, + + "num_relations": 2, + + "num_reltuples": 697, + + "num_replica_chunks": 18, + + "num_replicated_distributed_hypertables": 1+ + } +(1 row) + +-- Create a continuous aggregate on the distributed hypertable +CREATE MATERIALIZED VIEW distcontagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg" +CREATE MATERIALIZED VIEW distcontagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg_old" +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates +FROM relations; + continuous_aggregates +------------------------------------------------ + { + + "heap_size": 368640, + + "toast_size": 40960, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + 
"num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 409600, + + "num_children": 8, + + "num_relations": 4, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 2, + + "num_caggs_on_distributed_hypertables": 2,+ + "num_caggs_using_real_time_aggregation": 3+ + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? 
+---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, 
retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_internal schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_internal", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, 
'{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 
00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (10,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH 
(timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +DROP DATABASE :DN_DBNAME_1; +DROP DATABASE :DN_DBNAME_2; diff --git a/tsl/test/expected/telemetry_stats-14.out b/tsl/test/expected/telemetry_stats-14.out new file mode 100644 index 000000000..a68498af3 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-14.out @@ -0,0 +1,1194 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. 
+CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE 
MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + 
"uncompressed_indexes_size": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT 
count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0 + + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + 
"num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + 
compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + 
"compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Add distributed hypertables +\set DN_DBNAME_1 :TEST_DBNAME _1 +\set DN_DBNAME_2 :TEST_DBNAME _2 +-- Not an access node or data node +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM 
telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + | "none" +(1 row) + +-- Become an access node by adding a data node +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_1 | db_telemetry_stats_1 | t | t | t +(1 row) + +-- Telemetry should show one data node and "acces node" status +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + 1 | "access node" +(1 row) + +-- See telemetry report from a data node +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT test.remote_exec(NULL, $$ + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +num_data_nodes|distributed_member +--------------+------------------ + |"data node" +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_2 | db_telemetry_stats_2 | t | t | t +(1 row) + +CREATE TABLE disthyper (LIKE normal); +SELECT create_distributed_hypertable('disthyper', 'time', 'device'); + create_distributed_hypertable +------------------------------- + (6,public,disthyper,t) +(1 row) + +-- Show distributed hypertables stats with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + 
+ "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- No datanode-related stats on the access node +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn +FROM relations; + distributed_hypertables_dn +----------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0,+ + "num_compressed_hypertables": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + } +(1 row) + +-- Insert data into the distributed hypertable +INSERT INTO disthyper +SELECT * FROM normal; +-- Update telemetry stats and show output on access node and data +-- nodes. Note that the access node doesn't store data so shows +-- zero. It should have stats from ANALYZE, though, like +-- num_reltuples. 
+ANALYZE disthyper; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + 
"num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 357 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + "num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 340 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Add compression +ALTER TABLE disthyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------------- + _timescaledb_internal._dist_hyper_6_19_chunk + _timescaledb_internal._dist_hyper_6_20_chunk + _timescaledb_internal._dist_hyper_6_21_chunk + 
_timescaledb_internal._dist_hyper_6_22_chunk +(4 rows) + +ANALYZE disthyper; +-- Update telemetry stats and show updated compression stats +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, 
+ "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 72, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 357 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 44, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 340 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Create a replicated distributed hypertable and show replication stats +CREATE TABLE disthyper_repl (LIKE normal); +SELECT create_distributed_hypertable('disthyper_repl', 
'time', 'device', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (7,public,disthyper_repl,t) +(1 row) + +INSERT INTO disthyper_repl +SELECT * FROM normal; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 36, + + "num_relations": 2, + + "num_reltuples": 697, + + "num_replica_chunks": 18, + + "num_replicated_distributed_hypertables": 1+ + } +(1 row) + +-- Create a continuous aggregate on the distributed hypertable +CREATE MATERIALIZED VIEW distcontagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg" +CREATE MATERIALIZED VIEW distcontagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg_old" +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates +FROM relations; + continuous_aggregates +------------------------------------------------ + { + + "heap_size": 368640, + + "toast_size": 40960, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + 
"num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 409600, + + "num_children": 8, + + "num_relations": 4, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 2, + + "num_caggs_on_distributed_hypertables": 2,+ + "num_caggs_using_real_time_aggregation": 3+ + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? 
+---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, 
retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_internal schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_internal", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, 
'{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 
00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (10,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH 
(timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +DROP DATABASE :DN_DBNAME_1; +DROP DATABASE :DN_DBNAME_2; diff --git a/tsl/test/expected/telemetry_stats-15.out b/tsl/test/expected/telemetry_stats-15.out new file mode 100644 index 000000000..9b4c4fa19 --- /dev/null +++ b/tsl/test/expected/telemetry_stats-15.out @@ -0,0 +1,1194 @@ +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. +--telemetry tests that require a community license +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER; +-- function call info size is too variable for this test, so disable it +SET timescaledb.telemetry_level='no_functions'; +SELECT setseed(1); + setseed +--------- + +(1 row) + +-- Create a materialized view from the telemetry report so that we +-- don't regenerate telemetry for every query. Filter heap_size for +-- materialized views since PG14 reports a different heap size for +-- them compared to earlier PG versions. 
+CREATE MATERIALIZED VIEW telemetry_report AS +SELECT (r #- '{relations,materialized_views,heap_size}') AS r +FROM get_telemetry_report() r; +CREATE VIEW relations AS +SELECT r -> 'relations' AS rels +FROM telemetry_report; +SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs, + rels -> 'hypertables' -> 'num_relations' AS num_hypertables +FROM relations; + num_continuous_aggs | num_hypertables +---------------------+----------------- + 0 | 0 +(1 row) + +-- check telemetry picks up flagged content from metadata +SELECT r -> 'db_metadata' AS db_metadata +FROM telemetry_report; + db_metadata +------------- + {} +(1 row) + +-- check timescaledb_telemetry.cloud +SELECT r -> 'instance_metadata' AS instance_metadata +FROM telemetry_report r; + instance_metadata +------------------- + {"cloud": "ci"} +(1 row) + +CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float); +CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time); +CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device); +CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device); +CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0); +CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1); +CREATE TABLE hyper (LIKE normal); +SELECT table_name FROM create_hypertable('hyper', 'time'); + table_name +------------ + hyper +(1 row) + +CREATE MATERIALIZED VIEW contagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg" is already up-to-date +CREATE 
MATERIALIZED VIEW contagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + hyper +GROUP BY hour, device; +NOTICE: continuous aggregate "contagg_old" is already up-to-date +-- Create another view (already have the "relations" view) +CREATE VIEW devices AS +SELECT DISTINCT ON (device) device +FROM hyper; +-- Show relations with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 0, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 0 + + }, + + "hypertables": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 8192, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 0, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "continuous_aggregates": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + 
"uncompressed_indexes_size": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Insert data +INSERT INTO normal +SELECT t, ceil(random() * 10)::int, random() * 30 +FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t; +INSERT INTO hyper +SELECT * FROM normal; +INSERT INTO part +SELECT * FROM normal; +CALL refresh_continuous_aggregate('contagg', NULL, NULL); +CALL refresh_continuous_aggregate('contagg_old', NULL, NULL); +-- ANALYZE to get updated reltuples stats +ANALYZE normal, hyper, part; +SELECT count(c) FROM show_chunks('hyper') c; + count +------- + 9 +(1 row) + +SELECT 
count(c) FROM show_chunks('contagg') c; + count +------- + 2 +(1 row) + +SELECT count(c) FROM show_chunks('contagg_old') c; + count +------- + 2 +(1 row) + +-- Update and show the telemetry report +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 155648, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 188416, + + "toast_size": 16384, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "num_compressed_caggs": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0 + + }, + + "indexes_size": 229376, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 0, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + 
"num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 2 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Actual row count should be the same as reltuples stats for all tables +SELECT (SELECT count(*) FROM normal) num_inserted_rows, + (SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples, + (SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples, + (SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples; + num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples +-------------------+------------------+-----------------+---------------- + 697 | 697 | 697 | 697 +(1 row) + +-- Add compression +ALTER TABLE hyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('hyper') c ORDER BY c LIMIT 4; + 
compress_chunk +---------------------------------------- + _timescaledb_internal._hyper_1_1_chunk + _timescaledb_internal._hyper_1_2_chunk + _timescaledb_internal._hyper_1_3_chunk + _timescaledb_internal._hyper_1_4_chunk +(4 rows) + +ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('contagg') c ORDER BY c LIMIT 1; + compress_chunk +----------------------------------------- + _timescaledb_internal._hyper_2_10_chunk +(1 row) + +-- Turn of real-time aggregation +ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true); +ANALYZE normal, hyper, part; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT jsonb_pretty(rels) AS relations FROM relations; + relations +----------------------------------------------------- + { + + "views": { + + "num_relations": 2 + + }, + + "tables": { + + "heap_size": 65536, + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 2, + + "num_reltuples": 697 + + }, + + "hypertables": { + + "heap_size": 73728, + + "toast_size": 32768, + + "compression": { + + "compressed_heap_size": 32768, + + "compressed_row_count": 4, + + "compressed_toast_size": 32768, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 32768, + + "uncompressed_row_count": 284, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 65536, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 122880, + + "num_children": 9, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "materialized_views": { + + "toast_size": 8192, + + "indexes_size": 0, + + "num_relations": 1, + + "num_reltuples": 0 + + }, + + "partitioned_tables": { + + "heap_size": 98304, + + "toast_size": 0, + + "indexes_size": 0, + + "num_children": 6, + + "num_relations": 1, + + "num_reltuples": 697 + + }, + + "continuous_aggregates": { + + "heap_size": 180224, + + "toast_size": 24576, + + "compression": { + + "compressed_heap_size": 40960, + + 
"compressed_row_count": 10, + + "num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 180224, + + "num_children": 4, + + "num_relations": 2, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 1, + + "num_caggs_on_distributed_hypertables": 0, + + "num_caggs_using_real_time_aggregation": 1 + + }, + + "distributed_hypertables_data_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + }, + + "distributed_hypertables_access_node": { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } + + } +(1 row) + +-- Add distributed hypertables +\set DN_DBNAME_1 :TEST_DBNAME _1 +\set DN_DBNAME_2 :TEST_DBNAME _2 +-- Not an access node or data node +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM 
telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + | "none" +(1 row) + +-- Become an access node by adding a data node +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_1', host => 'localhost', database => :'DN_DBNAME_1'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_1 | db_telemetry_stats_1 | t | t | t +(1 row) + +-- Telemetry should show one data node and "acces node" status +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT r -> 'num_data_nodes' AS num_data_nodes, + r -> 'distributed_member' AS distributed_member +FROM telemetry_report; + num_data_nodes | distributed_member +----------------+-------------------- + 1 | "access node" +(1 row) + +-- See telemetry report from a data node +\ir include/remote_exec.sql +-- This file and its contents are licensed under the Timescale License. +-- Please see the included NOTICE for copyright information and +-- LICENSE-TIMESCALE for a copy of the license. 
+CREATE SCHEMA IF NOT EXISTS test; +psql:include/remote_exec.sql:5: NOTICE: schema "test" already exists, skipping +GRANT USAGE ON SCHEMA test TO PUBLIC; +CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text) +RETURNS VOID +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec' +LANGUAGE C; +CREATE OR REPLACE FUNCTION test.remote_exec_get_result_strings(srv_name name[], command text) +RETURNS TABLE("table_record" CSTRING[]) +AS :TSL_MODULE_PATHNAME, 'ts_remote_exec_get_result_strings' +LANGUAGE C; +SELECT test.remote_exec(NULL, $$ + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT t -> 'num_data_nodes' AS num_data_nodes, + t -> 'distributed_member' AS distributed_member + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +num_data_nodes|distributed_member +--------------+------------------ + |"data node" +(1 row) + + + remote_exec +------------- + +(1 row) + +SELECT node_name, database, node_created, database_created, extension_created +FROM add_data_node('data_node_2', host => 'localhost', database => :'DN_DBNAME_2'); + node_name | database | node_created | database_created | extension_created +-------------+----------------------+--------------+------------------+------------------- + data_node_2 | db_telemetry_stats_2 | t | t | t +(1 row) + +CREATE TABLE disthyper (LIKE normal); +SELECT create_distributed_hypertable('disthyper', 'time', 'device'); + create_distributed_hypertable +------------------------------- + (6,public,disthyper,t) +(1 row) + +-- Show distributed hypertables stats with no data +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + 
+ "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 1, + + "num_reltuples": 0, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- No datanode-related stats on the access node +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn +FROM relations; + distributed_hypertables_dn +----------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0,+ + "num_compressed_hypertables": 0+ + }, + + "indexes_size": 0, + + "num_children": 0, + + "num_relations": 0, + + "num_reltuples": 0 + + } +(1 row) + +-- Insert data into the distributed hypertable +INSERT INTO disthyper +SELECT * FROM normal; +-- Update telemetry stats and show output on access node and data +-- nodes. Note that the access node doesn't store data so shows +-- zero. It should have stats from ANALYZE, though, like +-- num_reltuples. 
+ANALYZE disthyper; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 0, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 0 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + 
"num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 368 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 0, + "compression": { + "compressed_heap_size": 0, + "compressed_row_count": 0, + "compressed_toast_size": 0, + "num_compressed_chunks": 0, + "uncompressed_heap_size": 0, + "uncompressed_row_count": 0, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 0, + "num_compressed_hypertables": 0 + }, + "indexes_size": 311296, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 329 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Add compression +ALTER TABLE disthyper SET (timescaledb.compress); +SELECT compress_chunk(c) +FROM show_chunks('disthyper') c ORDER BY c LIMIT 4; + compress_chunk +---------------------------------------------- + _timescaledb_internal._dist_hyper_6_19_chunk + _timescaledb_internal._dist_hyper_6_20_chunk + _timescaledb_internal._dist_hyper_6_21_chunk + 
_timescaledb_internal._dist_hyper_6_22_chunk +(4 rows) + +ANALYZE disthyper; +-- Update telemetry stats and show updated compression stats +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 18, + + "num_relations": 1, + + "num_reltuples": 697, + + "num_replica_chunks": 0, + + "num_replicated_distributed_hypertables": 0+ + } +(1 row) + +-- Show data node stats +SELECT test.remote_exec(NULL, $$ + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t; +$$); +NOTICE: [data_node_1]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_1]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, 
+ "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 56, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 368 +} +(1 row) + + +NOTICE: [data_node_2]: + SELECT + jsonb_pretty(t -> 'relations' -> 'distributed_hypertables_data_node') AS distributed_hypertables_dn + FROM get_telemetry_report() t +NOTICE: [data_node_2]: +distributed_hypertables_dn +----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- +{ + "heap_size": 73728, + "toast_size": 16384, + "compression": { + "compressed_heap_size": 16384, + "compressed_row_count": 2, + "compressed_toast_size": 16384, + "num_compressed_chunks": 2, + "uncompressed_heap_size": 16384, + "uncompressed_row_count": 60, + "compressed_indexes_size": 0, + "uncompressed_toast_size": 0, + "uncompressed_indexes_size": 65536, + "num_compressed_hypertables": 1 + }, + "indexes_size": 278528, + "num_children": 9, + "num_relations": 1, + "num_reltuples": 329 +} +(1 row) + + + remote_exec +------------- + +(1 row) + +-- Create a replicated distributed hypertable and show replication stats +CREATE TABLE disthyper_repl (LIKE normal); +SELECT create_distributed_hypertable('disthyper_repl', 
'time', 'device', replication_factor => 2); + create_distributed_hypertable +------------------------------- + (7,public,disthyper_repl,t) +(1 row) + +INSERT INTO disthyper_repl +SELECT * FROM normal; +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'distributed_hypertables_access_node') AS distributed_hypertables_an +FROM relations; + distributed_hypertables_an +------------------------------------------------- + { + + "heap_size": 0, + + "toast_size": 0, + + "compression": { + + "compressed_heap_size": 0, + + "compressed_row_count": 0, + + "compressed_toast_size": 0, + + "num_compressed_chunks": 4, + + "uncompressed_heap_size": 0, + + "uncompressed_row_count": 0, + + "compressed_indexes_size": 0, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 0, + + "num_compressed_hypertables": 1 + + }, + + "indexes_size": 0, + + "num_children": 36, + + "num_relations": 2, + + "num_reltuples": 697, + + "num_replica_chunks": 18, + + "num_replicated_distributed_hypertables": 1+ + } +(1 row) + +-- Create a continuous aggregate on the distributed hypertable +CREATE MATERIALIZED VIEW distcontagg +WITH (timescaledb.continuous) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg" +CREATE MATERIALIZED VIEW distcontagg_old +WITH (timescaledb.continuous, timescaledb.finalized=false) AS +SELECT + time_bucket('1 hour', time) AS hour, + device, + min(time) +FROM + disthyper +GROUP BY hour, device; +NOTICE: refreshing continuous aggregate "distcontagg_old" +REFRESH MATERIALIZED VIEW telemetry_report; +SELECT + jsonb_pretty(rels -> 'continuous_aggregates') AS continuous_aggregates +FROM relations; + continuous_aggregates +------------------------------------------------ + { + + "heap_size": 368640, + + "toast_size": 40960, + + "compression": { + + "compressed_heap_size": 40960, + + "compressed_row_count": 10, + + 
"num_compressed_caggs": 1, + + "compressed_toast_size": 8192, + + "num_compressed_chunks": 1, + + "uncompressed_heap_size": 49152, + + "uncompressed_row_count": 452, + + "compressed_indexes_size": 16384, + + "uncompressed_toast_size": 0, + + "uncompressed_indexes_size": 81920 + + }, + + "indexes_size": 409600, + + "num_children": 8, + + "num_relations": 4, + + "num_reltuples": 452, + + "num_caggs_nested": 0, + + "num_caggs_finalized": 2, + + "num_caggs_on_distributed_hypertables": 2,+ + "num_caggs_using_real_time_aggregation": 3+ + } +(1 row) + +-- check telemetry for fixed schedule jobs works +create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_fixed'; +end +$$; +create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$ +begin +raise log 'this is job_test_drifting'; +end +$$; +-- before adding the jobs +select get_telemetry_report()->'num_user_defined_actions_fixed'; + ?column? +---------- + 0 +(1 row) + +select get_telemetry_report()->'num_user_defined_actions'; + ?column? 
+---------- + 0 +(1 row) + +select add_job('job_test_fixed', '1 week'); + add_job +--------- + 1000 +(1 row) + +select add_job('job_test_drifting', '1 week', fixed_schedule => false); + add_job +--------- + 1001 +(1 row) + +-- add continuous aggregate refresh policy for contagg +select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting + add_continuous_aggregate_policy +--------------------------------- + 1002 +(1 row) + +select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed + add_continuous_aggregate_policy +--------------------------------- + 1003 +(1 row) + +-- add retention policy, fixed +select add_retention_policy('hyper', interval '1 year', initial_start => now()); + add_retention_policy +---------------------- + 1004 +(1 row) + +-- add compression policy +select add_compression_policy('hyper', interval '3 weeks', initial_start => now()); + add_compression_policy +------------------------ + 1005 +(1 row) + +select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r; + uda_fixed | uda_drifting +-----------+-------------- + 1 | 1 +(1 row) + +select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r; + contagg_fixed | contagg_drifting +---------------+------------------ + 1 | 1 +(1 row) + +select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r; + compress_fixed | retention_fixed +----------------+----------------- + 1 | 1 +(1 row) + +DELETE FROM _timescaledb_config.bgw_job WHERE id = 2; +TRUNCATE _timescaledb_internal.job_errors; +-- create some "errors" for testing +INSERT INTO +_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, 
retry_period, proc_schema, proc_name) +VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'), +(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'), +(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_compression'), +(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_retention'), +(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'), +-- user decided to define a custom action in the _timescaledb_internal schema, we group it with the User-defined actions +(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_internal', 'policy_refresh_continuous_aggregate'); +-- create some errors for them +INSERT INTO +_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data) +values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'), +(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'), +(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'), +(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_internal", "proc_name": "policy_compression"}'), +(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, 
'{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_retention"}'), +(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'), +(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_internal", "proc_name": "policy_refresh_continuous_aggregate"}'); +-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs +SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode'); + jsonb_pretty +---------------------------------------------- + { + + "policy_retention": { + + "P0001": 1 + + }, + + "policy_compression": { + + "JF009": 1 + + }, + + "user_defined_action": { + + "ABCDE": 1, + + "P0001": 2 + + }, + + "policy_refresh_continuous_aggregate": {+ + "P0001": 2 + + } + + } +(1 row) + +-- for job statistics, insert some records into bgw_job_stats +INSERT INTO _timescaledb_internal.bgw_job_stat +values +(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2004, '2040-01-01 
00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0), +(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz, +false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0); +SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type'); + jsonb_pretty +------------------------------------------------ + { + + "policy_retention": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_compression": { + + "total_runs": 1, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 1, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 2 secs",+ + "max_consecutive_failures": 1 + + }, + + "user_defined_action": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + }, + + "policy_refresh_continuous_aggregate": { + + "total_runs": 2, + + "total_crashes": 0, + + "total_duration": "@ 0", + + "total_failures": 2, + + "total_successes": 0, + + "max_consecutive_crashes": 0, + + "total_duration_failures": "@ 4 secs",+ + "max_consecutive_failures": 1 + + } + + } +(1 row) + +-- create nested continuous aggregates - copied from cagg_on_cagg_common +CREATE TABLE conditions ( + time timestamptz NOT NULL, + temperature int +); +SELECT create_hypertable('conditions', 'time'); + create_hypertable +-------------------------- + (10,public,conditions,t) +(1 row) + +CREATE MATERIALIZED VIEW conditions_summary_hourly_1 +WITH 
(timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 hour', "time") AS bucket, + SUM(temperature) AS temperature +FROM conditions +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_daily_2 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 day', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_hourly_1 +GROUP BY 1 +WITH NO DATA; +CREATE MATERIALIZED VIEW conditions_summary_weekly_3 +WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS +SELECT + time_bucket('1 week', "bucket") AS bucket, + SUM(temperature) AS temperature +FROM conditions_summary_daily_2 +GROUP BY 1 +WITH NO DATA; +SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested'); + jsonb_pretty +-------------- + 2 +(1 row) + +DROP VIEW relations; +DROP MATERIALIZED VIEW telemetry_report; +\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER +DROP DATABASE :DN_DBNAME_1; +DROP DATABASE :DN_DBNAME_2; diff --git a/tsl/test/sql/.gitignore b/tsl/test/sql/.gitignore index 694cf628e..c93e599e9 100644 --- a/tsl/test/sql/.gitignore +++ b/tsl/test/sql/.gitignore @@ -17,5 +17,6 @@ /plan_skip_scan-*.sql /remote-copy-*sv /remote_copy-*.sql +/telemetry_stats-*.sql /transparent_decompression-*.sql /transparent_decompression_ordered_index-*.sql diff --git a/tsl/test/sql/CMakeLists.txt b/tsl/test/sql/CMakeLists.txt index 9901683ac..1e5ee521c 100644 --- a/tsl/test/sql/CMakeLists.txt +++ b/tsl/test/sql/CMakeLists.txt @@ -95,9 +95,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) transparent_decompression_queries.sql tsl_tables.sql license_tsl.sql) - if(USE_TELEMETRY) - list(APPEND TEST_FILES telemetry_stats.sql) - endif() endif(CMAKE_BUILD_TYPE MATCHES Debug) if((${PG_VERSION_MAJOR} GREATER_EQUAL "14")) @@ -153,6 +150,9 @@ if(CMAKE_BUILD_TYPE MATCHES Debug) dist_query.sql.in cagg_invalidation_dist_ht.sql.in continuous_aggs.sql.in) + if(USE_TELEMETRY) + 
list(APPEND TEST_TEMPLATES telemetry_stats.sql.in) + endif() endif(CMAKE_BUILD_TYPE MATCHES Debug) # Check if PostgreSQL was compiled with JIT support diff --git a/tsl/test/sql/telemetry_stats.sql b/tsl/test/sql/telemetry_stats.sql.in similarity index 100% rename from tsl/test/sql/telemetry_stats.sql rename to tsl/test/sql/telemetry_stats.sql.in