Mirror of https://github.com/timescale/timescaledb.git
Reduce WAL activity by freezing tuples immediately
When we compress a chunk, we create a new compressed chunk for storing the compressed data. So far, the tuples were just inserted into the compressed chunk and frozen by a later vacuum run. However, freezing the tuples later causes additional WAL activity, which can be avoided because the compressed chunk is created in the same transaction as the tuples. This patch reduces the WAL activity by writing these tuples directly in the frozen state, which prevents a later freeze operation. This approach is similar to PostgreSQL's COPY FREEZE.
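As a rough sketch of the mechanism (not the code from this patch; the helper and the relation_is_new flag are invented for illustration), PostgreSQL's heap_insert() accepts the HEAP_INSERT_FROZEN option, and passing it is only safe when the target relation was created in the current transaction, which is exactly the situation the compressor is in when it creates the compressed chunk:

#include "postgres.h"
#include <access/heapam.h>
#include <utils/rel.h>

/*
 * Hypothetical helper: write a tuple frozen only when the target relation was
 * created by the current transaction (the same reasoning as COPY ... FREEZE).
 * Frozen tuples never need a later freeze pass, so that WAL is never
 * generated, but they are immediately visible to every snapshot, hence the
 * guard on relation_is_new.
 */
static void
insert_possibly_frozen(Relation rel, HeapTuple tuple, CommandId cid,
                       BulkInsertState bistate, bool relation_is_new)
{
    int options = relation_is_new ? HEAP_INSERT_FROZEN : 0;

    heap_insert(rel, tuple, cid, options, bistate);
}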
This commit is contained in:
parent
dc91938885
commit
8767de658b
Changed paths: .unreleased, sql, src, tsl/src, tsl/test/expected (bgw_custom-13.out, bgw_custom-14.out, bgw_custom-15.out, bgw_custom-16.out, compression.out, compression_bgw-13.out, compression_bgw-14.out, compression_bgw-15.out, compression_bgw-16.out, telemetry_stats-13.out, telemetry_stats-14.out, telemetry_stats-15.out, telemetry_stats-16.out, telemetry_stats.out), tsl/test/isolation, tsl/test/sql, tsl/test/src
.unreleased/feature_5890 (new file, 1 line)
@@ -0,0 +1 @@
+Implements: #5890 Reduce WAL activity by freezing compressed tuples immediately
@@ -495,6 +495,7 @@ CREATE TABLE _timescaledb_catalog.compression_chunk_size (
  compressed_index_size bigint NOT NULL,
  numrows_pre_compression bigint,
  numrows_post_compression bigint,
+ numrows_frozen_immediately bigint,
  -- table constraints
  CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
  CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
@@ -179,3 +179,53 @@ DROP TABLE _timescaledb_internal.tmp_chunk_seq_value;
GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC;
GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC;
-- end recreate _timescaledb_catalog.chunk table --
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to
+-- add new column `numrows_frozen_immediately`
+--
+CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp
+    AS SELECT * from _timescaledb_catalog.compression_chunk_size;
+
+-- Drop depended views
+-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update
+-- (see above)
+
+-- Drop table
+ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size;
+DROP TABLE _timescaledb_catalog.compression_chunk_size;
+
+CREATE TABLE _timescaledb_catalog.compression_chunk_size (
+  chunk_id integer NOT NULL,
+  compressed_chunk_id integer NOT NULL,
+  uncompressed_heap_size bigint NOT NULL,
+  uncompressed_toast_size bigint NOT NULL,
+  uncompressed_index_size bigint NOT NULL,
+  compressed_heap_size bigint NOT NULL,
+  compressed_toast_size bigint NOT NULL,
+  compressed_index_size bigint NOT NULL,
+  numrows_pre_compression bigint,
+  numrows_post_compression bigint,
+  numrows_frozen_immediately bigint,
+  -- table constraints
+  CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
+  CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
+  CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
+);
+
+INSERT INTO _timescaledb_catalog.compression_chunk_size
+  (chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+  uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+  compressed_index_size, numrows_pre_compression, numrows_post_compression, numrows_frozen_immediately)
+SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+  uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+  compressed_index_size, numrows_pre_compression, numrows_post_compression, 0
+FROM _timescaledb_internal.compression_chunk_size_tmp;
+
+DROP TABLE _timescaledb_internal.compression_chunk_size_tmp;
+
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
+
+GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC;
+
+-- End modify `_timescaledb_catalog.compression_chunk_size`
@@ -124,3 +124,53 @@ GRANT SELECT ON _timescaledb_catalog.chunk_id_seq TO PUBLIC;
GRANT SELECT ON _timescaledb_catalog.chunk TO PUBLIC;

-- end recreate _timescaledb_catalog.chunk table --
+
+
+--
+-- Rebuild the catalog table `_timescaledb_catalog.compression_chunk_size` to
+-- remove column `numrows_frozen_immediately`
+--
+CREATE TABLE _timescaledb_internal.compression_chunk_size_tmp
+    AS SELECT * from _timescaledb_catalog.compression_chunk_size;
+
+-- Drop depended views
+-- We assume that '_timescaledb_internal.compressed_chunk_stats' was already dropped in this update
+-- (see above)
+
+-- Drop table
+ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.compression_chunk_size;
+DROP TABLE _timescaledb_catalog.compression_chunk_size;
+
+CREATE TABLE _timescaledb_catalog.compression_chunk_size (
+  chunk_id integer NOT NULL,
+  compressed_chunk_id integer NOT NULL,
+  uncompressed_heap_size bigint NOT NULL,
+  uncompressed_toast_size bigint NOT NULL,
+  uncompressed_index_size bigint NOT NULL,
+  compressed_heap_size bigint NOT NULL,
+  compressed_toast_size bigint NOT NULL,
+  compressed_index_size bigint NOT NULL,
+  numrows_pre_compression bigint,
+  numrows_post_compression bigint,
+  -- table constraints
+  CONSTRAINT compression_chunk_size_pkey PRIMARY KEY (chunk_id),
+  CONSTRAINT compression_chunk_size_chunk_id_fkey FOREIGN KEY (chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE,
+  CONSTRAINT compression_chunk_size_compressed_chunk_id_fkey FOREIGN KEY (compressed_chunk_id) REFERENCES _timescaledb_catalog.chunk (id) ON DELETE CASCADE
+);
+
+INSERT INTO _timescaledb_catalog.compression_chunk_size
+  (chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+  uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+  compressed_index_size, numrows_pre_compression, numrows_post_compression)
+SELECT chunk_id, compressed_chunk_id, uncompressed_heap_size, uncompressed_toast_size,
+  uncompressed_index_size, compressed_heap_size, compressed_toast_size,
+  compressed_index_size, numrows_pre_compression, numrows_post_compression
+FROM _timescaledb_internal.compression_chunk_size_tmp;
+
+DROP TABLE _timescaledb_internal.compression_chunk_size_tmp;
+
+SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.compression_chunk_size', '');
+
+GRANT SELECT ON _timescaledb_catalog.compression_chunk_size TO PUBLIC;
+
+-- End modify `_timescaledb_catalog.compression_chunk_size`
@@ -335,6 +335,7 @@ add_chunk_stats(HyperStats *stats, Form_pg_class class, const Chunk *chunk,
        stats->uncompressed_toast_size += fd_compr->uncompressed_toast_size;
        stats->uncompressed_row_count += fd_compr->numrows_pre_compression;
        stats->compressed_row_count += fd_compr->numrows_post_compression;
+       stats->compressed_row_frozen_immediately_count += fd_compr->numrows_frozen_immediately;

        /* Also add compressed sizes to total number for entire table */
        stats->storage.relsize.heap_size += fd_compr->compressed_heap_size;
@@ -64,6 +64,7 @@ typedef struct HyperStats
    int64 compressed_indexes_size;
    int64 compressed_toast_size;
    int64 compressed_row_count;
+   int64 compressed_row_frozen_immediately_count;
    int64 uncompressed_heap_size;
    int64 uncompressed_indexes_size;
    int64 uncompressed_toast_size;
@@ -604,6 +604,7 @@ format_iso8601(Datum value)
#define REQ_RELKIND_COMPRESSED_TOAST_SIZE "compressed_toast_size"
#define REQ_RELKIND_COMPRESSED_INDEXES_SIZE "compressed_indexes_size"
#define REQ_RELKIND_COMPRESSED_ROWCOUNT "compressed_row_count"
+#define REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY "compressed_row_count_frozen_immediately"

#define REQ_RELKIND_CAGG_ON_DISTRIBUTED_HYPERTABLE_COUNT "num_caggs_on_distributed_hypertables"
#define REQ_RELKIND_CAGG_USES_REAL_TIME_AGGREGATION_COUNT "num_caggs_using_real_time_aggregation"
@@ -639,6 +640,9 @@ add_compression_stats_object(JsonbParseState *parse_state, StatsRelType reltype,
    ts_jsonb_add_int64(parse_state,
                       REQ_RELKIND_COMPRESSED_INDEXES_SIZE,
                       hs->compressed_indexes_size);
+   ts_jsonb_add_int64(parse_state,
+                      REQ_RELKIND_COMPRESSED_ROWCOUNT_FROZEN_IMMEDIATELY,
+                      hs->compressed_row_frozen_immediately_count);
    ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_ROWCOUNT, hs->uncompressed_row_count);
    ts_jsonb_add_int64(parse_state, REQ_RELKIND_UNCOMPRESSED_HEAP_SIZE, hs->uncompressed_heap_size);
    ts_jsonb_add_int64(parse_state,
@@ -1289,6 +1289,7 @@ typedef enum Anum_compression_chunk_size
    Anum_compression_chunk_size_compressed_index_size,
    Anum_compression_chunk_size_numrows_pre_compression,
    Anum_compression_chunk_size_numrows_post_compression,
+   Anum_compression_chunk_size_numrows_frozen_immediately,
    _Anum_compression_chunk_size_max,
} Anum_compression_chunk_size;

@@ -1306,6 +1307,7 @@ typedef struct FormData_compression_chunk_size
    int64 compressed_index_size;
    int64 numrows_pre_compression;
    int64 numrows_post_compression;
+   int64 numrows_frozen_immediately;
} FormData_compression_chunk_size;

typedef FormData_compression_chunk_size *Form_compression_chunk_size;
@@ -549,6 +549,7 @@ chunk_copy_get_source_compressed_chunk_stats(ChunkCopy *cc)
    cc->fd_ccs.compressed_index_size = atoll(PQgetvalue(res, 0, 5));
    cc->fd_ccs.numrows_pre_compression = atoll(PQgetvalue(res, 0, 6));
    cc->fd_ccs.numrows_post_compression = atoll(PQgetvalue(res, 0, 7));
+   cc->fd_ccs.numrows_frozen_immediately = 0;

    ts_dist_cmd_close_response(dist_res);
}
@@ -58,7 +58,8 @@ typedef struct CompressChunkCxt
static void
compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *src_size,
                                      int32 compress_chunk_id, const RelationSize *compress_size,
-                                     int64 rowcnt_pre_compression, int64 rowcnt_post_compression)
+                                     int64 rowcnt_pre_compression, int64 rowcnt_post_compression,
+                                     int64 rowcnt_frozen)
{
    Catalog *catalog = ts_catalog_get();
    Relation rel;
@@ -93,6 +94,8 @@ compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *src_size,
        Int64GetDatum(rowcnt_pre_compression);
    values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_post_compression)] =
        Int64GetDatum(rowcnt_post_compression);
+   values[AttrNumberGetAttrOffset(Anum_compression_chunk_size_numrows_frozen_immediately)] =
+       Int64GetDatum(rowcnt_frozen);

    ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
    ts_catalog_insert_values(rel, desc, values, nulls);
@@ -487,6 +490,27 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
        compress_ht_chunk = ts_chunk_get_by_id(mergable_chunk->fd.compressed_chunk_id, true);
        result_chunk_id = mergable_chunk->table_id;
    }
+
+   /* Since the compressed relation is created in the same transaction as the tuples that will be
+    * written by the compressor, we can insert the tuples directly in frozen state. This is the
+    * same logic as performed in COPY FREEZE.
+    *
+    * Note: Tuples inserted with HEAP_INSERT_FROZEN become immediately visible to all transactions
+    * (they violate the MVCC pattern). So, this flag can only be used when creating the compressed
+    * chunk in the same transaction as the compressed tuples are inserted.
+    *
+    * If this isn't the case, then tuples can be seen multiple times by parallel readers - once in
+    * the uncompressed part of the hypertable (since they are not deleted in the transaction) and
+    * once in the compressed part of the hypertable since the MVCC semantic is violated due to the
+    * flag.
+    *
+    * In contrast, when the compressed chunk part is created in the same transaction as the tuples
+    * are written, the compressed chunk (i.e., the catalog entry) becomes visible to other
+    * transactions only after the transaction that performs the compression is committed and
+    * the uncompressed chunk is truncated.
+    */
+   int insert_options = new_compressed_chunk ? HEAP_INSERT_FROZEN : 0;
+
    /* convert list to array of pointers for compress_chunk */
    colinfo_array = palloc(sizeof(ColumnCompressionInfo *) * htcols_listlen);
    foreach (lc, htcols_list)
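Restated as a tiny hypothetical helper (not part of the diff; the real decision is the insert_options assignment above): the freeze option is derived only from whether this transaction created the compressed chunk, which is also why the recompression entry points later in this diff pass 0 /*insert options*/ instead.

/* Hypothetical restatement of the rule from the comment above. */
static int
frozen_insert_options(bool compressed_chunk_created_in_this_xact)
{
    /* Freezing is only safe for a chunk no other transaction can see yet. */
    return compressed_chunk_created_in_this_xact ? HEAP_INSERT_FROZEN : 0;
}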
@@ -498,7 +522,8 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
    cstat = compress_chunk(cxt.srcht_chunk->table_id,
                           compress_ht_chunk->table_id,
                           colinfo_array,
-                          htcols_listlen);
+                          htcols_listlen,
+                          insert_options);

    /* Drop all FK constraints on the uncompressed chunk. This is needed to allow
     * cascading deleted data in FK-referenced tables, while blocking deleting data
@@ -514,7 +539,8 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
                                          compress_ht_chunk->fd.id,
                                          &after_size,
                                          cstat.rowcnt_pre_compression,
-                                         cstat.rowcnt_post_compression);
+                                         cstat.rowcnt_post_compression,
+                                         cstat.rowcnt_frozen);

    /* Copy chunk constraints (including fkey) to compressed chunk.
     * Do this after compressing the chunk to avoid holding strong, unnecessary locks on the
@@ -811,7 +837,8 @@ tsl_create_compressed_chunk(PG_FUNCTION_ARGS)
                                          compress_ht_chunk->fd.id,
                                          &compressed_size,
                                          numrows_pre_compression,
-                                         numrows_post_compression);
+                                         numrows_post_compression,
+                                         0);

    chunk_was_compressed = ts_chunk_is_compressed(cxt.srcht_chunk);
    ts_chunk_set_compressed_chunk(cxt.srcht_chunk, compress_ht_chunk->fd.id);
@@ -1071,7 +1098,8 @@ tsl_get_compressed_chunk_index_for_recompression(PG_FUNCTION_ARGS)
                        in_column_offsets,
                        compressed_rel_tupdesc->natts,
                        true /*need_bistate*/,
-                       true /*reset_sequence*/);
+                       true /*reset_sequence*/,
+                       0 /*insert options*/);

    /*
     * Keep the ExclusiveLock on the compressed chunk. This lock will be requested
@@ -1372,7 +1400,8 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
                        in_column_offsets,
                        compressed_rel_tupdesc->natts,
                        true /*need_bistate*/,
-                       true /*reset_sequence*/);
+                       true /*reset_sequence*/,
+                       0 /*insert options*/);

    /* create an array of the segmentby column offsets in the compressed chunk */
    int16 *segmentby_column_offsets_compressed =
@@ -38,6 +38,7 @@
#include <utils/fmgroids.h>
#include <utils/lsyscache.h>
#include <utils/memutils.h>
#include <utils/portal.h>
#include <utils/rel.h>
#include <utils/relcache.h>
#include <utils/snapmgr.h>
@@ -53,6 +54,7 @@
#include "create.h"
#include "custom_type_cache.h"
#include "arrow_c_data_interface.h"
#include "debug_assert.h"
#include "debug_point.h"
#include "deltadelta.h"
#include "dictionary.h"
@@ -223,7 +225,7 @@ truncate_relation(Oid table_oid)

CompressionStats
compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column_compression_info,
-              int num_compression_infos)
+              int num_compression_infos, int insert_options)
{
    int n_keys;
    ListCell *lc;
@@ -399,7 +401,8 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column
                        in_column_offsets,
                        out_desc->natts,
                        true /*need_bistate*/,
-                       false /*reset_sequence*/);
+                       false /*reset_sequence*/,
+                       insert_options);

    if (matched_index_rel != NULL)
    {
@@ -441,12 +444,19 @@ compress_chunk(Oid in_table, Oid out_table, const ColumnCompressionInfo **column
    }

    row_compressor_finish(&row_compressor);
+   DEBUG_WAITPOINT("compression_done_before_truncate_uncompressed");
    truncate_relation(in_table);

    table_close(out_rel, NoLock);
    table_close(in_rel, NoLock);
    cstat.rowcnt_pre_compression = row_compressor.rowcnt_pre_compression;
    cstat.rowcnt_post_compression = row_compressor.num_compressed_rows;
+
+   if ((insert_options & HEAP_INSERT_FROZEN) == HEAP_INSERT_FROZEN)
+       cstat.rowcnt_frozen = row_compressor.num_compressed_rows;
+   else
+       cstat.rowcnt_frozen = 0;
+
    return cstat;
}

@@ -836,7 +846,8 @@ void
row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_desc,
                    Relation compressed_table, int num_compression_infos,
                    const ColumnCompressionInfo **column_compression_info, int16 *in_column_offsets,
-                   int16 num_columns_in_compressed_table, bool need_bistate, bool reset_sequence)
+                   int16 num_columns_in_compressed_table, bool need_bistate, bool reset_sequence,
+                   int insert_options)
{
    TupleDesc out_desc = RelationGetDescr(compressed_table);
    int col;
@@ -883,6 +894,7 @@ row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_desc,
        .sequence_num = SEQUENCE_NUM_GAP,
        .reset_sequence = reset_sequence,
        .first_iteration = true,
+       .insert_options = insert_options,
    };

    memset(row_compressor->compressed_is_null, 1, sizeof(bool) * num_columns_in_compressed_table);
@@ -1214,7 +1226,7 @@ row_compressor_flush(RowCompressor *row_compressor, CommandId mycid, bool changed_groups)
        heap_insert(row_compressor->compressed_table,
                    compressed_tuple,
                    mycid,
-                   0 /*=options*/,
+                   row_compressor->insert_options /*=options*/,
                    row_compressor->bistate);
        if (row_compressor->resultRelInfo->ri_NumIndices > 0)
        {
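Taken together, the option plumbing in this file is small. A condensed sketch follows (struct and function names below are abbreviations invented for this summary, with fields unrelated to the freeze option omitted; the diff shows the real definitions): compress_chunk() receives insert_options, row_compressor_init() stores it on the compressor, and row_compressor_flush() forwards it to heap_insert() for every compressed batch.

#include "postgres.h"
#include <access/heapam.h>
#include <utils/rel.h>

/* Abridged sketch of how the heap insert options flow through the row compressor. */
typedef struct RowCompressorSketch
{
    Relation compressed_table;
    BulkInsertState bistate;
    int insert_options; /* 0 or HEAP_INSERT_FROZEN, chosen by the caller */
} RowCompressorSketch;

static void
sketch_init(RowCompressorSketch *rc, Relation compressed_table,
            BulkInsertState bistate, int insert_options)
{
    rc->compressed_table = compressed_table;
    rc->bistate = bistate;
    rc->insert_options = insert_options; /* stored once at init time */
}

static void
sketch_flush(RowCompressorSketch *rc, HeapTuple compressed_tuple, CommandId mycid)
{
    /* every flushed batch is inserted with the caller's chosen options */
    heap_insert(rc->compressed_table, compressed_tuple, mycid,
                rc->insert_options, rc->bistate);
}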
@@ -202,6 +202,7 @@ typedef struct CompressionStats
{
    int64 rowcnt_pre_compression;
    int64 rowcnt_post_compression;
+   int64 rowcnt_frozen;
} CompressionStats;

typedef struct PerColumn
@@ -265,6 +266,8 @@ typedef struct RowCompressor
    bool reset_sequence;
    /* flag for checking if we are working on the first tuple */
    bool first_iteration;
+   /* the heap insert options */
+   int insert_options;
} RowCompressor;

/* SegmentFilter is used for filtering segments based on qualifiers */
@@ -313,7 +316,7 @@ pg_attribute_unused() assert_num_compression_algorithms_sane(void)
extern CompressionStorage compression_get_toast_storage(CompressionAlgorithms algo);
extern CompressionStats compress_chunk(Oid in_table, Oid out_table,
                                       const ColumnCompressionInfo **column_compression_info,
-                                      int num_compression_infos);
+                                      int num_compression_infos, int insert_options);
extern void decompress_chunk(Oid in_table, Oid out_table);

extern DecompressionIterator *(*tsl_get_decompression_iterator_init(
@@ -355,7 +358,7 @@ extern void row_compressor_init(RowCompressor *row_compressor, TupleDesc uncompressed_tuple_desc,
                                Relation compressed_table, int num_compression_infos,
                                const ColumnCompressionInfo **column_compression_info,
                                int16 *column_offsets, int16 num_columns_in_compressed_table,
-                               bool need_bistate, bool reset_sequence);
+                               bool need_bistate, bool reset_sequence, int insert_options);
extern void row_compressor_finish(RowCompressor *row_compressor);
extern void populate_per_compressed_columns_from_data(PerCompressedColumn *per_compressed_cols,
                                                      int16 num_cols, Datum *compressed_datums,
tsl/test/expected/bgw_custom-14.out (new file, 1072 lines; diff suppressed because it is too large)
tsl/test/expected/bgw_custom-15.out (new file, 1072 lines; diff suppressed because it is too large)
tsl/test/expected/bgw_custom-16.out (new file, 1072 lines; diff suppressed because it is too large)
@@ -108,10 +108,10 @@ before_compression_table_bytes | 8192
before_compression_index_bytes | 32768
before_compression_toast_bytes | 0
before_compression_total_bytes | 40960
-after_compression_table_bytes | 8192
+after_compression_table_bytes | 16384
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
-after_compression_total_bytes | 32768
+after_compression_total_bytes | 40960
node_name |

\x
@@ -124,28 +124,30 @@ select compress_chunk( '_timescaledb_internal._hyper_1_1_chunk');
\x
select * from _timescaledb_catalog.compression_chunk_size
order by chunk_id;
--[ RECORD 1 ]------------+------
-chunk_id | 1
-compressed_chunk_id | 6
-uncompressed_heap_size | 8192
-uncompressed_toast_size | 0
-uncompressed_index_size | 32768
-compressed_heap_size | 8192
-compressed_toast_size | 8192
-compressed_index_size | 16384
-numrows_pre_compression | 1
-numrows_post_compression | 1
--[ RECORD 2 ]------------+------
-chunk_id | 2
-compressed_chunk_id | 5
-uncompressed_heap_size | 8192
-uncompressed_toast_size | 0
-uncompressed_index_size | 32768
-compressed_heap_size | 8192
-compressed_toast_size | 8192
-compressed_index_size | 16384
-numrows_pre_compression | 1
-numrows_post_compression | 1
+-[ RECORD 1 ]--------------+------
+chunk_id | 1
+compressed_chunk_id | 6
+uncompressed_heap_size | 8192
+uncompressed_toast_size | 0
+uncompressed_index_size | 32768
+compressed_heap_size | 16384
+compressed_toast_size | 8192
+compressed_index_size | 16384
+numrows_pre_compression | 1
+numrows_post_compression | 1
+numrows_frozen_immediately | 1
+-[ RECORD 2 ]--------------+------
+chunk_id | 2
+compressed_chunk_id | 5
+uncompressed_heap_size | 8192
+uncompressed_toast_size | 0
+uncompressed_index_size | 32768
+compressed_heap_size | 16384
+compressed_toast_size | 8192
+compressed_index_size | 16384
+numrows_pre_compression | 1
+numrows_post_compression | 1
+numrows_frozen_immediately | 1

\x
select ch1.id, ch1.schema_name, ch1.table_name , ch2.table_name as compress_table
@@ -393,10 +395,10 @@ before_compression_table_bytes | 8192
before_compression_index_bytes | 16384
before_compression_toast_bytes | 8192
before_compression_total_bytes | 32768
-after_compression_table_bytes | 8192
+after_compression_table_bytes | 16384
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
-after_compression_total_bytes | 32768
+after_compression_total_bytes | 40960
node_name |
-[ RECORD 2 ]------------------+----------------------
chunk_schema | _timescaledb_internal
@@ -406,10 +408,10 @@ before_compression_table_bytes | 8192
before_compression_index_bytes | 16384
before_compression_toast_bytes | 8192
before_compression_total_bytes | 32768
-after_compression_table_bytes | 8192
+after_compression_table_bytes | 16384
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
-after_compression_total_bytes | 32768
+after_compression_total_bytes | 40960
node_name |

select * from hypertable_compression_stats('foo');
@@ -420,10 +422,10 @@ before_compression_table_bytes | 8192
before_compression_index_bytes | 32768
before_compression_toast_bytes | 0
before_compression_total_bytes | 40960
-after_compression_table_bytes | 8192
+after_compression_table_bytes | 16384
after_compression_index_bytes | 16384
after_compression_toast_bytes | 8192
-after_compression_total_bytes | 32768
+after_compression_total_bytes | 40960
node_name |

select * from hypertable_compression_stats('conditions');
@@ -434,10 +436,10 @@ before_compression_table_bytes | 16384
before_compression_index_bytes | 32768
before_compression_toast_bytes | 16384
before_compression_total_bytes | 65536
-after_compression_table_bytes | 16384
+after_compression_table_bytes | 32768
after_compression_index_bytes | 32768
after_compression_toast_bytes | 16384
-after_compression_total_bytes | 65536
+after_compression_total_bytes | 81920
node_name |

vacuum full foo;
tsl/test/expected/compression_bgw-14.out (new file, 657 lines)
@ -0,0 +1,657 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit;
|
||||
-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes
|
||||
GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE;
|
||||
GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions (
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
location TEXT NOT NULL,
|
||||
location2 char(10) NOT NULL,
|
||||
temperature DOUBLE PRECISION NULL,
|
||||
humidity DOUBLE PRECISION NULL
|
||||
);
|
||||
select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval);
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(1,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
--TEST 1--
|
||||
--cannot set policy without enabling compression --
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression not enabled on hypertable "conditions"
|
||||
\set ON_ERROR_STOP 1
|
||||
-- TEST2 --
|
||||
--add a policy to compress chunks --
|
||||
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
|
||||
insert into conditions
|
||||
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
|
||||
select add_compression_policy('conditions', '60d'::interval) AS compressjob_id
|
||||
\gset
|
||||
select * from _timescaledb_config.bgw_job where id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
select * from alter_job(:compressjob_id, schedule_interval=>'1s');
|
||||
job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone
|
||||
--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+----------
|
||||
1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | |
|
||||
(1 row)
|
||||
|
||||
--enable maxchunks to 1 so that only 1 chunk is compressed by the job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
alter_job
|
||||
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
insert into conditions
|
||||
select now()::timestamp, 'TOK', 'sony', 55, 75;
|
||||
-- TEST3 --
|
||||
--only the old chunks will get compressed when policy is executed--
|
||||
CALL run_job(:compressjob_id);
|
||||
select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total,
|
||||
pg_size_pretty( after_compression_total_bytes) after_total
|
||||
from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name;
|
||||
chunk_name | before_total | after_total
|
||||
------------------+--------------+-------------
|
||||
_hyper_1_1_chunk | 32 kB | 40 kB
|
||||
(1 row)
|
||||
|
||||
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id;
|
||||
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
|
||||
----+---------------+-----------------------+--------------------------+---------------------+---------+--------+-----------
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f
|
||||
4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f
|
||||
(4 rows)
|
||||
|
||||
-- TEST 4 --
|
||||
--cannot set another policy
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true);
|
||||
NOTICE: compression policy already exists for hypertable "conditions", skipping
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression policy already exists for hypertable or continuous aggregate "conditions"
|
||||
select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true);
|
||||
WARNING: compression policy already exists for hypertable "conditions"
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
\set ON_ERROR_STOP 1
|
||||
--TEST 5 --
|
||||
-- drop the policy --
|
||||
select remove_compression_policy('conditions');
|
||||
remove_compression_policy
|
||||
---------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
select count(*) from _timescaledb_config.bgw_job WHERE id>=1000;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
--TEST 6 --
|
||||
-- try to execute the policy after it has been dropped --
|
||||
\set ON_ERROR_STOP 0
|
||||
CALL run_job(:compressjob_id);
|
||||
ERROR: job 1000 not found
|
||||
--errors with bad input for add/remove compression policy
|
||||
create view dummyv1 as select * from conditions limit 1;
|
||||
select add_compression_policy( 100 , compress_after=> '1 day'::interval);
|
||||
ERROR: object with id "100" not found
|
||||
select add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval );
|
||||
ERROR: "dummyv1" is not a hypertable or a continuous aggregate
|
||||
select remove_compression_policy( 100 );
|
||||
ERROR: relation is not a hypertable or continuous aggregate
|
||||
\set ON_ERROR_STOP 1
|
||||
-- We're done with the table, so drop it.
|
||||
DROP TABLE IF EXISTS conditions CASCADE;
|
||||
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk
|
||||
NOTICE: drop cascades to view dummyv1
|
||||
--TEST 7
|
||||
--compression policy for smallint, integer or bigint based partition hypertable
|
||||
--smallint test
|
||||
CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT);
|
||||
SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
----------------------------------
|
||||
(3,public,test_table_smallint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT';
|
||||
SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_smallint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_smallint SET (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval );
|
||||
ERROR: unsupported compress_after argument type, expected type : smallint
|
||||
\set ON_ERROR_STOP 1
|
||||
SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_smallint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
------------------+--------------------------------+-------------------------------
|
||||
_hyper_3_5_chunk | 24576 | 24576
|
||||
_hyper_3_6_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--integer tests
|
||||
CREATE TABLE test_table_integer(time INTEGER, val INTEGER);
|
||||
SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(5,public,test_table_integer,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER';
|
||||
SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_integer SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_integer SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_integer')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_5_12_chunk | 24576 | 24576
|
||||
_hyper_5_13_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--bigint test
|
||||
CREATE TABLE test_table_bigint(time BIGINT, val BIGINT);
|
||||
SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
--------------------------------
|
||||
(7,public,test_table_bigint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT';
|
||||
SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_bigint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_bigint SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_bigint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_7_19_chunk | 24576 | 24576
|
||||
_hyper_7_20_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--TEST 8
|
||||
--hypertable owner lacks permission to start background worker
|
||||
SET ROLE NOLOGIN_ROLE;
|
||||
CREATE TABLE test_table_nologin(time bigint, val int);
|
||||
SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(9,public,test_table_nologin,t)
|
||||
(1 row)
|
||||
|
||||
SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
ALTER TABLE test_table_nologin set (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT add_compression_policy('test_table_nologin', 2::int);
|
||||
ERROR: permission denied to start background process as role "nologin_role"
|
||||
\set ON_ERROR_STOP 1
|
||||
DROP TABLE test_table_nologin;
|
||||
RESET ROLE;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions(
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
device INTEGER,
|
||||
temperature FLOAT
|
||||
);
|
||||
SELECT * FROM create_hypertable('conditions', 'time',
|
||||
chunk_time_interval => '1 day'::interval);
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+------------+---------
|
||||
11 | public | conditions | t
|
||||
(1 row)
|
||||
|
||||
INSERT INTO conditions
|
||||
SELECT time, (random()*30)::int, random()*80 - 40
|
||||
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time;
|
||||
CREATE MATERIALIZED VIEW conditions_summary
|
||||
WITH (timescaledb.continuous) AS
|
||||
SELECT device,
|
||||
time_bucket(INTERVAL '1 hour', "time") AS day,
|
||||
AVG(temperature) AS avg_temperature,
|
||||
MAX(temperature) AS max_temperature,
|
||||
MIN(temperature) AS min_temperature
|
||||
FROM conditions
|
||||
GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA;
|
||||
CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL);
|
||||
ALTER TABLE conditions SET (timescaledb.compress);
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00');
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
-- We need to have some chunks that are marked as dropped, otherwise
|
||||
-- we will not have a problem below.
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM _timescaledb_catalog.chunk
|
||||
WHERE dropped = TRUE;
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('conditions', INTERVAL '1 day') \gset
|
||||
-- job compresses only 1 chunk at a time --
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
set client_min_messages TO LOG;
|
||||
CALL run_job(:job_id);
|
||||
LOG: statement: CALL run_job(1004);
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
LOG: statement: set client_min_messages TO NOTICE;
|
||||
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
\i include/recompress_basic.sql
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
CREATE OR REPLACE VIEW compressed_chunk_info_view AS
|
||||
SELECT
|
||||
h.schema_name AS hypertable_schema,
|
||||
h.table_name AS hypertable_name,
|
||||
c.schema_name as chunk_schema,
|
||||
c.table_name as chunk_name,
|
||||
c.status as chunk_status,
|
||||
comp.schema_name as compressed_chunk_schema,
|
||||
comp.table_name as compressed_chunk_name
|
||||
FROM
|
||||
_timescaledb_catalog.hypertable h JOIN
|
||||
_timescaledb_catalog.chunk c ON h.id = c.hypertable_id
|
||||
LEFT JOIN _timescaledb_catalog.chunk comp
|
||||
ON comp.id = c.compressed_chunk_id
|
||||
;
|
||||
CREATE TABLE test2 (timec timestamptz NOT NULL, i integer ,
|
||||
b bigint, t text);
|
||||
SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days');
|
||||
table_name
|
||||
------------
|
||||
test2
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q;
|
||||
ALTER TABLE test2 set (timescaledb.compress,
|
||||
timescaledb.compress_segmentby = 'b',
|
||||
timescaledb.compress_orderby = 'timec DESC');
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('test2') c;
|
||||
compress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
---insert into the middle of the range ---
|
||||
INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row');
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row');
|
||||
INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row');
|
||||
--- insert a new segment by ---
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 26
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 1
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
(3 rows)
|
||||
|
||||
--check status for chunk --
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME",
|
||||
chunk_schema || '.' || chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' \gset
|
||||
SELECT count(*) from test2;
|
||||
count
|
||||
-------
|
||||
29
|
||||
(1 row)
|
||||
|
||||
-- call recompress_chunk inside a transaction. This should fails since
|
||||
-- it contains transaction-terminating commands.
|
||||
\set ON_ERROR_STOP 0
|
||||
START TRANSACTION;
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
ROLLBACK;
|
||||
\set ON_ERROR_STOP 1
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
-- Demonstrate that no locks are held on the hypertable, chunk, or the
|
||||
-- compressed chunk after recompress_chunk has executed.
|
||||
SELECT pid, locktype, relation, relation::regclass, mode, granted
|
||||
FROM pg_locks
|
||||
WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2')
|
||||
ORDER BY pid;
|
||||
pid | locktype | relation | relation | mode | granted
|
||||
-----+----------+----------+----------+------+---------
|
||||
(0 rows)
|
||||
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
--- insert into a compressed chunk again + a new chunk--
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'),
|
||||
( '2020-01-03 11:01:06+00', 20, 11, '36row'),
|
||||
( '2020-01-03 11:02:00+00', 20, 12, '12row'),
|
||||
( '2020-04-03 00:02:00+00', 30, 13, '3013row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 28
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 2
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
Thu Apr 02 17:00:00 2020 PDT | 13 | 1
|
||||
(4 rows)
|
||||
|
||||
--chunk status should be unordered for the previously compressed chunk
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
0 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('test2', '30d'::interval) \gset
|
||||
CALL run_job(:job_id);
|
||||
CALL run_job(:job_id);
|
||||
-- status should be compressed ---
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
1 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
-- call recompress_chunk when status is not unordered
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, true);
|
||||
psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
-- This will succeed and compress the chunk for the test below.
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, false);
|
||||
psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
--now decompress it , then try and recompress
|
||||
SELECT decompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
decompress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk
|
||||
\set ON_ERROR_STOP 1
|
||||
-- test recompress policy
|
||||
CREATE TABLE metrics(time timestamptz NOT NULL);
|
||||
SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset
|
||||
ALTER TABLE metrics SET (timescaledb.compress);
|
||||
-- create chunk with some data and compress
|
||||
INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10);
|
||||
-- create custom compression job without recompress boolean
|
||||
SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset
|
||||
-- first call should compress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
-- 2nd call should do nothing
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- should recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- disable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- nothing to do
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- still nothing to do since we disabled recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- reenable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- should recompress now
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_COMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset
|
||||
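-- A minimal sketch, assuming the recompression policy registered above is a regular
-- background job: its schedule and config keys (recompress_after, maxchunks_to_compress)
-- can be inspected from the job catalog like any other policy.
SELECT id, proc_name, schedule_interval, config
FROM _timescaledb_config.bgw_job
WHERE proc_name = 'policy_recompression';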
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
---- nothing to do yet
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- create some work for recompress
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
-- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
-- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_RECOMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Teardown test
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE;
|
||||
DROP ROLE NOLOGIN_ROLE;
|
657
tsl/test/expected/compression_bgw-15.out
Normal file
@@ -0,0 +1,657 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit;
|
||||
-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes
|
||||
GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE;
|
||||
GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions (
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
location TEXT NOT NULL,
|
||||
location2 char(10) NOT NULL,
|
||||
temperature DOUBLE PRECISION NULL,
|
||||
humidity DOUBLE PRECISION NULL
|
||||
);
|
||||
select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval);
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(1,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
--TEST 1--
|
||||
--cannot set policy without enabling compression --
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression not enabled on hypertable "conditions"
|
||||
\set ON_ERROR_STOP 1
|
||||
-- TEST2 --
|
||||
--add a policy to compress chunks --
|
||||
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
|
||||
insert into conditions
|
||||
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
|
||||
select add_compression_policy('conditions', '60d'::interval) AS compressjob_id
|
||||
\gset
|
||||
select * from _timescaledb_config.bgw_job where id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
select * from alter_job(:compressjob_id, schedule_interval=>'1s');
|
||||
job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone
|
||||
--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+----------
|
||||
1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | |
|
||||
(1 row)
|
||||
|
||||
--set maxchunks_to_compress to 1 so that only 1 chunk is compressed by the job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
alter_job
|
||||
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
insert into conditions
|
||||
select now()::timestamp, 'TOK', 'sony', 55, 75;
|
||||
-- TEST3 --
|
||||
--only the old chunks will get compressed when policy is executed--
|
||||
CALL run_job(:compressjob_id);
|
||||
select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total,
|
||||
pg_size_pretty( after_compression_total_bytes) after_total
|
||||
from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name;
|
||||
chunk_name | before_total | after_total
|
||||
------------------+--------------+-------------
|
||||
_hyper_1_1_chunk | 32 kB | 40 kB
|
||||
(1 row)
|
||||
|
||||
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id;
|
||||
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
|
||||
----+---------------+-----------------------+--------------------------+---------------------+---------+--------+-----------
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f
|
||||
4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f
|
||||
(4 rows)
|
||||
|
||||
-- TEST 4 --
|
||||
--cannot set another policy
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true);
|
||||
NOTICE: compression policy already exists for hypertable "conditions", skipping
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression policy already exists for hypertable or continuous aggregate "conditions"
|
||||
select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true);
|
||||
WARNING: compression policy already exists for hypertable "conditions"
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
\set ON_ERROR_STOP 1
|
||||
--TEST 5 --
|
||||
-- drop the policy --
|
||||
select remove_compression_policy('conditions');
|
||||
remove_compression_policy
|
||||
---------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
select count(*) from _timescaledb_config.bgw_job WHERE id>=1000;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
--TEST 6 --
|
||||
-- try to execute the policy after it has been dropped --
|
||||
\set ON_ERROR_STOP 0
|
||||
CALL run_job(:compressjob_id);
|
||||
ERROR: job 1000 not found
|
||||
--errors with bad input for add/remove compression policy
|
||||
create view dummyv1 as select * from conditions limit 1;
|
||||
select add_compression_policy( 100 , compress_after=> '1 day'::interval);
|
||||
ERROR: object with id "100" not found
|
||||
select add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval );
|
||||
ERROR: "dummyv1" is not a hypertable or a continuous aggregate
|
||||
select remove_compression_policy( 100 );
|
||||
ERROR: relation is not a hypertable or continuous aggregate
|
||||
\set ON_ERROR_STOP 1
|
||||
-- We're done with the table, so drop it.
|
||||
DROP TABLE IF EXISTS conditions CASCADE;
|
||||
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk
|
||||
NOTICE: drop cascades to view dummyv1
|
||||
--TEST 7
|
||||
--compression policy for smallint, integer or bigint based partition hypertable
|
||||
--smallint test
|
||||
CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT);
|
||||
SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
----------------------------------
|
||||
(3,public,test_table_smallint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT';
|
||||
SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_smallint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_smallint SET (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval );
|
||||
ERROR: unsupported compress_after argument type, expected type : smallint
|
||||
\set ON_ERROR_STOP 1
|
||||
SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_smallint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
------------------+--------------------------------+-------------------------------
|
||||
_hyper_3_5_chunk | 24576 | 24576
|
||||
_hyper_3_6_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
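-- A minimal sketch of the same setup outside the test, assuming a hypothetical
-- bigint-partitioned table "readings" and now-function "readings_now": for integer
-- partitioning columns, compress_after must use the column's own type and is
-- evaluated against the value returned by the integer-now function.
CREATE TABLE readings(ts BIGINT NOT NULL, val DOUBLE PRECISION);
SELECT create_hypertable('readings', 'ts', chunk_time_interval => 1000);
CREATE FUNCTION readings_now() RETURNS BIGINT LANGUAGE SQL STABLE
    AS $$ SELECT coalesce(max(ts), 0::bigint) FROM readings $$;
SELECT set_integer_now_func('readings', 'readings_now');
ALTER TABLE readings SET (timescaledb.compress);
SELECT add_compression_policy('readings', compress_after => 5000::BIGINT);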
--integer tests
|
||||
CREATE TABLE test_table_integer(time INTEGER, val INTEGER);
|
||||
SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(5,public,test_table_integer,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER';
|
||||
SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_integer SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_integer SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_integer')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_5_12_chunk | 24576 | 24576
|
||||
_hyper_5_13_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--bigint test
|
||||
CREATE TABLE test_table_bigint(time BIGINT, val BIGINT);
|
||||
SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
--------------------------------
|
||||
(7,public,test_table_bigint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT';
|
||||
SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_bigint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_bigint SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_bigint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_7_19_chunk | 24576 | 24576
|
||||
_hyper_7_20_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--TEST 8
|
||||
--hypertable owner lacks permission to start background worker
|
||||
SET ROLE NOLOGIN_ROLE;
|
||||
CREATE TABLE test_table_nologin(time bigint, val int);
|
||||
SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(9,public,test_table_nologin,t)
|
||||
(1 row)
|
||||
|
||||
SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
ALTER TABLE test_table_nologin set (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT add_compression_policy('test_table_nologin', 2::int);
|
||||
ERROR: permission denied to start background process as role "nologin_role"
|
||||
\set ON_ERROR_STOP 1
|
||||
DROP TABLE test_table_nologin;
|
||||
RESET ROLE;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions(
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
device INTEGER,
|
||||
temperature FLOAT
|
||||
);
|
||||
SELECT * FROM create_hypertable('conditions', 'time',
|
||||
chunk_time_interval => '1 day'::interval);
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+------------+---------
|
||||
11 | public | conditions | t
|
||||
(1 row)
|
||||
|
||||
INSERT INTO conditions
|
||||
SELECT time, (random()*30)::int, random()*80 - 40
|
||||
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time;
|
||||
CREATE MATERIALIZED VIEW conditions_summary
|
||||
WITH (timescaledb.continuous) AS
|
||||
SELECT device,
|
||||
time_bucket(INTERVAL '1 hour', "time") AS day,
|
||||
AVG(temperature) AS avg_temperature,
|
||||
MAX(temperature) AS max_temperature,
|
||||
MIN(temperature) AS min_temperature
|
||||
FROM conditions
|
||||
GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA;
|
||||
CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL);
|
||||
ALTER TABLE conditions SET (timescaledb.compress);
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00');
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
-- We need to have some chunks that are marked as dropped, otherwise
|
||||
-- we will not have a problem below.
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM _timescaledb_catalog.chunk
|
||||
WHERE dropped = TRUE;
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
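-- A minimal sketch: the chunks removed by drop_chunks() above stay in the catalog
-- with dropped = true, which is the situation the policy run below has to cope with.
SELECT schema_name || '.' || table_name AS chunk, dropped
FROM _timescaledb_catalog.chunk
WHERE dropped
ORDER BY table_name;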
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('conditions', INTERVAL '1 day') \gset
|
||||
-- job compresses only 1 chunk at a time --
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
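-- A minimal sketch, assuming the two config keys set above can equally be merged in
-- one call by concatenating jsonb objects instead of nesting jsonb_set():
SELECT alter_job(id, config := config || '{"maxchunks_to_compress": 1, "verbose_log": true}'::jsonb)
FROM _timescaledb_config.bgw_job
WHERE id = :job_id;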
set client_min_messages TO LOG;
|
||||
CALL run_job(:job_id);
|
||||
LOG: statement: CALL run_job(1004);
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
LOG: statement: set client_min_messages TO NOTICE;
|
||||
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
\i include/recompress_basic.sql
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
CREATE OR REPLACE VIEW compressed_chunk_info_view AS
|
||||
SELECT
|
||||
h.schema_name AS hypertable_schema,
|
||||
h.table_name AS hypertable_name,
|
||||
c.schema_name as chunk_schema,
|
||||
c.table_name as chunk_name,
|
||||
c.status as chunk_status,
|
||||
comp.schema_name as compressed_chunk_schema,
|
||||
comp.table_name as compressed_chunk_name
|
||||
FROM
|
||||
_timescaledb_catalog.hypertable h JOIN
|
||||
_timescaledb_catalog.chunk c ON h.id = c.hypertable_id
|
||||
LEFT JOIN _timescaledb_catalog.chunk comp
|
||||
ON comp.id = c.compressed_chunk_id
|
||||
;
|
||||
CREATE TABLE test2 (timec timestamptz NOT NULL, i integer ,
|
||||
b bigint, t text);
|
||||
SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days');
|
||||
table_name
|
||||
------------
|
||||
test2
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q;
|
||||
ALTER TABLE test2 set (timescaledb.compress,
|
||||
timescaledb.compress_segmentby = 'b',
|
||||
timescaledb.compress_orderby = 'timec DESC');
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('test2') c;
|
||||
compress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
---insert into the middle of the range ---
|
||||
INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row');
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row');
|
||||
INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row');
|
||||
--- insert a new segment by ---
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 26
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 1
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
(3 rows)
|
||||
|
||||
--check status for chunk --
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME",
|
||||
chunk_schema || '.' || chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' \gset
|
||||
SELECT count(*) from test2;
|
||||
count
|
||||
-------
|
||||
29
|
||||
(1 row)
|
||||
|
||||
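-- A minimal sketch, assuming the compressed chunk keeps segmentby columns (b) as
-- plain columns alongside a per-batch row count in _ts_meta_count; both names are
-- taken on trust here rather than from the diff.
SELECT b, _ts_meta_count FROM :COMP_CHUNK_NAME ORDER BY b;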
-- call recompress_chunk inside a transaction. This should fail since
|
||||
-- it contains transaction-terminating commands.
|
||||
\set ON_ERROR_STOP 0
|
||||
START TRANSACTION;
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
ROLLBACK;
|
||||
\set ON_ERROR_STOP 1
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
-- Demonstrate that no locks are held on the hypertable, chunk, or the
|
||||
-- compressed chunk after recompress_chunk has executed.
|
||||
SELECT pid, locktype, relation, relation::regclass, mode, granted
|
||||
FROM pg_locks
|
||||
WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2')
|
||||
ORDER BY pid;
|
||||
pid | locktype | relation | relation | mode | granted
|
||||
-----+----------+----------+----------+------+---------
|
||||
(0 rows)
|
||||
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
--- insert into a compressed chunk again + a new chunk--
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'),
|
||||
( '2020-01-03 11:01:06+00', 20, 11, '36row'),
|
||||
( '2020-01-03 11:02:00+00', 20, 12, '12row'),
|
||||
( '2020-04-03 00:02:00+00', 30, 13, '3013row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 28
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 2
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
Thu Apr 02 17:00:00 2020 PDT | 13 | 1
|
||||
(4 rows)
|
||||
|
||||
--chunk status should be unordered for the previously compressed chunk
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
0 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('test2', '30d'::interval) \gset
|
||||
CALL run_job(:job_id);
|
||||
CALL run_job(:job_id);
|
||||
-- status should be compressed ---
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
1 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
-- call recompress_chunk when status is not unordered
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, true);
|
||||
psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
-- This will succeed and compress the chunk for the test below.
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, false);
|
||||
psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
--now decompress it , then try and recompress
|
||||
SELECT decompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
decompress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk
|
||||
\set ON_ERROR_STOP 1
|
||||
-- test recompress policy
|
||||
CREATE TABLE metrics(time timestamptz NOT NULL);
|
||||
SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset
|
||||
ALTER TABLE metrics SET (timescaledb.compress);
|
||||
-- create chunk with some data and compress
|
||||
INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10);
|
||||
-- create custom compression job without recompress boolean
|
||||
SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset
|
||||
-- first call should compress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
-- 2nd call should do nothing
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- should recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- disable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- nothing to do
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- still nothing to do since we disabled recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- reenable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- should recompress now
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_COMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
---- nothing to do yet
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- create some work for recompress
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
-- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
-- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_RECOMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Teardown test
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE;
|
||||
DROP ROLE NOLOGIN_ROLE;
|
657
tsl/test/expected/compression_bgw-16.out
Normal file
@@ -0,0 +1,657 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
CREATE ROLE NOLOGIN_ROLE WITH nologin noinherit;
|
||||
-- though user on access node has required GRANTS, this will propagate GRANTS to the connected data nodes
|
||||
GRANT CREATE ON SCHEMA public TO NOLOGIN_ROLE;
|
||||
GRANT NOLOGIN_ROLE TO :ROLE_DEFAULT_PERM_USER WITH ADMIN OPTION;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions (
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
location TEXT NOT NULL,
|
||||
location2 char(10) NOT NULL,
|
||||
temperature DOUBLE PRECISION NULL,
|
||||
humidity DOUBLE PRECISION NULL
|
||||
);
|
||||
select create_hypertable( 'conditions', 'time', chunk_time_interval=> '31days'::interval);
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(1,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
--TEST 1--
|
||||
--cannot set policy without enabling compression --
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression not enabled on hypertable "conditions"
|
||||
\set ON_ERROR_STOP 1
|
||||
-- TEST2 --
|
||||
--add a policy to compress chunks --
|
||||
alter table conditions set (timescaledb.compress, timescaledb.compress_segmentby = 'location', timescaledb.compress_orderby = 'time');
|
||||
insert into conditions
|
||||
select generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '1 day'), 'POR', 'klick', 55, 75;
|
||||
select add_compression_policy('conditions', '60d'::interval) AS compressjob_id
|
||||
\gset
|
||||
select * from _timescaledb_config.bgw_job where id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-----------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 12 hours | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days"} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
select * from alter_job(:compressjob_id, schedule_interval=>'1s');
|
||||
job_id | schedule_interval | max_runtime | max_retries | retry_period | scheduled | config | next_start | check_config | fixed_schedule | initial_start | timezone
|
||||
--------+-------------------+-------------+-------------+--------------+-----------+-----------------------------------------------------+------------+-------------------------------------------------+----------------+---------------+----------
|
||||
1000 | @ 1 sec | @ 0 | -1 | @ 1 hour | t | {"hypertable_id": 1, "compress_after": "@ 60 days"} | -infinity | _timescaledb_functions.policy_compression_check | f | |
|
||||
(1 row)
|
||||
|
||||
--set maxchunks_to_compress to 1 so that only 1 chunk is compressed by the job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
alter_job
|
||||
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1000,"@ 1 sec","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 1, ""compress_after"": ""@ 60 days"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
select * from _timescaledb_config.bgw_job where id >= 1000 ORDER BY id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+---------------------------------------------------------------------------------+------------------------+--------------------------+----------
|
||||
1000 | Compression Policy [1000] | @ 1 sec | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 1 | {"hypertable_id": 1, "compress_after": "@ 60 days", "maxchunks_to_compress": 1} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
insert into conditions
|
||||
select now()::timestamp, 'TOK', 'sony', 55, 75;
|
||||
-- TEST3 --
|
||||
--only the old chunks will get compressed when policy is executed--
|
||||
CALL run_job(:compressjob_id);
|
||||
select chunk_name, pg_size_pretty(before_compression_total_bytes) before_total,
|
||||
pg_size_pretty( after_compression_total_bytes) after_total
|
||||
from chunk_compression_stats('conditions') where compression_status like 'Compressed' order by chunk_name;
|
||||
chunk_name | before_total | after_total
|
||||
------------------+--------------+-------------
|
||||
_hyper_1_1_chunk | 32 kB | 40 kB
|
||||
(1 row)
|
||||
|
||||
SELECT id, hypertable_id, schema_name, table_name, compressed_chunk_id, dropped, status, osm_chunk FROM _timescaledb_catalog.chunk ORDER BY id;
|
||||
id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk
|
||||
----+---------------+-----------------------+--------------------------+---------------------+---------+--------+-----------
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 4 | f | 1 | f
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | | f | 0 | f
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | | f | 0 | f
|
||||
4 | 2 | _timescaledb_internal | compress_hyper_2_4_chunk | | f | 0 | f
|
||||
(4 rows)
|
||||
|
||||
-- TEST 4 --
|
||||
--cannot set another policy
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy('conditions', '60d'::interval, if_not_exists=>true);
|
||||
NOTICE: compression policy already exists for hypertable "conditions", skipping
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
select add_compression_policy('conditions', '60d'::interval);
|
||||
ERROR: compression policy already exists for hypertable or continuous aggregate "conditions"
|
||||
select add_compression_policy('conditions', '30d'::interval, if_not_exists=>true);
|
||||
WARNING: compression policy already exists for hypertable "conditions"
|
||||
add_compression_policy
|
||||
------------------------
|
||||
-1
|
||||
(1 row)
|
||||
|
||||
\set ON_ERROR_STOP 1
|
||||
--TEST 5 --
|
||||
-- drop the policy --
|
||||
select remove_compression_policy('conditions');
|
||||
remove_compression_policy
|
||||
---------------------------
|
||||
t
|
||||
(1 row)
|
||||
|
||||
select count(*) from _timescaledb_config.bgw_job WHERE id>=1000;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
--TEST 6 --
|
||||
-- try to execute the policy after it has been dropped --
|
||||
\set ON_ERROR_STOP 0
|
||||
CALL run_job(:compressjob_id);
|
||||
ERROR: job 1000 not found
|
||||
--errors with bad input for add/remove compression policy
|
||||
create view dummyv1 as select * from conditions limit 1;
|
||||
select add_compression_policy( 100 , compress_after=> '1 day'::interval);
|
||||
ERROR: object with id "100" not found
|
||||
select add_compression_policy( 'dummyv1', compress_after=> '1 day'::interval );
|
||||
ERROR: "dummyv1" is not a hypertable or a continuous aggregate
|
||||
select remove_compression_policy( 100 );
|
||||
ERROR: relation is not a hypertable or continuous aggregate
|
||||
\set ON_ERROR_STOP 1
|
||||
-- We're done with the table, so drop it.
|
||||
DROP TABLE IF EXISTS conditions CASCADE;
|
||||
NOTICE: drop cascades to table _timescaledb_internal.compress_hyper_2_4_chunk
|
||||
NOTICE: drop cascades to view dummyv1
|
||||
--TEST 7
|
||||
--compression policy for smallint, integer or bigint based partition hypertable
|
||||
--smallint test
|
||||
CREATE TABLE test_table_smallint(time SMALLINT, val SMALLINT);
|
||||
SELECT create_hypertable('test_table_smallint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
----------------------------------
|
||||
(3,public,test_table_smallint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_smallint() RETURNS SMALLINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::SMALLINT';
|
||||
SELECT set_integer_now_func('test_table_smallint', 'dummy_now_smallint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_smallint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_smallint SET (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
select add_compression_policy( 'test_table_smallint', compress_after=> '1 day'::interval );
|
||||
ERROR: unsupported compress_after argument type, expected type : smallint
|
||||
\set ON_ERROR_STOP 1
|
||||
SELECT add_compression_policy('test_table_smallint', 2::SMALLINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1001 | Compression Policy [1001] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 3 | {"hypertable_id": 3, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_smallint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
------------------+--------------------------------+-------------------------------
|
||||
_hyper_3_5_chunk | 24576 | 24576
|
||||
_hyper_3_6_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--integer tests
|
||||
CREATE TABLE test_table_integer(time INTEGER, val INTEGER);
|
||||
SELECT create_hypertable('test_table_integer', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(5,public,test_table_integer,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_integer() RETURNS INTEGER LANGUAGE SQL IMMUTABLE AS 'SELECT 5::INTEGER';
|
||||
SELECT set_integer_now_func('test_table_integer', 'dummy_now_integer');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_integer SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_integer SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_integer', 2::INTEGER) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1002 | Compression Policy [1002] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 5 | {"hypertable_id": 5, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_integer')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_5_12_chunk | 24576 | 24576
|
||||
_hyper_5_13_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--bigint test
|
||||
CREATE TABLE test_table_bigint(time BIGINT, val BIGINT);
|
||||
SELECT create_hypertable('test_table_bigint', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
--------------------------------
|
||||
(7,public,test_table_bigint,t)
|
||||
(1 row)
|
||||
|
||||
CREATE OR REPLACE FUNCTION dummy_now_bigint() RETURNS BIGINT LANGUAGE SQL IMMUTABLE AS 'SELECT 5::BIGINT';
|
||||
SELECT set_integer_now_func('test_table_bigint', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test_table_bigint SELECT generate_series(1,5), 10;
|
||||
ALTER TABLE test_table_bigint SET (timescaledb.compress);
|
||||
SELECT add_compression_policy('test_table_bigint', 2::BIGINT) AS compressjob_id \gset
|
||||
SELECT * FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
|
||||
id | application_name | schedule_interval | max_runtime | max_retries | retry_period | proc_schema | proc_name | owner | scheduled | fixed_schedule | initial_start | hypertable_id | config | check_schema | check_name | timezone
|
||||
------+---------------------------+-------------------+-------------+-------------+--------------+------------------------+--------------------+-------------------+-----------+----------------+---------------+---------------+-------------------------------------------+------------------------+--------------------------+----------
|
||||
1003 | Compression Policy [1003] | @ 1 day | @ 0 | -1 | @ 1 hour | _timescaledb_functions | policy_compression | default_perm_user | t | f | | 7 | {"hypertable_id": 7, "compress_after": 2} | _timescaledb_functions | policy_compression_check |
|
||||
(1 row)
|
||||
|
||||
--will compress all chunks that need compression
|
||||
CALL run_job(:compressjob_id);
|
||||
SELECT chunk_name, before_compression_total_bytes, after_compression_total_bytes
|
||||
FROM chunk_compression_stats('test_table_bigint')
|
||||
WHERE compression_status LIKE 'Compressed'
|
||||
ORDER BY chunk_name;
|
||||
chunk_name | before_compression_total_bytes | after_compression_total_bytes
|
||||
-------------------+--------------------------------+-------------------------------
|
||||
_hyper_7_19_chunk | 24576 | 24576
|
||||
_hyper_7_20_chunk | 24576 | 24576
|
||||
(2 rows)
|
||||
|
||||
--TEST 8
|
||||
--hypertable owner lacks permission to start background worker
|
||||
SET ROLE NOLOGIN_ROLE;
|
||||
CREATE TABLE test_table_nologin(time bigint, val int);
|
||||
SELECT create_hypertable('test_table_nologin', 'time', chunk_time_interval => 1);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_hypertable
|
||||
---------------------------------
|
||||
(9,public,test_table_nologin,t)
|
||||
(1 row)
|
||||
|
||||
SELECT set_integer_now_func('test_table_nologin', 'dummy_now_bigint');
|
||||
set_integer_now_func
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
ALTER TABLE test_table_nologin set (timescaledb.compress);
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT add_compression_policy('test_table_nologin', 2::int);
|
||||
ERROR: permission denied to start background process as role "nologin_role"
|
||||
\set ON_ERROR_STOP 1
|
||||
DROP TABLE test_table_nologin;
|
||||
RESET ROLE;
|
||||
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER
|
||||
CREATE TABLE conditions(
|
||||
time TIMESTAMPTZ NOT NULL,
|
||||
device INTEGER,
|
||||
temperature FLOAT
|
||||
);
|
||||
SELECT * FROM create_hypertable('conditions', 'time',
|
||||
chunk_time_interval => '1 day'::interval);
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+------------+---------
|
||||
11 | public | conditions | t
|
||||
(1 row)
|
||||
|
||||
INSERT INTO conditions
|
||||
SELECT time, (random()*30)::int, random()*80 - 40
|
||||
FROM generate_series('2018-12-01 00:00'::timestamp, '2018-12-31 00:00'::timestamp, '10 min') AS time;
|
||||
CREATE MATERIALIZED VIEW conditions_summary
|
||||
WITH (timescaledb.continuous) AS
|
||||
SELECT device,
|
||||
time_bucket(INTERVAL '1 hour', "time") AS day,
|
||||
AVG(temperature) AS avg_temperature,
|
||||
MAX(temperature) AS max_temperature,
|
||||
MIN(temperature) AS min_temperature
|
||||
FROM conditions
|
||||
GROUP BY device, time_bucket(INTERVAL '1 hour', "time") WITH NO DATA;
|
||||
CALL refresh_continuous_aggregate('conditions_summary', NULL, NULL);
|
||||
ALTER TABLE conditions SET (timescaledb.compress);
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM drop_chunks('conditions', TIMESTAMPTZ '2018-12-15 00:00');
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
-- We need to have some chunks that are marked as dropped, otherwise
|
||||
-- we will not have a problem below.
|
||||
SELECT COUNT(*) AS dropped_chunks_count
|
||||
FROM _timescaledb_catalog.chunk
|
||||
WHERE dropped = TRUE;
|
||||
dropped_chunks_count
|
||||
----------------------
|
||||
14
|
||||
(1 row)
|
||||
|
||||
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('conditions', INTERVAL '1 day') \gset
|
||||
-- job compresses only 1 chunk at a time --
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{maxchunks_to_compress}', '1'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true'))
|
||||
FROM _timescaledb_config.bgw_job WHERE id = :job_id;
|
||||
alter_job
|
||||
-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1004,"@ 12 hours","@ 0",-1,"@ 1 hour",t,"{""verbose_log"": true, ""hypertable_id"": 11, ""compress_after"": ""@ 1 day"", ""maxchunks_to_compress"": 1}",-infinity,_timescaledb_functions.policy_compression_check,f,,)
|
||||
(1 row)
|
||||
|
||||
set client_min_messages TO LOG;
|
||||
CALL run_job(:job_id);
|
||||
LOG: statement: CALL run_job(1004);
|
||||
LOG: job 1004 completed processing chunk _timescaledb_internal._hyper_11_40_chunk
|
||||
set client_min_messages TO NOTICE;
|
||||
LOG: statement: set client_min_messages TO NOTICE;
|
||||
SELECT count(*) FROM timescaledb_information.chunks
|
||||
WHERE hypertable_name = 'conditions' and is_compressed = true;
|
||||
count
|
||||
-------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
\i include/recompress_basic.sql
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
CREATE OR REPLACE VIEW compressed_chunk_info_view AS
|
||||
SELECT
|
||||
h.schema_name AS hypertable_schema,
|
||||
h.table_name AS hypertable_name,
|
||||
c.schema_name as chunk_schema,
|
||||
c.table_name as chunk_name,
|
||||
c.status as chunk_status,
|
||||
comp.schema_name as compressed_chunk_schema,
|
||||
comp.table_name as compressed_chunk_name
|
||||
FROM
|
||||
_timescaledb_catalog.hypertable h JOIN
|
||||
_timescaledb_catalog.chunk c ON h.id = c.hypertable_id
|
||||
LEFT JOIN _timescaledb_catalog.chunk comp
|
||||
ON comp.id = c.compressed_chunk_id
|
||||
;
|
||||
CREATE TABLE test2 (timec timestamptz NOT NULL, i integer ,
|
||||
b bigint, t text);
|
||||
SELECT table_name from create_hypertable('test2', 'timec', chunk_time_interval=> INTERVAL '7 days');
|
||||
table_name
|
||||
------------
|
||||
test2
|
||||
(1 row)
|
||||
|
||||
INSERT INTO test2 SELECT q, 10, 11, 'hello' FROM generate_series( '2020-01-03 10:00:00+00', '2020-01-03 12:00:00+00' , '5 min'::interval) q;
|
||||
ALTER TABLE test2 set (timescaledb.compress,
|
||||
timescaledb.compress_segmentby = 'b',
|
||||
timescaledb.compress_orderby = 'timec DESC');
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('test2') c;
|
||||
compress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
---insert into the middle of the range ---
|
||||
INSERT INTO test2 values ( '2020-01-03 10:01:00+00', 20, 11, '2row');
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 11, '3row');
|
||||
INSERT INTO test2 values ( '2020-01-03 12:01:00+00', 20, 11, '4row');
|
||||
--- insert a new segmentby value ---
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:00+00', 20, 12, '12row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 26
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 1
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
(3 rows)
|
||||
|
||||
--check status for chunk --
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
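-- A minimal sketch for decoding the status value above, assuming the usual
-- chunk status bits (1 = compressed, 8 = partially compressed); a value of 9
-- therefore means a compressed chunk with rows inserted after compression.
SELECT chunk_name,
       (chunk_status & 1) > 0 AS is_compressed,
       (chunk_status & 8) > 0 AS is_partial
FROM compressed_chunk_info_view
WHERE hypertable_name = 'test2'
ORDER BY chunk_name;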
SELECT compressed_chunk_schema || '.' || compressed_chunk_name as "COMP_CHUNK_NAME",
|
||||
chunk_schema || '.' || chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' \gset
|
||||
SELECT count(*) from test2;
|
||||
count
|
||||
-------
|
||||
29
|
||||
(1 row)
|
||||
|
||||
-- call recompress_chunk inside a transaction. This should fail since
|
||||
-- it contains transaction-terminating commands.
|
||||
\set ON_ERROR_STOP 0
|
||||
START TRANSACTION;
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
ROLLBACK;
|
||||
\set ON_ERROR_STOP 1
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
-- Demonstrate that no locks are held on the hypertable, chunk, or the
|
||||
-- compressed chunk after recompress_chunk has executed.
|
||||
SELECT pid, locktype, relation, relation::regclass, mode, granted
|
||||
FROM pg_locks
|
||||
WHERE relation::regclass::text IN (:'CHUNK_NAME', :'COMP_CHUNK_NAME', 'test2')
|
||||
ORDER BY pid;
|
||||
pid | locktype | relation | relation | mode | granted
|
||||
-----+----------+----------+----------+------+---------
|
||||
(0 rows)
|
||||
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
--- insert into a compressed chunk again + a new chunk ---
|
||||
INSERT INTO test2 values ( '2020-01-03 11:01:03+00', 20, 11, '33row'),
|
||||
( '2020-01-03 11:01:06+00', 20, 11, '36row'),
|
||||
( '2020-01-03 11:02:00+00', 20, 12, '12row'),
|
||||
( '2020-04-03 00:02:00+00', 30, 13, '3013row');
|
||||
SELECT time_bucket(INTERVAL '2 hour', timec), b, count(*)
|
||||
FROM test2
|
||||
GROUP BY time_bucket(INTERVAL '2 hour', timec), b
|
||||
ORDER BY 1, 2;
|
||||
time_bucket | b | count
|
||||
------------------------------+----+-------
|
||||
Fri Jan 03 02:00:00 2020 PST | 11 | 28
|
||||
Fri Jan 03 02:00:00 2020 PST | 12 | 2
|
||||
Fri Jan 03 04:00:00 2020 PST | 11 | 2
|
||||
Thu Apr 02 17:00:00 2020 PDT | 13 | 1
|
||||
(4 rows)
|
||||
|
||||
--chunk status should show the previously compressed chunk as partially compressed
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
9 | _hyper_14_62_chunk
|
||||
0 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
SELECT add_compression_policy AS job_id
|
||||
FROM add_compression_policy('test2', '30d'::interval) \gset
|
||||
CALL run_job(:job_id);
|
||||
CALL run_job(:job_id);
|
||||
-- status should be compressed ---
|
||||
SELECT chunk_status,
|
||||
chunk_name as "CHUNK_NAME"
|
||||
FROM compressed_chunk_info_view
|
||||
WHERE hypertable_name = 'test2' ORDER BY chunk_name;
|
||||
chunk_status | CHUNK_NAME
|
||||
--------------+--------------------
|
||||
1 | _hyper_14_62_chunk
|
||||
1 | _hyper_14_64_chunk
|
||||
(2 rows)
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
-- call recompress_chunk when the chunk has nothing to recompress
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, true);
|
||||
psql:include/recompress_basic.sql:110: NOTICE: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
-- Calling it again with false errors out since there is nothing to recompress;
-- the chunk stays compressed for the test below.
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass, false);
|
||||
psql:include/recompress_basic.sql:113: ERROR: nothing to recompress in chunk "_hyper_14_62_chunk"
|
||||
--now decompress it, then try to recompress
|
||||
SELECT decompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
decompress_chunk
|
||||
------------------------------------------
|
||||
_timescaledb_internal._hyper_14_62_chunk
|
||||
(1 row)
|
||||
|
||||
CALL recompress_chunk(:'CHUNK_NAME'::regclass);
|
||||
psql:include/recompress_basic.sql:117: ERROR: call compress_chunk instead of recompress_chunk
|
||||
\set ON_ERROR_STOP 1
|
||||
-- test recompress policy
|
||||
CREATE TABLE metrics(time timestamptz NOT NULL);
|
||||
SELECT hypertable_id AS "HYPERTABLE_ID", schema_name, table_name, created FROM create_hypertable('metrics','time') \gset
|
||||
ALTER TABLE metrics SET (timescaledb.compress);
|
||||
-- create chunk with some data and compress
|
||||
INSERT INTO metrics SELECT '2000-01-01' FROM generate_series(1,10);
|
||||
-- create custom compression job without recompress boolean
|
||||
SELECT add_job('_timescaledb_functions.policy_compression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "compress_after": "@ 7 days"}')::jsonb, initial_start => '2000-01-01 00:00:00+00'::timestamptz) AS "JOB_COMPRESS" \gset
|
||||
-- first call should compress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
-- 2nd call should do nothing
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 9
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- should recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- disable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": false, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- nothing to do
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- do an INSERT so recompress has something to do
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
---- status should be 9
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- still nothing to do since we disabled recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 9
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
-- reenable recompress in compress job
|
||||
SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','true'), next_start => '2000-01-01 00:00:00+00'::timestamptz) FROM _timescaledb_config.bgw_job WHERE id = :JOB_COMPRESS;
|
||||
alter_job
|
||||
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
|
||||
(1006,"@ 7 days","@ 0",-1,"@ 5 mins",t,"{""recompress"": true, ""hypertable_id"": 16, ""compress_after"": ""@ 7 days""}","Fri Dec 31 16:00:00 1999 PST",,t,"Fri Dec 31 16:00:00 1999 PST",)
|
||||
(1 row)
|
||||
|
||||
-- should recompress now
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_COMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT add_job('_timescaledb_functions.policy_recompression','1w',('{"hypertable_id": '||:'HYPERTABLE_ID'||', "recompress_after": "@ 7 days", "maxchunks_to_compress": 1}')::jsonb) AS "JOB_RECOMPRESS" \gset
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
---- nothing to do yet
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
psql:include/recompress_basic.sql:189: NOTICE: no chunks for hypertable "public.metrics" that satisfy recompress chunk policy
|
||||
---- status should be 1
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
1
|
||||
(1 row)
|
||||
|
||||
-- create some work for recompress
|
||||
INSERT INTO metrics SELECT '2000-01-01';
|
||||
-- status should be 9
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
CALL run_job(:JOB_RECOMPRESS);
|
||||
-- status should be 9
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
--------------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT delete_job(:JOB_RECOMPRESS);
|
||||
delete_job
|
||||
------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Teardown test
|
||||
\c :TEST_DBNAME :ROLE_SUPERUSER
|
||||
REVOKE CREATE ON SCHEMA public FROM NOLOGIN_ROLE;
|
||||
DROP ROLE NOLOGIN_ROLE;
|
748
tsl/test/expected/telemetry_stats-13.out
Normal file
@ -0,0 +1,748 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
--telemetry tests that require a community license
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
-- function call info size is too variable for this test, so disable it
|
||||
SET timescaledb.telemetry_level='no_functions';
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Create a materialized view from the telemetry report so that we
|
||||
-- don't regenerate telemetry for every query. Filter heap_size for
|
||||
-- materialized views since PG14 reports a different heap size for
|
||||
-- them compared to earlier PG versions.
|
||||
CREATE MATERIALIZED VIEW telemetry_report AS
|
||||
SELECT (r #- '{relations,materialized_views,heap_size}') AS r
|
||||
FROM get_telemetry_report() r;
|
||||
CREATE VIEW relations AS
|
||||
SELECT r -> 'relations' AS rels
|
||||
FROM telemetry_report;
|
||||
SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs,
|
||||
rels -> 'hypertables' -> 'num_relations' AS num_hypertables
|
||||
FROM relations;
|
||||
num_continuous_aggs | num_hypertables
|
||||
---------------------+-----------------
|
||||
0 | 0
|
||||
(1 row)
|
||||
|
||||
-- check telemetry picks up flagged content from metadata
|
||||
SELECT r -> 'db_metadata' AS db_metadata
|
||||
FROM telemetry_report;
|
||||
db_metadata
|
||||
-------------
|
||||
{}
|
||||
(1 row)
|
||||
|
||||
-- check timescaledb_telemetry.cloud
|
||||
SELECT r -> 'instance_metadata' AS instance_metadata
|
||||
FROM telemetry_report r;
|
||||
instance_metadata
|
||||
-------------------
|
||||
{"cloud": "ci"}
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float);
|
||||
CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time);
|
||||
CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE hyper (LIKE normal);
|
||||
SELECT table_name FROM create_hypertable('hyper', 'time');
|
||||
table_name
|
||||
------------
|
||||
hyper
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW contagg
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg" is already up-to-date
|
||||
CREATE MATERIALIZED VIEW contagg_old
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg_old" is already up-to-date
|
||||
-- Create another view (already have the "relations" view)
|
||||
CREATE VIEW devices AS
|
||||
SELECT DISTINCT ON (device) device
|
||||
FROM hyper;
|
||||
-- Show relations with no data
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 8192, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Insert data
|
||||
INSERT INTO normal
|
||||
SELECT t, ceil(random() * 10)::int, random() * 30
|
||||
FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t;
|
||||
INSERT INTO hyper
|
||||
SELECT * FROM normal;
|
||||
INSERT INTO part
|
||||
SELECT * FROM normal;
|
||||
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
|
||||
CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
|
||||
-- ANALYZE to get updated reltuples stats
|
||||
ANALYZE normal, hyper, part;
|
||||
SELECT count(c) FROM show_chunks('hyper') c;
|
||||
count
|
||||
-------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg_old') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Update and show the telemetry report
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 155648, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 16384, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 229376, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Actual row count should be the same as reltuples stats for all tables
|
||||
SELECT (SELECT count(*) FROM normal) num_inserted_rows,
|
||||
(SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples,
|
||||
(SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples,
|
||||
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
|
||||
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
|
||||
-------------------+------------------+-----------------+----------------
|
||||
697 | 697 | 697 | 697
|
||||
(1 row)
|
||||
|
||||
-- Add compression
|
||||
ALTER TABLE hyper SET (timescaledb.compress);
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
|
||||
compress_chunk
|
||||
----------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_chunk
|
||||
_timescaledb_internal._hyper_1_2_chunk
|
||||
_timescaledb_internal._hyper_1_3_chunk
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
|
||||
NOTICE: defaulting compress_segmentby to device
|
||||
NOTICE: defaulting compress_orderby to hour
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
|
||||
compress_chunk
|
||||
-----------------------------------------
|
||||
_timescaledb_internal._hyper_2_10_chunk
|
||||
(1 row)
|
||||
|
||||
-- Turn off real-time aggregation
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
|
||||
ANALYZE normal, hyper, part;
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 32768, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 32768, +
|
||||
"compressed_row_count": 4, +
|
||||
"compressed_toast_size": 32768, +
|
||||
"num_compressed_chunks": 4, +
|
||||
"uncompressed_heap_size": 32768, +
|
||||
"uncompressed_row_count": 284, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 65536, +
|
||||
"num_compressed_hypertables": 1, +
|
||||
"compressed_row_count_frozen_immediately": 4 +
|
||||
}, +
|
||||
"indexes_size": 122880, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 413 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 180224, +
|
||||
"toast_size": 24576, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 40960, +
|
||||
"compressed_row_count": 10, +
|
||||
"num_compressed_caggs": 1, +
|
||||
"compressed_toast_size": 8192, +
|
||||
"num_compressed_chunks": 1, +
|
||||
"uncompressed_heap_size": 49152, +
|
||||
"uncompressed_row_count": 452, +
|
||||
"compressed_indexes_size": 16384, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 81920, +
|
||||
"compressed_row_count_frozen_immediately": 10+
|
||||
}, +
|
||||
"indexes_size": 180224, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 1 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
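-- A minimal sketch, assuming the numrows_frozen_immediately catalog column
-- backs the compressed_row_count_frozen_immediately telemetry field above:
-- the reported counter should match the catalog sum taken over all compressed
-- relations (hypertables and continuous aggregates combined).
SELECT sum(numrows_frozen_immediately) AS frozen_rows_total
FROM _timescaledb_catalog.compression_chunk_size;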
-- check that telemetry for fixed schedule jobs works
|
||||
create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_fixed';
|
||||
end
|
||||
$$;
|
||||
create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_drifting';
|
||||
end
|
||||
$$;
|
||||
-- before adding the jobs
|
||||
select get_telemetry_report()->'num_user_defined_actions_fixed';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select get_telemetry_report()->'num_user_defined_actions';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_fixed', '1 week');
|
||||
add_job
|
||||
---------
|
||||
1000
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_drifting', '1 week', fixed_schedule => false);
|
||||
add_job
|
||||
---------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
-- add continuous aggregate refresh policy for contagg
|
||||
select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1002
|
||||
(1 row)
|
||||
|
||||
select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1003
|
||||
(1 row)
|
||||
|
||||
-- add retention policy, fixed
|
||||
select add_retention_policy('hyper', interval '1 year', initial_start => now());
|
||||
add_retention_policy
|
||||
----------------------
|
||||
1004
|
||||
(1 row)
|
||||
|
||||
-- add compression policy
|
||||
select add_compression_policy('hyper', interval '3 weeks', initial_start => now());
|
||||
add_compression_policy
|
||||
------------------------
|
||||
1005
|
||||
(1 row)
|
||||
|
||||
select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r;
|
||||
uda_fixed | uda_drifting
|
||||
-----------+--------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r;
|
||||
contagg_fixed | contagg_drifting
|
||||
---------------+------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r;
|
||||
compress_fixed | retention_fixed
|
||||
----------------+-----------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
DELETE FROM _timescaledb_config.bgw_job WHERE id = 2;
|
||||
TRUNCATE _timescaledb_internal.job_errors;
|
||||
-- create some "errors" for testing
|
||||
INSERT INTO
|
||||
_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name)
|
||||
VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'),
|
||||
(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'),
|
||||
(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'),
|
||||
(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'),
|
||||
(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'),
|
||||
-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions
|
||||
(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate');
|
||||
-- create some errors for them
|
||||
INSERT INTO
|
||||
_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data)
|
||||
values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'),
|
||||
(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'),
|
||||
(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'),
|
||||
(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'),
|
||||
(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'),
|
||||
(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'),
|
||||
(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}');
|
||||
-- we have 3 error records for user-defined actions and four for policies, so we expect 4 types of jobs
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode');
|
||||
jsonb_pretty
|
||||
----------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"P0001": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"JF009": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"ABCDE": 1, +
|
||||
"P0001": 2 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": {+
|
||||
"P0001": 2 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- for job statistics, insert some records into bgw_job_stat
|
||||
INSERT INTO _timescaledb_internal.bgw_job_stat
|
||||
values
|
||||
(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0);
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type');
|
||||
jsonb_pretty
|
||||
------------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- create nested continuous aggregates - copied from cagg_on_cagg_common
|
||||
CREATE TABLE conditions (
|
||||
time timestamptz NOT NULL,
|
||||
temperature int
|
||||
);
|
||||
SELECT create_hypertable('conditions', 'time');
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(6,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW conditions_summary_hourly_1
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', "time") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_daily_2
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 day', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_hourly_1
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_weekly_3
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 week', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_daily_2
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested');
|
||||
jsonb_pretty
|
||||
--------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
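-- num_caggs_nested is 2 because conditions_summary_daily_2 and
-- conditions_summary_weekly_3 above are each defined on top of another
-- continuous aggregate rather than directly on the hypertable.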
DROP VIEW relations;
|
||||
DROP MATERIALIZED VIEW telemetry_report;
|
748
tsl/test/expected/telemetry_stats-14.out
Normal file
@ -0,0 +1,748 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
--telemetry tests that require a community license
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
-- function call info size is too variable for this test, so disable it
|
||||
SET timescaledb.telemetry_level='no_functions';
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Create a materialized view from the telemetry report so that we
|
||||
-- don't regenerate telemetry for every query. Filter heap_size for
|
||||
-- materialized views since PG14 reports a different heap size for
|
||||
-- them compared to earlier PG versions.
|
||||
CREATE MATERIALIZED VIEW telemetry_report AS
|
||||
SELECT (r #- '{relations,materialized_views,heap_size}') AS r
|
||||
FROM get_telemetry_report() r;
|
||||
CREATE VIEW relations AS
|
||||
SELECT r -> 'relations' AS rels
|
||||
FROM telemetry_report;
|
||||
SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs,
|
||||
rels -> 'hypertables' -> 'num_relations' AS num_hypertables
|
||||
FROM relations;
|
||||
num_continuous_aggs | num_hypertables
|
||||
---------------------+-----------------
|
||||
0 | 0
|
||||
(1 row)
|
||||
|
||||
-- check telemetry picks up flagged content from metadata
|
||||
SELECT r -> 'db_metadata' AS db_metadata
|
||||
FROM telemetry_report;
|
||||
db_metadata
|
||||
-------------
|
||||
{}
|
||||
(1 row)
|
||||
|
||||
-- check timescaledb_telemetry.cloud
|
||||
SELECT r -> 'instance_metadata' AS instance_metadata
|
||||
FROM telemetry_report r;
|
||||
instance_metadata
|
||||
-------------------
|
||||
{"cloud": "ci"}
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float);
|
||||
CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time);
|
||||
CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE hyper (LIKE normal);
|
||||
SELECT table_name FROM create_hypertable('hyper', 'time');
|
||||
table_name
|
||||
------------
|
||||
hyper
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW contagg
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg" is already up-to-date
|
||||
CREATE MATERIALIZED VIEW contagg_old
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg_old" is already up-to-date
|
||||
-- Create another view (already have the "relations" view)
|
||||
CREATE VIEW devices AS
|
||||
SELECT DISTINCT ON (device) device
|
||||
FROM hyper;
|
||||
-- Show relations with no data
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 8192, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Insert data
|
||||
INSERT INTO normal
|
||||
SELECT t, ceil(random() * 10)::int, random() * 30
|
||||
FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t;
|
||||
INSERT INTO hyper
|
||||
SELECT * FROM normal;
|
||||
INSERT INTO part
|
||||
SELECT * FROM normal;
|
||||
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
|
||||
CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
|
||||
-- ANALYZE to get updated reltuples stats
|
||||
ANALYZE normal, hyper, part;
|
||||
SELECT count(c) FROM show_chunks('hyper') c;
|
||||
count
|
||||
-------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg_old') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Update and show the telemetry report
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 155648, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 16384, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 229376, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Actual row count should be the same as reltuples stats for all tables
|
||||
SELECT (SELECT count(*) FROM normal) num_inserted_rows,
|
||||
(SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples,
|
||||
(SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples,
|
||||
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
|
||||
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
|
||||
-------------------+------------------+-----------------+----------------
|
||||
697 | 697 | 697 | 697
|
||||
(1 row)
|
||||
|
||||
-- Add compression
|
||||
ALTER TABLE hyper SET (timescaledb.compress);
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
|
||||
compress_chunk
|
||||
----------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_chunk
|
||||
_timescaledb_internal._hyper_1_2_chunk
|
||||
_timescaledb_internal._hyper_1_3_chunk
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
|
||||
NOTICE: defaulting compress_segmentby to device
|
||||
NOTICE: defaulting compress_orderby to hour
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
|
||||
compress_chunk
|
||||
-----------------------------------------
|
||||
_timescaledb_internal._hyper_2_10_chunk
|
||||
(1 row)
|
||||
|
||||
-- Turn off real-time aggregation
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
|
||||
ANALYZE normal, hyper, part;
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 106496, +
|
||||
"toast_size": 32768, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 65536, +
|
||||
"compressed_row_count": 4, +
|
||||
"compressed_toast_size": 32768, +
|
||||
"num_compressed_chunks": 4, +
|
||||
"uncompressed_heap_size": 32768, +
|
||||
"uncompressed_row_count": 284, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 65536, +
|
||||
"num_compressed_hypertables": 1, +
|
||||
"compressed_row_count_frozen_immediately": 4 +
|
||||
}, +
|
||||
"indexes_size": 122880, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 413 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 24576, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 49152, +
|
||||
"compressed_row_count": 10, +
|
||||
"num_compressed_caggs": 1, +
|
||||
"compressed_toast_size": 8192, +
|
||||
"num_compressed_chunks": 1, +
|
||||
"uncompressed_heap_size": 49152, +
|
||||
"uncompressed_row_count": 452, +
|
||||
"compressed_indexes_size": 16384, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 81920, +
|
||||
"compressed_row_count_frozen_immediately": 10+
|
||||
}, +
|
||||
"indexes_size": 180224, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 1 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- check that telemetry for fixed schedule jobs works
|
||||
create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_fixed';
|
||||
end
|
||||
$$;
|
||||
create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_drifting';
|
||||
end
|
||||
$$;
|
||||
-- before adding the jobs
|
||||
select get_telemetry_report()->'num_user_defined_actions_fixed';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select get_telemetry_report()->'num_user_defined_actions';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_fixed', '1 week');
|
||||
add_job
|
||||
---------
|
||||
1000
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_drifting', '1 week', fixed_schedule => false);
|
||||
add_job
|
||||
---------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
-- add continuous aggregate refresh policy for contagg
|
||||
select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1002
|
||||
(1 row)
|
||||
|
||||
select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1003
|
||||
(1 row)
|
||||
|
||||
-- add retention policy, fixed
|
||||
select add_retention_policy('hyper', interval '1 year', initial_start => now());
|
||||
add_retention_policy
|
||||
----------------------
|
||||
1004
|
||||
(1 row)
|
||||
|
||||
-- add compression policy
|
||||
select add_compression_policy('hyper', interval '3 weeks', initial_start => now());
|
||||
add_compression_policy
|
||||
------------------------
|
||||
1005
|
||||
(1 row)
|
||||
|
||||
select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r;
|
||||
uda_fixed | uda_drifting
|
||||
-----------+--------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r;
|
||||
contagg_fixed | contagg_drifting
|
||||
---------------+------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r;
|
||||
compress_fixed | retention_fixed
|
||||
----------------+-----------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
DELETE FROM _timescaledb_config.bgw_job WHERE id = 2;
|
||||
TRUNCATE _timescaledb_internal.job_errors;
|
||||
-- create some "errors" for testing
|
||||
INSERT INTO
|
||||
_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name)
|
||||
VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'),
|
||||
(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'),
|
||||
(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'),
|
||||
(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'),
|
||||
(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'),
|
||||
-- user decided to define a custom action in the _timescaledb_functions schema; we group it with the user-defined actions
|
||||
(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate');
|
||||
-- create some errors for them
|
||||
INSERT INTO
|
||||
_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data)
|
||||
values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'),
|
||||
(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'),
|
||||
(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'),
|
||||
(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'),
|
||||
(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'),
|
||||
(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'),
|
||||
(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}');
|
||||
-- we have 3 error records for user-defined actions and 4 for policies, so we expect 4 types of jobs
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode');
|
||||
jsonb_pretty
|
||||
----------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"P0001": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"JF009": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"ABCDE": 1, +
|
||||
"P0001": 2 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": {+
|
||||
"P0001": 2 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- for job statistics, insert some records into bgw_job_stat
|
||||
INSERT INTO _timescaledb_internal.bgw_job_stat
|
||||
values
|
||||
(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0);
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type');
|
||||
jsonb_pretty
|
||||
------------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- create nested continuous aggregates - copied from cagg_on_cagg_common
|
||||
CREATE TABLE conditions (
|
||||
time timestamptz NOT NULL,
|
||||
temperature int
|
||||
);
|
||||
SELECT create_hypertable('conditions', 'time');
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(6,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW conditions_summary_hourly_1
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', "time") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_daily_2
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 day', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_hourly_1
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_weekly_3
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 week', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_daily_2
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested');
|
||||
jsonb_pretty
|
||||
--------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP VIEW relations;
|
||||
DROP MATERIALIZED VIEW telemetry_report;
|
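Note on the new counter: the expected outputs above are the first to carry the compressed_row_count_frozen_immediately field in the compression sections of the telemetry report. As a quick sanity check on a live instance, a query along the following lines should return the counters for hypertables and continuous aggregates (a minimal sketch that only uses get_telemetry_report(), which the tests above already call; the JSON paths mirror the structure shown in the expected output, and the column aliases are illustrative):

-- Sketch: read the frozen-immediately row counts straight from the telemetry report.
SELECT r -> 'relations' -> 'hypertables' -> 'compression'
         -> 'compressed_row_count_frozen_immediately' AS hypertable_rows_frozen,
       r -> 'relations' -> 'continuous_aggregates' -> 'compression'
         -> 'compressed_row_count_frozen_immediately' AS cagg_rows_frozen
FROM get_telemetry_report() r;

With four chunks of "hyper" and one chunk of "contagg" compressed as in the test above, the expected values are 4 and 10 respectively.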
748
tsl/test/expected/telemetry_stats-15.out
Normal file
@ -0,0 +1,748 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
--telemetry tests that require a community license
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
-- function call info size is too variable for this test, so disable it
|
||||
SET timescaledb.telemetry_level='no_functions';
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Create a materialized view from the telemetry report so that we
|
||||
-- don't regenerate telemetry for every query. Filter heap_size for
|
||||
-- materialized views since PG14 reports a different heap size for
|
||||
-- them compared to earlier PG versions.
|
||||
CREATE MATERIALIZED VIEW telemetry_report AS
|
||||
SELECT (r #- '{relations,materialized_views,heap_size}') AS r
|
||||
FROM get_telemetry_report() r;
|
||||
CREATE VIEW relations AS
|
||||
SELECT r -> 'relations' AS rels
|
||||
FROM telemetry_report;
|
||||
SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs,
|
||||
rels -> 'hypertables' -> 'num_relations' AS num_hypertables
|
||||
FROM relations;
|
||||
num_continuous_aggs | num_hypertables
|
||||
---------------------+-----------------
|
||||
0 | 0
|
||||
(1 row)
|
||||
|
||||
-- check telemetry picks up flagged content from metadata
|
||||
SELECT r -> 'db_metadata' AS db_metadata
|
||||
FROM telemetry_report;
|
||||
db_metadata
|
||||
-------------
|
||||
{}
|
||||
(1 row)
|
||||
|
||||
-- check timescaledb_telemetry.cloud
|
||||
SELECT r -> 'instance_metadata' AS instance_metadata
|
||||
FROM telemetry_report r;
|
||||
instance_metadata
|
||||
-------------------
|
||||
{"cloud": "ci"}
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float);
|
||||
CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time);
|
||||
CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE hyper (LIKE normal);
|
||||
SELECT table_name FROM create_hypertable('hyper', 'time');
|
||||
table_name
|
||||
------------
|
||||
hyper
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW contagg
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg" is already up-to-date
|
||||
CREATE MATERIALIZED VIEW contagg_old
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg_old" is already up-to-date
|
||||
-- Create another view (already have the "relations" view)
|
||||
CREATE VIEW devices AS
|
||||
SELECT DISTINCT ON (device) device
|
||||
FROM hyper;
|
||||
-- Show relations with no data
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 8192, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Insert data
|
||||
INSERT INTO normal
|
||||
SELECT t, ceil(random() * 10)::int, random() * 30
|
||||
FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t;
|
||||
INSERT INTO hyper
|
||||
SELECT * FROM normal;
|
||||
INSERT INTO part
|
||||
SELECT * FROM normal;
|
||||
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
|
||||
CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
|
||||
-- ANALYZE to get updated reltuples stats
|
||||
ANALYZE normal, hyper, part;
|
||||
SELECT count(c) FROM show_chunks('hyper') c;
|
||||
count
|
||||
-------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg_old') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Update and show the telemetry report
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 155648, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 16384, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 229376, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Actual row count should be the same as reltuples stats for all tables
|
||||
SELECT (SELECT count(*) FROM normal) num_inserted_rows,
|
||||
(SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples,
|
||||
(SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples,
|
||||
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
|
||||
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
|
||||
-------------------+------------------+-----------------+----------------
|
||||
697 | 697 | 697 | 697
|
||||
(1 row)
|
||||
|
||||
-- Add compression
|
||||
ALTER TABLE hyper SET (timescaledb.compress);
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
|
||||
compress_chunk
|
||||
----------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_chunk
|
||||
_timescaledb_internal._hyper_1_2_chunk
|
||||
_timescaledb_internal._hyper_1_3_chunk
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
|
||||
NOTICE: defaulting compress_segmentby to device
|
||||
NOTICE: defaulting compress_orderby to hour
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
|
||||
compress_chunk
|
||||
-----------------------------------------
|
||||
_timescaledb_internal._hyper_2_10_chunk
|
||||
(1 row)
|
||||
|
||||
-- Turn off real-time aggregation
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
|
||||
ANALYZE normal, hyper, part;
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 106496, +
|
||||
"toast_size": 32768, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 65536, +
|
||||
"compressed_row_count": 4, +
|
||||
"compressed_toast_size": 32768, +
|
||||
"num_compressed_chunks": 4, +
|
||||
"uncompressed_heap_size": 32768, +
|
||||
"uncompressed_row_count": 284, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 65536, +
|
||||
"num_compressed_hypertables": 1, +
|
||||
"compressed_row_count_frozen_immediately": 4 +
|
||||
}, +
|
||||
"indexes_size": 122880, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 413 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 24576, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 49152, +
|
||||
"compressed_row_count": 10, +
|
||||
"num_compressed_caggs": 1, +
|
||||
"compressed_toast_size": 8192, +
|
||||
"num_compressed_chunks": 1, +
|
||||
"uncompressed_heap_size": 49152, +
|
||||
"uncompressed_row_count": 452, +
|
||||
"compressed_indexes_size": 16384, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 81920, +
|
||||
"compressed_row_count_frozen_immediately": 10+
|
||||
}, +
|
||||
"indexes_size": 180224, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 1 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- check that telemetry for fixed schedule jobs works
|
||||
create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_fixed';
|
||||
end
|
||||
$$;
|
||||
create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_drifting';
|
||||
end
|
||||
$$;
|
||||
-- before adding the jobs
|
||||
select get_telemetry_report()->'num_user_defined_actions_fixed';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select get_telemetry_report()->'num_user_defined_actions';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_fixed', '1 week');
|
||||
add_job
|
||||
---------
|
||||
1000
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_drifting', '1 week', fixed_schedule => false);
|
||||
add_job
|
||||
---------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
-- add continuous aggregate refresh policy for contagg
|
||||
select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1002
|
||||
(1 row)
|
||||
|
||||
select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1003
|
||||
(1 row)
|
||||
|
||||
-- add retention policy, fixed
|
||||
select add_retention_policy('hyper', interval '1 year', initial_start => now());
|
||||
add_retention_policy
|
||||
----------------------
|
||||
1004
|
||||
(1 row)
|
||||
|
||||
-- add compression policy
|
||||
select add_compression_policy('hyper', interval '3 weeks', initial_start => now());
|
||||
add_compression_policy
|
||||
------------------------
|
||||
1005
|
||||
(1 row)
|
||||
|
||||
select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r;
|
||||
uda_fixed | uda_drifting
|
||||
-----------+--------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r;
|
||||
contagg_fixed | contagg_drifting
|
||||
---------------+------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r;
|
||||
compress_fixed | retention_fixed
|
||||
----------------+-----------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
DELETE FROM _timescaledb_config.bgw_job WHERE id = 2;
|
||||
TRUNCATE _timescaledb_internal.job_errors;
|
||||
-- create some "errors" for testing
|
||||
INSERT INTO
|
||||
_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name)
|
||||
VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'),
|
||||
(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'),
|
||||
(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'),
|
||||
(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'),
|
||||
(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'),
|
||||
-- user decided to define a custom action in the _timescaledb_functions schema; we group it with the user-defined actions
|
||||
(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate');
|
||||
-- create some errors for them
|
||||
INSERT INTO
|
||||
_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data)
|
||||
values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'),
|
||||
(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'),
|
||||
(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'),
|
||||
(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'),
|
||||
(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'),
|
||||
(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'),
|
||||
(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}');
|
||||
-- we have 3 error records for user-defined actions and 4 for policies, so we expect 4 types of jobs
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode');
|
||||
jsonb_pretty
|
||||
----------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"P0001": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"JF009": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"ABCDE": 1, +
|
||||
"P0001": 2 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": {+
|
||||
"P0001": 2 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- for job statistics, insert some records into bgw_job_stat
|
||||
INSERT INTO _timescaledb_internal.bgw_job_stat
|
||||
values
|
||||
(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0);
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type');
|
||||
jsonb_pretty
|
||||
------------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- create nested continuous aggregates - copied from cagg_on_cagg_common
|
||||
CREATE TABLE conditions (
|
||||
time timestamptz NOT NULL,
|
||||
temperature int
|
||||
);
|
||||
SELECT create_hypertable('conditions', 'time');
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(6,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW conditions_summary_hourly_1
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', "time") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_daily_2
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 day', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_hourly_1
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_weekly_3
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 week', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_daily_2
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested');
|
||||
jsonb_pretty
|
||||
--------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP VIEW relations;
|
||||
DROP MATERIALIZED VIEW telemetry_report;
|
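The errors_by_sqlerrcode object exercised above buckets job failures by job type and SQLSTATE. For local inspection, a rough sketch like the following can approximate that grouping directly from _timescaledb_internal.job_errors (assumptions: error_data is stored as jsonb, and the grouping is simply by the proc_name recorded in error_data, so user-defined actions appear under their own procedure names rather than in a single user_defined_action bucket as the report does):

-- Rough sketch: approximate the per-job-type error counts from the job_errors table.
-- Assumes error_data is jsonb; adjust with a cast if it is stored as text.
SELECT error_data ->> 'proc_name'  AS proc_name,
       error_data ->> 'sqlerrcode' AS sqlerrcode,
       count(*)                    AS num_errors
FROM _timescaledb_internal.job_errors
GROUP BY 1, 2
ORDER BY 1, 2;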
748
tsl/test/expected/telemetry_stats-16.out
Normal file
@ -0,0 +1,748 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
--telemetry tests that require a community license
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
-- function call info size is too variable for this test, so disable it
|
||||
SET timescaledb.telemetry_level='no_functions';
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Create a materialized view from the telemetry report so that we
|
||||
-- don't regenerate telemetry for every query. Filter heap_size for
|
||||
-- materialized views since PG14 reports a different heap size for
|
||||
-- them compared to earlier PG versions.
|
||||
CREATE MATERIALIZED VIEW telemetry_report AS
|
||||
SELECT (r #- '{relations,materialized_views,heap_size}') AS r
|
||||
FROM get_telemetry_report() r;
|
||||
CREATE VIEW relations AS
|
||||
SELECT r -> 'relations' AS rels
|
||||
FROM telemetry_report;
|
||||
SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs,
|
||||
rels -> 'hypertables' -> 'num_relations' AS num_hypertables
|
||||
FROM relations;
|
||||
num_continuous_aggs | num_hypertables
|
||||
---------------------+-----------------
|
||||
0 | 0
|
||||
(1 row)
|
||||
|
||||
-- check telemetry picks up flagged content from metadata
|
||||
SELECT r -> 'db_metadata' AS db_metadata
|
||||
FROM telemetry_report;
|
||||
db_metadata
|
||||
-------------
|
||||
{}
|
||||
(1 row)
|
||||
|
||||
-- check timescaledb_telemetry.cloud
|
||||
SELECT r -> 'instance_metadata' AS instance_metadata
|
||||
FROM telemetry_report r;
|
||||
instance_metadata
|
||||
-------------------
|
||||
{"cloud": "ci"}
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float);
|
||||
CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time);
|
||||
CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE hyper (LIKE normal);
|
||||
SELECT table_name FROM create_hypertable('hyper', 'time');
|
||||
table_name
|
||||
------------
|
||||
hyper
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW contagg
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg" is already up-to-date
|
||||
CREATE MATERIALIZED VIEW contagg_old
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg_old" is already up-to-date
|
||||
-- Create another view (already have the "relations" view)
|
||||
CREATE VIEW devices AS
|
||||
SELECT DISTINCT ON (device) device
|
||||
FROM hyper;
|
||||
-- Show relations with no data
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 8192, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Insert data
|
||||
INSERT INTO normal
|
||||
SELECT t, ceil(random() * 10)::int, random() * 30
|
||||
FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t;
|
||||
INSERT INTO hyper
|
||||
SELECT * FROM normal;
|
||||
INSERT INTO part
|
||||
SELECT * FROM normal;
|
||||
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
|
||||
CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
|
||||
-- ANALYZE to get updated reltuples stats
|
||||
ANALYZE normal, hyper, part;
|
||||
SELECT count(c) FROM show_chunks('hyper') c;
|
||||
count
|
||||
-------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg_old') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Update and show the telemetry report
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 155648, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 16384, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 229376, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0+
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Actual row count should be the same as reltuples stats for all tables
|
||||
SELECT (SELECT count(*) FROM normal) num_inserted_rows,
|
||||
(SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples,
|
||||
(SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples,
|
||||
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
|
||||
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
|
||||
-------------------+------------------+-----------------+----------------
|
||||
697 | 697 | 697 | 697
|
||||
(1 row)
|
||||
|
||||
-- Add compression
|
||||
ALTER TABLE hyper SET (timescaledb.compress);
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
|
||||
compress_chunk
|
||||
----------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_chunk
|
||||
_timescaledb_internal._hyper_1_2_chunk
|
||||
_timescaledb_internal._hyper_1_3_chunk
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
|
||||
NOTICE: defaulting compress_segmentby to device
|
||||
NOTICE: defaulting compress_orderby to hour
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
|
||||
compress_chunk
|
||||
-----------------------------------------
|
||||
_timescaledb_internal._hyper_2_10_chunk
|
||||
(1 row)
|
||||
|
||||
-- Turn off real-time aggregation
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
|
||||
ANALYZE normal, hyper, part;
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 106496, +
|
||||
"toast_size": 32768, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 65536, +
|
||||
"compressed_row_count": 4, +
|
||||
"compressed_toast_size": 32768, +
|
||||
"num_compressed_chunks": 4, +
|
||||
"uncompressed_heap_size": 32768, +
|
||||
"uncompressed_row_count": 284, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 65536, +
|
||||
"num_compressed_hypertables": 1, +
|
||||
"compressed_row_count_frozen_immediately": 4 +
|
||||
}, +
|
||||
"indexes_size": 122880, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 413 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 24576, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 49152, +
|
||||
"compressed_row_count": 10, +
|
||||
"num_compressed_caggs": 1, +
|
||||
"compressed_toast_size": 8192, +
|
||||
"num_compressed_chunks": 1, +
|
||||
"uncompressed_heap_size": 49152, +
|
||||
"uncompressed_row_count": 452, +
|
||||
"compressed_indexes_size": 16384, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 81920, +
|
||||
"compressed_row_count_frozen_immediately": 10+
|
||||
}, +
|
||||
"indexes_size": 180224, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 1 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0, +
|
||||
"compressed_row_count_frozen_immediately": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- check telemetry for fixed schedule jobs works
|
||||
create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_fixed';
|
||||
end
|
||||
$$;
|
||||
create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_drifting';
|
||||
end
|
||||
$$;
|
||||
-- before adding the jobs
|
||||
select get_telemetry_report()->'num_user_defined_actions_fixed';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select get_telemetry_report()->'num_user_defined_actions';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_fixed', '1 week');
|
||||
add_job
|
||||
---------
|
||||
1000
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_drifting', '1 week', fixed_schedule => false);
|
||||
add_job
|
||||
---------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
-- add continuous aggregate refresh policy for contagg
|
||||
select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1002
|
||||
(1 row)
|
||||
|
||||
select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1003
|
||||
(1 row)
|
||||
|
||||
-- add retention policy, fixed
|
||||
select add_retention_policy('hyper', interval '1 year', initial_start => now());
|
||||
add_retention_policy
|
||||
----------------------
|
||||
1004
|
||||
(1 row)
|
||||
|
||||
-- add compression policy
|
||||
select add_compression_policy('hyper', interval '3 weeks', initial_start => now());
|
||||
add_compression_policy
|
||||
------------------------
|
||||
1005
|
||||
(1 row)
|
||||
|
||||
select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r;
|
||||
uda_fixed | uda_drifting
|
||||
-----------+--------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r;
|
||||
contagg_fixed | contagg_drifting
|
||||
---------------+------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r;
|
||||
compress_fixed | retention_fixed
|
||||
----------------+-----------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
DELETE FROM _timescaledb_config.bgw_job WHERE id = 2;
|
||||
TRUNCATE _timescaledb_internal.job_errors;
|
||||
-- create some "errors" for testing
|
||||
INSERT INTO
|
||||
_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name)
|
||||
VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'),
|
||||
(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'),
|
||||
(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'),
|
||||
(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'),
|
||||
(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'),
|
||||
-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions
|
||||
(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate');
|
||||
-- create some errors for them
|
||||
INSERT INTO
|
||||
_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data)
|
||||
values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'),
|
||||
(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'),
|
||||
(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'),
|
||||
(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'),
|
||||
(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'),
|
||||
(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'),
|
||||
(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}');
|
||||
-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode');
|
||||
jsonb_pretty
|
||||
----------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"P0001": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"JF009": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"ABCDE": 1, +
|
||||
"P0001": 2 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": {+
|
||||
"P0001": 2 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- for job statistics, insert some records into bgw_job_stats
|
||||
INSERT INTO _timescaledb_internal.bgw_job_stat
|
||||
values
|
||||
(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0);
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type');
|
||||
jsonb_pretty
|
||||
------------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- create nested continuous aggregates - copied from cagg_on_cagg_common
|
||||
CREATE TABLE conditions (
|
||||
time timestamptz NOT NULL,
|
||||
temperature int
|
||||
);
|
||||
SELECT create_hypertable('conditions', 'time');
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(6,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW conditions_summary_hourly_1
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', "time") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_daily_2
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 day', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_hourly_1
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_weekly_3
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 week', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_daily_2
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested');
|
||||
jsonb_pretty
|
||||
--------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP VIEW relations;
|
||||
DROP MATERIALIZED VIEW telemetry_report;
|
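The refreshed expected output above is where the new compressed_row_count_frozen_immediately field first appears. As a minimal sketch of where that number can be inspected directly, assuming the per-chunk counter behind it is the numrows_frozen_immediately column added to _timescaledb_catalog.compression_chunk_size by this change, one could run:

-- Per-chunk counters (column name assumed from this change's catalog update)
SELECT chunk_id,
       numrows_post_compression,
       numrows_frozen_immediately
  FROM _timescaledb_catalog.compression_chunk_size
 ORDER BY chunk_id;
-- Total across all chunks, i.e. what a telemetry-style aggregate would report
SELECT sum(numrows_frozen_immediately) AS frozen_immediately_total
  FROM _timescaledb_catalog.compression_chunk_size;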
@ -1,736 +0,0 @@
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
--telemetry tests that require a community license
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
-- function call info size is too variable for this test, so disable it
|
||||
SET timescaledb.telemetry_level='no_functions';
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Create a materialized view from the telemetry report so that we
|
||||
-- don't regenerate telemetry for every query. Filter heap_size for
|
||||
-- materialized views since PG14 reports a different heap size for
|
||||
-- them compared to earlier PG versions.
|
||||
CREATE MATERIALIZED VIEW telemetry_report AS
|
||||
SELECT (r #- '{relations,materialized_views,heap_size}') AS r
|
||||
FROM get_telemetry_report() r;
|
||||
CREATE VIEW relations AS
|
||||
SELECT r -> 'relations' AS rels
|
||||
FROM telemetry_report;
|
||||
SELECT rels -> 'continuous_aggregates' -> 'num_relations' AS num_continuous_aggs,
|
||||
rels -> 'hypertables' -> 'num_relations' AS num_hypertables
|
||||
FROM relations;
|
||||
num_continuous_aggs | num_hypertables
|
||||
---------------------+-----------------
|
||||
0 | 0
|
||||
(1 row)
|
||||
|
||||
-- check telemetry picks up flagged content from metadata
|
||||
SELECT r -> 'db_metadata' AS db_metadata
|
||||
FROM telemetry_report;
|
||||
db_metadata
|
||||
-------------
|
||||
{}
|
||||
(1 row)
|
||||
|
||||
-- check timescaledb_telemetry.cloud
|
||||
SELECT r -> 'instance_metadata' AS instance_metadata
|
||||
FROM telemetry_report r;
|
||||
instance_metadata
|
||||
-------------------
|
||||
{"cloud": "ci"}
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE normal (time timestamptz NOT NULL, device int, temp float);
|
||||
CREATE TABLE part (time timestamptz NOT NULL, device int, temp float) PARTITION BY RANGE (time);
|
||||
CREATE TABLE part_t1 PARTITION OF part FOR VALUES FROM ('2018-01-01') TO ('2018-02-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t2 PARTITION OF part FOR VALUES FROM ('2018-02-01') TO ('2018-03-01') PARTITION BY HASH (device);
|
||||
CREATE TABLE part_t1_d1 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t1_d2 PARTITION OF part_t1 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE part_t2_d1 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 0);
|
||||
CREATE TABLE part_t2_d2 PARTITION OF part_t2 FOR VALUES WITH (MODULUS 2, REMAINDER 1);
|
||||
CREATE TABLE hyper (LIKE normal);
|
||||
SELECT table_name FROM create_hypertable('hyper', 'time');
|
||||
table_name
|
||||
------------
|
||||
hyper
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW contagg
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg" is already up-to-date
|
||||
CREATE MATERIALIZED VIEW contagg_old
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=false, timescaledb.finalized=false) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', time) AS hour,
|
||||
device,
|
||||
min(time)
|
||||
FROM
|
||||
hyper
|
||||
GROUP BY hour, device;
|
||||
NOTICE: continuous aggregate "contagg_old" is already up-to-date
|
||||
-- Create another view (already have the "relations" view)
|
||||
CREATE VIEW devices AS
|
||||
SELECT DISTINCT ON (device) device
|
||||
FROM hyper;
|
||||
-- Show relations with no data
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 8192, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0+
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Insert data
|
||||
INSERT INTO normal
|
||||
SELECT t, ceil(random() * 10)::int, random() * 30
|
||||
FROM generate_series('2018-01-01'::timestamptz, '2018-02-28', '2h') t;
|
||||
INSERT INTO hyper
|
||||
SELECT * FROM normal;
|
||||
INSERT INTO part
|
||||
SELECT * FROM normal;
|
||||
CALL refresh_continuous_aggregate('contagg', NULL, NULL);
|
||||
CALL refresh_continuous_aggregate('contagg_old', NULL, NULL);
|
||||
-- ANALYZE to get updated reltuples stats
|
||||
ANALYZE normal, hyper, part;
|
||||
SELECT count(c) FROM show_chunks('hyper') c;
|
||||
count
|
||||
-------
|
||||
9
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
SELECT count(c) FROM show_chunks('contagg_old') c;
|
||||
count
|
||||
-------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
-- Update and show the telemetry report
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 155648, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 188416, +
|
||||
"toast_size": 16384, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"num_compressed_caggs": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0 +
|
||||
}, +
|
||||
"indexes_size": 229376, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 2 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0+
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- Actual row count should be the same as reltuples stats for all tables
|
||||
SELECT (SELECT count(*) FROM normal) num_inserted_rows,
|
||||
(SELECT rels -> 'tables' -> 'num_reltuples' FROM relations) normal_reltuples,
|
||||
(SELECT rels -> 'hypertables' -> 'num_reltuples' FROM relations) hyper_reltuples,
|
||||
(SELECT rels -> 'partitioned_tables' -> 'num_reltuples' FROM relations) part_reltuples;
|
||||
num_inserted_rows | normal_reltuples | hyper_reltuples | part_reltuples
|
||||
-------------------+------------------+-----------------+----------------
|
||||
697 | 697 | 697 | 697
|
||||
(1 row)
|
||||
|
||||
-- Add compression
|
||||
ALTER TABLE hyper SET (timescaledb.compress);
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('hyper') c ORDER BY c LIMIT 4;
|
||||
compress_chunk
|
||||
----------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_chunk
|
||||
_timescaledb_internal._hyper_1_2_chunk
|
||||
_timescaledb_internal._hyper_1_3_chunk
|
||||
_timescaledb_internal._hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.compress);
|
||||
NOTICE: defaulting compress_segmentby to device
|
||||
NOTICE: defaulting compress_orderby to hour
|
||||
SELECT compress_chunk(c)
|
||||
FROM show_chunks('contagg') c ORDER BY c LIMIT 1;
|
||||
compress_chunk
|
||||
-----------------------------------------
|
||||
_timescaledb_internal._hyper_2_10_chunk
|
||||
(1 row)
|
||||
|
||||
-- Turn off real-time aggregation
|
||||
ALTER MATERIALIZED VIEW contagg SET (timescaledb.materialized_only = true);
|
||||
ANALYZE normal, hyper, part;
|
||||
REFRESH MATERIALIZED VIEW telemetry_report;
|
||||
SELECT jsonb_pretty(rels) AS relations FROM relations;
|
||||
relations
|
||||
-----------------------------------------------------
|
||||
{ +
|
||||
"views": { +
|
||||
"num_relations": 2 +
|
||||
}, +
|
||||
"tables": { +
|
||||
"heap_size": 65536, +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"hypertables": { +
|
||||
"heap_size": 73728, +
|
||||
"toast_size": 32768, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 32768, +
|
||||
"compressed_row_count": 4, +
|
||||
"compressed_toast_size": 32768, +
|
||||
"num_compressed_chunks": 4, +
|
||||
"uncompressed_heap_size": 32768, +
|
||||
"uncompressed_row_count": 284, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 65536, +
|
||||
"num_compressed_hypertables": 1 +
|
||||
}, +
|
||||
"indexes_size": 122880, +
|
||||
"num_children": 9, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 413 +
|
||||
}, +
|
||||
"materialized_views": { +
|
||||
"toast_size": 8192, +
|
||||
"indexes_size": 0, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"partitioned_tables": { +
|
||||
"heap_size": 98304, +
|
||||
"toast_size": 0, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 6, +
|
||||
"num_relations": 1, +
|
||||
"num_reltuples": 697 +
|
||||
}, +
|
||||
"continuous_aggregates": { +
|
||||
"heap_size": 180224, +
|
||||
"toast_size": 24576, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 40960, +
|
||||
"compressed_row_count": 10, +
|
||||
"num_compressed_caggs": 1, +
|
||||
"compressed_toast_size": 8192, +
|
||||
"num_compressed_chunks": 1, +
|
||||
"uncompressed_heap_size": 49152, +
|
||||
"uncompressed_row_count": 452, +
|
||||
"compressed_indexes_size": 16384, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 81920 +
|
||||
}, +
|
||||
"indexes_size": 180224, +
|
||||
"num_children": 4, +
|
||||
"num_relations": 2, +
|
||||
"num_reltuples": 0, +
|
||||
"num_caggs_nested": 0, +
|
||||
"num_caggs_finalized": 1, +
|
||||
"num_caggs_on_distributed_hypertables": 0, +
|
||||
"num_caggs_using_real_time_aggregation": 1 +
|
||||
}, +
|
||||
"distributed_hypertables_data_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0 +
|
||||
}, +
|
||||
"distributed_hypertables_access_node": { +
|
||||
"heap_size": 0, +
|
||||
"toast_size": 0, +
|
||||
"compression": { +
|
||||
"compressed_heap_size": 0, +
|
||||
"compressed_row_count": 0, +
|
||||
"compressed_toast_size": 0, +
|
||||
"num_compressed_chunks": 0, +
|
||||
"uncompressed_heap_size": 0, +
|
||||
"uncompressed_row_count": 0, +
|
||||
"compressed_indexes_size": 0, +
|
||||
"uncompressed_toast_size": 0, +
|
||||
"uncompressed_indexes_size": 0, +
|
||||
"num_compressed_hypertables": 0 +
|
||||
}, +
|
||||
"indexes_size": 0, +
|
||||
"num_children": 0, +
|
||||
"num_relations": 0, +
|
||||
"num_reltuples": 0, +
|
||||
"num_replica_chunks": 0, +
|
||||
"num_replicated_distributed_hypertables": 0+
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- check telemetry for fixed schedule jobs works
|
||||
create or replace procedure job_test_fixed(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_fixed';
|
||||
end
|
||||
$$;
|
||||
create or replace procedure job_test_drifting(jobid int, config jsonb) language plpgsql as $$
|
||||
begin
|
||||
raise log 'this is job_test_drifting';
|
||||
end
|
||||
$$;
|
||||
-- before adding the jobs
|
||||
select get_telemetry_report()->'num_user_defined_actions_fixed';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select get_telemetry_report()->'num_user_defined_actions';
|
||||
?column?
|
||||
----------
|
||||
0
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_fixed', '1 week');
|
||||
add_job
|
||||
---------
|
||||
1000
|
||||
(1 row)
|
||||
|
||||
select add_job('job_test_drifting', '1 week', fixed_schedule => false);
|
||||
add_job
|
||||
---------
|
||||
1001
|
||||
(1 row)
|
||||
|
||||
-- add continuous aggregate refresh policy for contagg
|
||||
select add_continuous_aggregate_policy('contagg', interval '3 weeks', NULL, interval '3 weeks'); -- drifting
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1002
|
||||
(1 row)
|
||||
|
||||
select add_continuous_aggregate_policy('contagg_old', interval '3 weeks', NULL, interval '3 weeks', initial_start => now()); -- fixed
|
||||
add_continuous_aggregate_policy
|
||||
---------------------------------
|
||||
1003
|
||||
(1 row)
|
||||
|
||||
-- add retention policy, fixed
|
||||
select add_retention_policy('hyper', interval '1 year', initial_start => now());
|
||||
add_retention_policy
|
||||
----------------------
|
||||
1004
|
||||
(1 row)
|
||||
|
||||
-- add compression policy
|
||||
select add_compression_policy('hyper', interval '3 weeks', initial_start => now());
|
||||
add_compression_policy
|
||||
------------------------
|
||||
1005
|
||||
(1 row)
|
||||
|
||||
select r->'num_user_defined_actions_fixed' as UDA_fixed, r->'num_user_defined_actions' AS UDA_drifting FROM get_telemetry_report() r;
|
||||
uda_fixed | uda_drifting
|
||||
-----------+--------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_continuous_aggs_policies_fixed' as contagg_fixed, r->'num_continuous_aggs_policies' as contagg_drifting FROM get_telemetry_report() r;
|
||||
contagg_fixed | contagg_drifting
|
||||
---------------+------------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
select r->'num_compression_policies_fixed' as compress_fixed, r->'num_retention_policies_fixed' as retention_fixed FROM get_telemetry_report() r;
|
||||
compress_fixed | retention_fixed
|
||||
----------------+-----------------
|
||||
1 | 1
|
||||
(1 row)
|
||||
|
||||
DELETE FROM _timescaledb_config.bgw_job WHERE id = 2;
|
||||
TRUNCATE _timescaledb_internal.job_errors;
|
||||
-- create some "errors" for testing
|
||||
INSERT INTO
|
||||
_timescaledb_config.bgw_job(id, application_name, schedule_interval, max_runtime, max_retries, retry_period, proc_schema, proc_name)
|
||||
VALUES (2000, 'User-Defined Action [2000]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_1'),
|
||||
(2001, 'User-Defined Action [2001]', interval '3 days', interval '1 hour', 5, interval '5 min', 'public', 'custom_action_2'),
|
||||
(2002, 'Compression Policy [2002]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_compression'),
|
||||
(2003, 'Retention Policy [2003]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_retention'),
|
||||
(2004, 'Refresh Continuous Aggregate Policy [2004]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate'),
|
||||
-- user decided to define a custom action in the _timescaledb_functions schema, we group it with the User-defined actions
|
||||
(2005, 'User-Defined Action [2005]', interval '3 days', interval '1 hour', 5, interval '5 min', '_timescaledb_functions', 'policy_refresh_continuous_aggregate');
|
||||
-- create some errors for them
|
||||
INSERT INTO
|
||||
_timescaledb_internal.job_errors(job_id, pid, start_time, finish_time, error_data)
|
||||
values (2000, 12345, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_1"}'),
|
||||
(2000, 23456, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"ABCDE", "proc_schema": "public", "proc_name": "custom_action_1"}'),
|
||||
(2001, 54321, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"public", "proc_name": "custom_action_2"}'),
|
||||
(2002, 23443, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"JF009", "proc_schema":"_timescaledb_functions", "proc_name": "policy_compression"}'),
|
||||
(2003, 14567, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_retention"}'),
|
||||
(2004, 78907, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}'),
|
||||
(2005, 45757, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '{"sqlerrcode":"P0001", "proc_schema":"_timescaledb_functions", "proc_name": "policy_refresh_continuous_aggregate"}');
|
||||
-- we have 3 error records for user-defined actions, and three for policies, so we expect 4 types of jobs
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'errors_by_sqlerrcode');
|
||||
jsonb_pretty
|
||||
----------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"P0001": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"JF009": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"ABCDE": 1, +
|
||||
"P0001": 2 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": {+
|
||||
"P0001": 2 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- for job statistics, insert some records into bgw_job_stats
|
||||
INSERT INTO _timescaledb_internal.bgw_job_stat
|
||||
values
|
||||
(2000, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2001, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2002, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2003, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2004, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0),
|
||||
(2005, '2040-01-01 00:00:00+00'::timestamptz, '2040-01-01 00:00:01+00'::timestamptz, '-infinity'::timestamptz, '-infinity'::timestamptz,
|
||||
false, 1, interval '00:00:00', interval '00:00:02', 0, 1, 0, 1, 0);
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'stats_by_job_type');
|
||||
jsonb_pretty
|
||||
------------------------------------------------
|
||||
{ +
|
||||
"policy_retention": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_compression": { +
|
||||
"total_runs": 1, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 1, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 2 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"user_defined_action": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
}, +
|
||||
"policy_refresh_continuous_aggregate": { +
|
||||
"total_runs": 2, +
|
||||
"total_crashes": 0, +
|
||||
"total_duration": "@ 0", +
|
||||
"total_failures": 2, +
|
||||
"total_successes": 0, +
|
||||
"max_consecutive_crashes": 0, +
|
||||
"total_duration_failures": "@ 4 secs",+
|
||||
"max_consecutive_failures": 1 +
|
||||
} +
|
||||
}
|
||||
(1 row)
|
||||
|
||||
-- create nested continuous aggregates - copied from cagg_on_cagg_common
|
||||
CREATE TABLE conditions (
|
||||
time timestamptz NOT NULL,
|
||||
temperature int
|
||||
);
|
||||
SELECT create_hypertable('conditions', 'time');
|
||||
create_hypertable
|
||||
-------------------------
|
||||
(6,public,conditions,t)
|
||||
(1 row)
|
||||
|
||||
CREATE MATERIALIZED VIEW conditions_summary_hourly_1
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 hour', "time") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_daily_2
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 day', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_hourly_1
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
CREATE MATERIALIZED VIEW conditions_summary_weekly_3
|
||||
WITH (timescaledb.continuous, timescaledb.materialized_only=true) AS
|
||||
SELECT
|
||||
time_bucket('1 week', "bucket") AS bucket,
|
||||
SUM(temperature) AS temperature
|
||||
FROM conditions_summary_daily_2
|
||||
GROUP BY 1
|
||||
WITH NO DATA;
|
||||
SELECT jsonb_pretty(get_telemetry_report() -> 'relations' -> 'continuous_aggregates' -> 'num_caggs_nested');
|
||||
jsonb_pretty
|
||||
--------------
|
||||
2
|
||||
(1 row)
|
||||
|
||||
DROP VIEW relations;
|
||||
DROP MATERIALIZED VIEW telemetry_report;
|
145
tsl/test/isolation/expected/compression_freeze.out
Normal file
@ -0,0 +1,145 @@
|
||||
Parsed test spec with 2 sessions
|
||||
|
||||
starting permutation: s1_select_count s2_select_count_and_stats
|
||||
step s1_select_count:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
step s2_select_count_and_stats:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
chunk_schema |chunk_name |compression_status
|
||||
---------------------+--------------------+------------------
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s1_select_count s1_compress s1_select_count s2_select_count_and_stats
|
||||
step s1_select_count:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
step s1_compress:
|
||||
SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i;
|
||||
|
||||
count
|
||||
-----
|
||||
2
|
||||
(1 row)
|
||||
|
||||
step s1_select_count:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
step s2_select_count_and_stats:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
chunk_schema |chunk_name |compression_status
|
||||
---------------------+--------------------+------------------
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Compressed
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Compressed
|
||||
(2 rows)
|
||||
|
||||
|
||||
starting permutation: s2_lock_compression s2_select_count_and_stats s1_compress s2_select_count_and_stats s2_unlock_compression s2_select_count_and_stats
|
||||
step s2_lock_compression:
|
||||
SELECT debug_waitpoint_enable('compression_done_before_truncate_uncompressed');
|
||||
|
||||
debug_waitpoint_enable
|
||||
----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s2_select_count_and_stats:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
chunk_schema |chunk_name |compression_status
|
||||
---------------------+--------------------+------------------
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
(2 rows)
|
||||
|
||||
step s1_compress:
|
||||
SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i;
|
||||
<waiting ...>
|
||||
step s2_select_count_and_stats:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
chunk_schema |chunk_name |compression_status
|
||||
---------------------+--------------------+------------------
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Uncompressed
|
||||
(2 rows)
|
||||
|
||||
step s2_unlock_compression:
|
||||
SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND (locktype = 'advisory' or relation::regclass::text LIKE '%chunk') ORDER BY relation, locktype, mode, granted;
|
||||
SELECT debug_waitpoint_release('compression_done_before_truncate_uncompressed');
|
||||
|
||||
locktype|mode |granted| objid
|
||||
--------+---------+-------+---------
|
||||
advisory|ShareLock|f |113732026
|
||||
(1 row)
|
||||
|
||||
debug_waitpoint_release
|
||||
-----------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
step s1_compress: <... completed>
|
||||
count
|
||||
-----
|
||||
2
|
||||
(1 row)
|
||||
|
||||
step s2_select_count_and_stats:
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
|
||||
count
|
||||
-----
|
||||
16850
|
||||
(1 row)
|
||||
|
||||
chunk_schema |chunk_name |compression_status
|
||||
---------------------+--------------------+------------------
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Compressed
|
||||
_timescaledb_internal|_hyper_X_X_chunk|Compressed
|
||||
(2 rows)
|
||||
|
@ -27,7 +27,7 @@ endif()
|
||||
|
||||
if(CMAKE_BUILD_TYPE MATCHES Debug)
|
||||
list(APPEND TEST_TEMPLATES_MODULE ${TEST_TEMPLATES_MODULE_DEBUG})
|
||||
list(APPEND TEST_FILES compression_chunk_race.spec
|
||||
list(APPEND TEST_FILES compression_chunk_race.spec compression_freeze.spec
|
||||
compression_merge_race.spec
|
||||
decompression_chunk_and_parallel_query_wo_idx.spec)
|
||||
if(PG_VERSION VERSION_GREATER_EQUAL "14.0")
|
||||
|
68
tsl/test/isolation/specs/compression_freeze.spec
Normal file
@ -0,0 +1,68 @@
|
||||
# This file and its contents are licensed under the Timescale License.
|
||||
# Please see the included NOTICE for copyright information and
|
||||
# LICENSE-TIMESCALE for a copy of the license.
|
||||
|
||||
###
|
||||
# This test verifies if the compressed and uncompressed data are seen in parallel. Since
|
||||
# we freeze the compressed data immediately, it becomes visible to all transactions
|
||||
# that are running concurrently. However, parallel transactions should not be able to
|
||||
# see the compressed hypertable in the catalog and query the data two times.
|
||||
###
|
||||
|
||||
setup {
|
||||
CREATE TABLE sensor_data (
|
||||
time timestamptz not null,
|
||||
sensor_id integer not null,
|
||||
cpu double precision null,
|
||||
temperature double precision null);
|
||||
|
||||
-- Create large chunks that take a long time to compress
|
||||
SELECT FROM create_hypertable('sensor_data','time', chunk_time_interval => INTERVAL '14 days');
|
||||
|
||||
INSERT INTO sensor_data
|
||||
SELECT
|
||||
time + (INTERVAL '1 minute' * random()) AS time,
|
||||
sensor_id,
|
||||
random() AS cpu,
|
||||
random()* 100 AS temperature
|
||||
FROM
|
||||
generate_series('2022-01-01', '2022-01-15', INTERVAL '1 hour') AS g1(time),
|
||||
generate_series(1, 50, 1) AS g2(sensor_id)
|
||||
ORDER BY time;
|
||||
|
||||
SELECT count(*) FROM sensor_data;
|
||||
|
||||
ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_segmentby = 'sensor_id');
|
||||
}
|
||||
|
||||
teardown {
|
||||
DROP TABLE sensor_data;
|
||||
}
|
||||
|
||||
session "s1"
|
||||
step "s1_compress" {
|
||||
SELECT count(*) FROM (SELECT compress_chunk(i, if_not_compressed => true) FROM show_chunks('sensor_data') i) i;
|
||||
}
|
||||
|
||||
step "s1_select_count" {
|
||||
SELECT count(*) FROM sensor_data;
|
||||
}
|
||||
|
||||
session "s2"
|
||||
step "s2_select_count_and_stats" {
|
||||
SELECT count(*) FROM sensor_data;
|
||||
SELECT chunk_schema, chunk_name, compression_status FROM chunk_compression_stats('sensor_data') ORDER BY 1, 2, 3;
|
||||
}
|
||||
|
||||
step "s2_lock_compression" {
|
||||
SELECT debug_waitpoint_enable('compression_done_before_truncate_uncompressed');
|
||||
}
|
||||
|
||||
step "s2_unlock_compression" {
|
||||
SELECT locktype, mode, granted, objid FROM pg_locks WHERE not granted AND (locktype = 'advisory' or relation::regclass::text LIKE '%chunk') ORDER BY relation, locktype, mode, granted;
|
||||
SELECT debug_waitpoint_release('compression_done_before_truncate_uncompressed');
|
||||
}
|
||||
|
||||
permutation "s1_select_count" "s2_select_count_and_stats"
|
||||
permutation "s1_select_count" "s1_compress" "s1_select_count" "s2_select_count_and_stats"
|
||||
permutation "s2_lock_compression" "s2_select_count_and_stats" "s1_compress" "s2_select_count_and_stats" "s2_unlock_compression" "s2_select_count_and_stats"
|
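The permutations above only assert on row counts and compression status. A hypothetical low-level spot check, not part of this commit's test, could use the pageinspect extension to confirm that the tuples really were written frozen: a frozen tuple carries both the HEAP_XMIN_COMMITTED and HEAP_XMIN_INVALID infomask bits (0x0300 = 768). The chunk name below is a placeholder.

-- Sketch only: count frozen tuples on the first page of a compressed chunk.
CREATE EXTENSION IF NOT EXISTS pageinspect;
SELECT count(*) AS frozen_tuples
  FROM heap_page_items(get_raw_page('_timescaledb_internal.compress_hyper_X_X_chunk', 0))
 WHERE t_infomask IS NOT NULL          -- skip dead/unused line pointers
   AND (t_infomask & 768) = 768;       -- HEAP_XMIN_COMMITTED | HEAP_XMIN_INVALID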
3
tsl/test/sql/.gitignore
vendored
@ -1,4 +1,5 @@
|
||||
/*.pgbinary
|
||||
/bgw_custom-*.sql
|
||||
/cagg_bgw-*.sql
|
||||
/cagg_ddl-*.sql
|
||||
/cagg_errors_deprecated-*.sql
|
||||
@ -8,6 +9,7 @@
|
||||
/cagg_repair-*.sql
|
||||
/cagg_union_view-*.sql
|
||||
/cagg_usage-*.sql
|
||||
/compression_bgw-*.sql
|
||||
/compression_errors-*.sql
|
||||
/compression_sorted_merge-*.sql
|
||||
/compression_permissions-*.sql
|
||||
@ -29,4 +31,5 @@
|
||||
/remote-copy-*sv
|
||||
/transparent_decompression-*.sql
|
||||
/transparent_decompression_ordered_index-*.sql
|
||||
/telemetry_stats-*.sql
|
||||
/merge_append_partially_compressed-*.sql
|
||||
|
@ -4,7 +4,6 @@ include(GenerateTestSchedule)
|
||||
# so unless you have a good reason, add new test files here.
|
||||
set(TEST_FILES
|
||||
agg_partials_pushdown.sql
|
||||
bgw_custom.sql
|
||||
bgw_security.sql
|
||||
bgw_policy.sql
|
||||
cagg_errors.sql
|
||||
@ -15,7 +14,6 @@ set(TEST_FILES
|
||||
cagg_watermark.sql
|
||||
compressed_collation.sql
|
||||
compression_create_compressed_table.sql
|
||||
compression_bgw.sql
|
||||
compression_conflicts.sql
|
||||
compression_insert.sql
|
||||
compression_policy.sql
|
||||
@ -85,9 +83,6 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
|
||||
recompress_chunk_segmentwise.sql
|
||||
transparent_decompression_join_index.sql
|
||||
feature_flags.sql)
|
||||
if(USE_TELEMETRY)
|
||||
list(APPEND TEST_FILES telemetry_stats.sql)
|
||||
endif()
|
||||
|
||||
if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16")
|
||||
list(
|
||||
@ -177,6 +172,8 @@ if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16")
|
||||
endif()
|
||||
|
||||
set(TEST_TEMPLATES
|
||||
bgw_custom.sql.in
|
||||
compression_bgw.sql.in
|
||||
compression_sorted_merge.sql.in
|
||||
cagg_union_view.sql.in
|
||||
plan_skip_scan.sql.in
|
||||
@ -203,6 +200,9 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
|
||||
continuous_aggs.sql.in
|
||||
continuous_aggs_deprecated.sql.in
|
||||
deparse.sql.in)
|
||||
if(USE_TELEMETRY)
|
||||
list(APPEND TEST_TEMPLATES telemetry_stats.sql.in)
|
||||
endif()
|
||||
if(ENABLE_MULTINODE_TESTS AND ${PG_VERSION_MAJOR} LESS "16")
|
||||
list(
|
||||
APPEND
|
||||
|
@ -715,7 +715,8 @@ ts_compress_table(PG_FUNCTION_ARGS)
|
||||
compress_chunk(in_table,
|
||||
out_table,
|
||||
(const ColumnCompressionInfo **) compression_info->data,
|
||||
compression_info->num_elements);
|
||||
compression_info->num_elements,
|
||||
0 /*insert options*/);
|
||||
|
||||
PG_RETURN_VOID();
|
||||
}
|
||||
|
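For context on the new trailing argument to the compress_chunk() call above: PostgreSQL's heap access method already supports writing tuples pre-frozen through an insert-options bitmask, the same mechanism COPY ... FREEZE relies on. A minimal sketch of that mechanism follows; heap_insert() and HEAP_INSERT_FROZEN are PostgreSQL's own API, while the wrapper name and the assumption that this patch forwards exactly this flag through the new "insert options" parameter are illustrative only.

#include "postgres.h"
#include "access/heapam.h"

/*
 * Sketch only: write a tuple into the compressed chunk so it is frozen at
 * insert time and never needs a later freezing VACUUM pass, similar to
 * COPY FREEZE. The surrounding TimescaleDB plumbing is assumed, not taken
 * from this patch.
 */
static void
insert_compressed_tuple_frozen(Relation compressed_rel, HeapTuple tuple,
                               CommandId cid, BulkInsertState bistate)
{
	int options = HEAP_INSERT_FROZEN;

	heap_insert(compressed_rel, tuple, cid, options, bistate);
}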