Mirror of https://github.com/timescale/timescaledb.git
Add test for statistics on compressed chunks
Add a test for updating statistics on chunks that were compressed before row-count statistics were kept. This patch also sets that metadata for previously compressed chunks, so running ANALYZE on them sets the correct tuple count.
parent 7f3feb8200
commit 7fe74d4f07
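As a rough illustration of the behaviour described in the commit message (a sketch, not part of this diff; the hypertable name `metrics` is an assumed placeholder), the intent is that ANALYZE on a hypertable compressed before this metadata existed now yields a realistic row count:

-- Hypothetical check, not part of this diff: after upgrading, re-analyze a
-- hypertable that was compressed before the row-count metadata was kept and
-- compare the planner-based estimate against an exact count.
-- 'metrics' is an assumed hypertable name.
ANALYZE metrics;
SELECT approximate_row_count('metrics') AS estimated_rows;
SELECT count(*) AS exact_rows FROM metrics;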
@@ -29,7 +29,6 @@ AS SELECT * from _timescaledb_catalog.hypertable;
ALTER TABLE _timescaledb_catalog.continuous_aggs_invalidation_threshold DROP CONSTRAINT continuous_aggs_invalidation_threshold_hypertable_id_fkey ;
ALTER TABLE _timescaledb_catalog.hypertable_compression DROP CONSTRAINT hypertable_compression_hypertable_id_fkey ;

CREATE TABLE tmp_hypertable_seq_value AS
SELECT last_value, is_called FROM _timescaledb_catalog.hypertable_id_seq;
ALTER EXTENSION timescaledb DROP TABLE _timescaledb_catalog.hypertable;
@@ -124,3 +123,37 @@ DROP TABLE tmp_hypertable_seq_value;
GRANT SELECT ON _timescaledb_catalog.hypertable_id_seq TO PUBLIC;
GRANT SELECT ON _timescaledb_catalog.hypertable TO PUBLIC;
--End Modify hypertable table
+
+-- update metadata for chunks compressed before 2.0
+DO $$
+DECLARE
+    plain_chunk RECORD;
+    comp_chunk TEXT;
+    rowcount_pre BIGINT;
+    rowcount_post BIGINT;
+BEGIN
+    FOR plain_chunk IN
+    SELECT
+        *
+    FROM
+        _timescaledb_catalog.chunk comp
+    WHERE
+        compressed_chunk_id IS NOT NULL LOOP
+            SELECT
+                format('%I.%I', schema_name, table_name) INTO comp_chunk
+            FROM
+                _timescaledb_catalog.chunk
+            WHERE
+                id = plain_chunk.compressed_chunk_id;
+            EXECUTE format('SELECT sum(_ts_meta_count), count(*) FROM %s', comp_chunk) INTO rowcount_pre, rowcount_post;
+            UPDATE
+                _timescaledb_catalog.compression_chunk_size
+            SET
+                numrows_pre_compression = rowcount_pre,
+                numrows_post_compression = rowcount_post
+            WHERE
+                chunk_id = plain_chunk.id;
+    END LOOP;
+END
+$$;
+
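For reference, the values that the DO block above backfills live in _timescaledb_catalog.compression_chunk_size; a minimal inspection query along these lines (a sketch, not part of this diff) shows the per-chunk row counts it sets:

-- Hypothetical inspection query, not part of this diff: list the row-count
-- metadata filled in for each previously compressed chunk.
SELECT c.schema_name,
       c.table_name,
       s.numrows_pre_compression,
       s.numrows_post_compression
FROM _timescaledb_catalog.compression_chunk_size s
JOIN _timescaledb_catalog.chunk c ON c.id = s.chunk_id
ORDER BY c.schema_name, c.table_name;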
@@ -8,9 +8,42 @@ INSERT INTO compress
SELECT g, 'QW', g::text, 2, 0, (100,4)::custom_type_for_compression, false
FROM generate_series('2019-11-01 00:00'::timestamp, '2019-12-15 00:00'::timestamp, '1 day') g;

-SELECT count(compress_chunk(chunk.schema_name|| '.' || chunk.table_name)) as count_compressed
-FROM _timescaledb_catalog.chunk chunk
+SELECT
+    count(compress_chunk(chunk.schema_name || '.' || chunk.table_name)) AS count_compressed
+FROM
+    _timescaledb_catalog.chunk chunk
INNER JOIN _timescaledb_catalog.hypertable hypertable ON (chunk.hypertable_id = hypertable.id)
-WHERE hypertable.table_name = 'compress' and chunk.compressed_chunk_id IS NULL;
+WHERE
+    hypertable.table_name = 'compress'
+    AND chunk.compressed_chunk_id IS NULL;

SELECT * FROM compress ORDER BY time DESC, small_cardinality;
+
+-- check count and approximate_row_count are the same after analyze
+ANALYZE compress;
+SELECT
+    count,
+    approximate,
+    CASE WHEN count != approximate THEN
+        'counts not matching' || random()::TEXT
+    ELSE
+        'match'
+    END AS MATCH
+FROM (
+    SELECT
+        count(*)
+    FROM
+        compress) AS count,
+    approximate_row_count('compress') AS approximate;
+
+SELECT
+    hypertable_schema,
+    hypertable_name,
+    approximate_row_count(format('%I.%I', hypertable_schema, hypertable_name)::REGCLASS)
+FROM
+    timescaledb_information.hypertables
+WHERE
+    compression_enabled = true
+ORDER BY
+    1,
+    2;
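The test above relies on approximate_row_count() agreeing with an exact count once ANALYZE has refreshed statistics. Assuming the estimate is driven by the per-relation reltuples statistics that ANALYZE maintains (an assumption, not stated in this diff), the per-chunk inputs for the test's `compress` hypertable could be inspected with a sketch like this:

-- Illustrative only, not part of this diff: per-chunk reltuples for the
-- 'compress' hypertable used by the test, i.e. the statistics that ANALYZE
-- refreshes and that the estimate is assumed to aggregate.
SELECT ch.schema_name,
       ch.table_name,
       cl.reltuples
FROM _timescaledb_catalog.chunk ch
JOIN _timescaledb_catalog.hypertable ht ON ht.id = ch.hypertable_id
JOIN pg_class cl ON cl.relname = ch.table_name
JOIN pg_namespace ns ON ns.oid = cl.relnamespace AND ns.nspname = ch.schema_name
WHERE ht.table_name = 'compress'
ORDER BY ch.table_name;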