Mirror of https://github.com/timescale/timescaledb.git
Add compressed table size to existing views
Some information views report hypertable sizes. Include compressed table size in the calculation when applicable.
commit 87786f1520
parent 940d5aa3ac
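As a usage sketch (not part of the commit), the effect of the change can be seen by querying the updated view; this assumes a hypertable named conditions, as in the tests below, that has at least one compressed chunk:

-- Sketch: the reported sizes now fold in the compressed chunks' heap,
-- index, and toast bytes wherever compression has been used.
SELECT table_name, num_chunks, table_size, index_size, toast_size, total_size
FROM timescaledb_information.hypertable
WHERE table_name = 'conditions';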
@@ -6,7 +6,8 @@ CREATE SCHEMA IF NOT EXISTS timescaledb_information;
 
 -- Convenience view to list all hypertables and their space usage
 CREATE OR REPLACE VIEW timescaledb_information.hypertable AS
-  SELECT ht.schema_name AS table_schema,
+  WITH ht_size as (
+  SELECT ht.id, ht.schema_name AS table_schema,
          ht.table_name,
          t.tableowner AS table_owner,
          ht.num_dimensions,
@@ -14,15 +15,36 @@ CREATE OR REPLACE VIEW timescaledb_information.hypertable AS
           FROM _timescaledb_catalog.chunk ch
           WHERE ch.hypertable_id=ht.id
          ) AS num_chunks,
-         size.table_size,
-         size.index_size,
-         size.toast_size,
-         size.total_size
+         bsize.table_bytes,
+         bsize.index_bytes,
+         bsize.toast_bytes,
+         bsize.total_bytes
   FROM _timescaledb_catalog.hypertable ht
        LEFT OUTER JOIN pg_tables t ON ht.table_name=t.tablename AND ht.schema_name=t.schemaname
-       LEFT OUTER JOIN LATERAL @extschema@.hypertable_relation_size_pretty(
+       LEFT OUTER JOIN LATERAL @extschema@.hypertable_relation_size(
          CASE WHEN has_schema_privilege(ht.schema_name,'USAGE') THEN format('%I.%I',ht.schema_name,ht.table_name) ELSE NULL END
-       ) size ON true;
+       ) bsize ON true
+  ),
+  compht_size as
+  (
+   select srcht.id,
+          sum(map.compressed_heap_size) as heap_bytes,
+          sum(map.compressed_index_size) as index_bytes,
+          sum(map.compressed_toast_size) as toast_bytes,
+          sum(map.compressed_heap_size) + sum(map.compressed_toast_size) + sum(map.compressed_index_size) as total_bytes
+   FROM _timescaledb_catalog.chunk srcch, _timescaledb_catalog.compression_chunk_size map,
+        _timescaledb_catalog.hypertable srcht
+   where map.chunk_id = srcch.id and srcht.id = srcch.hypertable_id
+   group by srcht.id
+  )
+  select hts.table_schema, hts.table_name, hts.table_owner,
+         hts.num_dimensions, hts.num_chunks,
+         pg_size_pretty( COALESCE(hts.table_bytes + compht_size.heap_bytes, hts.table_bytes)) as table_size,
+         pg_size_pretty( COALESCE(hts.index_bytes + compht_size.index_bytes, hts.index_bytes, compht_size.index_bytes)) as index_size,
+         pg_size_pretty( COALESCE(hts.toast_bytes + compht_size.toast_bytes, hts.toast_bytes, compht_size.toast_bytes)) as toast_size,
+         pg_size_pretty( COALESCE(hts.total_bytes + compht_size.total_bytes, hts.total_bytes)) as total_size
+  FROM ht_size hts LEFT OUTER JOIN compht_size
+  ON hts.id = compht_size.id;
 
 CREATE OR REPLACE VIEW timescaledb_information.license AS
   SELECT _timescaledb_internal.license_edition() as edition,
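The final SELECT uses COALESCE so that a hypertable with no compressed chunks, where the LEFT OUTER JOIN against compht_size yields NULLs, still reports its plain sizes. A standalone illustration of that fall-through, with made-up byte values:

-- NULL propagates through addition, so COALESCE falls back to the
-- uncompressed byte count when there is no compht_size row.
SELECT pg_size_pretty(COALESCE(16384 + NULL::bigint, 16384)) AS no_compressed_chunks;    -- 16 kB
SELECT pg_size_pretty(COALESCE(16384 + 8192, 16384))         AS with_compressed_chunks;  -- 24 kB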
@@ -393,6 +393,35 @@ compressed_index_bytes | 32 kB
 compressed_toast_bytes | 16 kB
 compressed_total_bytes | 64 kB
 
+vacuum full foo;
+vacuum full conditions;
+-- After vacuum, table_bytes is 0, but any associated index/toast storage is not
+-- completely reclaimed. Sets it at 8K (page size). So a chunk which has
+-- been compressed still incurs an overhead of n * 8KB (for every index + toast table) storage on the original uncompressed chunk.
+select * from timescaledb_information.hypertable
+where table_name like 'foo' or table_name like 'conditions'
+order by table_name;
+-[ RECORD 1 ]--+------------------
+table_schema   | public
+table_name     | conditions
+table_owner    | default_perm_user
+num_dimensions | 1
+num_chunks     | 2
+table_size     | 16 kB
+index_size     | 48 kB
+toast_size     | 32 kB
+total_size     | 96 kB
+-[ RECORD 2 ]--+------------------
+table_schema   | public
+table_name     | foo
+table_owner    | default_perm_user
+num_dimensions | 1
+num_chunks     | 4
+table_size     | 32 kB
+index_size     | 144 kB
+toast_size     | 8192 bytes
+total_size     | 184 kB
+
 \x
 select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
 FROM _timescaledb_catalog.chunk ch1, _timescaledb_catalog.hypertable ht where ch1.hypertable_id = ht.id and ht.table_name like 'conditions';
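The test comment above notes that VACUUM FULL leaves roughly one 8 kB page per index and toast relation on the emptied uncompressed chunk. A hedged way to check that directly; the chunk name _timescaledb_internal._hyper_1_1_chunk is a placeholder, so look the real name up in _timescaledb_catalog.chunk first:

-- Heap should be 0 bytes after VACUUM FULL on a compressed chunk;
-- each index keeps about one page, and the remainder approximates toast.
SELECT pg_relation_size('_timescaledb_internal._hyper_1_1_chunk')       AS heap_bytes,
       pg_indexes_size('_timescaledb_internal._hyper_1_1_chunk')        AS index_bytes,
       pg_total_relation_size('_timescaledb_internal._hyper_1_1_chunk')
         - pg_relation_size('_timescaledb_internal._hyper_1_1_chunk')
         - pg_indexes_size('_timescaledb_internal._hyper_1_1_chunk')    AS approx_toast_bytes;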
@@ -155,6 +155,14 @@ where hypertable_name::text like 'conditions'
 order by hypertable_name, chunk_name;
 select * from timescaledb_information.compressed_hypertable_stats
 order by hypertable_name;
+vacuum full foo;
+vacuum full conditions;
+-- After vacuum, table_bytes is 0, but any associated index/toast storage is not
+-- completely reclaimed. Sets it at 8K (page size). So a chunk which has
+-- been compressed still incurs an overhead of n * 8KB (for every index + toast table) storage on the original uncompressed chunk.
+select * from timescaledb_information.hypertable
+where table_name like 'foo' or table_name like 'conditions'
+order by table_name;
 \x
 
 select decompress_chunk(ch1.schema_name|| '.' || ch1.table_name)
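When checking numbers in tests like the ones above, the per-hypertable compressed totals computed by the view's compht_size CTE can also be run standalone against the catalog; a small sketch:

-- Same aggregation as the compht_size CTE: one row per hypertable
-- that has at least one compressed chunk.
SELECT srcch.hypertable_id,
       sum(map.compressed_heap_size)  AS heap_bytes,
       sum(map.compressed_index_size) AS index_bytes,
       sum(map.compressed_toast_size) AS toast_bytes
FROM _timescaledb_catalog.compression_chunk_size map
JOIN _timescaledb_catalog.chunk srcch ON map.chunk_id = srcch.id
GROUP BY srcch.hypertable_id;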