Change compress_chunk to recompress partial and unordered chunks when needed

This patch changes compress_chunk to recompress partial or unordered
chunks when needed, so that the result of compress_chunk is always a
fully compressed chunk. Previously, compress_chunk reported such chunks
as already compressed and left them partial or unordered; recompression
had to be requested separately via recompress_chunk.
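
In outline, compress_chunk now picks one of three paths for an already
compressed chunk: report it as already compressed, recompress it
segmentwise when a usable index on the compressed chunk exists, or
decompress and fully recompress it otherwise. Below is a minimal
standalone model of that decision flow; it is a sketch with illustrative
names (compress_chunk_action and its boolean inputs are not the real
TimescaleDB API):

#include <stdbool.h>
#include <stdio.h>

/* Models the branch structure of tsl_compress_chunk after this commit. */
static const char *
compress_chunk_action(bool compressed, bool partial_or_unordered, bool has_index)
{
	if (compressed)
	{
		if (!partial_or_unordered)
			return "NOTICE/ERROR: chunk is already compressed";
		/* segmentwise recompression needs an index on the compressed
		 * chunk; otherwise fall back to decompress + full compression */
		return has_index ? "recompress segmentwise"
						 : "decompress, then compress";
	}
	return "compress";
}

int
main(void)
{
	printf("%s\n", compress_chunk_action(false, false, false)); /* compress */
	printf("%s\n", compress_chunk_action(true, false, true));   /* already compressed */
	printf("%s\n", compress_chunk_action(true, true, true));    /* segmentwise */
	printf("%s\n", compress_chunk_action(true, true, false));   /* decompress + compress */
	return 0;
}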
Sven Klemm 2024-02-04 19:05:48 +01:00 committed by Sven Klemm
parent 525b045839
commit 4118dcaeab
9 changed files with 118 additions and 110 deletions

View File

@@ -4339,6 +4339,13 @@ ts_chunk_is_compressed(const Chunk *chunk)
return ts_flags_are_set_32(chunk->fd.status, CHUNK_STATUS_COMPRESSED);
}
bool
ts_chunk_needs_recompression(const Chunk *chunk)
{
Assert(ts_chunk_is_compressed(chunk));
return ts_chunk_is_partial(chunk) || ts_chunk_is_unordered(chunk);
}
/* Note that only a compressed chunk can have partial flag set */
bool
ts_chunk_is_partial(const Chunk *chunk)
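
The new helper folds the partial and unordered checks into a single
predicate, and its Assert means callers must first establish that the
chunk is compressed (as policy_recompression_execute and
tsl_compress_chunk below do). Here is a standalone model of the
predicate, a sketch assuming flag values inferred from the "status in
(3, 9, 11)" comment later in this commit rather than the real catalog
constants:

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Assumed flag values: 1 = compressed, 2 = compressed_unordered,
 * 8 = compressed_partial (4 = frozen is unused here). */
enum
{
	STATUS_COMPRESSED = 1,
	STATUS_UNORDERED = 2,
	STATUS_PARTIAL = 8
};

static bool
needs_recompression(int status)
{
	/* mirrors the Assert in ts_chunk_needs_recompression */
	assert(status & STATUS_COMPRESSED);
	return (status & (STATUS_UNORDERED | STATUS_PARTIAL)) != 0;
}

int
main(void)
{
	printf("%d\n", needs_recompression(STATUS_COMPRESSED));                  /* 0 */
	printf("%d\n", needs_recompression(STATUS_COMPRESSED | STATUS_PARTIAL)); /* 1 */
	return 0;
}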

View File

@@ -217,6 +217,7 @@ extern TSDLLEXPORT Chunk *ts_chunk_get_compressed_chunk_parent(const Chunk *chun
extern TSDLLEXPORT bool ts_chunk_is_unordered(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_is_partial(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_is_compressed(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_needs_recompression(const Chunk *chunk);
extern TSDLLEXPORT bool ts_chunk_validate_chunk_status_for_operation(const Chunk *chunk,
ChunkOperation cmd,
bool throw_error);

View File

@@ -510,7 +510,7 @@ policy_recompression_execute(int32 job_id, Jsonb *config)
int32 chunkid = lfirst_int(lc);
Chunk *chunk = ts_chunk_get_by_id(chunkid, true);
Assert(chunk);
if (!ts_chunk_is_unordered(chunk) && !ts_chunk_is_partial(chunk))
if (!ts_chunk_needs_recompression(chunk))
continue;
tsl_recompress_chunk_wrapper(chunk);

View File

@@ -56,6 +56,9 @@ typedef struct CompressChunkCxt
Hypertable *compress_ht; /*compressed table for srcht */
} CompressChunkCxt;
static Oid get_compressed_chunk_index_for_recompression(Chunk *uncompressed_chunk);
static Oid recompress_chunk_segmentwise_impl(Chunk *chunk);
static void
compression_chunk_size_catalog_insert(int32 src_chunk_id, const RelationSize *src_size,
int32 compress_chunk_id, const RelationSize *compress_size,
@@ -668,25 +671,6 @@ decompress_chunk_impl(Chunk *uncompressed_chunk, bool if_compressed)
ts_cache_release(hcache);
}
/*
* Set if_not_compressed to true for idempotent operation. Aborts transaction if the chunk is
* already compressed, unless it is running in idempotent mode.
*/
Oid
tsl_compress_chunk_wrapper(Chunk *chunk, bool if_not_compressed)
{
if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
{
ereport((if_not_compressed ? NOTICE : ERROR),
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("chunk \"%s\" is already compressed", get_rel_name(chunk->table_id))));
return chunk->table_id;
}
return compress_chunk_impl(chunk->hypertable_relid, chunk->table_id);
}
/*
* Create a new compressed chunk using existing table with compressed data.
*
@@ -770,14 +754,37 @@ Datum
tsl_compress_chunk(PG_FUNCTION_ARGS)
{
Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
bool if_not_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
bool if_not_compressed = PG_ARGISNULL(1) ? true : PG_GETARG_BOOL(1);
ts_feature_flag_check(FEATURE_HYPERTABLE_COMPRESSION);
TS_PREVENT_FUNC_IF_READ_ONLY();
Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
uncompressed_chunk_id = tsl_compress_chunk_wrapper(chunk, if_not_compressed);
if (ts_chunk_is_compressed(chunk))
{
if (!ts_chunk_needs_recompression(chunk))
{
ereport((if_not_compressed ? NOTICE : ERROR),
(errcode(ERRCODE_DUPLICATE_OBJECT),
errmsg("chunk \"%s\" is already compressed", get_rel_name(chunk->table_id))));
PG_RETURN_OID(uncompressed_chunk_id);
}
if (get_compressed_chunk_index_for_recompression(chunk))
{
uncompressed_chunk_id = recompress_chunk_segmentwise_impl(chunk);
}
else
{
decompress_chunk_impl(chunk, false);
compress_chunk_impl(chunk->hypertable_relid, chunk->table_id);
}
}
else
{
uncompressed_chunk_id = compress_chunk_impl(chunk->hypertable_relid, chunk->table_id);
}
PG_RETURN_OID(uncompressed_chunk_id);
}
@@ -831,7 +838,8 @@ tsl_recompress_chunk_wrapper(Chunk *uncompressed_chunk)
Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_relid, true);
Assert(!ts_chunk_is_compressed(chunk));
tsl_compress_chunk_wrapper(chunk, false);
compress_chunk_impl(chunk->hypertable_relid, chunk->table_id);
return true;
}
@@ -922,6 +930,20 @@ tsl_get_compressed_chunk_index_for_recompression(PG_FUNCTION_ARGS)
Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
Oid index_oid = get_compressed_chunk_index_for_recompression(uncompressed_chunk);
if (OidIsValid(index_oid))
{
PG_RETURN_OID(index_oid);
}
else
PG_RETURN_NULL();
}
static Oid
get_compressed_chunk_index_for_recompression(Chunk *uncompressed_chunk)
{
Chunk *compressed_chunk = ts_chunk_get_by_id(uncompressed_chunk->fd.compressed_chunk_id, true);
Relation uncompressed_chunk_rel = table_open(uncompressed_chunk->table_id, ShareLock);
@@ -937,12 +959,7 @@ tsl_get_compressed_chunk_index_for_recompression(PG_FUNCTION_ARGS)
table_close(compressed_chunk_rel, NoLock);
table_close(uncompressed_chunk_rel, NoLock);
if (OidIsValid(index_oid))
{
PG_RETURN_OID(index_oid);
}
else
PG_RETURN_NULL();
return index_oid;
}
/*
@@ -1074,24 +1091,32 @@ Datum
tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
{
Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
bool if_not_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
bool if_not_compressed = PG_ARGISNULL(1) ? true : PG_GETARG_BOOL(1);
ts_feature_flag_check(FEATURE_HYPERTABLE_COMPRESSION);
TS_PREVENT_FUNC_IF_READ_ONLY();
Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
int32 status = uncompressed_chunk->fd.status;
if (status == CHUNK_STATUS_DEFAULT)
elog(ERROR, "call compress_chunk instead of recompress_chunk");
if (status == CHUNK_STATUS_COMPRESSED)
if (!ts_chunk_needs_recompression(chunk))
{
int elevel = if_not_compressed ? NOTICE : ERROR;
elog(elevel,
"nothing to recompress in chunk %s.%s",
NameStr(uncompressed_chunk->fd.schema_name),
NameStr(uncompressed_chunk->fd.table_name));
NameStr(chunk->fd.schema_name),
NameStr(chunk->fd.table_name));
}
else
{
uncompressed_chunk_id = recompress_chunk_segmentwise_impl(chunk);
}
PG_RETURN_OID(uncompressed_chunk_id);
}
static Oid
recompress_chunk_segmentwise_impl(Chunk *uncompressed_chunk)
{
Oid uncompressed_chunk_id = uncompressed_chunk->table_id;
/*
* only proceed if status in (3, 9, 11)
@@ -1100,11 +1125,11 @@ tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS)
* 4: frozen
* 8: compressed_partial
*/
if (!(ts_chunk_is_compressed(uncompressed_chunk) &&
(ts_chunk_is_unordered(uncompressed_chunk) || ts_chunk_is_partial(uncompressed_chunk))))
if (!(ts_chunk_is_compressed(uncompressed_chunk) &&
ts_chunk_needs_recompression(uncompressed_chunk)))
elog(ERROR,
"unexpected chunk status %d in chunk %s.%s",
status,
uncompressed_chunk->fd.status,
NameStr(uncompressed_chunk->fd.schema_name),
NameStr(uncompressed_chunk->fd.table_name));
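
The "(3, 9, 11)" values in the status comment above are bitwise
combinations of the chunk status flags. A short worked check, using the
same assumed flag values as the earlier sketch (1 = compressed and
2 = compressed_unordered are inferred from the listed values; 4 = frozen
and 8 = compressed_partial are stated in the comment):

#include <stdio.h>

#define COMPRESSED 1
#define UNORDERED  2
#define PARTIAL    8

int
main(void)
{
	printf("%d\n", COMPRESSED | UNORDERED);            /* 3 */
	printf("%d\n", COMPRESSED | PARTIAL);              /* 9 */
	printf("%d\n", COMPRESSED | UNORDERED | PARTIAL);  /* 11 */
	return 0;
}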

View File

@@ -12,7 +12,6 @@ extern Datum tsl_create_compressed_chunk(PG_FUNCTION_ARGS);
extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
extern Datum tsl_recompress_chunk(PG_FUNCTION_ARGS);
extern Oid tsl_compress_chunk_wrapper(Chunk *chunk, bool if_not_compressed);
extern bool tsl_recompress_chunk_wrapper(Chunk *chunk);
extern Datum tsl_recompress_chunk_segmentwise(PG_FUNCTION_ARGS);

View File

@@ -2178,7 +2178,6 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
-- compress them
SELECT compress_chunk(c, if_not_compressed=>true) FROM show_chunks('space_part') c;
NOTICE: chunk "_hyper_35_133_chunk" is already compressed
NOTICE: chunk "_hyper_35_134_chunk" is already compressed
compress_chunk
-------------------------------------------
@@ -2194,15 +2193,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
--------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on space_part
Order: space_part."time"
-> Merge Append
Sort Key: _hyper_35_133_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: compress_hyper_36_135_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_135_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: _hyper_35_133_chunk."time"
-> Seq Scan on _hyper_35_133_chunk
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_134_chunk
-> Sort
Sort Key: compress_hyper_36_136_chunk._ts_meta_sequence_num DESC
@@ -2210,14 +2204,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
-> Merge Append
Sort Key: _hyper_35_137_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_137_chunk
-> Sort
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_138_chunk
-> Sort
Sort Key: compress_hyper_36_140_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_140_chunk
(25 rows)
-> Custom Scan (DecompressChunk) on _hyper_35_138_chunk
-> Sort
Sort Key: compress_hyper_36_141_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_141_chunk
(20 rows)
-- make second one of them partial
insert into space_part values
@@ -2228,15 +2222,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
--------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on space_part
Order: space_part."time"
-> Merge Append
Sort Key: _hyper_35_133_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: compress_hyper_36_135_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_135_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: _hyper_35_133_chunk."time"
-> Seq Scan on _hyper_35_133_chunk
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_134_chunk
-> Sort
Sort Key: compress_hyper_36_136_chunk._ts_meta_sequence_num DESC
@@ -2244,19 +2233,19 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
-> Merge Append
Sort Key: _hyper_35_137_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_137_chunk
-> Sort
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_138_chunk
-> Sort
Sort Key: compress_hyper_36_140_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_140_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_138_chunk
-> Sort
Sort Key: compress_hyper_36_141_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_141_chunk
-> Sort
Sort Key: _hyper_35_138_chunk."time"
-> Sort
Sort Key: _hyper_35_138_chunk."time"
-> Seq Scan on _hyper_35_138_chunk
(30 rows)
(25 rows)
-- make other one partial too
INSERT INTO space_part VALUES
@@ -2266,15 +2255,10 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
--------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on space_part
Order: space_part."time"
-> Merge Append
Sort Key: _hyper_35_133_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: compress_hyper_36_135_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_135_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_133_chunk
-> Sort
Sort Key: _hyper_35_133_chunk."time"
-> Seq Scan on _hyper_35_133_chunk
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_134_chunk
-> Sort
Sort Key: compress_hyper_36_136_chunk._ts_meta_sequence_num DESC
@@ -2283,8 +2267,8 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
Sort Key: _hyper_35_137_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_35_137_chunk
-> Sort
Sort Key: compress_hyper_36_139_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_139_chunk
Sort Key: compress_hyper_36_140_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_140_chunk
-> Sort
Sort Key: _hyper_35_137_chunk."time"
-> Sort
@@ -2292,14 +2276,14 @@ EXPLAIN (COSTS OFF) SELECT * FROM space_part ORDER BY time;
-> Seq Scan on _hyper_35_137_chunk
-> Custom Scan (DecompressChunk) on _hyper_35_138_chunk
-> Sort
Sort Key: compress_hyper_36_140_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_140_chunk
Sort Key: compress_hyper_36_141_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_36_141_chunk
-> Sort
Sort Key: _hyper_35_138_chunk."time"
-> Sort
Sort Key: _hyper_35_138_chunk."time"
-> Seq Scan on _hyper_35_138_chunk
(35 rows)
(30 rows)
-- test creation of unique expression index does not interfere with enabling compression
-- github issue 6205
@@ -2327,14 +2311,14 @@ values ('meter1', 1, 2.3, '2022-01-01'::timestamptz, '2022-01-01'::timestamptz),
select compress_chunk(show_chunks('mytab'));
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_37_141_chunk
_timescaledb_internal._hyper_37_142_chunk
(1 row)
REINDEX TABLE mytab; -- should update index
select decompress_chunk(show_chunks('mytab'));
decompress_chunk
-------------------------------------------
_timescaledb_internal._hyper_37_141_chunk
_timescaledb_internal._hyper_37_142_chunk
(1 row)
\set EXPLAIN 'EXPLAIN (costs off,timing off,summary off)'
@@ -2345,7 +2329,7 @@ set enable_indexscan = on;
:EXPLAIN_ANALYZE select * from mytab where lower(col1::text) = 'meter1';
QUERY PLAN
--------------------------------------------------------------------------------------------------
Index Scan using _hyper_37_141_chunk_myidx_unique on _hyper_37_141_chunk (actual rows=3 loops=1)
Index Scan using _hyper_37_142_chunk_myidx_unique on _hyper_37_142_chunk (actual rows=3 loops=1)
Index Cond: (lower((col1)::text) = 'meter1'::text)
(2 rows)
@@ -2363,19 +2347,19 @@ WHERE (value > 2.4 AND value < 3);
select compress_chunk(show_chunks('mytab'));
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_37_141_chunk
_timescaledb_internal._hyper_37_142_chunk
(1 row)
select decompress_chunk(show_chunks('mytab'));
decompress_chunk
-------------------------------------------
_timescaledb_internal._hyper_37_141_chunk
_timescaledb_internal._hyper_37_142_chunk
(1 row)
:EXPLAIN_ANALYZE SELECT * FROM mytab WHERE value BETWEEN 2.4 AND 2.8;
QUERY PLAN
---------------------------------------------------------------------------------------
Seq Scan on _hyper_37_141_chunk (actual rows=1 loops=1)
Seq Scan on _hyper_37_142_chunk (actual rows=1 loops=1)
Filter: ((value >= '2.4'::double precision) AND (value <= '2.8'::double precision))
Rows Removed by Filter: 2
(3 rows)
@@ -2422,14 +2406,14 @@ alter table hyper_unique_deferred set (timescaledb.compress);
select compress_chunk(show_chunks('hyper_unique_deferred')); -- also worked fine before 2.11.0
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_40_145_chunk
_timescaledb_internal._hyper_40_146_chunk
(1 row)
select decompress_chunk(show_chunks('hyper_unique_deferred'));
decompress_chunk
-------------------------------------------
_timescaledb_internal._hyper_40_145_chunk
_timescaledb_internal._hyper_40_146_chunk
(1 row)
begin; insert INTO hyper_unique_deferred values (1257987700000000000, 'dev1', 1); abort;
ERROR: new row for relation "_hyper_40_145_chunk" violates check constraint "hyper_unique_deferred_sensor_1_check"
ERROR: new row for relation "_hyper_40_146_chunk" violates check constraint "hyper_unique_deferred_sensor_1_check"

View File

@@ -714,7 +714,6 @@ ALTER TABLE trigger_test ADD COLUMN addcoli integer;
INSERT INTO trigger_test(time, device, value, addcolv, addcoli)
VALUES ( '2010-01-01', 10, 10, 'ten', 222);
SELECT compress_chunk(c, true) FROM show_chunks('trigger_test') c;
NOTICE: chunk "_hyper_11_15_chunk" is already compressed
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_11_15_chunk
@@ -866,7 +865,6 @@ INSERT INTO test_ordering VALUES (23), (24), (115) RETURNING tableoid::regclass,
(3 rows)
SELECT compress_chunk(format('%I.%I',chunk_schema,chunk_name), true) FROM timescaledb_information.chunks WHERE hypertable_name = 'test_ordering';
NOTICE: chunk "_hyper_13_20_chunk" is already compressed
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_13_20_chunk
@@ -875,24 +873,19 @@ NOTICE: chunk "_hyper_13_20_chunk" is already compressed
-- should be ordered append
:PREFIX SELECT * FROM test_ordering ORDER BY 1;
QUERY PLAN
-------------------------------------------------------------------------------------
QUERY PLAN
-------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on test_ordering
Order: test_ordering."time"
-> Merge Append
Sort Key: _hyper_13_20_chunk."time"
-> Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-> Sort
Sort Key: compress_hyper_14_21_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_14_21_chunk
-> Sort
Sort Key: _hyper_13_20_chunk."time"
-> Seq Scan on _hyper_13_20_chunk
-> Custom Scan (DecompressChunk) on _hyper_13_22_chunk
-> Custom Scan (DecompressChunk) on _hyper_13_20_chunk
-> Sort
Sort Key: compress_hyper_14_23_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_14_23_chunk
(15 rows)
-> Custom Scan (DecompressChunk) on _hyper_13_22_chunk
-> Sort
Sort Key: compress_hyper_14_24_chunk._ts_meta_sequence_num DESC
-> Seq Scan on compress_hyper_14_24_chunk
(10 rows)
SET timescaledb.enable_decompression_sorted_merge = 1;
-- TEST cagg triggers with insert into compressed chunk
@@ -926,7 +919,7 @@ ALTER TABLE conditions SET (timescaledb.compress);
SELECT compress_chunk(ch) FROM show_chunks('conditions') ch;
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_15_24_chunk
_timescaledb_internal._hyper_15_25_chunk
(1 row)
SELECT chunk_name, range_start, range_end, is_compressed
@@ -934,7 +927,7 @@ FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions';
chunk_name | range_start | range_end | is_compressed
--------------------+------------------------------+------------------------------+---------------
_hyper_15_24_chunk | Wed Dec 30 16:00:00 2009 PST | Wed Jan 06 16:00:00 2010 PST | t
_hyper_15_25_chunk | Wed Dec 30 16:00:00 2009 PST | Wed Jan 06 16:00:00 2010 PST | t
(1 row)
--now insert into compressed chunk

View File

@@ -1197,7 +1197,6 @@ select count(distinct a) from text_table;
(1 row)
select count(compress_chunk(x, true)) from show_chunks('text_table') x;
NOTICE: chunk "_hyper_9_17_chunk" is already compressed
count
-------
1
@@ -1205,6 +1204,7 @@ NOTICE: chunk "_hyper_9_17_chunk" is already compressed
select format('call recompress_chunk(''%s'')', x) from show_chunks('text_table') x \gexec
call recompress_chunk('_timescaledb_internal._hyper_9_17_chunk')
NOTICE: nothing to recompress in chunk "_hyper_9_17_chunk"
set timescaledb.enable_bulk_decompression to on;
set timescaledb.debug_require_vector_qual to 'forbid';
select sum(length(a)) from text_table;

View File

@@ -48,7 +48,6 @@ compression_status
Compressed
(1 row)
s1: NOTICE: chunk "_hyper_X_X_chunk" is already compressed
count
-----
1