Mirror of https://github.com/timescale/timescaledb.git (synced 2025-05-16 18:43:18 +08:00)
Make the bulk decompression function depend on PG type
This is a refactoring to enable bulk decompression of array- and dictionary-compressed text columns, but not other types. It currently has no effect.
This commit is contained in:
commit 3d8ec1e43d
parent 37ece578df
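The patch only threads the new Oid type argument through the lookup; the type is not consulted yet. As a rough illustration of where this is headed, a type-aware lookup could later gate the bulk path of the array and dictionary algorithms on the column type, along the lines of the sketch below. Only the signature matches the patch; the switch body and the *_placeholder helpers are invented for illustration and are not TimescaleDB's actual dispatch.

/* Placeholder prototypes, invented for this sketch; the parameter list
 * mirrors how decompress_all is called in the patch (Datum, Oid, MemoryContext). */
static ArrowArray *decompress_text_all_placeholder(Datum compressed, Oid element_type,
                                                   MemoryContext dest_mctx);
static ArrowArray *decompress_numeric_all_placeholder(Datum compressed, Oid element_type,
                                                      MemoryContext dest_mctx);

/* Hypothetical type-aware lookup; a sketch, not the real implementation. */
DecompressAllFunction
tsl_get_decompress_all_function(CompressionAlgorithm algorithm, Oid type)
{
    if (algorithm >= _END_COMPRESSION_ALGORITHMS)
        elog(ERROR, "invalid compression algorithm %d", algorithm);

    switch (algorithm)
    {
        case COMPRESSION_ALGORITHM_ARRAY:
        case COMPRESSION_ALGORITHM_DICTIONARY:
            /* Bulk decompression would only be offered for text columns. */
            return type == TEXTOID ? decompress_text_all_placeholder : NULL;
        default:
            /* The arithmetic algorithms would not depend on the PG type here. */
            return decompress_numeric_all_placeholder;
    }
}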
@@ -130,7 +130,7 @@ DecompressionIterator *(*tsl_get_decompression_iterator_init(CompressionAlgorith
 }
 
 DecompressAllFunction
-tsl_get_decompress_all_function(CompressionAlgorithm algorithm)
+tsl_get_decompress_all_function(CompressionAlgorithm algorithm, Oid type)
 {
     if (algorithm >= _END_COMPRESSION_ALGORITHMS)
         elog(ERROR, "invalid compression algorithm %d", algorithm);
@@ -320,7 +320,8 @@ extern void decompress_chunk(Oid in_table, Oid out_table);
 extern DecompressionIterator *(*tsl_get_decompression_iterator_init(
     CompressionAlgorithm algorithm, bool reverse))(Datum, Oid element_type);
 
-extern DecompressAllFunction tsl_get_decompress_all_function(CompressionAlgorithm algorithm);
+extern DecompressAllFunction tsl_get_decompress_all_function(CompressionAlgorithm algorithm,
+                                                             Oid type);
 
 typedef struct Chunk Chunk;
 typedef struct ChunkInsertState ChunkInsertState;
@@ -42,7 +42,7 @@ FUNCTION_NAME(ALGO, CTYPE)(const uint8 *Data, size_t Size, bool extra_checks)
         * For routine fuzzing, we only run bulk decompression to make it faster
         * and the coverage space smaller.
         */
-        DecompressAllFunction decompress_all = tsl_get_decompress_all_function(algo);
+        DecompressAllFunction decompress_all = tsl_get_decompress_all_function(algo, PGTYPE);
        decompress_all(compressed_data, PGTYPE, CurrentMemoryContext);
        return 0;
    }
@@ -53,7 +53,7 @@ FUNCTION_NAME(ALGO, CTYPE)(const uint8 *Data, size_t Size, bool extra_checks)
     * the row-by-row is old and stable.
     */
    ArrowArray *arrow = NULL;
-   DecompressAllFunction decompress_all = tsl_get_decompress_all_function(algo);
+   DecompressAllFunction decompress_all = tsl_get_decompress_all_function(algo, PGTYPE);
    if (decompress_all)
    {
        arrow = decompress_all(compressed_data, PGTYPE, CurrentMemoryContext);
@@ -123,7 +123,8 @@ decompress_column(DecompressContext *dcontext, DecompressBatchState *batch_state
    }
 
    DecompressAllFunction decompress_all =
-       tsl_get_decompress_all_function(header->compression_algorithm);
+       tsl_get_decompress_all_function(header->compression_algorithm,
+                                       column_description->typid);
    Assert(decompress_all != NULL);
 
    MemoryContext context_before_decompression =
@@ -609,7 +609,8 @@ perform_vectorized_sum_int4(DecompressChunkState *chunk_state, Aggref *aggref)
    ArrowArray *arrow = NULL;
 
    DecompressAllFunction decompress_all =
-       tsl_get_decompress_all_function(header->compression_algorithm);
+       tsl_get_decompress_all_function(header->compression_algorithm,
+                                       column_description->typid);
    Assert(decompress_all != NULL);
 
    MemoryContext context_before_decompression =
@@ -211,14 +211,22 @@ build_decompression_map(PlannerInfo *root, DecompressChunkPath *path, List *scan
            lappend_int(path->decompression_map, destination_attno_in_uncompressed_chunk);
        path->is_segmentby_column = lappend_int(path->is_segmentby_column, is_segment);
 
+       /*
+        * Determine if we can use bulk decompression for this column.
+        */
        Oid typoid = get_atttype(path->info->chunk_rte->relid, chunk_attno);
        const bool bulk_decompression_possible =
            !is_segment && destination_attno_in_uncompressed_chunk > 0 &&
-           tsl_get_decompress_all_function(compression_get_default_algorithm(typoid)) != NULL;
+           tsl_get_decompress_all_function(compression_get_default_algorithm(typoid), typoid) !=
+           NULL;
        path->have_bulk_decompression_columns |= bulk_decompression_possible;
        path->bulk_decompression_column =
            lappend_int(path->bulk_decompression_column, bulk_decompression_possible);
 
+       /*
+        * Save information about decompressed columns in uncompressed chunk
+        * for planning of vectorized filters.
+        */
        if (destination_attno_in_uncompressed_chunk > 0)
        {
            path->uncompressed_chunk_attno_to_compression_info
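For reference, the planner-side check in the last hunk boils down to probing the lookup with the column's type. A hypothetical standalone helper expressing the same condition (the helper name is invented; both called functions appear in the patch):

/* Hypothetical helper: a column qualifies for bulk decompression only if a
 * bulk function exists for the default compression algorithm of its PG type. */
static bool
bulk_decompression_possible_for_type(Oid typoid)
{
    return tsl_get_decompress_all_function(compression_get_default_algorithm(typoid), typoid) !=
           NULL;
}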