Remove restrict from const objects (#6791)
We don't really need it if we systematically use restrict on the read/write objects. This is a minor refactoring to avoid confusion; it shouldn't change any behavior or code generation.
This commit is contained in:
parent
25af8f4741
commit
2a30ca428d
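For context, a minimal sketch of the aliasing argument behind the change (illustrative only, not code from the patch; the function and names are hypothetical): keeping `restrict` on the pointer that is written through is what lets the compiler assume the output does not overlap the inputs, so adding `restrict` to a read-only `const` pointer on top of that buys nothing.

	#include <stdint.h>

	/*
	 * Hypothetical example: `out` keeps restrict, so any overlap between the
	 * written region and `in` would already violate the restrict contract.
	 * Marking the const input restrict as well would not enable anything more.
	 */
	static void
	zigzag_decode(uint64_t *restrict out, const uint64_t *in, uint32_t n)
	{
		for (uint32_t i = 0; i < n; i++)
		{
			/* Standard zigzag decoding: 0, 1, 2, 3, ... -> 0, -1, 1, -2, ... */
			out[i] = (in[i] >> 1) ^ (0 - (in[i] & 1));
		}
	}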
@@ -494,7 +494,7 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
 	Simple8bRleSerialized *sizes_serialized = bytes_deserialize_simple8b_and_advance(si);
 
 	uint32 n_notnull;
-	uint32 *restrict sizes = simple8brle_decompress_all_uint32(sizes_serialized, &n_notnull);
+	const uint32 *sizes = simple8brle_decompress_all_uint32(sizes_serialized, &n_notnull);
 	const uint32 n_total = has_nulls ? nulls_serialized->num_elements : n_notnull;
 	CheckCompressedData(n_total >= n_notnull);
 
@@ -507,7 +507,7 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
 	uint32 offset = 0;
 	for (uint32 i = 0; i < n_notnull; i++)
 	{
-		void *unaligned = consumeCompressedData(si, sizes[i]);
+		const void *unaligned = consumeCompressedData(si, sizes[i]);
 
 		/*
 		 * We start reading from the end of previous datum, but this pointer
@@ -517,7 +517,8 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
 		 *
 		 * See the corresponding row-by-row code in bytes_to_datum_and_advance().
 		 */
-		void *vardata = DatumGetPointer(att_align_pointer(unaligned, TYPALIGN_INT, -1, unaligned));
+		const void *vardata =
+			DatumGetPointer(att_align_pointer(unaligned, TYPALIGN_INT, -1, unaligned));
 
 		/*
 		 * Check for potentially corrupt varlena headers since we're reading them
@@ -578,7 +579,7 @@ text_array_decompress_all_serialized_no_header(StringInfo si, bool has_nulls,
 	 * We have decompressed the data with nulls skipped, reshuffle it
 	 * according to the nulls bitmap.
 	 */
-	Simple8bRleBitmap nulls = simple8brle_bitmap_decompress(nulls_serialized);
+	const Simple8bRleBitmap nulls = simple8brle_bitmap_decompress(nulls_serialized);
 	CheckCompressedData(n_notnull + simple8brle_bitmap_num_ones(&nulls) == n_total);
 
 	int current_notnull_element = n_notnull - 1;
@@ -31,8 +31,7 @@ FUNCTION_NAME(delta_delta_decompress_all, ELEMENT_TYPE)(Datum compressed, Memory
 	 * test_delta4().
 	 */
 	uint32 num_deltas;
-	const uint64 *restrict deltas_zigzag =
-		simple8brle_decompress_all_uint64(deltas_compressed, &num_deltas);
+	const uint64 *deltas_zigzag = simple8brle_decompress_all_uint64(deltas_compressed, &num_deltas);
 
 	Simple8bRleBitmap nulls = { 0 };
 	if (has_nulls)
@@ -867,7 +867,7 @@ unpack_leading_zeros_array(BitArray *bitarray, uint32 *_n)
 	for (uint32 lane = 0; lane < n_lanes; lane++)
 	{
 		uint8 *restrict lane_dest = &dest[lane * LANE_OUTPUTS];
-		const uint8 *restrict lane_src = &((uint8 *) bitarray->buckets.data)[lane * LANE_INPUTS];
+		const uint8 *lane_src = &((uint8 *) bitarray->buckets.data)[lane * LANE_INPUTS];
 		for (uint32 output_in_lane = 0; output_in_lane < LANE_OUTPUTS; output_in_lane++)
 		{
 			const int startbit_abs = output_in_lane * BITS_PER_LEADING_ZEROS;
@@ -40,19 +40,19 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril
 	CheckCompressedData(n_total >= n_notnull);
 
 	/* Unpack the basic compressed data parts. */
-	Simple8bRleBitmap tag0s = simple8brle_bitmap_prefixsums(gorilla_data->tag0s);
-	Simple8bRleBitmap tag1s = simple8brle_bitmap_prefixsums(gorilla_data->tag1s);
+	const Simple8bRleBitmap tag0s = simple8brle_bitmap_prefixsums(gorilla_data->tag0s);
+	const Simple8bRleBitmap tag1s = simple8brle_bitmap_prefixsums(gorilla_data->tag1s);
 
 	BitArray leading_zeros_bitarray = gorilla_data->leading_zeros;
 	BitArrayIterator leading_zeros_iterator;
 	bit_array_iterator_init(&leading_zeros_iterator, &leading_zeros_bitarray);
 
 	uint32 num_leading_zeros_padded;
-	uint8 *restrict all_leading_zeros =
+	const uint8 *all_leading_zeros =
 		unpack_leading_zeros_array(&gorilla_data->leading_zeros, &num_leading_zeros_padded);
 
 	uint32 num_bit_widths;
-	uint8 *restrict bit_widths =
+	const uint8 *bit_widths =
 		simple8brle_decompress_all_uint8(gorilla_data->num_bits_used_per_xor, &num_bit_widths);
 
 	BitArray xors_bitarray = gorilla_data->xors;
@@ -147,7 +147,7 @@ FUNCTION_NAME(gorilla_decompress_all, ELEMENT_TYPE)(CompressedGorillaData *goril
 	 * We have decompressed the data with nulls skipped, reshuffle it
 	 * according to the nulls bitmap.
 	 */
-	Simple8bRleBitmap nulls = simple8brle_bitmap_decompress(gorilla_data->nulls);
+	const Simple8bRleBitmap nulls = simple8brle_bitmap_decompress(gorilla_data->nulls);
 	CheckCompressedData(n_notnull + simple8brle_bitmap_num_ones(&nulls) == n_total);
 
 	int current_notnull_element = n_notnull - 1;
@@ -15,30 +15,30 @@
 typedef struct Simple8bRleBitmap
 {
 	/* Either the bools or prefix sums, depending on the decompression method. */
-	void *data;
+	const void *data;
 
 	uint16 num_elements;
 	uint16 num_ones;
 } Simple8bRleBitmap;
 
 pg_attribute_always_inline static bool
-simple8brle_bitmap_get_at(Simple8bRleBitmap *bitmap, uint16 i)
+simple8brle_bitmap_get_at(const Simple8bRleBitmap *bitmap, uint16 i)
 {
 	/* We have some padding on the right but we shouldn't overrun it. */
 	Assert(i < ((bitmap->num_elements + 63) / 64 + 1) * 64);
 
-	return ((bool *restrict) bitmap->data)[i];
+	return ((const bool *) bitmap->data)[i];
 }
 
 pg_attribute_always_inline static uint16
-simple8brle_bitmap_prefix_sum(Simple8bRleBitmap *bitmap, uint16 i)
+simple8brle_bitmap_prefix_sum(const Simple8bRleBitmap *bitmap, uint16 i)
 {
 	Assert(i < ((bitmap->num_elements + 63) / 64 + 1) * 64);
-	return ((uint16 *restrict) bitmap->data)[i];
+	return ((const uint16 *) bitmap->data)[i];
 }
 
 pg_attribute_always_inline static uint16
-simple8brle_bitmap_num_ones(Simple8bRleBitmap *bitmap)
+simple8brle_bitmap_num_ones(const Simple8bRleBitmap *bitmap)
 {
 	return bitmap->num_ones;
 }
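With the accessors above taking `const Simple8bRleBitmap *`, a read-only bitmap (such as the `const` locals introduced in the decompression hunks) can be passed without casts. A hypothetical caller sketch, not code from the repository:

	/*
	 * Illustrative helper, assuming the bitmap holds per-element bools
	 * (i.e. it came from simple8brle_bitmap_decompress()).
	 */
	static uint16
	count_set_bits_prefix(const Simple8bRleBitmap *bitmap, uint16 upto)
	{
		uint16 count = 0;
		for (uint16 i = 0; i < upto; i++)
		{
			count += simple8brle_bitmap_get_at(bitmap, i) ? 1 : 0;
		}
		return count;
	}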
@@ -36,7 +36,7 @@ FUNCTION_NAME(simple8brle_decompress_all_buf,
 	 */
 	Assert(num_blocks <= GLOBAL_MAX_ROWS_PER_COMPRESSION);
 	uint8 selector_values[GLOBAL_MAX_ROWS_PER_COMPRESSION];
-	const uint64 *restrict slots = compressed->slots;
+	const uint64 *slots = compressed->slots;
 	for (uint32 block_index = 0; block_index < num_blocks; block_index++)
 	{
 		const uint32 selector_slot = block_index / SIMPLE8B_SELECTORS_PER_SELECTOR_SLOT;
@@ -52,7 +52,7 @@ FUNCTION_NAME(simple8brle_decompress_all_buf,
 	 * Now decompress the individual blocks.
 	 */
 	uint32 decompressed_index = 0;
-	const uint64 *restrict blocks = compressed->slots + num_selector_slots;
+	const uint64 *blocks = compressed->slots + num_selector_slots;
 	for (uint32 block_index = 0; block_index < num_blocks; block_index++)
 	{
 		const uint8 selector_value = selector_values[block_index];
@@ -539,7 +539,7 @@ compute_plain_qual(DecompressContext *dcontext, DecompressBatchState *batch_stat
 		const size_t n_vector_result_words = (vector->length + 63) / 64;
 		Assert((predicate_result != default_value_predicate_result) ||
 			   n_vector_result_words == 1); /* to placate Coverity. */
-		const uint64 *restrict validity = (uint64 *restrict) vector->buffers[0];
+		const uint64 *validity = (const uint64 *) vector->buffers[0];
 		for (size_t i = 0; i < n_vector_result_words; i++)
 		{
 			predicate_result[i] &= validity[i];
@@ -29,7 +29,7 @@ FUNCTION_NAME(PREDICATE_NAME, VECTOR_CTYPE,
 
 	/* Now run the predicate itself. */
 	const CONST_CTYPE constvalue = CONST_CONVERSION(constdatum);
-	const VECTOR_CTYPE *restrict vector = (VECTOR_CTYPE *restrict) arrow->buffers[1];
+	const VECTOR_CTYPE *vector = (const VECTOR_CTYPE *) arrow->buffers[1];
 
 	for (size_t outer = 0; outer < n / 64; outer++)
 	{
@@ -71,7 +71,7 @@ vector_nulltest(const ArrowArray *arrow, int test_type, uint64 *restrict result)
 	const bool should_be_null = test_type == IS_NULL;
 
 	const uint16 bitmap_words = (arrow->length + 63) / 64;
-	const uint64 *restrict validity = (const uint64 *) arrow->buffers[0];
+	const uint64 *validity = (const uint64 *) arrow->buffers[0];
 	for (uint16 i = 0; i < bitmap_words; i++)
 	{
 		if (should_be_null)