Optimize compressed chunk resorting

This patch adds an optimization to the DecompressChunk node. If the
query 'order by' and the compression 'order by' are compatible (the
query 'order by' is equal to or a prefix of the compression 'order by'),
the compressed batches of the segments are decompressed in parallel and
merged using a binary heap. This preserves the ordering, so re-sorting
the result can be avoided. LIMIT queries in particular benefit from
this optimization because only the first tuples of some batches have to
be decompressed. Previously, all segments were completely decompressed
and sorted.

Fixes: #4223

Co-authored-by: Sotiris Stamokostas <sotiris@timescale.com>
Jan Nidzwetzki 2023-04-06 13:12:13 +02:00 committed by Jan Nidzwetzki
parent cc9c3b3431
commit df32ad4b79
47 changed files with 8010 additions and 707 deletions
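To make the commit message concrete before diving into the diff, here is a minimal, self-contained sketch (plain C99, compilable on its own) of the underlying idea: every decompressed batch is already sorted, so a binary min-heap keyed on each batch's current head value produces a totally ordered stream while only pulling as many values as the consumer asks for. All names, the integer batches, and the hand-rolled heap are illustrative only; the actual patch works on TupleTableSlots and reuses PostgreSQL's lib/binaryheap, as shown in sorted_merge.c further down.

#include <stdio.h>

/*
 * One pre-sorted "batch" of values and a cursor into it. In the real patch a
 * batch is a DecompressBatchState holding TupleTableSlots; plain ints are used
 * here only to keep the sketch self-contained.
 */
typedef struct
{
	const int *vals;
	int len;
	int pos;
} Batch;

typedef struct
{
	int *items;		/* heap entries: indexes into the batches array */
	int size;
	Batch *batches;
} Heap;

/* Order heap entries by the current head value of their batch (min-heap). */
static int
heap_less(const Heap *h, int a, int b)
{
	const Batch *ba = &h->batches[h->items[a]];
	const Batch *bb = &h->batches[h->items[b]];
	return ba->vals[ba->pos] < bb->vals[bb->pos];
}

static void
sift_down(Heap *h, int i)
{
	for (;;)
	{
		int smallest = i, l = 2 * i + 1, r = 2 * i + 2;
		if (l < h->size && heap_less(h, l, smallest))
			smallest = l;
		if (r < h->size && heap_less(h, r, smallest))
			smallest = r;
		if (smallest == i)
			return;
		int tmp = h->items[i];
		h->items[i] = h->items[smallest];
		h->items[smallest] = tmp;
		i = smallest;
	}
}

int
main(void)
{
	/* Three already-sorted batches, standing in for decompressed segments. */
	static const int b0[] = { 1, 4, 9 }, b1[] = { 2, 3, 10 }, b2[] = { 5, 6, 7 };
	Batch batches[] = { { b0, 3, 0 }, { b1, 3, 0 }, { b2, 3, 0 } };
	int items[] = { 0, 1, 2 };
	Heap heap = { items, 3, batches };

	/* Build the heap over the first element of every batch. */
	for (int i = heap.size / 2 - 1; i >= 0; i--)
		sift_down(&heap, i);

	/* Repeatedly emit the smallest head; advance that batch or drop it. */
	while (heap.size > 0)
	{
		Batch *top = &heap.batches[heap.items[0]];
		printf("%d ", top->vals[top->pos++]);
		if (top->pos == top->len)
			heap.items[0] = heap.items[--heap.size];	/* batch exhausted */
		sift_down(&heap, 0);
	}
	printf("\n");	/* prints: 1 2 3 4 5 6 7 9 10 */
	return 0;
}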

View File

@ -18,6 +18,7 @@ accidentally triggering the load of a previous DB version.**
* #5547 Skip Ordered Append when only 1 child node is present
* #5510 Propagate vacuum/analyze to compressed chunks
* #5584 Reduce decompression during constraint checking
* #5530 Optimize compressed chunk resorting
**Bugfixes**
* #5396 Fix SEGMENTBY columns predicates to be pushed down

View File

@ -788,4 +788,20 @@ RelationGetSmgr(Relation rel)
}
#endif
#if PG14_LT
/*
* pg_nodiscard was introduced with PostgreSQL 14
*
* pg_nodiscard means the compiler should warn if the result of a function
* call is ignored. The name "nodiscard" is chosen in alignment with
* (possibly future) C and C++ standards. For maximum compatibility, use it
* as a function declaration specifier, so it goes before the return type.
*/
#ifdef __GNUC__
#define pg_nodiscard __attribute__((warn_unused_result))
#else
#define pg_nodiscard
#endif
#endif
#endif /* TIMESCALEDB_COMPAT_H */

View File

@ -76,6 +76,7 @@ bool ts_guc_enable_cagg_reorder_groupby = true;
bool ts_guc_enable_now_constify = true;
bool ts_guc_enable_osm_reads = true;
TSDLLEXPORT bool ts_guc_enable_transparent_decompression = true;
TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge = true;
bool ts_guc_enable_per_data_node_queries = true;
bool ts_guc_enable_parameterized_data_node_scan = true;
bool ts_guc_enable_async_append = true;
@ -290,6 +291,18 @@ _guc_init(void)
NULL,
NULL);
DefineCustomBoolVariable("timescaledb.enable_decompression_sorted_merge",
"Enable compressed batches heap merge",
"Enable the merge of compressed batches to preserve the compression "
"order by",
&ts_guc_enable_decompression_sorted_merge,
true,
PGC_USERSET,
0,
NULL,
NULL,
NULL);
DefineCustomBoolVariable("timescaledb.enable_cagg_reorder_groupby",
"Enable group by reordering",
"Enable group by clause reordering for continuous aggregates",

View File

@ -27,6 +27,7 @@ extern bool ts_guc_enable_cagg_reorder_groupby;
extern bool ts_guc_enable_now_constify;
extern bool ts_guc_enable_osm_reads;
extern TSDLLEXPORT bool ts_guc_enable_transparent_decompression;
extern TSDLLEXPORT bool ts_guc_enable_decompression_sorted_merge;
extern TSDLLEXPORT bool ts_guc_enable_per_data_node_queries;
extern TSDLLEXPORT bool ts_guc_enable_parameterized_data_node_scan;
extern TSDLLEXPORT bool ts_guc_enable_async_append;

View File

@ -625,14 +625,14 @@ ts_make_pathkey_from_sortop(PlannerInfo *root, Expr *expr, Relids nullable_relid
}
/*
* make_sort --- basic routine to build a Sort plan node
* ts_make_sort --- basic routine to build a Sort plan node
*
* Caller must have built the sortColIdx, sortOperators, collations, and
* nullsFirst arrays already.
*/
static Sort *
make_sort(Plan *lefttree, int numCols, AttrNumber *sortColIdx, Oid *sortOperators, Oid *collations,
bool *nullsFirst)
Sort *
ts_make_sort(Plan *lefttree, int numCols, AttrNumber *sortColIdx, Oid *sortOperators,
Oid *collations, bool *nullsFirst)
{
Sort *node = makeNode(Sort);
Plan *plan = &node->plan;
@ -680,7 +680,7 @@ ts_make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids)
&nullsFirst);
/* Now build the Sort node */
return make_sort(lefttree, numsortkeys, sortColIdx, sortOperators, collations, nullsFirst);
return ts_make_sort(lefttree, numsortkeys, sortColIdx, sortOperators, collations, nullsFirst);
}
/*

View File

@ -33,13 +33,17 @@ extern struct PathTarget *ts_make_partial_grouping_target(struct PlannerInfo *ro
extern bool ts_get_variable_range(PlannerInfo *root, VariableStatData *vardata, Oid sortop,
Datum *min, Datum *max);
extern Plan *ts_prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids,
extern TSDLLEXPORT Plan *
ts_prepare_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids,
const AttrNumber *reqColIdx, bool adjust_tlist_in_place,
int *p_numsortkeys, AttrNumber **p_sortColIdx,
Oid **p_sortOperators, Oid **p_collations,
bool **p_nullsFirst);
int *p_numsortkeys, AttrNumber **p_sortColIdx, Oid **p_sortOperators,
Oid **p_collations, bool **p_nullsFirst);
extern TSDLLEXPORT Sort *ts_make_sort_from_pathkeys(Plan *lefttree, List *pathkeys, Relids relids);
extern TSDLLEXPORT Sort *ts_make_sort(Plan *lefttree, int numCols, AttrNumber *sortColIdx,
Oid *sortOperators, Oid *collations, bool *nullsFirst);
extern TSDLLEXPORT PathKey *ts_make_pathkey_from_sortop(PlannerInfo *root, Expr *expr,
Relids nullable_relids, Oid ordering_op,
bool nulls_first, Index sortref,

View File

@ -1,6 +1,8 @@
# Add all *.c to sources in upperlevel directory
set(SOURCES
${CMAKE_CURRENT_SOURCE_DIR}/decompress_chunk.c
${CMAKE_CURRENT_SOURCE_DIR}/exec.c ${CMAKE_CURRENT_SOURCE_DIR}/planner.c
${CMAKE_CURRENT_SOURCE_DIR}/qual_pushdown.c)
${CMAKE_CURRENT_SOURCE_DIR}/exec.c
${CMAKE_CURRENT_SOURCE_DIR}/planner.c
${CMAKE_CURRENT_SOURCE_DIR}/qual_pushdown.c
${CMAKE_CURRENT_SOURCE_DIR}/sorted_merge.c)
target_sources(${TSL_LIBRARY_NAME} PRIVATE ${SOURCES})

View File

@ -4,6 +4,7 @@
* LICENSE-TIMESCALE for a copy of the license.
*/
#include <math.h>
#include <postgres.h>
#include <catalog/pg_operator.h>
#include <miscadmin.h>
@ -26,12 +27,14 @@
#include "ts_catalog/hypertable_compression.h"
#include "import/planner.h"
#include "compression/create.h"
#include "nodes/decompress_chunk/sorted_merge.h"
#include "nodes/decompress_chunk/decompress_chunk.h"
#include "nodes/decompress_chunk/planner.h"
#include "nodes/decompress_chunk/qual_pushdown.h"
#include "utils.h"
#define DECOMPRESS_CHUNK_CPU_TUPLE_COST 0.01
#define DECOMPRESS_CHUNK_BATCH_SIZE 1000
static CustomPathMethods decompress_chunk_path_methods = {
@ -47,6 +50,13 @@ typedef struct SortInfo
bool reverse;
} SortInfo;
typedef enum MergeBatchResult
{
MERGE_NOT_POSSIBLE,
SCAN_FORWARD,
SCAN_BACKWARD
} MergeBatchResult;
static RangeTblEntry *decompress_chunk_make_rte(Oid compressed_relid, LOCKMODE lockmode);
static void create_compressed_scan_paths(PlannerInfo *root, RelOptInfo *compressed_rel,
int parallel_workers, CompressionInfo *info,
@ -74,7 +84,7 @@ is_compressed_column(CompressionInfo *info, AttrNumber attno)
/*
* Like ts_make_pathkey_from_sortop but passes down the compressed relid so that existing
* equivalence members that are marked as childen are properly checked.
* equivalence members that are marked as children are properly checked.
*/
static PathKey *
make_pathkey_from_compressed(PlannerInfo *root, Index compressed_relid, Expr *expr, Oid ordering_op,
@ -360,6 +370,196 @@ cost_decompress_chunk(Path *path, Path *compressed_path)
path->rows = compressed_path->rows * DECOMPRESS_CHUNK_BATCH_SIZE;
}
/*
* Calculate the costs for retrieving the decompressed data in order using
* a binary heap.
*/
static void
cost_decompress_sorted_merge_append(PlannerInfo *root, DecompressChunkPath *dcpath,
Path *child_path)
{
Path sort_path; /* dummy for result of cost_sort */
cost_sort(&sort_path,
root,
dcpath->compressed_pathkeys,
child_path->total_cost,
child_path->rows,
child_path->pathtarget->width,
0.0,
work_mem,
-1);
/* startup_cost is cost before fetching first tuple */
dcpath->cpath.path.startup_cost = sort_path.total_cost;
/*
* The cost model for the normal chunk decompression produces the following total
* costs.
*
* Segments Total costs
* 10 711.84
* 50 4060.91
* 100 8588.32
* 10000 119281.84
*
* The cost model of the regular decompression is roughly linear. Opening multiple batches in
* parallel needs resources, and merging a large number of batches becomes inefficient at some
* point. So, we use a quadratic cost model here to have higher costs than the normal
* decompression when more than ~100 batches are used. We set
* DECOMPRESS_CHUNK_HEAP_MERGE_CPU_TUPLE_COST to 0.8 so that this path becomes the more costly
* one as soon as we have to process more than ~120 batches.
*
* Note: To behave similarly to the cost model of the regular decompression path, this cost
* model does not consider the number of tuples.
*/
dcpath->cpath.path.total_cost =
sort_path.total_cost + pow(sort_path.rows, 2) * DECOMPRESS_CHUNK_HEAP_MERGE_CPU_TUPLE_COST;
dcpath->cpath.path.rows = sort_path.rows * DECOMPRESS_CHUNK_BATCH_SIZE;
}
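As a rough sanity check of this formula (a back-of-the-envelope calculation that ignores the sort cost term): with 100 batches the quadratic term is 100^2 * 0.8 = 8000, in the same range as the regular path's 8588.32 from the table above; with 120 batches it is 120^2 * 0.8 = 11520, so from roughly 120 batches onwards the regular decompression path wins on cost.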
/*
* If the query 'order by' is a prefix of the compression 'order by' (or equal to it), we can
* exploit the ordering of the individual batches to create a totally ordered result without
* re-sorting the tuples. This speeds up all queries that use this ordering (because no sort node
* is needed). In particular, queries that use a LIMIT are sped up because only the top elements
* of the affected batches need to be decompressed. Without the optimization, the entire batches
* are decompressed, sorted, and then the top elements are taken from the result.
*
* The idea is to do something similar to the MergeAppend node; a BinaryHeap is used
* to merge the individual batches, which are each sorted on the 'order by' columns, into one
* sorted result. So, we end up with a data flow that looks as follows:
*
* DecompressChunk
* * Decompress Batch 1
* * Decompress Batch 2
* * Decompress Batch 3
* [....]
* * Decompress Batch N
*
* Using the presorted batches, we are able to open these batches dynamically. If we didn't presort
* them, we would have to open all batches at the same time. This would be similar to the work that
* MergeAppend does, but it is not needed in our case, so we can reduce the size of the heap and
* the number of batches that are open in parallel.
*
* The algorithm works as follows:
*
* (1) A sort node is placed below the decompress scan node and on top of the scan
* on the compressed chunk. This sort node uses the min/max values of the 'order by'
* columns from the metadata of the batch to get them into an order which can be
* used to merge them.
*
* [Scan on compressed chunk] -> [Sort on min/max values] -> [Decompress and merge]
*
* For example, the batches are sorted on the min value of the 'order by' metadata
* column: [0, 3] [0, 5] [3, 7] [6, 10]
*
* (2) The decompress chunk node initializes a binary heap, opens the first batch and
* decompresses the first tuple from the batch. The tuple is put on the heap. In addition
* the opened batch is marked as the most recent batch (MRB).
*
* (3) As soon as a tuple is requested from the heap, the following steps are performed:
* (3a) If the heap is empty, we are done.
* (3b) The top tuple from the heap is taken, and it is checked whether this tuple is from
* the MRB. If this is the case, the next batch is opened, its first tuple is decompressed
* and placed on the heap, and that batch is marked as the new MRB. This is repeated until
* the top tuple of the heap is no longer from the MRB. That way, all batches (plus one
* look-ahead batch) which might contain the next tuple to be returned are opened and
* placed on the heap.
*
* In the example above, the first three batches are opened because the first two
* batches might contain tuples with a value of 0.
* (3c) The top element from the heap is removed, the next tuple from the batch is
* decompressed (if present) and placed on the heap.
* (3d) The former top tuple of the heap is returned.
*
* This function checks if the compression 'order by' and the query 'order by' are
* compatible and the optimization can be used.
*/
static MergeBatchResult
can_sorted_merge_append(PlannerInfo *root, CompressionInfo *info, Chunk *chunk)
{
PathKey *pk;
Var *var;
Expr *expr;
char *column_name;
List *pathkeys = root->query_pathkeys;
FormData_hypertable_compression *ci;
MergeBatchResult merge_result = SCAN_FORWARD;
/* Ensure that we have path keys and the chunk is ordered */
if (pathkeys == NIL || ts_chunk_is_unordered(chunk) || ts_chunk_is_partial(chunk))
return MERGE_NOT_POSSIBLE;
int nkeys = list_length(pathkeys);
/*
* Loop over the pathkeys of the query. These pathkeys need to match the
* configured compress_orderby pathkeys.
*/
for (int pk_index = 0; pk_index < nkeys; pk_index++)
{
pk = list_nth(pathkeys, pk_index);
expr = find_em_expr_for_rel(pk->pk_eclass, info->chunk_rel);
if (expr == NULL || !IsA(expr, Var))
return MERGE_NOT_POSSIBLE;
var = castNode(Var, expr);
if (var->varattno <= 0)
return MERGE_NOT_POSSIBLE;
column_name = get_attname(info->chunk_rte->relid, var->varattno, false);
ci = get_column_compressioninfo(info->hypertable_compression_info, column_name);
if (ci->orderby_column_index != pk_index + 1)
return MERGE_NOT_POSSIBLE;
/* Check the order; if the order of the first column does not match, switch to backward scan */
Assert(pk->pk_strategy == BTLessStrategyNumber ||
pk->pk_strategy == BTGreaterStrategyNumber);
if (pk->pk_strategy != BTLessStrategyNumber)
{
/* Test that ORDER BY and NULLS first/last do match in forward scan */
if (!ci->orderby_asc && ci->orderby_nullsfirst == pk->pk_nulls_first &&
merge_result == SCAN_FORWARD)
continue;
/* Exact opposite in backward scan */
else if (ci->orderby_asc && ci->orderby_nullsfirst != pk->pk_nulls_first &&
merge_result == SCAN_BACKWARD)
continue;
/* Switch scan direction on exact opposite order for first attribute */
else if (ci->orderby_asc && ci->orderby_nullsfirst != pk->pk_nulls_first &&
pk_index == 0)
merge_result = SCAN_BACKWARD;
else
return MERGE_NOT_POSSIBLE;
}
else
{
/* Test that ORDER BY and NULLS first/last do match in forward scan */
if (ci->orderby_asc && ci->orderby_nullsfirst == pk->pk_nulls_first &&
merge_result == SCAN_FORWARD)
continue;
/* Exact opposite in backward scan */
else if (!ci->orderby_asc && ci->orderby_nullsfirst != pk->pk_nulls_first &&
merge_result == SCAN_BACKWARD)
continue;
/* Switch scan direction on exact opposite order for first attribute */
else if (!ci->orderby_asc && ci->orderby_nullsfirst != pk->pk_nulls_first &&
pk_index == 0)
merge_result = SCAN_BACKWARD;
else
return MERGE_NOT_POSSIBLE;
}
}
return merge_result;
}
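For example (illustrative settings, assuming the default NULLS placement on both sides): with compress_orderby = 'time DESC', a query ORDER BY time DESC yields SCAN_FORWARD; ORDER BY time ASC yields SCAN_BACKWARD, because the first column is ordered in the exact opposite direction; and ORDER BY device returns MERGE_NOT_POSSIBLE, because 'device' is not the first compression 'order by' column.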
void
ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hypertable *ht,
Chunk *chunk)
@ -513,10 +713,38 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
path = (Path *) decompress_chunk_path_create(root, info, 0, child_path);
/* If we can push down the sort below the DecompressChunk node, we set the pathkeys of the
* decompress node to the query pathkeys, while remembering the compressed_pathkeys
* corresponding to those query_pathkeys. We will determine whether to put a sort between
* the decompression node and the scan during plan creation */
/*
* Create a path for the sorted merge append optimization. This optimization performs a
* merge append of the involved batches by using a binary heap and preserving the
* compression order. This optimization is only taken into consideration if we can't push
* down the sort to the compressed chunk. If we can push down the sort, the batches can be
* directly consumed in this order and we don't need to use this optimization.
*/
if (ts_guc_enable_decompression_sorted_merge && !sort_info.can_pushdown_sort)
{
MergeBatchResult merge_result = can_sorted_merge_append(root, info, chunk);
if (merge_result != MERGE_NOT_POSSIBLE)
{
DecompressChunkPath *dcpath =
copy_decompress_chunk_path((DecompressChunkPath *) path);
dcpath->reverse = (merge_result != SCAN_FORWARD);
dcpath->sorted_merge_append = true;
/* The sorted merge append optimization is only used if it can deliver the tuples in the
* same order as the query requested them. So, we can just copy the pathkeys of the
* query here.
*/
dcpath->cpath.path.pathkeys = root->query_pathkeys;
cost_decompress_sorted_merge_append(root, dcpath, child_path);
add_path(chunk_rel, &dcpath->cpath.path);
}
}
/* If we can push down the sort below the DecompressChunk node, we set the pathkeys of
* the decompress node to the query pathkeys, while remembering the compressed_pathkeys
* corresponding to those query_pathkeys. We will determine whether to put a sort
* between the decompression node and the scan during plan creation */
if (sort_info.can_pushdown_sort)
{
DecompressChunkPath *dcpath = copy_decompress_chunk_path((DecompressChunkPath *) path);
@ -527,8 +755,9 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
/*
* Add costing for a sort. The standard Postgres pattern is to add the cost during
* path creation, but not add the sort path itself, that's done during plan creation.
* Examples of this in: create_merge_append_path & create_merge_append_plan
* path creation, but not add the sort path itself, that's done during plan
* creation. Examples of this in: create_merge_append_path &
* create_merge_append_plan
*/
if (!pathkeys_contained_in(dcpath->compressed_pathkeys, child_path->pathkeys))
{
@ -543,6 +772,7 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
0.0,
work_mem,
-1);
cost_decompress_chunk(&dcpath->cpath.path, &sort_path);
}
add_path(chunk_rel, &dcpath->cpath.path);
@ -1256,6 +1486,7 @@ decompress_chunk_path_create(PlannerInfo *root, CompressionInfo *info, int paral
path->cpath.flags = 0;
path->cpath.methods = &decompress_chunk_path_methods;
path->sorted_merge_append = false;
/* To prevent a non-parallel path with this node appearing
* in a parallel plan we only set parallel_safe to true

View File

@ -63,6 +63,7 @@ typedef struct DecompressChunkPath
List *compressed_pathkeys;
bool needs_sequence_num;
bool reverse;
bool sorted_merge_append;
} DecompressChunkPath;
void ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht,

View File

@ -21,130 +21,260 @@
#include "compat/compat.h"
#include "compression/array.h"
#include "compression/compression.h"
#include "nodes/decompress_chunk/sorted_merge.h"
#include "nodes/decompress_chunk/decompress_chunk.h"
#include "nodes/decompress_chunk/exec.h"
#include "nodes/decompress_chunk/planner.h"
#include "ts_catalog/hypertable_compression.h"
typedef enum DecompressChunkColumnType
{
SEGMENTBY_COLUMN,
COMPRESSED_COLUMN,
COUNT_COLUMN,
SEQUENCE_NUM_COLUMN,
} DecompressChunkColumnType;
typedef struct DecompressChunkColumnState
{
DecompressChunkColumnType type;
Oid typid;
/*
* Attno of the decompressed column in the output of DecompressChunk node.
* Negative values are special columns that do not have a representation in
* the uncompressed chunk, but are still used for decompression. They should
* have the respective `type` field.
*/
AttrNumber output_attno;
/*
* Attno of the compressed column in the input compressed chunk scan.
*/
AttrNumber compressed_scan_attno;
union
{
struct
{
Datum value;
bool isnull;
int count;
} segmentby;
struct
{
DecompressionIterator *iterator;
} compressed;
};
} DecompressChunkColumnState;
typedef struct DecompressChunkState
{
CustomScanState csstate;
List *decompression_map;
List *is_segmentby_column;
int num_columns;
DecompressChunkColumnState *columns;
bool initialized;
bool reverse;
int hypertable_id;
Oid chunk_relid;
List *hypertable_compression_info;
int total_batch_rows;
int current_batch_row;
MemoryContext per_batch_context;
} DecompressChunkState;
static TupleTableSlot *decompress_chunk_exec(CustomScanState *node);
static void decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags);
static void decompress_chunk_end(CustomScanState *node);
static void decompress_chunk_rescan(CustomScanState *node);
static TupleTableSlot *decompress_chunk_create_tuple(DecompressChunkState *state);
static void decompress_chunk_explain(CustomScanState *node, List *ancestors, ExplainState *es);
static void decompress_chunk_create_tuple(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state);
static void decompress_initialize_batch_state(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state);
static CustomExecMethods decompress_chunk_state_methods = {
.BeginCustomScan = decompress_chunk_begin,
.ExecCustomScan = decompress_chunk_exec,
.EndCustomScan = decompress_chunk_end,
.ReScanCustomScan = decompress_chunk_rescan,
.ExplainCustomScan = decompress_chunk_explain,
};
/*
* Build the sortkeys data structure from the list structure in the
* custom_private field of the custom scan. This sort info is used to sort the
* binary heap used for the sorted merge append.
*/
static void
build_batch_sorted_merge_info(DecompressChunkState *chunk_state, List *sortinfo)
{
if (sortinfo == NIL)
{
chunk_state->n_sortkeys = 0;
chunk_state->sortkeys = NULL;
return;
}
List *sort_col_idx = linitial(sortinfo);
List *sort_ops = lsecond(sortinfo);
List *sort_collations = lthird(sortinfo);
List *sort_nulls = lfourth(sortinfo);
chunk_state->n_sortkeys = list_length(linitial((sortinfo)));
Assert(list_length(sort_col_idx) == list_length(sort_ops));
Assert(list_length(sort_ops) == list_length(sort_collations));
Assert(list_length(sort_collations) == list_length(sort_nulls));
Assert(chunk_state->n_sortkeys > 0);
SortSupportData *sortkeys = palloc0(sizeof(SortSupportData) * chunk_state->n_sortkeys);
/* Inspired by nodeMergeAppend.c */
for (int i = 0; i < chunk_state->n_sortkeys; i++)
{
SortSupportData *sortKey = &sortkeys[i];
sortKey->ssup_cxt = CurrentMemoryContext;
sortKey->ssup_collation = list_nth_oid(sort_collations, i);
sortKey->ssup_nulls_first = list_nth_oid(sort_nulls, i);
sortKey->ssup_attno = list_nth_oid(sort_col_idx, i);
/*
* It isn't feasible to perform abbreviated key conversion, since
* tuples are pulled into mergestate's binary heap as needed. It
* would likely be counter-productive to convert tuples into an
* abbreviated representation as they're pulled up, so opt out of that
* additional optimization entirely.
*/
sortKey->abbreviate = false;
PrepareSortSupportFromOrderingOp(list_nth_oid(sort_ops, i), sortKey);
}
chunk_state->sortkeys = sortkeys;
}
Node *
decompress_chunk_state_create(CustomScan *cscan)
{
DecompressChunkState *state;
DecompressChunkState *chunk_state;
List *settings;
state = (DecompressChunkState *) newNode(sizeof(DecompressChunkState), T_CustomScanState);
chunk_state = (DecompressChunkState *) newNode(sizeof(DecompressChunkState), T_CustomScanState);
state->csstate.methods = &decompress_chunk_state_methods;
chunk_state->csstate.methods = &decompress_chunk_state_methods;
settings = linitial(cscan->custom_private);
state->hypertable_id = linitial_int(settings);
state->chunk_relid = lsecond_int(settings);
state->reverse = lthird_int(settings);
state->decompression_map = lsecond(cscan->custom_private);
state->is_segmentby_column = lthird(cscan->custom_private);
return (Node *) state;
Assert(list_length(settings) == 4);
chunk_state->hypertable_id = linitial_int(settings);
chunk_state->chunk_relid = lsecond_int(settings);
chunk_state->reverse = lthird_int(settings);
chunk_state->sorted_merge_append = lfourth_int(settings);
chunk_state->decompression_map = lsecond(cscan->custom_private);
chunk_state->is_segmentby_column = lthird(cscan->custom_private);
/* Extract sort info */
List *sortinfo = lfourth(cscan->custom_private);
build_batch_sorted_merge_info(chunk_state, sortinfo);
/* Sort keys should only be present when sorted_merge_append is used */
Assert(chunk_state->sorted_merge_append == true || chunk_state->n_sortkeys == 0);
Assert(chunk_state->n_sortkeys == 0 || chunk_state->sortkeys != NULL);
return (Node *) chunk_state;
}
/*
* initialize column state
* Create states to hold information for up to n batches
*/
static void
batch_states_create(DecompressChunkState *chunk_state, int nbatches)
{
Assert(nbatches >= 0);
chunk_state->n_batch_states = nbatches;
chunk_state->batch_states = palloc0(sizeof(DecompressBatchState) * nbatches);
for (int segment = 0; segment < nbatches; segment++)
{
DecompressBatchState *batch_state = &chunk_state->batch_states[segment];
decompress_initialize_batch_state(chunk_state, batch_state);
}
chunk_state->unused_batch_states =
bms_add_range(chunk_state->unused_batch_states, 0, nbatches - 1);
}
/*
* Enhance the capacity of existing batch states
*/
static void
batch_states_enlarge(DecompressChunkState *chunk_state, int nbatches)
{
Assert(nbatches > chunk_state->n_batch_states);
/* Request additional memory */
chunk_state->batch_states =
(DecompressBatchState *) repalloc(chunk_state->batch_states,
sizeof(DecompressBatchState) * nbatches);
/* Init new batch states (lazy initialization, expensive data structures
* like TupleTableSlot are created on demand) */
for (int segment = chunk_state->n_batch_states; segment < nbatches; segment++)
{
DecompressBatchState *batch_state = &chunk_state->batch_states[segment];
decompress_initialize_batch_state(chunk_state, batch_state);
}
/* Register the new states as unused */
chunk_state->unused_batch_states =
bms_add_range(chunk_state->unused_batch_states, chunk_state->n_batch_states, nbatches - 1);
chunk_state->n_batch_states = nbatches;
}
/*
* Mark a DecompressBatchState as unused
*/
void
decompress_set_batch_state_to_unused(DecompressChunkState *chunk_state, int batch_id)
{
Assert(batch_id >= 0);
Assert(batch_id < chunk_state->n_batch_states);
DecompressBatchState *batch_state = &chunk_state->batch_states[batch_id];
/* Reset batch state */
batch_state->initialized = false;
batch_state->total_batch_rows = 0;
batch_state->current_batch_row = 0;
if (batch_state->compressed_slot != NULL)
ExecClearTuple(batch_state->compressed_slot);
if (batch_state->decompressed_slot_projected != NULL)
ExecClearTuple(batch_state->decompressed_slot_projected);
if (batch_state->decompressed_slot_scan != NULL)
ExecClearTuple(batch_state->decompressed_slot_scan);
chunk_state->unused_batch_states = bms_add_member(chunk_state->unused_batch_states, batch_id);
}
/*
* Get the next free and unused batch state and mark as used
*/
DecompressSlotNumber
decompress_get_free_batch_state_id(DecompressChunkState *chunk_state)
{
if (bms_is_empty(chunk_state->unused_batch_states))
batch_states_enlarge(chunk_state, chunk_state->n_batch_states * 2);
Assert(!bms_is_empty(chunk_state->unused_batch_states));
DecompressSlotNumber next_free_batch = bms_next_member(chunk_state->unused_batch_states, -1);
Assert(next_free_batch >= 0);
Assert(next_free_batch < chunk_state->n_batch_states);
Assert(chunk_state->batch_states[next_free_batch].initialized == false);
bms_del_member(chunk_state->unused_batch_states, next_free_batch);
return next_free_batch;
}
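For orientation, the intended call sequence for these helpers (as used by decompress_batch_open_next_batch() in sorted_merge.c later in this patch) is: fetch a compressed tuple from the child node, call decompress_get_free_batch_state_id() to acquire a batch state, decompress_initialize_batch() to bind the tuple to it, decompress_get_next_tuple_from_batch() to produce decompressed tuples, and finally decompress_set_batch_state_to_unused() once the batch is exhausted.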
/*
* initialize column chunk_state
*
* the column state indexes are based on the index
* of the columns of the uncompressed chunk because
* the column chunk_state indexes are based on the index
* of the columns of the decompressed chunk because
* that is the tuple layout we are creating
*/
static void
initialize_column_state(DecompressChunkState *state)
decompress_initialize_batch_state(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state)
{
ScanState *ss = (ScanState *) state;
ScanState *ss = (ScanState *) chunk_state;
TupleDesc desc = ss->ss_ScanTupleSlot->tts_tupleDescriptor;
if (list_length(state->decompression_map) == 0)
if (list_length(chunk_state->decompression_map) == 0)
{
elog(ERROR, "no columns specified to decompress");
}
state->columns =
palloc0(list_length(state->decompression_map) * sizeof(DecompressChunkColumnState));
batch_state->per_batch_context = AllocSetContextCreate(CurrentMemoryContext,
"DecompressChunk per_batch",
ALLOCSET_DEFAULT_SIZES);
batch_state->columns =
palloc0(list_length(chunk_state->decompression_map) * sizeof(DecompressChunkColumnState));
batch_state->initialized = false;
/* The slots will be created on first usage of the batch state */
batch_state->decompressed_slot_projected = NULL;
batch_state->decompressed_slot_scan = NULL;
batch_state->compressed_slot = NULL;
AttrNumber next_compressed_scan_attno = 0;
state->num_columns = 0;
chunk_state->num_columns = 0;
ListCell *dest_cell;
ListCell *is_segmentby_cell;
Assert(list_length(state->decompression_map) == list_length(state->is_segmentby_column));
forboth (dest_cell, state->decompression_map, is_segmentby_cell, state->is_segmentby_column)
Assert(list_length(chunk_state->decompression_map) ==
list_length(chunk_state->is_segmentby_column));
forboth (dest_cell,
chunk_state->decompression_map,
is_segmentby_cell,
chunk_state->is_segmentby_column)
{
next_compressed_scan_attno++;
@ -155,15 +285,15 @@ initialize_column_state(DecompressChunkState *state)
continue;
}
DecompressChunkColumnState *column = &state->columns[state->num_columns];
state->num_columns++;
DecompressChunkColumnState *column = &batch_state->columns[chunk_state->num_columns];
chunk_state->num_columns++;
column->output_attno = output_attno;
column->compressed_scan_attno = next_compressed_scan_attno;
if (output_attno > 0)
{
/* normal column that is also present in uncompressed chunk */
/* normal column that is also present in decompressed chunk */
Form_pg_attribute attribute =
TupleDescAttr(desc, AttrNumberGetAttrOffset(output_attno));
@ -291,40 +421,94 @@ decompress_chunk_begin(CustomScanState *node, EState *estate, int eflags)
}
}
initialize_column_state(state);
node->custom_ps = lappend(node->custom_ps, ExecInitNode(compressed_scan, estate, eflags));
state->per_batch_context = AllocSetContextCreate(CurrentMemoryContext,
"DecompressChunk per_batch",
ALLOCSET_DEFAULT_SIZES);
}
static void
initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
TupleTableSlot *decompressed_slot)
void
decompress_initialize_batch(DecompressChunkState *chunk_state, DecompressBatchState *batch_state,
TupleTableSlot *subslot)
{
Datum value;
bool isnull;
int i;
MemoryContext old_context = MemoryContextSwitchTo(state->per_batch_context);
MemoryContextReset(state->per_batch_context);
state->total_batch_rows = 0;
state->current_batch_row = 0;
Assert(batch_state->initialized == false);
for (i = 0; i < state->num_columns; i++)
/* Batch states can be re-used; skip tuple slot creation in that case */
if (batch_state->compressed_slot == NULL)
{
DecompressChunkColumnState *column = &state->columns[i];
batch_state->compressed_slot =
MakeSingleTupleTableSlot(subslot->tts_tupleDescriptor, subslot->tts_ops);
}
else
{
ExecClearTuple(batch_state->compressed_slot);
}
ExecCopySlot(batch_state->compressed_slot, subslot);
Assert(!TupIsNull(batch_state->compressed_slot));
/* DecompressBatchState can be re-used. The expensive TupleTableSlots are created on demand as
* soon as this state is used for the first time.
*/
if (batch_state->decompressed_slot_scan == NULL)
{
/* Get a reference to the output TupleTableSlot */
TupleTableSlot *slot = chunk_state->csstate.ss.ss_ScanTupleSlot;
batch_state->decompressed_slot_scan =
MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, slot->tts_ops);
}
else
{
ExecClearTuple(batch_state->decompressed_slot_scan);
}
/* Ensure that all fields are empty. Calling ExecClearTuple is not enough
* because some attributes might not be populated (e.g., due to a dropped
* column) and these attributes need to be set to null. */
ExecStoreAllNullTuple(batch_state->decompressed_slot_scan);
if (batch_state->decompressed_slot_projected == NULL)
{
if (chunk_state->csstate.ss.ps.ps_ProjInfo != NULL)
{
TupleTableSlot *slot = chunk_state->csstate.ss.ps.ps_ProjInfo->pi_state.resultslot;
batch_state->decompressed_slot_projected =
MakeSingleTupleTableSlot(slot->tts_tupleDescriptor, slot->tts_ops);
}
else
{
/* If we don't have any projection info, point decompressed_slot_projected to
* decompressed_slot_scan. This way, we don't need to copy the content from the
* scan slot to the output slot in decompress_chunk_perform_select_project() */
batch_state->decompressed_slot_projected = batch_state->decompressed_slot_scan;
}
}
else
{
ExecClearTuple(batch_state->decompressed_slot_projected);
}
Assert(!TTS_EMPTY(batch_state->compressed_slot));
batch_state->total_batch_rows = 0;
batch_state->current_batch_row = 0;
MemoryContext old_context = MemoryContextSwitchTo(batch_state->per_batch_context);
MemoryContextReset(batch_state->per_batch_context);
for (i = 0; i < chunk_state->num_columns; i++)
{
DecompressChunkColumnState *column = &batch_state->columns[i];
switch (column->type)
{
case COMPRESSED_COLUMN:
{
column->compressed.iterator = NULL;
value = slot_getattr(compressed_slot, column->compressed_scan_attno, &isnull);
value = slot_getattr(batch_state->compressed_slot,
column->compressed_scan_attno,
&isnull);
if (isnull)
{
/*
@ -332,10 +516,11 @@ initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
* set it now.
*/
AttrNumber attr = AttrNumberGetAttrOffset(column->output_attno);
decompressed_slot->tts_values[attr] =
getmissingattr(decompressed_slot->tts_tupleDescriptor,
batch_state->decompressed_slot_scan->tts_values[attr] =
getmissingattr(batch_state->decompressed_slot_scan->tts_tupleDescriptor,
attr + 1,
&decompressed_slot->tts_isnull[attr]);
&batch_state->decompressed_slot_scan->tts_isnull[attr]);
break;
}
@ -343,7 +528,8 @@ initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
column->compressed.iterator =
tsl_get_decompression_iterator_init(header->compression_algorithm,
state->reverse)(PointerGetDatum(header),
chunk_state->reverse)(PointerGetDatum(
header),
column->typid);
break;
@ -356,14 +542,17 @@ initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
* save it once per batch, which we do here.
*/
AttrNumber attr = AttrNumberGetAttrOffset(column->output_attno);
decompressed_slot->tts_values[attr] =
slot_getattr(compressed_slot,
batch_state->decompressed_slot_scan->tts_values[attr] =
slot_getattr(batch_state->compressed_slot,
column->compressed_scan_attno,
&decompressed_slot->tts_isnull[attr]);
&batch_state->decompressed_slot_scan->tts_isnull[attr]);
break;
}
case COUNT_COLUMN:
value = slot_getattr(compressed_slot, column->compressed_scan_attno, &isnull);
{
value = slot_getattr(batch_state->compressed_slot,
column->compressed_scan_attno,
&isnull);
/* count column should never be NULL */
Assert(!isnull);
int count_value = DatumGetInt32(value);
@ -373,9 +562,10 @@ initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
(errmsg("the compressed data is corrupt: got a segment with length %d",
count_value)));
}
Assert(state->total_batch_rows == 0);
state->total_batch_rows = count_value;
Assert(batch_state->total_batch_rows == 0);
batch_state->total_batch_rows = count_value;
break;
}
case SEQUENCE_NUM_COLUMN:
/*
* nothing to do here for sequence number
@ -384,80 +574,181 @@ initialize_batch(DecompressChunkState *state, TupleTableSlot *compressed_slot,
break;
}
}
state->initialized = true;
batch_state->initialized = true;
MemoryContextSwitchTo(old_context);
}
static TupleTableSlot *
decompress_chunk_exec(CustomScanState *node)
/* Perform the projection and selection of the decompressed tuple */
static bool pg_nodiscard
decompress_chunk_perform_select_project(CustomScanState *node,
TupleTableSlot *decompressed_slot_scan,
TupleTableSlot *decompressed_slot_projected)
{
DecompressChunkState *state = (DecompressChunkState *) node;
ExprContext *econtext = node->ss.ps.ps_ExprContext;
if (node->custom_ps == NIL)
return NULL;
while (true)
{
TupleTableSlot *decompressed_slot = decompress_chunk_create_tuple(state);
if (TupIsNull(decompressed_slot))
return NULL;
econtext->ecxt_scantuple = decompressed_slot;
/* Reset expression memory context to clean out any cruft from
* previous tuple. */
/*
* Reset expression memory context to clean out any cruft from
* previous batch. Our batches are 1000 rows max, and this memory
* context is used by ExecProject and ExecQual, which shouldn't
* leak too much. So we only do this per batch and not per tuple to
* save some CPU.
*/
econtext->ecxt_scantuple = decompressed_slot_scan;
ResetExprContext(econtext);
if (node->ss.ps.qual && !ExecQual(node->ss.ps.qual, econtext))
{
InstrCountFiltered1(node, 1);
ExecClearTuple(decompressed_slot);
continue;
return false;
}
if (!node->ss.ps.ps_ProjInfo)
return decompressed_slot;
if (node->ss.ps.ps_ProjInfo)
{
TupleTableSlot *projected = ExecProject(node->ss.ps.ps_ProjInfo);
ExecCopySlot(decompressed_slot_projected, projected);
}
return ExecProject(node->ss.ps.ps_ProjInfo);
return true;
}
static TupleTableSlot *
decompress_chunk_exec(CustomScanState *node)
{
DecompressChunkState *chunk_state = (DecompressChunkState *) node;
if (node->custom_ps == NIL)
return NULL;
/* If the sorted_merge_append flag is set, the compression order_by and the
* query order_by do match. Therefore, we use a binary heap to decompress the compressed
* segments and merge the tuples.
*/
if (chunk_state->sorted_merge_append)
{
/* Create the heap on the first call. */
if (chunk_state->merge_heap == NULL)
{
batch_states_create(chunk_state, INITIAL_BATCH_CAPACITY);
decompress_sorted_merge_init(chunk_state);
}
else
{
/* Remove the tuple returned in the last iteration and refresh the heap.
* This operation is delayed up to this point where the next tuple actually
* needs to be decompressed.
*/
decompress_sorted_merge_remove_top_tuple_and_decompress_next(chunk_state);
}
return decompress_sorted_merge_get_next_tuple(chunk_state);
}
else
{
if (chunk_state->batch_states == NULL)
batch_states_create(chunk_state, 1);
DecompressBatchState *batch_state = &chunk_state->batch_states[0];
decompress_chunk_create_tuple(chunk_state, batch_state);
return batch_state->decompressed_slot_projected;
}
}
static void
decompress_chunk_rescan(CustomScanState *node)
{
((DecompressChunkState *) node)->initialized = false;
DecompressChunkState *chunk_state = (DecompressChunkState *) node;
if (chunk_state->merge_heap != NULL)
decompress_sorted_merge_free(chunk_state);
for (int i = 0; i < chunk_state->n_batch_states; i++)
{
decompress_set_batch_state_to_unused(chunk_state, i);
}
ExecReScan(linitial(node->custom_ps));
}
/* End the decompress operation and free the requested resources */
static void
decompress_chunk_end(CustomScanState *node)
{
MemoryContextReset(((DecompressChunkState *) node)->per_batch_context);
int i;
DecompressChunkState *chunk_state = (DecompressChunkState *) node;
if (chunk_state->merge_heap != NULL)
{
decompress_sorted_merge_free(chunk_state);
}
for (i = 0; i < chunk_state->n_batch_states; i++)
{
DecompressBatchState *batch_state = &chunk_state->batch_states[i];
Assert(batch_state != NULL);
if (batch_state->compressed_slot != NULL)
ExecDropSingleTupleTableSlot(batch_state->compressed_slot);
if (batch_state->decompressed_slot_scan != NULL)
ExecDropSingleTupleTableSlot(batch_state->decompressed_slot_scan);
/* If we don't have any projection info, decompressed_slot_scan and
* decompressed_slot_projected can be equal */
if (batch_state->decompressed_slot_projected != NULL &&
batch_state->decompressed_slot_scan != batch_state->decompressed_slot_projected)
ExecDropSingleTupleTableSlot(batch_state->decompressed_slot_projected);
batch_state = NULL;
}
ExecEndNode(linitial(node->custom_ps));
}
/*
* Create generated tuple according to column state
* Output additional information for EXPLAIN of a custom-scan plan node.
*/
static TupleTableSlot *
decompress_chunk_create_tuple(DecompressChunkState *state)
static void
decompress_chunk_explain(CustomScanState *node, List *ancestors, ExplainState *es)
{
TupleTableSlot *decompressed_slot = state->csstate.ss.ss_ScanTupleSlot;
DecompressChunkState *chunk_state = (DecompressChunkState *) node;
if (es->verbose || es->format != EXPLAIN_FORMAT_TEXT)
{
if (chunk_state->sorted_merge_append)
{
ExplainPropertyBool("Sorted merge append", chunk_state->sorted_merge_append, es);
}
}
}
/*
* Decompress the next tuple from the batch indicated by batch state. The result is stored
* in batch_state->decompressed_slot_projected. The slot will be empty if the batch
* is entirely processed.
*/
void
decompress_get_next_tuple_from_batch(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state)
{
TupleTableSlot *decompressed_slot_scan = batch_state->decompressed_slot_scan;
TupleTableSlot *decompressed_slot_projected = batch_state->decompressed_slot_projected;
Assert(decompressed_slot_scan != NULL);
Assert(decompressed_slot_projected != NULL);
while (true)
{
if (state->initialized && state->current_batch_row >= state->total_batch_rows)
if (batch_state->current_batch_row >= batch_state->total_batch_rows)
{
/*
* Reached end of batch. Check that the columns that we're decompressing
* row-by-row have also ended.
*/
state->initialized = false;
for (int i = 0; i < state->num_columns; i++)
batch_state->initialized = false;
for (int i = 0; i < chunk_state->num_columns; i++)
{
DecompressChunkColumnState *column = &state->columns[i];
DecompressChunkColumnState *column = &batch_state->columns[i];
if (column->type == COMPRESSED_COLUMN && column->compressed.iterator)
{
DecompressResult result =
@ -468,44 +759,27 @@ decompress_chunk_create_tuple(DecompressChunkState *state)
}
}
}
/* Clear old slot state */
ExecClearTuple(decompressed_slot_projected);
return;
}
if (!state->initialized)
Assert(batch_state->initialized);
Assert(batch_state->total_batch_rows > 0);
Assert(batch_state->current_batch_row < batch_state->total_batch_rows);
for (int i = 0; i < chunk_state->num_columns; i++)
{
ExecStoreAllNullTuple(decompressed_slot);
DecompressChunkColumnState *column = &batch_state->columns[i];
/*
* Reset expression memory context to clean out any cruft from
* previous batch. Our batches are 1000 rows max, and this memory
* context is used by ExecProject and ExecQual, which shouldn't
* leak too much. So we only do this per batch and not per tuple to
* save some CPU.
*/
ExprContext *econtext = state->csstate.ss.ps.ps_ExprContext;
ResetExprContext(econtext);
TupleTableSlot *compressed_slot = ExecProcNode(linitial(state->csstate.custom_ps));
if (TupIsNull(compressed_slot))
return NULL;
initialize_batch(state, compressed_slot, decompressed_slot);
}
Assert(state->initialized);
Assert(state->total_batch_rows > 0);
Assert(state->current_batch_row < state->total_batch_rows);
for (int i = 0; i < state->num_columns; i++)
{
DecompressChunkColumnState *column = &state->columns[i];
if (column->type != COMPRESSED_COLUMN)
{
continue;
}
const AttrNumber attr = AttrNumberGetAttrOffset(column->output_attno);
if (column->compressed.iterator != NULL)
{
DecompressResult result =
@ -516,11 +790,13 @@ decompress_chunk_create_tuple(DecompressChunkState *state)
elog(ERROR, "compressed column out of sync with batch counter");
}
decompressed_slot->tts_isnull[attr] = result.is_null;
decompressed_slot->tts_values[attr] = result.val;
decompressed_slot_scan->tts_isnull[attr] = result.is_null;
decompressed_slot_scan->tts_values[attr] = result.val;
}
}
batch_state->current_batch_row++;
/*
* It's a virtual tuple slot, so no point in clearing/storing it
* per each row, we can just update the values in-place. This saves
@ -530,13 +806,55 @@ decompress_chunk_create_tuple(DecompressChunkState *state)
* slots are read-only, and the memory is owned by this node, so it is
* safe to violate this protocol.
*/
Assert(TTS_IS_VIRTUAL(decompressed_slot));
if (TTS_EMPTY(decompressed_slot))
Assert(TTS_IS_VIRTUAL(decompressed_slot_scan));
if (TTS_EMPTY(decompressed_slot_scan))
{
ExecStoreVirtualTuple(decompressed_slot);
ExecStoreVirtualTuple(decompressed_slot_scan);
}
state->current_batch_row++;
return decompressed_slot;
/* Perform selection and projection if needed */
bool is_valid_tuple = decompress_chunk_perform_select_project(&chunk_state->csstate,
decompressed_slot_scan,
decompressed_slot_projected);
/* Non empty result, return it */
if (is_valid_tuple)
{
Assert(!TTS_EMPTY(decompressed_slot_projected));
return;
}
/* Otherwise fetch the next tuple in the next iteration */
}
}
/*
* Create generated tuple according to column chunk_state
*/
static void
decompress_chunk_create_tuple(DecompressChunkState *chunk_state, DecompressBatchState *batch_state)
{
while (true)
{
if (!batch_state->initialized)
{
TupleTableSlot *subslot = ExecProcNode(linitial(chunk_state->csstate.custom_ps));
if (TupIsNull(subslot))
{
Assert(TupIsNull(batch_state->decompressed_slot_projected));
return;
}
decompress_initialize_batch(chunk_state, batch_state, subslot);
}
/* Decompress next tuple from batch */
decompress_get_next_tuple_from_batch(chunk_state, batch_state);
if (!TupIsNull(batch_state->decompressed_slot_projected))
return;
batch_state->initialized = false;
}
}

View File

@ -12,6 +12,107 @@
#define DECOMPRESS_CHUNK_COUNT_ID -9
#define DECOMPRESS_CHUNK_SEQUENCE_NUM_ID -10
/* Initial amount of batch states */
#define INITIAL_BATCH_CAPACITY 16
/*
* From nodeMergeAppend.c
*
* We have one slot for each item in the heap array. We use DecompressSlotNumber
* to store slot indexes. This doesn't actually provide any formal
* type-safety, but it makes the code more self-documenting.
*/
typedef int32 DecompressSlotNumber;
typedef enum DecompressChunkColumnType
{
SEGMENTBY_COLUMN,
COMPRESSED_COLUMN,
COUNT_COLUMN,
SEQUENCE_NUM_COLUMN,
} DecompressChunkColumnType;
typedef struct DecompressChunkColumnState
{
DecompressChunkColumnType type;
Oid typid;
/*
* Attno of the decompressed column in the output of DecompressChunk node.
* Negative values are special columns that do not have a representation in
* the decompressed chunk, but are still used for decompression. They should
* have the respective `type` field.
*/
AttrNumber output_attno;
/*
* Attno of the compressed column in the input compressed chunk scan.
*/
AttrNumber compressed_scan_attno;
union
{
struct
{
Datum value;
bool isnull;
int count;
} segmentby;
struct
{
DecompressionIterator *iterator;
} compressed;
};
} DecompressChunkColumnState;
/*
* All the needed information to decompress a batch
*/
typedef struct DecompressBatchState
{
bool initialized;
TupleTableSlot *decompressed_slot_projected; /* The result slot with the final tuples */
TupleTableSlot *decompressed_slot_scan; /* A slot for the decompressed data */
TupleTableSlot *compressed_slot; /* A slot for compressed data */
DecompressChunkColumnState *columns;
int total_batch_rows;
int current_batch_row;
MemoryContext per_batch_context;
} DecompressBatchState;
typedef struct DecompressChunkState
{
CustomScanState csstate;
List *decompression_map;
List *is_segmentby_column;
int num_columns;
bool reverse;
int hypertable_id;
Oid chunk_relid;
/* Batch states */
int n_batch_states; /* Number of batch states */
DecompressBatchState *batch_states; /* The batch states */
Bitmapset *unused_batch_states; /* The unused batch states */
bool sorted_merge_append; /* Merge append optimization enabled */
int most_recent_batch; /* The batch state with the most recent value */
struct binaryheap *merge_heap; /* Binary heap of slot indices */
int n_sortkeys; /* Number of sort keys for heap compare function */
SortSupportData *sortkeys; /* Sort keys for binary heap compare function */
} DecompressChunkState;
extern Node *decompress_chunk_state_create(CustomScan *cscan);
extern DecompressSlotNumber decompress_get_free_batch_state_id(DecompressChunkState *chunk_state);
extern void decompress_initialize_batch(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state, TupleTableSlot *subslot);
extern void decompress_get_next_tuple_from_batch(DecompressChunkState *chunk_state,
DecompressBatchState *batch_state);
extern void decompress_set_batch_state_to_unused(DecompressChunkState *chunk_state, int batch_id);
#endif /* TIMESCALEDB_DECOMPRESS_CHUNK_EXEC_H */

View File

@ -127,7 +127,7 @@ build_decompression_map(DecompressChunkPath *path, List *scan_tlist, Bitmapset *
elog(ERROR, "compressed scan targetlist entries must be Vars");
}
Var *var = (Var *) target->expr;
Var *var = castNode(Var, target->expr);
Assert((Index) var->varno == path->info->compressed_rel->relid);
AttrNumber compressed_attno = var->varattno;
@ -336,6 +336,35 @@ clause_has_compressed_attrs(Node *node, void *context)
return expression_tree_walker(node, clause_has_compressed_attrs, context);
}
/*
* Find the resno of the given attribute in the provided target list
*/
static AttrNumber
find_attr_pos_in_tlist(List *targetlist, AttrNumber pos)
{
ListCell *lc;
Assert(targetlist != NIL);
Assert(pos > 0 && pos != InvalidAttrNumber);
foreach (lc, targetlist)
{
TargetEntry *target = (TargetEntry *) lfirst(lc);
if (!IsA(target->expr, Var))
elog(ERROR, "compressed scan targetlist entries must be Vars");
Var *var = castNode(Var, target->expr);
AttrNumber compressed_attno = var->varattno;
if (compressed_attno == pos)
return target->resno;
}
elog(ERROR, "Unable to locate var %d in targetlist", pos);
pg_unreachable();
}
Plan *
decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path,
List *decompressed_tlist, List *clauses, List *custom_plans)
@ -415,9 +444,10 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
* extra work of projecting the result of compressed chunk scan, because
* DecompressChunk can choose only the needed columns itself.
* Note that Postgres uses the CP_EXACT_TLIST option when planning the child
* paths of the Custom path, so we won't automatically get a phsyical tlist
* paths of the Custom path, so we won't automatically get a physical tlist
* here.
*/
bool target_list_compressed_is_physical = false;
if (compressed_path->pathtype == T_IndexOnlyScan)
{
compressed_scan->plan.targetlist = ((IndexPath *) compressed_path)->indexinfo->indextlist;
@ -429,6 +459,7 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
if (physical_tlist)
{
compressed_scan->plan.targetlist = physical_tlist;
target_list_compressed_is_physical = true;
}
}
@ -454,6 +485,96 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
*/
build_decompression_map(dcpath, compressed_scan->plan.targetlist, chunk_attrs_needed);
/* Build heap sort info for sorted_merge_append */
List *sort_options = NIL;
if (dcpath->sorted_merge_append)
{
/* sorted_merge_append is used when the 'order by' of the query and the
* 'order by' of the segments match; in that case, we use a heap to merge the segments.
* For the heap we need a compare function that determines the heap order. This
* function is constructed here.
*/
AttrNumber *sortColIdx = NULL;
Oid *sortOperators = NULL;
Oid *collations = NULL;
bool *nullsFirst = NULL;
int numsortkeys = 0;
ts_prepare_sort_from_pathkeys(&decompress_plan->scan.plan,
dcpath->cpath.path.pathkeys,
bms_make_singleton(dcpath->info->chunk_rel->relid),
NULL,
false,
&numsortkeys,
&sortColIdx,
&sortOperators,
&collations,
&nullsFirst);
List *sort_col_idx = NIL;
List *sort_ops = NIL;
List *sort_collations = NIL;
List *sort_nulls = NIL;
/* Since we have to keep the sort info in custom_private, we store the information
* in copyable lists */
for (int i = 0; i < numsortkeys; i++)
{
sort_col_idx = lappend_oid(sort_col_idx, sortColIdx[i]);
sort_ops = lappend_oid(sort_ops, sortOperators[i]);
sort_collations = lappend_oid(sort_collations, collations[i]);
sort_nulls = lappend_oid(sort_nulls, nullsFirst[i]);
}
sort_options = list_make4(sort_col_idx, sort_ops, sort_collations, sort_nulls);
/* Build a sort node for the compressed batches. The sort function is derived from the sort
* function of the pathkeys, except that it refers to the min and max elements of the
* batches. We have already verified that the pathkeys match the compression order_by, so
* this mapping can be done here. */
for (int i = 0; i < numsortkeys; i++)
{
Oid opfamily, opcintype;
int16 strategy;
/* Find the operator in pg_amop --- failure shouldn't happen */
if (!get_ordering_op_properties(sortOperators[i], &opfamily, &opcintype, &strategy))
elog(ERROR, "operator %u is not a valid ordering operator", sortOperators[i]);
Assert(strategy == BTLessStrategyNumber || strategy == BTGreaterStrategyNumber);
char *meta_col_name = strategy == BTLessStrategyNumber ?
column_segment_min_name(i + 1) :
column_segment_max_name(i + 1);
AttrNumber attr_position =
get_attnum(dcpath->info->compressed_rte->relid, meta_col_name);
if (attr_position == InvalidAttrNumber)
elog(ERROR, "couldn't find metadata column \"%s\"", meta_col_name);
/* If the target list is not based on the layout of the uncompressed chunk
* (see the comment for physical_tlist above), adjust the position of the attribute.
*/
if (target_list_compressed_is_physical)
sortColIdx[i] = attr_position;
else
sortColIdx[i] =
find_attr_pos_in_tlist(compressed_scan->plan.targetlist, attr_position);
}
/* Now build the compressed batches sort node */
Sort *sort = ts_make_sort((Plan *) compressed_scan,
numsortkeys,
sortColIdx,
sortOperators,
collations,
nullsFirst);
decompress_plan->custom_plans = list_make1(sort);
}
else
{
/*
* Add a sort if the compressed scan is not ordered appropriately.
*/
@ -469,14 +590,17 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
{
decompress_plan->custom_plans = custom_plans;
}
}
Assert(list_length(custom_plans) == 1);
settings = list_make3_int(dcpath->info->hypertable_id,
settings = list_make4_int(dcpath->info->hypertable_id,
dcpath->info->chunk_rte->relid,
dcpath->reverse);
dcpath->reverse,
dcpath->sorted_merge_append);
decompress_plan->custom_private =
list_make3(settings, dcpath->decompression_map, dcpath->is_segmentby_column);
list_make4(settings, dcpath->decompression_map, dcpath->is_segmentby_column, sort_options);
return &decompress_plan->scan.plan;
}
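To make the min/max mapping above concrete (an illustration of this function's logic; the metadata column names are whatever column_segment_min_name()/column_segment_max_name() produce for the compressed chunk): for a query ordered ascending on the first 'order by' column, the pathkey strategy is BTLessStrategyNumber, so the batch-level sort key becomes the batch minimum metadata column; for a descending order it is BTGreaterStrategyNumber, and the batch maximum column is used instead. Sorting the compressed batches on these min/max values is what allows the DecompressChunk node to open the batches lazily in merge order.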

View File

@ -0,0 +1,236 @@
/*
* This file and its contents are licensed under the Timescale License.
* Please see the included NOTICE for copyright information and
* LICENSE-TIMESCALE for a copy of the license.
*/
#include <postgres.h>
#include <nodes/bitmapset.h>
#include <lib/binaryheap.h>
#include "compression/compression.h"
#include "nodes/decompress_chunk/sorted_merge.h"
#include "nodes/decompress_chunk/exec.h"
/*
* Compare the tuples of two given slots.
*/
static int32
decompress_binaryheap_compare_slots(TupleTableSlot *tupleA, TupleTableSlot *tupleB,
DecompressChunkState *chunk_state)
{
Assert(!TupIsNull(tupleA));
Assert(!TupIsNull(tupleB));
Assert(chunk_state != NULL);
for (int nkey = 0; nkey < chunk_state->n_sortkeys; nkey++)
{
SortSupportData *sortKey = &chunk_state->sortkeys[nkey];
Assert(sortKey != NULL);
AttrNumber attno = sortKey->ssup_attno;
bool isNullA, isNullB;
Datum datumA = slot_getattr(tupleA, attno, &isNullA);
Datum datumB = slot_getattr(tupleB, attno, &isNullB);
int compare = ApplySortComparator(datumA, isNullA, datumB, isNullB, sortKey);
if (compare != 0)
{
INVERT_COMPARE_RESULT(compare);
return compare;
}
}
return 0;
}
/*
* Compare the tuples referenced by two given DecompressSlotNumber datums.
*/
static int32
decompress_binaryheap_compare_heap_pos(Datum a, Datum b, void *arg)
{
DecompressChunkState *chunk_state = (DecompressChunkState *) arg;
DecompressSlotNumber batchA = DatumGetInt32(a);
Assert(batchA <= chunk_state->n_batch_states);
DecompressSlotNumber batchB = DatumGetInt32(b);
Assert(batchB <= chunk_state->n_batch_states);
TupleTableSlot *tupleA = chunk_state->batch_states[batchA].decompressed_slot_projected;
TupleTableSlot *tupleB = chunk_state->batch_states[batchB].decompressed_slot_projected;
return decompress_binaryheap_compare_slots(tupleA, tupleB, chunk_state);
}
/* Add a new datum to the heap, resizing the heap if needed. In contrast to
* the binaryheap_add_unordered() function, the capacity of the heap is
* increased automatically.
*/
static pg_nodiscard binaryheap *
binaryheap_add_unordered_autoresize(binaryheap *heap, Datum d)
{
/* Resize heap if needed */
if (heap->bh_size >= heap->bh_space)
{
heap->bh_space = heap->bh_space * 2;
Size new_size = offsetof(binaryheap, bh_nodes) + sizeof(Datum) * heap->bh_space;
heap = (binaryheap *) repalloc(heap, new_size);
}
/* Insert new element */
binaryheap_add(heap, d);
return heap;
}
/*
* Open the next batch and add the tuple to the heap
*/
static void
decompress_batch_open_next_batch(DecompressChunkState *chunk_state)
{
while (true)
{
TupleTableSlot *subslot = ExecProcNode(linitial(chunk_state->csstate.custom_ps));
/* All batches are consumed */
if (TupIsNull(subslot))
{
chunk_state->most_recent_batch = INVALID_BATCH_ID;
return;
}
DecompressSlotNumber batch_state_id = decompress_get_free_batch_state_id(chunk_state);
DecompressBatchState *batch_state = &chunk_state->batch_states[batch_state_id];
decompress_initialize_batch(chunk_state, batch_state, subslot);
decompress_get_next_tuple_from_batch(chunk_state, batch_state);
if (!TupIsNull(batch_state->decompressed_slot_projected))
{
chunk_state->merge_heap =
binaryheap_add_unordered_autoresize(chunk_state->merge_heap,
Int32GetDatum(batch_state_id));
chunk_state->most_recent_batch = batch_state_id;
return;
}
}
}
/*
* Remove the top tuple from the heap (i.e., the tuple we have returned last time) and decompress
* the next tuple from the batch.
*/
void
decompress_sorted_merge_remove_top_tuple_and_decompress_next(DecompressChunkState *chunk_state)
{
DecompressSlotNumber i = DatumGetInt32(binaryheap_first(chunk_state->merge_heap));
DecompressBatchState *batch_state = &chunk_state->batch_states[i];
Assert(batch_state != NULL);
#ifdef USE_ASSERT_CHECKING
/* Prepare an assertion on the sort order between the last returned tuple and the intended next
 * tuple. The last returned tuple will be changed during this function, so store a copy for
 * later comparison. */
TupleTableSlot *last_returned_tuple =
MakeSingleTupleTableSlot(batch_state->decompressed_slot_projected->tts_tupleDescriptor,
batch_state->decompressed_slot_projected->tts_ops);
ExecCopySlot(last_returned_tuple, batch_state->decompressed_slot_projected);
#endif
/* Decompress the next tuple from this batch */
decompress_get_next_tuple_from_batch(chunk_state, batch_state);
if (TupIsNull(batch_state->decompressed_slot_projected))
{
/* Batch is exhausted, recycle batch_state */
(void) binaryheap_remove_first(chunk_state->merge_heap);
decompress_set_batch_state_to_unused(chunk_state, i);
}
else
{
/* Put the next tuple from this batch on the heap */
binaryheap_replace_first(chunk_state->merge_heap, Int32GetDatum(i));
}
#ifdef USE_ASSERT_CHECKING
if (!binaryheap_empty(chunk_state->merge_heap))
{
DecompressSlotNumber next_tuple = DatumGetInt32(binaryheap_first(chunk_state->merge_heap));
DecompressBatchState *next_batch_state = &chunk_state->batch_states[next_tuple];
/* Assert that the intended sorting is produced. */
Assert(decompress_binaryheap_compare_slots(last_returned_tuple,
next_batch_state->decompressed_slot_projected,
chunk_state) >= 0);
}
ExecDropSingleTupleTableSlot(last_returned_tuple);
last_returned_tuple = NULL;
#endif
}
/*
* Init the binary heap and open the first compressed batch.
*/
void
decompress_sorted_merge_init(DecompressChunkState *chunk_state)
{
/* Prepare the heap and the batch states */
chunk_state->merge_heap = binaryheap_allocate(INITIAL_BATCH_CAPACITY,
decompress_binaryheap_compare_heap_pos,
chunk_state);
/* Open the first batch */
decompress_batch_open_next_batch(chunk_state);
}
/*
* Free the binary heap.
*/
void
decompress_sorted_merge_free(DecompressChunkState *chunk_state)
{
elog(DEBUG3, "Heap has capacity of %d", chunk_state->merge_heap->bh_space);
elog(DEBUG3, "Created batch states %d", chunk_state->n_batch_states);
binaryheap_free(chunk_state->merge_heap);
chunk_state->merge_heap = NULL;
}
/*
* Get the next tuple from the binary heap. In addition, further batches are opened
* and placed on the heap if needed (i.e., while the top tuple of the heap belongs to the most recently opened batch).
* This function returns NULL if all tuples from the batches are consumed.
*/
TupleTableSlot *
decompress_sorted_merge_get_next_tuple(DecompressChunkState *chunk_state)
{
/* All tuples are decompressed and consumed */
if (binaryheap_empty(chunk_state->merge_heap))
return NULL;
/* While the top tuple of the heap belongs to the most recently opened batch, keep
 * opening further batches until the newly opened batch contains a tuple that sorts
 * after the current top tuple (i.e., the new batch is no longer the top element of the heap). */
while (DatumGetInt32(binaryheap_first(chunk_state->merge_heap)) ==
chunk_state->most_recent_batch)
{
decompress_batch_open_next_batch(chunk_state);
}
/* Fetch the top tuple from the heap */
DecompressSlotNumber slot_number = DatumGetInt32(binaryheap_first(chunk_state->merge_heap));
TupleTableSlot *decompressed_slot_projected =
chunk_state->batch_states[slot_number].decompressed_slot_projected;
Assert(decompressed_slot_projected != NULL);
Assert(!TupIsNull(decompressed_slot_projected));
return decompressed_slot_projected;
}
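
A minimal SQL sketch of how this path is exercised (not part of the patch; the GUC comes from guc.c above, and the table and query shape are borrowed from the ordered-append regression tests below — whether the sorted-merge plan is actually chosen still depends on costing):

-- The GUC added in guc.c above; it is enabled by default.
SET timescaledb.enable_decompression_sorted_merge = on;
-- A LIMIT query whose ORDER BY is compatible with the compression ORDER BY and
-- whose segmentby columns carry equality constraints can be answered by merging
-- the per-batch tuple streams instead of decompressing and sorting everything.
EXPLAIN (analyze, costs off, timing off, summary off)
SELECT * FROM metrics_ordered
WHERE device_id = 1 AND device_id_peer = 3
ORDER BY time DESC LIMIT 10;
-- Setting the GUC to off falls back to decompressing all batches and sorting above.
SET timescaledb.enable_decompression_sorted_merge = off;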

View File

@ -0,0 +1,31 @@
/*
* This file and its contents are licensed under the Timescale License.
* Please see the included NOTICE for copyright information and
* LICENSE-TIMESCALE for a copy of the license.
*/
#ifndef TIMESCALEDB_DECOMPRESS_SORTED_MERGE_H
#define TIMESCALEDB_DECOMPRESS_SORTED_MERGE_H
#include "compression/compression.h"
#include "nodes/decompress_chunk/exec.h"
/* We have to decompress the compressed batches in parallel, which can require a large
 * amount of memory. Set the tuple cost for this algorithm to a very high value to prevent
 * this algorithm from being chosen when a lot of batches need to be merged. For more details,
* see the discussion in cost_decompress_sorted_merge_append(). */
#define DECOMPRESS_CHUNK_HEAP_MERGE_CPU_TUPLE_COST 0.8
/* The value for an invalid batch id */
#define INVALID_BATCH_ID -1
extern void decompress_sorted_merge_init(DecompressChunkState *chunk_state);
extern void decompress_sorted_merge_free(DecompressChunkState *chunk_state);
extern void
decompress_sorted_merge_remove_top_tuple_and_decompress_next(DecompressChunkState *chunk_state);
extern TupleTableSlot *decompress_sorted_merge_get_next_tuple(DecompressChunkState *chunk_state);
#endif
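
As a rough worked example of why the constant above is set so high (assuming, as its name suggests, that the costing charges roughly rows * DECOMPRESS_CHUNK_HEAP_MERGE_CPU_TUPLE_COST, and using PostgreSQL's default cpu_tuple_cost of 0.01 for comparison): merging 1,000,000 decompressed rows would contribute about 1,000,000 * 0.8 = 800,000 to the path cost, while the ordinary per-tuple CPU charge would be only about 1,000,000 * 0.01 = 10,000. The sorted-merge path therefore only tends to win when a small LIMIT keeps the number of tuples that actually have to be merged low.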

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@ -6729,19 +6729,24 @@ ORDER BY c.id;
-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys
:PREFIX SELECT * FROM metrics_ordered ORDER BY time DESC LIMIT 10;
QUERY PLAN
--------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_11_28_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=6840 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=2520 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=10 loops=1)
Order: metrics_ordered."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=10 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_12_31_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_12_31_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (actual rows=2520 loops=1)
-> Seq Scan on compress_hyper_12_30_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (actual rows=1800 loops=1)
-> Seq Scan on compress_hyper_12_29_chunk (actual rows=5 loops=1)
(11 rows)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_30_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_30_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_29_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_29_chunk (never executed)
(16 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX SELECT * FROM metrics_ordered WHERE device_id = 1 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10;

View File

@ -7812,19 +7812,24 @@ ORDER BY c.id;
-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys
:PREFIX SELECT * FROM metrics_ordered ORDER BY time DESC LIMIT 10;
QUERY PLAN
--------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_11_28_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=6840 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=2520 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=10 loops=1)
Order: metrics_ordered."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=10 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_12_31_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_12_31_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (actual rows=2520 loops=1)
-> Seq Scan on compress_hyper_12_30_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (actual rows=1800 loops=1)
-> Seq Scan on compress_hyper_12_29_chunk (actual rows=5 loops=1)
(11 rows)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_30_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_30_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_29_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_29_chunk (never executed)
(16 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX SELECT * FROM metrics_ordered WHERE device_id = 1 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10;

View File

@ -8026,19 +8026,24 @@ ORDER BY c.id;
-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys
:PREFIX SELECT * FROM metrics_ordered ORDER BY time DESC LIMIT 10;
QUERY PLAN
--------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_11_28_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=6840 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=2520 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=10 loops=1)
Order: metrics_ordered."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=10 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_12_31_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_12_31_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (actual rows=2520 loops=1)
-> Seq Scan on compress_hyper_12_30_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (actual rows=1800 loops=1)
-> Seq Scan on compress_hyper_12_29_chunk (actual rows=5 loops=1)
(11 rows)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_30_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_30_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_29_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_29_chunk (never executed)
(16 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX SELECT * FROM metrics_ordered WHERE device_id = 1 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10;

View File

@ -8028,19 +8028,24 @@ ORDER BY c.id;
-- should not have ordered DecompressChunk path because segmentby columns are not part of pathkeys
:PREFIX SELECT * FROM metrics_ordered ORDER BY time DESC LIMIT 10;
QUERY PLAN
--------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_11_28_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=6840 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=2520 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered (actual rows=10 loops=1)
Order: metrics_ordered."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_11_28_chunk (actual rows=10 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_12_31_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_12_31_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (actual rows=2520 loops=1)
-> Seq Scan on compress_hyper_12_30_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (actual rows=1800 loops=1)
-> Seq Scan on compress_hyper_12_29_chunk (actual rows=5 loops=1)
(11 rows)
-> Custom Scan (DecompressChunk) on _hyper_11_27_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_30_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_30_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_11_26_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_12_29_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_12_29_chunk (never executed)
(16 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX SELECT * FROM metrics_ordered WHERE device_id = 1 AND device_id_peer = 3 ORDER BY time DESC LIMIT 10;

View File

@ -215,26 +215,36 @@ ORDER BY 1,
3,
4;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time", _hyper_1_5_chunk.device_id, _hyper_1_5_chunk.device_id_peer, _hyper_1_5_chunk.v0
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1)
Order: metrics_ordered_idx."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=48 loops=1)
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=960 loops=1)
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=480 loops=1)
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
(18 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_8_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_8_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_7_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_6_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_6_chunk (never executed)
(28 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX
@ -463,36 +473,39 @@ WHERE mt.time > nd.start_time
ORDER BY time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt."time"
Nested Loop (actual rows=48 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 1493
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Nested Loop (actual rows=48 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 96
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 192
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_3 (actual rows=48 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_4 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=1541)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(32 rows)
SET enable_seqscan = TRUE;
SET enable_bitmapscan = TRUE;
@ -526,18 +539,34 @@ ORDER BY time;
-> Sort (actual rows=1250 loops=1)
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt (actual rows=480 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_1 (actual rows=960 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_2 (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_3 (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_4 (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
(25 rows)
(41 rows)
SET enable_mergejoin = FALSE;
SET enable_hashjoin = TRUE;
@ -732,27 +761,31 @@ ORDER BY 1,
3,
4;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (ConstraintAwareAppend) (actual rows=53 loops=1)
-> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1)
Hypertable: metrics_ordered_idx
Chunks excluded during startup: 0
-> Append (actual rows=53 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Merge Append (actual rows=10 loops=1)
Sort Key: _hyper_1_4_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
(19 rows)
(23 rows)
-- DecompressChunk path because segmentby columns have equality constraints
:PREFIX

View File

@ -215,26 +215,36 @@ ORDER BY 1,
3,
4;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time", _hyper_1_5_chunk.device_id, _hyper_1_5_chunk.device_id_peer, _hyper_1_5_chunk.v0
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1)
Order: metrics_ordered_idx."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=48 loops=1)
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=960 loops=1)
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=480 loops=1)
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
(18 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_8_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_8_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_7_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_6_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_6_chunk (never executed)
(28 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX
@ -463,36 +473,39 @@ WHERE mt.time > nd.start_time
ORDER BY time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Nested Loop (actual rows=48 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 1493
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Nested Loop (actual rows=48 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 96
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 192
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=1541)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(32 rows)
SET enable_seqscan = TRUE;
SET enable_bitmapscan = TRUE;
@ -513,31 +526,47 @@ ORDER BY time;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Sort Key: mt."time"
Sort Method: quicksort
-> Merge Join (actual rows=48 loops=1)
Merge Cond: (nd.node = mt_1.device_id)
Join Filter: ((mt_1."time" > nd.start_time) AND (mt_1."time" < nd.stop_time))
Merge Cond: (nd.node = mt.device_id)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 289
-> Sort (actual rows=1 loops=1)
Sort Key: nd.node
Sort Method: quicksort
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Sort (actual rows=1250 loops=1)
Sort Key: mt_1.device_id
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
(25 rows)
(41 rows)
SET enable_mergejoin = FALSE;
SET enable_hashjoin = TRUE;
@ -732,27 +761,31 @@ ORDER BY 1,
3,
4;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (ConstraintAwareAppend) (actual rows=53 loops=1)
-> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1)
Hypertable: metrics_ordered_idx
Chunks excluded during startup: 0
-> Append (actual rows=53 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Merge Append (actual rows=10 loops=1)
Sort Key: _hyper_1_4_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
(19 rows)
(23 rows)
-- DecompressChunk path because segmentby columns have equality constraints
:PREFIX

View File

@ -215,26 +215,36 @@ ORDER BY 1,
3,
4;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time", _hyper_1_5_chunk.device_id, _hyper_1_5_chunk.device_id_peer, _hyper_1_5_chunk.v0
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1)
Order: metrics_ordered_idx."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=48 loops=1)
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=960 loops=1)
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=480 loops=1)
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
(18 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_8_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_8_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_7_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_6_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_6_chunk (never executed)
(28 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX
@ -463,36 +473,39 @@ WHERE mt.time > nd.start_time
ORDER BY time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Nested Loop (actual rows=48 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 1493
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Nested Loop (actual rows=48 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 96
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 192
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=1541)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(32 rows)
SET enable_seqscan = TRUE;
SET enable_bitmapscan = TRUE;
@ -513,31 +526,47 @@ ORDER BY time;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Sort Key: mt."time"
Sort Method: quicksort
-> Merge Join (actual rows=48 loops=1)
Merge Cond: (nd.node = mt_1.device_id)
Join Filter: ((mt_1."time" > nd.start_time) AND (mt_1."time" < nd.stop_time))
Merge Cond: (nd.node = mt.device_id)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 289
-> Sort (actual rows=1 loops=1)
Sort Key: nd.node
Sort Method: quicksort
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Sort (actual rows=1250 loops=1)
Sort Key: mt_1.device_id
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
(25 rows)
(41 rows)
SET enable_mergejoin = FALSE;
SET enable_hashjoin = TRUE;
@ -732,27 +761,31 @@ ORDER BY 1,
3,
4;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (ConstraintAwareAppend) (actual rows=53 loops=1)
-> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1)
Hypertable: metrics_ordered_idx
Chunks excluded during startup: 0
-> Append (actual rows=53 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Merge Append (actual rows=10 loops=1)
Sort Key: _hyper_1_4_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
(19 rows)
(23 rows)
-- DecompressChunk path because segmentby columns have equality constraints
:PREFIX

View File

@ -216,26 +216,36 @@ ORDER BY 1,
3,
4;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time", _hyper_1_5_chunk.device_id, _hyper_1_5_chunk.device_id_peer, _hyper_1_5_chunk.v0
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_1_5_chunk."time" DESC
Sort Method: top-N heapsort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx (actual rows=10 loops=1)
Order: metrics_ordered_idx."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=48 loops=1)
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (actual rows=960 loops=1)
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=480 loops=1)
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
(18 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_8_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_8_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_7_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_7_chunk (never executed)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
-> Sort (never executed)
Sort Key: compress_hyper_2_6_chunk._ts_meta_max_1 DESC
-> Seq Scan on compress_hyper_2_6_chunk (never executed)
(28 rows)
-- should have ordered DecompressChunk path because segmentby columns have equality constraints
:PREFIX
@ -465,36 +475,39 @@ WHERE mt.time > nd.start_time
ORDER BY time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Nested Loop (actual rows=48 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 1493
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Nested Loop (actual rows=48 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=48 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 96
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 192
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Index Scan using compress_hyper_2_6_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_7_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_8_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_9_chunk__compressed_hypertable_2_device_id_dev on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time) AND (nd.node = device_id))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Index Scan using compress_hyper_2_10_chunk__compressed_hypertable_2_device_id_de on compress_hyper_2_10_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=1541)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(32 rows)
SET enable_seqscan = TRUE;
SET enable_bitmapscan = TRUE;
@ -515,31 +528,47 @@ ORDER BY time;
QUERY PLAN
----------------------------------------------------------------------------------------------------------
Sort (actual rows=48 loops=1)
Sort Key: mt_1."time"
Sort Key: mt."time"
Sort Method: quicksort
-> Merge Join (actual rows=48 loops=1)
Merge Cond: (nd.node = mt_1.device_id)
Join Filter: ((mt_1."time" > nd.start_time) AND (mt_1."time" < nd.stop_time))
Merge Cond: (nd.node = mt.device_id)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 289
-> Sort (actual rows=1 loops=1)
Sort Key: nd.node
Sort Method: quicksort
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Sort (actual rows=1250 loops=1)
Sort Key: mt_1.device_id
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=1541 loops=1)
-> Custom Scan (ChunkAppend) on metrics_ordered_idx mt (actual rows=1541 loops=1)
Order: mt."time"
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk mt_1 (actual rows=480 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_6_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_6_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_2_chunk mt_2 (actual rows=960 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_7_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_7_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk mt_3 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_8_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_8_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk mt_4 (actual rows=48 loops=1)
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk mt_5 (actual rows=5 loops=1)
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
(25 rows)
(41 rows)
SET enable_mergejoin = FALSE;
SET enable_hashjoin = TRUE;
@ -734,27 +763,31 @@ ORDER BY 1,
3,
4;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------
-----------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time", metrics_ordered_idx.device_id, metrics_ordered_idx.device_id_peer, metrics_ordered_idx.v0
Sort Method: quicksort
-> Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: metrics_ordered_idx."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (ConstraintAwareAppend) (actual rows=53 loops=1)
-> Custom Scan (ConstraintAwareAppend) (actual rows=10 loops=1)
Hypertable: metrics_ordered_idx
Chunks excluded during startup: 0
-> Append (actual rows=53 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=48 loops=1)
-> Merge Append (actual rows=10 loops=1)
Sort Key: _hyper_1_4_chunk."time" DESC
-> Custom Scan (DecompressChunk) on _hyper_1_4_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=1 loops=1)
Sort Key: compress_hyper_2_9_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_9_chunk (actual rows=1 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
-> Custom Scan (DecompressChunk) on _hyper_1_5_chunk (actual rows=5 loops=1)
Filter: (("time" > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone) AND ("time" < now()))
-> Sort (actual rows=5 loops=1)
Sort Key: compress_hyper_2_10_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_10_chunk (actual rows=5 loops=1)
Filter: (_ts_meta_max_1 > 'Tue Jan 01 00:00:00 2002 PST'::timestamp with time zone)
(19 rows)
(23 rows)
-- DecompressChunk path because segmentby columns have equality constraints
:PREFIX

View File

@ -188,18 +188,19 @@ EXPLAIN (analyze,costs off,timing off,summary off)
SELECT * from test_chartab
WHERE check_equal_228(rtt) and ts < '2019-12-15 00:00:00' order by ts;
QUERY PLAN
-----------------------------------------------------------------------------------------------------------------
Sort (actual rows=1 loops=1)
Sort Key: test_chartab.ts
Sort Method: quicksort
-> Custom Scan (ChunkAppend) on test_chartab (actual rows=1 loops=1)
-----------------------------------------------------------------------------------------------------------
Custom Scan (ChunkAppend) on test_chartab (actual rows=1 loops=1)
Order: test_chartab.ts
Chunks excluded during startup: 0
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=1 loops=1)
Filter: ((ts < 'Sun Dec 15 00:00:00 2019'::timestamp without time zone) AND check_equal_228(rtt))
Rows Removed by Filter: 2
-> Sort (actual rows=3 loops=1)
Sort Key: compress_hyper_2_3_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_2_3_chunk (actual rows=3 loops=1)
Filter: (_ts_meta_min_1 < 'Sun Dec 15 00:00:00 2019'::timestamp without time zone)
(10 rows)
(11 rows)
-- test pseudoconstant qual #3241
CREATE TABLE pseudo(time timestamptz NOT NULL);

View File

@ -25,6 +25,13 @@ SELECT CASE WHEN current_setting('server_version_num')::int/10000 >= 13 THEN set
off
(1 row)
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
set max_parallel_workers_per_gather to 0;
\set TEST_TABLE 'metrics'
\ir :TEST_QUERY_NAME
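The new setting can also be flipped interactively to compare plan shapes. The following is a minimal sketch, not part of the test suite, assuming the metrics test table from this suite is compressed (exact plan output differs between PostgreSQL versions): with the setting on, the sort on the compressed batches is pushed below the DecompressChunk node, while with it off a Sort node above DecompressChunk is expected.
-- Minimal sketch: compare plans with the sorted-merge optimization on and off.
SET timescaledb.enable_decompression_sorted_merge = on;
EXPLAIN (costs off) SELECT time FROM metrics ORDER BY time DESC LIMIT 10;
SET timescaledb.enable_decompression_sorted_merge = off;
EXPLAIN (costs off) SELECT time FROM metrics ORDER BY time DESC LIMIT 10;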

View File

@ -150,43 +150,50 @@ FROM :TEST_TABLE m1
LEFT JOIN LATERAL (SELECT time FROM :TEST_TABLE m2 WHERE m1.time = m2.time LIMIT 1) m2 ON true
ORDER BY m1.time;
QUERY PLAN
Sort (actual rows=68370 loops=1)
Sort Key: m1_1."time"
Sort Method: quicksort
-> Nested Loop Left Join (actual rows=68370 loops=1)
-> Append (actual rows=68370 loops=1)
Nested Loop Left Join (actual rows=68370 loops=1)
-> Custom Scan (ChunkAppend) on metrics_compressed m1 (actual rows=68370 loops=1)
Order: m1."time"
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_1 (actual rows=17990 loops=1)
-> Sort (actual rows=20 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_2 (actual rows=25190 loops=1)
-> Sort (actual rows=30 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_3 (actual rows=25190 loops=1)
-> Sort (actual rows=30 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=30 loops=1)
-> Memoize (actual rows=1 loops=68370)
Cache Key: m1_1."time"
Cache Key: m1."time"
Cache Mode: binary
Hits: 54696 Misses: 13674 Evictions: 0 Overflows: 0
-> Limit (actual rows=1 loops=13674)
-> Custom Scan (ChunkAppend) on metrics_compressed m2 (actual rows=1 loops=13674)
Chunks excluded during runtime: 2
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_1 (actual rows=1 loops=3598)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 466
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=3598)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 1
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_2 (actual rows=1 loops=5038)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 496
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=5038)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 2
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_3 (actual rows=1 loops=5038)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 496
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=5038)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 2
(36 rows)
(43 rows)
\set TEST_TABLE 'metrics_space_compressed'
\ir :TEST_QUERY_NAME
@ -204,79 +211,110 @@ FROM :TEST_TABLE m1
LEFT JOIN LATERAL (SELECT time FROM :TEST_TABLE m2 WHERE m1.time = m2.time LIMIT 1) m2 ON true
ORDER BY m1.time;
QUERY PLAN
Sort (actual rows=68370 loops=1)
Nested Loop Left Join (actual rows=68370 loops=1)
-> Custom Scan (ChunkAppend) on metrics_space_compressed m1 (actual rows=68370 loops=1)
Order: m1."time"
-> Merge Append (actual rows=17990 loops=1)
Sort Key: m1_1."time"
Sort Method: quicksort
-> Nested Loop Left Join (actual rows=68370 loops=1)
-> Append (actual rows=68370 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_1 (actual rows=3598 loops=1)
-> Sort (actual rows=4 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_2 (actual rows=10794 loops=1)
-> Sort (actual rows=12 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=12 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_3 (actual rows=3598 loops=1)
-> Sort (actual rows=4 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=4 loops=1)
-> Merge Append (actual rows=25190 loops=1)
Sort Key: m1_4."time"
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_4 (actual rows=5038 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_5 (actual rows=15114 loops=1)
-> Sort (actual rows=18 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_6 (actual rows=5038 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-> Merge Append (actual rows=25190 loops=1)
Sort Key: m1_7."time"
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_7 (actual rows=5038 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_8 (actual rows=15114 loops=1)
-> Sort (actual rows=18 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=18 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m1_9 (actual rows=5038 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=6 loops=1)
-> Memoize (actual rows=1 loops=68370)
Cache Key: m1_1."time"
Cache Key: m1."time"
Cache Mode: binary
Hits: 54696 Misses: 13674 Evictions: 0 Overflows: 0
-> Limit (actual rows=1 loops=13674)
-> Custom Scan (ChunkAppend) on metrics_space_compressed m2 (actual rows=1 loops=13674)
Chunks excluded during runtime: 6
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_1 (actual rows=1 loops=3598)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 466
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=3598)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 1
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_2 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_3 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_4 (actual rows=1 loops=5038)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 496
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=5038)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 2
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_5 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_6 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_7 (actual rows=1 loops=5038)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
Rows Removed by Filter: 496
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (actual rows=1 loops=5038)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
Rows Removed by Filter: 2
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_8 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk m2_9 (never executed)
Filter: (m1_1."time" = "time")
Filter: (m1."time" = "time")
-> Seq Scan on compress_hyper_X_X_chunk compress_hyper_X_X_chunk_1 (never executed)
Filter: ((_ts_meta_min_1 <= m1_1."time") AND (_ts_meta_max_1 >= m1_1."time"))
(72 rows)
Filter: ((_ts_meta_min_1 <= m1."time") AND (_ts_meta_max_1 >= m1."time"))
(103 rows)
-- get results for all the queries
-- run queries with and without memoize

View File

@ -18,6 +18,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -766,6 +773,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2082,6 +2096,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2929,6 +2950,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time

View File

@ -18,6 +18,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -769,6 +776,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2085,6 +2099,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2932,6 +2953,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time

View File

@ -18,6 +18,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -769,6 +776,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2085,6 +2099,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2932,6 +2953,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time

View File

@ -18,6 +18,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -778,6 +785,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2103,6 +2117,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time
@ -2951,6 +2972,13 @@ QUERY PLAN
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time

View File

@ -19,6 +19,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -814,6 +821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -1807,6 +1821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -2656,6 +2677,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,

View File

@ -19,6 +19,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -814,6 +821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -1807,6 +1821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -2656,6 +2677,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,

View File

@ -19,6 +19,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -814,6 +821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -1807,6 +1821,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -2656,6 +2677,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,

View File

@ -19,6 +19,13 @@ set max_parallel_workers_per_gather to 0;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -820,6 +827,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -1819,6 +1833,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -2672,6 +2693,13 @@ RESET enable_seqscan;
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,

View File

@ -216,12 +216,12 @@ QUERY PLAN
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
Filter: (device_id = length("substring"(version(), 1, 3)))
Rows Removed by Filter: 14392
Rows Removed by Filter: 2392
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(8 rows)
@ -413,10 +413,10 @@ QUERY PLAN
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(6 rows)
@ -480,10 +480,10 @@ QUERY PLAN
Sort Key: q.v1
Sort Method: quicksort
-> Subquery Scan on q (actual rows=17990 loops=1)
-> Sort (actual rows=17990 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Sort (actual rows=20 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(9 rows)

View File

@ -216,12 +216,12 @@ QUERY PLAN
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
Filter: (device_id = length("substring"(version(), 1, 3)))
Rows Removed by Filter: 14392
Rows Removed by Filter: 2392
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(8 rows)
@ -413,10 +413,10 @@ QUERY PLAN
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(6 rows)
@ -482,10 +482,10 @@ QUERY PLAN
Sort Key: q.v1
Sort Method: quicksort
-> Subquery Scan on q (actual rows=17990 loops=1)
-> Sort (actual rows=17990 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Sort (actual rows=20 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(9 rows)

View File

@ -216,12 +216,12 @@ QUERY PLAN
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
Filter: (device_id = length("substring"(version(), 1, 3)))
Rows Removed by Filter: 14392
Rows Removed by Filter: 2392
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(8 rows)
@ -413,10 +413,10 @@ QUERY PLAN
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(6 rows)
@ -482,10 +482,10 @@ QUERY PLAN
Sort Key: q.v1
Sort Method: quicksort
-> Subquery Scan on q (actual rows=17990 loops=1)
-> Sort (actual rows=17990 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Sort (actual rows=20 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(9 rows)

View File

@ -218,12 +218,12 @@ QUERY PLAN
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=3598 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
Filter: (device_id = length("substring"(version(), 1, 3)))
Rows Removed by Filter: 14392
Rows Removed by Filter: 2392
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(8 rows)
@ -415,10 +415,10 @@ QUERY PLAN
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
QUERY PLAN
Limit (actual rows=10 loops=1)
-> Sort (actual rows=10 loops=1)
Sort Key: _hyper_X_X_chunk."time" DESC
Sort Method: top-N heapsort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=10 loops=1)
-> Sort (actual rows=6 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_max_1 DESC
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(6 rows)
@ -484,10 +484,10 @@ QUERY PLAN
Sort Key: q.v1
Sort Method: quicksort
-> Subquery Scan on q (actual rows=17990 loops=1)
-> Sort (actual rows=17990 loops=1)
Sort Key: _hyper_X_X_chunk."time"
Sort Method: quicksort
-> Custom Scan (DecompressChunk) on _hyper_X_X_chunk (actual rows=17990 loops=1)
-> Sort (actual rows=20 loops=1)
Sort Key: compress_hyper_X_X_chunk._ts_meta_min_1
Sort Method: quicksort
-> Seq Scan on compress_hyper_X_X_chunk (actual rows=20 loops=1)
(9 rows)

View File

@ -22,6 +22,15 @@ set work_mem to '64MB';
-- disable incremental sort here to make plans comparable to PG < 13
SELECT CASE WHEN current_setting('server_version_num')::int/10000 >= 13 THEN set_config('enable_incremental_sort','off',false) ELSE 'off' END;
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
set max_parallel_workers_per_gather to 0;
\set TEST_TABLE 'metrics'
\ir :TEST_QUERY_NAME

View File

@ -2,6 +2,15 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test ASC for ordered chunks
:PREFIX
SELECT time

View File

@ -2,6 +2,15 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
-- In the following test cases, we test that certain indexes are used. By using the
-- timescaledb.enable_decompression_sorted_merge optimization, we are pushing a sort node
-- below the DecompressChunk node, which operates on the batches. This could lead to flaky
-- tests because the input data is small and PostgreSQL switches from IndexScans to
-- SequentialScans. Disable the optimization for the following tests to ensure we have
-- stable query plans in all CI environments.
SET timescaledb.enable_decompression_sorted_merge = 0;
-- test LATERAL with ordered append in the outer query
:PREFIX
SELECT time,
@ -28,6 +37,7 @@ FROM (
ORDER BY time DESC
LIMIT 2) l;
-- test plan with best index is chosen
-- this should use device_id, time index
:PREFIX

View File

@ -4,6 +4,7 @@
/cagg_permissions-*.sql
/cagg_query-*.sql
/cagg_union_view-*.sql
/compression_sorted_merge-*.sql
/compression_insert-*.sql
/compression_permissions-*.sql
/continuous_aggs-*.sql

View File

@ -148,7 +148,10 @@ if(${PG_VERSION_MAJOR} EQUAL "15" AND ${PG_VERSION_MINOR} LESS "3")
endif()
set(TEST_TEMPLATES
compression_insert.sql.in cagg_union_view.sql.in plan_skip_scan.sql.in
compression_insert.sql.in
compression_sorted_merge.sql.in
cagg_union_view.sql.in
plan_skip_scan.sql.in
transparent_decompression.sql.in
transparent_decompression_ordered_index.sql.in)

View File

@ -0,0 +1,466 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set PREFIX 'EXPLAIN (analyze, verbose, costs off, timing off, summary off)'
CREATE TABLE test1 (
time timestamptz NOT NULL,
x1 integer,
x2 integer,
x3 integer,
x4 integer,
x5 integer);
SELECT FROM create_hypertable('test1', 'time');
ALTER TABLE test1 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'time DESC, x3 ASC, x4 ASC');
INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0);
INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0);
INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0);
INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0);
SELECT compress_chunk(i) FROM show_chunks('test1') i;
CREATE TABLE test2 (
time timestamptz NOT NULL,
x1 integer,
x2 integer,
x3 integer,
x4 integer,
x5 integer);
SELECT FROM create_hypertable('test2', 'time');
ALTER TABLE test2 SET (timescaledb.compress, timescaledb.compress_segmentby='x1, x2, x5', timescaledb.compress_orderby = 'time ASC, x3 DESC, x4 DESC');
INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 00:00:00-00', 1, 2, 1, 1, 0);
INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 01:00:00-00', 1, 3, 2, 2, 0);
INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:00:00-00', 2, 1, 3, 3, 0);
INSERT INTO test2 (time, x1, x2, x3, x4, x5) values('2000-01-01 03:00:00-00', 1, 2, 4, 4, 0);
SELECT compress_chunk(i) FROM show_chunks('test2') i;
CREATE TABLE test_with_defined_null (
time timestamptz NOT NULL,
x1 integer,
x2 integer,
x3 integer);
SELECT FROM create_hypertable('test_with_defined_null','time');
ALTER TABLE test_with_defined_null SET (timescaledb.compress,timescaledb.compress_segmentby='x1', timescaledb.compress_orderby='x2 ASC NULLS FIRST');
INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01', '1', NULL);
INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','2', NULL);
INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','1',1);
INSERT INTO test_with_defined_null (time, x1, x2) values('2000-01-01','1',2);
SELECT compress_chunk(i) FROM show_chunks('test_with_defined_null') i;
-- test1 uses compress_segmentby='x1, x2, x5' and compress_orderby = 'time DESC, x3 ASC, x4 ASC'
-- test2 uses compress_segmentby='x1, x2, x5' and compress_orderby = 'time ASC, x3 DESC, x4 DESC'
-- test_with_defined_null uses compress_segmentby='x1' and compress_orderby = 'x2 ASC NULLS FIRST'
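-- For illustration (a sketch inferred from the cases below; exact plan output
-- may vary between PostgreSQL versions): the sorted-merge path is applicable
-- when the query ORDER BY equals the compression ORDER BY, a prefix of it, or
-- its exact reverse (backward scan), including the NULLS placement. For test1,
-- 'ORDER BY time DESC' is implicitly 'time DESC NULLS FIRST' and therefore
-- matches the first compression ORDER BY column, e.g.:
-- EXPLAIN (costs off) SELECT * FROM test1 ORDER BY time DESC;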
------
-- Tests based on ordering
------
-- Should be optimized (implicit NULLS first)
:PREFIX
SELECT * FROM test1 ORDER BY time DESC;
-- Should be optimized
:PREFIX
SELECT * FROM test1 ORDER BY time DESC NULLS FIRST;
-- Should not be optimized (NULL order wrong)
:PREFIX
SELECT * FROM test1 ORDER BY time DESC NULLS LAST;
-- Should be optimized (implicit NULLS last)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC;
-- Should be optimized
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST;
-- Should not be optimized (NULL order wrong)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS FIRST;
-- Should be optimized
:PREFIX
SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST;
-- Should be optimized
:PREFIX
SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 ASC NULLS LAST;
-- Should not be optimized (wrong order for x4)
:PREFIX
SELECT * FROM test1 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 DESC NULLS FIRST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST, x3 DESC NULLS FIRST, x4 DESC NULLS FIRST;
-- Should not be optimized (wrong order for x4 in backward scan)
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS FIRST, x3 DESC NULLS LAST, x4 ASC;
-- Should be optimized
:PREFIX
SELECT * FROM test2 ORDER BY time ASC;
-- Should be optimized
:PREFIX
SELECT * FROM test2 ORDER BY time ASC, x3 DESC;
-- Should be optimized
:PREFIX
SELECT * FROM test2 ORDER BY time ASC, x3 DESC, x4 DESC;
-- Should not be optimized (wrong order for x3)
:PREFIX
SELECT * FROM test2 ORDER BY time ASC, x3 ASC NULLS LAST, x4 DESC;
-- Should not be optimized (wrong order for x3)
:PREFIX
SELECT * FROM test2 ORDER BY time ASC, x3 ASC NULLS FIRST, x4 DESC;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test2 ORDER BY time DESC NULLS FIRST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test2 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test2 ORDER BY time DESC NULLS FIRST, x3 ASC NULLS LAST, x4 NULLS LAST;
-- Should not be optimized (wrong order for x3 in backward scan)
:PREFIX
SELECT * FROM test2 ORDER BY time DESC NULLS LAST, x3 DESC NULLS FIRST, x4 NULLS FIRST;
-- Should not be optimized (wrong order for x3 in backward scan)
:PREFIX
SELECT * FROM test2 ORDER BY time DESC NULLS LAST, x3 DESC NULLS LAST, x4 NULLS FIRST;
-- Should be optimized
:PREFIX
SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS FIRST;
-- Should be optimized (backward scan)
:PREFIX
SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS LAST;
-- Should not be optimized
:PREFIX
SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS LAST;
-- Should not be optimized
:PREFIX
SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS FIRST;
------
-- Tests based on attributes
------
-- Should be optimized (some batches qualify via the pushed-down filter on _ts_meta_max_3)
:PREFIX
SELECT * FROM test1 WHERE x4 > 0 ORDER BY time DESC;
-- Should be optimized (no batches qualify via the pushed-down filter on _ts_meta_max_3)
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC;
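-- Note on the column name above (a sketch; the chunk name below is only an
-- example and differs per setup): the compressed chunk stores per-batch
-- min/max values of each compress_orderby column as _ts_meta_min_N and
-- _ts_meta_max_N, numbered by position. For test1, x4 is the third ORDER BY
-- column, so the x4 > 100 predicate is checked against _ts_meta_max_3 before
-- a batch is decompressed, e.g.:
-- SELECT _ts_meta_min_3, _ts_meta_max_3 FROM _timescaledb_internal.compress_hyper_2_6_chunk;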
-- Should be optimized
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x4;
-- Should be optimized (duplicate order by attributes)
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x3;
-- Should be optimized (duplicate order by attributes)
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x3, x4, x3, x4;
-- Should not be optimized
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC, x4, x3;
-- Should not be optimized
:PREFIX
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time ASC, x3, x4;
------
-- Tests based on results
------
-- Forward scan
SELECT * FROM test1 ORDER BY time DESC;
-- Backward scan
SELECT * FROM test1 ORDER BY time ASC NULLS FIRST;
-- Forward scan
SELECT * FROM test2 ORDER BY time ASC;
-- Backward scan
SELECT * FROM test2 ORDER BY time DESC NULLS LAST;
-- With selection on compressed column (value larger than the max value of all batches, so no batch has to be opened)
SELECT * FROM test1 WHERE x4 > 100 ORDER BY time DESC;
-- With selection on compressed column (value smaller than the max value of some batches, so those batches are opened and the filter has to be applied)
SELECT * FROM test1 WHERE x4 > 2 ORDER BY time DESC;
-- With selection on segment_by column
SELECT * FROM test1 WHERE time < '1980-01-01 00:00:00-00' ORDER BY time DESC;
SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' ORDER BY time DESC;
-- With selection on segment_by and compressed column
SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' ORDER BY time DESC;
SELECT * FROM test1 WHERE time > '1980-01-01 00:00:00-00' AND x4 > 100 ORDER BY time DESC;
-- Without projection
SELECT * FROM test1 ORDER BY time DESC;
-- With projection on time
SELECT time FROM test1 ORDER BY time DESC;
-- With projection on x3
SELECT x3 FROM test1 ORDER BY time DESC;
-- With projection on x3 and time
SELECT x3,time FROM test1 ORDER BY time DESC;
-- With projection on time and x3
SELECT time,x3 FROM test1 ORDER BY time DESC;
-- Test with projection and constants
EXPLAIN (verbose) SELECT 1 as one, 2 as two, 3 as three, time, x2 FROM test1 ORDER BY time DESC;
SELECT 1 as one, 2 as two, 3 as three, time, x2 FROM test1 ORDER BY time DESC;
-- Test with projection and constants
EXPLAIN (verbose) SELECT 1 as one, 2 as two, 3 as three, x2, time FROM test1 ORDER BY time DESC;
SELECT 1 as one, 2 as two, 3 as three, x2, time FROM test1 ORDER BY time DESC;
-- With projection and selection on a compressed column (value smaller than the max value of some batches, so these batches are opened and the filter has to be applied)
SELECT x4 FROM test1 WHERE x4 > 2 ORDER BY time DESC;
-- Aggregation with count
SELECT count(*) FROM test1;
-- Test with default values
ALTER TABLE test1 ADD COLUMN c1 int;
ALTER TABLE test1 ADD COLUMN c2 int NOT NULL DEFAULT 42;
SELECT * FROM test1 ORDER BY time DESC;
-- Recompress the chunks (decompress and compress them again)
SELECT decompress_chunk(i) FROM show_chunks('test1') i;
SELECT compress_chunk(i) FROM show_chunks('test1') i;
-- Test with a changed physical layout
-- build_physical_tlist() cannot be used for the scan on the compressed chunk anymore
SELECT * FROM test1 ORDER BY time DESC;
ALTER TABLE test1 DROP COLUMN c2;
SELECT * FROM test1 ORDER BY time DESC;
-- Test with a recreated column
ALTER TABLE test1 ADD COLUMN c2 int NOT NULL DEFAULT 43;
SELECT * FROM test1 ORDER BY time DESC;
-- Test with the recreated column
:PREFIX
SELECT * FROM test1 ORDER BY time DESC;
SELECT * FROM test1 ORDER BY time DESC;
-- Test with projection and recreated column
:PREFIX
SELECT time, x2, x1, c2 FROM test1 ORDER BY time DESC;
SELECT time, x2, x1, c2 FROM test1 ORDER BY time DESC;
-- Test with projection and recreated column
:PREFIX
SELECT x2, x1, c2, time FROM test1 ORDER BY time DESC;
SELECT x2, x1, c2, time FROM test1 ORDER BY time DESC;
-- Test with projection, constants and recreated column
:PREFIX
SELECT 1 as one, 2 as two, 3 as three, x2, x1, c2, time FROM test1 ORDER BY time DESC;
SELECT 1 as one, 2 as two, 3 as three, x2, x1, c2, time FROM test1 ORDER BY time DESC;
-- Test with null values
SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS FIRST;
SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS LAST;
SELECT * FROM test_with_defined_null ORDER BY x2 ASC NULLS LAST;
SELECT * FROM test_with_defined_null ORDER BY x2 DESC NULLS FIRST;
------
-- Tests based on chunk state
------
BEGIN TRANSACTION;
INSERT INTO test1 (time, x1, x2, x3, x4, x5) values('2000-01-01 02:01:00-00', 10, 20, 30, 40, 50);
-- Should not be optimized because of the partially compressed chunk
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST;
ROLLBACK;
-- Should be optimized again
:PREFIX
SELECT * FROM test1 ORDER BY time ASC NULLS LAST;
------
-- Tests on a larger relation
------
CREATE TABLE sensor_data (
time timestamptz NOT NULL,
sensor_id integer NOT NULL,
cpu double precision NULL,
temperature double precision NULL);
SELECT FROM create_hypertable('sensor_data','time');
INSERT INTO sensor_data
SELECT
time + (INTERVAL '1 minute' * random()) AS time,
sensor_id,
random() AS cpu,
random() * 100 AS temperature
FROM
generate_series('1980-01-01 00:00:00-00', '1980-02-01 00:00:00-00', INTERVAL '10 minute') AS g1(time),
generate_series(1, 100, 1 ) AS g2(sensor_id)
ORDER BY
time;
ALTER TABLE sensor_data SET (timescaledb.compress, timescaledb.compress_segmentby='sensor_id', timescaledb.compress_orderby = 'time DESC');
SELECT add_compression_policy('sensor_data','1 minute'::INTERVAL);
SELECT compress_chunk(i) FROM show_chunks('sensor_data') i;
-- Ensure the optimization is used for queries on this table
:PREFIX
SELECT * FROM sensor_data ORDER BY time DESC LIMIT 1;
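-- Illustrative comparison (not part of the reference output): disabling the
-- merge path for the same LIMIT query should produce a plan with an explicit
-- Sort over the full decompressed result, which is exactly the work the
-- sorted merge avoids for LIMIT queries:
-- SET timescaledb.enable_decompression_sorted_merge = off;
-- :PREFIX SELECT * FROM sensor_data ORDER BY time DESC LIMIT 1;
-- RESET timescaledb.enable_decompression_sorted_merge;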
-- Verify that we produce the same order without and with the optimization
CREATE PROCEDURE order_test(query text) LANGUAGE plpgsql AS $$
DECLARE
count integer;
BEGIN
SET timescaledb.enable_decompression_sorted_merge = 0;
EXECUTE format('CREATE TABLE temp_data1 AS %s;', query);
ALTER TABLE temp_data1 ADD COLUMN new_id SERIAL PRIMARY KEY;
SET timescaledb.enable_decompression_sorted_merge = 1;
EXECUTE format('CREATE TABLE temp_data2 AS %s;', query);
ALTER TABLE temp_data2 ADD COLUMN new_id SERIAL PRIMARY KEY;
CREATE TEMP TABLE temp_data3 AS (
SELECT * FROM temp_data1 UNION ALL SELECT * FROM temp_data2
);
count := (SELECT COUNT(*) FROM (SELECT COUNT(*) FROM temp_data3 GROUP BY time, new_id HAVING COUNT(*) != 2) AS s);
IF count > 0 THEN
RAISE EXCEPTION 'Detected different order with and without the optimization %', count;
END IF;
-- Drop old tables
DROP TABLE temp_data1;
DROP TABLE temp_data2;
DROP TABLE temp_data3;
END;
$$;
CALL order_test('SELECT * FROM sensor_data ORDER BY time DESC');
CALL order_test('SELECT * FROM sensor_data ORDER BY time DESC LIMIT 100');
CALL order_test('SELECT * FROM sensor_data ORDER BY time ASC NULLS FIRST');
CALL order_test('SELECT * FROM sensor_data ORDER BY time ASC NULLS FIRST LIMIT 100');
CALL order_test('SELECT * FROM test1 ORDER BY time DESC');
CALL order_test('SELECT * FROM test1 ORDER BY time ASC NULLS LAST');
------
-- Test enabling and disabling the optimization based on costs
------
CREATE TABLE test_costs (
time timestamptz NOT NULL,
segment_by integer NOT NULL,
x1 integer NOT NULL);
SELECT FROM create_hypertable('test_costs', 'time');
ALTER TABLE test_costs SET (timescaledb.compress, timescaledb.compress_segmentby='segment_by', timescaledb.compress_orderby = 'time DESC, x1');
-- Create 100 segments
INSERT INTO test_costs
SELECT
'2000-01-01 02:01:00-00'::timestamptz AS time,
segment_by,
random() as x1
FROM
generate_series(1, 100, 1) AS g2(segment_by)
ORDER BY time;
SELECT add_compression_policy('test_costs','1 minute'::INTERVAL);
SELECT compress_chunk(i) FROM show_chunks('test_costs') i;
-- Number of segments
SELECT count(*) FROM (SELECT segment_by from test_costs group by segment_by) AS s;
-- Test query plan (should be optimized due to 100 different segments)
:PREFIX
SELECT time, segment_by, x1 FROM test_costs ORDER BY time DESC;
-- Decompress chunk
SELECT decompress_chunk(i) FROM show_chunks('test_costs') i;
-- Add 900 segments (1000 segments total)
INSERT INTO test_costs
SELECT
'2000-01-01 02:01:00-00'::timestamptz AS time,
segment_by,
random() as x1
FROM
generate_series(100, 1000, 1) AS g2(segment_by)
ORDER BY time;
-- Recompress chunk
SELECT compress_chunk(i) FROM show_chunks('test_costs') i;
-- Number of segments
SELECT count(*) FROM (SELECT segment_by from test_costs group by segment_by) AS s;
-- Test query plan (should not be optimized due to 1000 different segments)
:PREFIX
SELECT time, segment_by, x1 FROM test_costs ORDER BY time DESC;
-- Test query plan with predicate (query should be optimized due to ~100 segments)
:PREFIX
SELECT time, segment_by, x1 FROM test_costs WHERE segment_by > 900 and segment_by < 999 ORDER BY time DESC;
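-- Illustrative only (not part of the reference output): the plan choices above
-- are cost based, presumably because keeping one decompressed tuple per open
-- batch in the binary heap becomes more expensive than a single explicit sort
-- once many segments qualify. Disabling the merge path should make even the
-- ~100-segment query above fall back to the sort-based plan:
-- SET timescaledb.enable_decompression_sorted_merge = off;
-- :PREFIX
-- SELECT time, segment_by, x1 FROM test_costs WHERE segment_by > 900 and segment_by < 999 ORDER BY time DESC;
-- RESET timescaledb.enable_decompression_sorted_merge;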