Add joininfo to compressed rel

If the joininfo for a rel is not available, the index path
cannot compute the correct filters for parameterized paths,
because the RelOptInfo's ppilist is set up using information
from the joininfo.
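
For example, a join like the following (from the regression test added
in this commit) relies on the join clause on device_id being available
as a parameterized-path filter on the compressed chunk's index scan:

    select nd.node, mt.*
    from metrics_ordered_idx mt, nodetime nd
    where mt.time > nd.start_time
      and mt.device_id = nd.node
      and mt.time < nd.stop_time
    order by time;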

Fixes #1558
gayyappan 2019-12-24 13:51:49 -05:00 committed by gayyappan
parent 533df9645a
commit 6dad1f246a
8 changed files with 768 additions and 32 deletions


@@ -357,7 +357,7 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
compressed_rel = info->compressed_rel;
compressed_rel->consider_parallel = chunk_rel->consider_parallel;
/* translate chunk_rel->baserestrictinfo */
pushdown_quals(root, chunk_rel, compressed_rel, info->hypertable_compression_info);
set_baserel_size_estimates(root, compressed_rel);
new_row_estimate = compressed_rel->rows * DECOMPRESS_CHUNK_BATCH_SIZE;
@@ -544,6 +544,129 @@ compressed_rel_setup_reltarget(RelOptInfo *compressed_rel, CompressionInfo *info
}
}
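/* Return a copy of the given relid set with the uncompressed chunk's
* relid replaced by the compressed chunk's relid.
*/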
static Bitmapset *
decompress_chunk_adjust_child_relids(Bitmapset *src, int chunk_relid, int compressed_chunk_relid)
{
Bitmapset *result = NULL;
if (src != NULL)
{
result = bms_copy(src);
result = bms_del_member(result, chunk_relid);
result = bms_add_member(result, compressed_chunk_relid);
}
return result;
}
/* based on adjust_appendrel_attrs_mutator handling of RestrictInfo */
static Node *
chunk_joininfo_mutator(Node *node, CompressionInfo *context)
{
if (node == NULL)
return NULL;
if (IsA(node, Var))
{
Var *var = castNode(Var, node);
Var *compress_var = copyObject(var);
char *column_name;
AttrNumber compressed_attno;
FormData_hypertable_compression *compressioninfo;
if (var->varno != context->chunk_rel->relid)
return (Node *) var;
column_name = get_attname_compat(context->chunk_rte->relid, var->varattno, false);
compressioninfo =
get_column_compressioninfo(context->hypertable_compression_info, column_name);
compressed_attno =
get_attnum(context->compressed_rte->relid, compressioninfo->attname.data);
compress_var->varno = context->compressed_rel->relid;
compress_var->varattno = compressed_attno;
return (Node *) compress_var;
}
else if (IsA(node, RestrictInfo))
{
RestrictInfo *oldinfo = (RestrictInfo *) node;
RestrictInfo *newinfo = makeNode(RestrictInfo);
/* Copy all flat-copiable fields */
memcpy(newinfo, oldinfo, sizeof(RestrictInfo));
/* Recursively fix the clause itself */
newinfo->clause = (Expr *) chunk_joininfo_mutator((Node *) oldinfo->clause, context);
/* and the modified version, if an OR clause */
newinfo->orclause = (Expr *) chunk_joininfo_mutator((Node *) oldinfo->orclause, context);
/* adjust relid sets too */
newinfo->clause_relids =
decompress_chunk_adjust_child_relids(oldinfo->clause_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
newinfo->required_relids =
decompress_chunk_adjust_child_relids(oldinfo->required_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
newinfo->outer_relids =
decompress_chunk_adjust_child_relids(oldinfo->outer_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
newinfo->nullable_relids =
decompress_chunk_adjust_child_relids(oldinfo->nullable_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
newinfo->left_relids = decompress_chunk_adjust_child_relids(oldinfo->left_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
newinfo->right_relids =
decompress_chunk_adjust_child_relids(oldinfo->right_relids,
context->chunk_rel->relid,
context->compressed_rel->relid);
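/* Reset cached and derived fields so they get recomputed for the
* translated clause (mirroring adjust_appendrel_attrs_mutator).
*/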
newinfo->eval_cost.startup = -1;
newinfo->norm_selec = -1;
newinfo->outer_selec = -1;
newinfo->left_em = NULL;
newinfo->right_em = NULL;
newinfo->scansel_cache = NIL;
newinfo->left_bucketsize = -1;
newinfo->right_bucketsize = -1;
#if PG11_GE
newinfo->left_mcvfreq = -1;
newinfo->right_mcvfreq = -1;
#endif
return (Node *) newinfo;
}
return expression_tree_mutator(node, chunk_joininfo_mutator, context);
}
/* Translate chunk_rel->joininfo for compressed_rel.
* This is necessary for create_index_path, which gets join clauses from
* rel->joininfo and sets up parameterized paths (in rel->ppilist).
* ppi_clauses is ultimately used to add any additional filters to the
* indexpath when creating a plan in create_indexscan_plan.
* Otherwise we would miss additional filters that need to be applied after
* the index plan is executed (github issue #1558).
*/
static void
compressed_rel_setup_joininfo(RelOptInfo *compressed_rel, CompressionInfo *info)
{
RelOptInfo *chunk_rel = info->chunk_rel;
ListCell *lc;
List *compress_joininfo = NIL;
foreach (lc, chunk_rel->joininfo)
{
RestrictInfo *compress_ri;
RestrictInfo *ri = (RestrictInfo *) lfirst(lc);
Node *result = chunk_joininfo_mutator((Node *) ri, info);
Assert(IsA(result, RestrictInfo));
compress_ri = (RestrictInfo *) result;
compress_joininfo = lappend(compress_joininfo, compress_ri);
}
compressed_rel->joininfo = compress_joininfo;
}
typedef struct EMCreationContext
{
List *compression_info;
@@ -652,8 +775,8 @@ add_segmentby_to_equivalence_class(EquivalenceClass *cur_ec, CompressionInfo *in
if (var->varno != info->chunk_rel->relid)
continue;
/* given that the em is a var of the uncompressed chunk, the relid of the chunk should be
* set on the em */
/* given that the em is a var of the uncompressed chunk, the relid of the chunk should
* be set on the em */
Assert(bms_overlap(cur_em->em_relids, uncompressed_chunk_relids));
context->current_col_info =
@@ -752,6 +875,7 @@ static void
decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, Chunk *chunk,
RelOptInfo *chunk_rel, bool needs_sequence_num)
{
ListCell *lc;
Index compressed_index = root->simple_rel_array_size;
Chunk *compressed_chunk = ts_chunk_get_by_id(chunk->fd.compressed_chunk_id, 0, true);
Oid compressed_relid = compressed_chunk->table_id;
@@ -780,12 +904,34 @@ decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, Chunk
#else
compressed_rel = build_simple_rel(root, compressed_index, NULL);
#endif
/* github issue #1558:
* set up top_parent_relids for this rel to be the same as for the
* original hypertable, otherwise eq classes are not computed correctly
* in generate_join_implied_equalities (called by
* get_baserel_parampathinfo <- create_index_paths)
*/
#if !PG96
Assert(chunk_rel->top_parent_relids != NULL);
compressed_rel->top_parent_relids = bms_copy(chunk_rel->top_parent_relids);
#endif
root->simple_rel_array[compressed_index] = compressed_rel;
info->compressed_rel = compressed_rel;
foreach (lc, info->hypertable_compression_info)
{
FormData_hypertable_compression *fd = lfirst(lc);
if (fd->segmentby_column_index <= 0)
{
/* store attnos for the compressed chunk here */
AttrNumber compressed_chunk_attno =
get_attnum(info->compressed_rte->relid, NameStr(fd->attname));
info->compressed_chunk_compressed_attnos =
bms_add_member(info->compressed_chunk_compressed_attnos, compressed_chunk_attno);
}
}
compressed_rel_setup_reltarget(compressed_rel, info, needs_sequence_num);
compressed_rel_setup_equivalence_classes(root, info);
/* translate chunk_rel->joininfo for compressed_rel */
compressed_rel_setup_joininfo(compressed_rel, info);
}
static DecompressChunkPath *


@@ -32,6 +32,8 @@ typedef struct CompressionInfo
Bitmapset *chunk_segmentby_attnos;
/* chunk attribute numbers that have equality constraint in baserestrictinfo */
Bitmapset *chunk_segmentby_ri;
/* compressed chunk attribute numbers for columns that are compressed */
Bitmapset *compressed_chunk_compressed_attnos;
} CompressionInfo;


@@ -286,6 +286,33 @@ replace_compressed_vars(Node *node, CompressionInfo *info)
return expression_tree_mutator(node, replace_compressed_vars, (void *) info);
}
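/* Context for clause_has_compressed_attrs: the compressed chunk rel's
* relid and the attnos of columns stored in compressed form.
*/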
typedef struct CompressedAttnoContext
{
Bitmapset *compressed_attnos;
Index compress_relid;
} CompressedAttnoContext;
/* check if the clause refers to any attributes that are in compressed
* form.
*/
static bool
clause_has_compressed_attrs(Node *node, void *context)
{
if (node == NULL)
return true;
if (IsA(node, Var))
{
CompressedAttnoContext *cxt = (CompressedAttnoContext *) context;
Var *var = (Var *) node;
if (var->varno == cxt->compress_relid)
{
if (bms_is_member(var->varattno, cxt->compressed_attnos))
return true;
}
}
return expression_tree_walker(node, clause_has_compressed_attrs, context);
}
Plan *
decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *path, List *tlist,
List *clauses, List *custom_plans)
@@ -313,6 +340,8 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
/* from create_indexscan_plan() */
IndexPath *ipath = castNode(IndexPath, compressed_path);
ListCell *lc;
List *indexqual = NIL;
Plan *indexplan;
foreach (lc, clauses)
{
RestrictInfo *rinfo = lfirst_node(RestrictInfo, lc);
@@ -320,6 +349,29 @@ decompress_chunk_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *pat
continue; /* dup or derived from same EquivalenceClass */
cscan->scan.plan.qual = lappend(cscan->scan.plan.qual, rinfo->clause);
}
/* joininfo clauses on the compressed chunk rel have to
* contain clauses on both compressed and decompressed attnos.
* The joininfo clauses get translated into ParamPathInfo for the
* indexpath, but index scans cannot handle compressed attributes,
* so remove those clauses from the index quals here. (They are
* included in the `clauses` passed in to this function and were
* therefore already added as filters to cscan->scan.plan.qual in
* the loop above.)
*/
indexplan = linitial(custom_plans);
Assert(IsA(indexplan, IndexScan) || IsA(indexplan, IndexOnlyScan));
foreach (lc, indexplan->qual)
{
Node *expr = (Node *) lfirst(lc);
CompressedAttnoContext cxt;
Index compress_relid = dcpath->info->compressed_rel->relid;
cxt.compress_relid = compress_relid;
cxt.compressed_attnos = dcpath->info->compressed_chunk_compressed_attnos;
if (!clause_has_compressed_attrs((Node *) expr, &cxt))
indexqual = lappend(indexqual, expr);
}
indexplan->qual = indexqual;
}
else if (IsA(compressed_path, BitmapHeapPath))
{


@@ -47,10 +47,10 @@ ANALYZE metrics_space;
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
-- compress first and last chunk on the hypertable
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
@@ -129,10 +129,10 @@ ANALYZE metrics_space;
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
-- look at postgres version to decide whether we run with analyze or without
SELECT
CASE WHEN current_setting('server_version_num')::int >= 100000
@@ -2318,8 +2318,8 @@ SET client_min_messages TO error;
CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
SET timescaledb.current_timestamp_mock = 'Wed Jan 19 15:55:00 2000 PST';
REFRESH MATERIALIZED VIEW cagg_test;
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
SELECT time FROM cagg_test ORDER BY time LIMIT 1;
time
------------------------------
@@ -2328,6 +2328,54 @@ SELECT time FROM cagg_test ORDER BY time LIMIT 1;
DROP VIEW cagg_test CASCADE;
RESET client_min_messages;
--github issue 1558. nested loop with index scan needed
--disables parallel scan
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select * from metrics, metrics_space where metrics.time > metrics_space.time and metrics.device_id = metrics_space.device_id and metrics.time < metrics_space.time;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop (actual rows=0 loops=1)
-> Append (actual rows=27360 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
-> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=2 loops=1)
-> Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk (actual rows=2016 loops=1)
-> Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk (actual rows=6048 loops=1)
-> Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk (actual rows=2016 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
-> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=3 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
-> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=9 loops=1)
-> Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk (actual rows=2016 loops=1)
-> Append (actual rows=0 loops=27360)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 1440
-> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=2 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
-> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=27360)
Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Filter: (_hyper_2_4_chunk.device_id = device_id)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 2016
-> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=3 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
(30 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = true;
set enable_mergejoin = true;
---end github issue 1558
\set TEST_TABLE 'metrics_space'
\ir :TEST_QUERY_NAME
-- This file and its contents are licensed under the Timescale License.
@@ -5618,8 +5666,8 @@ SET client_min_messages TO error;
CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
SET timescaledb.current_timestamp_mock = 'Wed Jan 19 15:55:00 2000 PST';
REFRESH MATERIALIZED VIEW cagg_test;
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
SELECT time FROM cagg_test ORDER BY time LIMIT 1;
time
------------------------------
@@ -5628,6 +5676,54 @@ SELECT time FROM cagg_test ORDER BY time LIMIT 1;
DROP VIEW cagg_test CASCADE;
RESET client_min_messages;
--github issue 1558. nested loop with index scan needed
--disables parallel scan
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select * from metrics, metrics_space where metrics.time > metrics_space.time and metrics.device_id = metrics_space.device_id and metrics.time < metrics_space.time;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop (actual rows=0 loops=1)
-> Append (actual rows=27360 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
-> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=2 loops=1)
-> Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk (actual rows=2016 loops=1)
-> Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk (actual rows=6048 loops=1)
-> Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk (actual rows=2016 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
-> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=3 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
-> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=9 loops=1)
-> Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk (actual rows=2016 loops=1)
-> Append (actual rows=0 loops=27360)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 1440
-> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=2 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
-> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=27360)
Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Filter: (_hyper_2_4_chunk.device_id = device_id)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 2016
-> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=3 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
(30 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = true;
set enable_mergejoin = true;
---end github issue 1558
\ir include/transparent_decompression_ordered.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@@ -7435,6 +7531,11 @@ CREATE TABLE device_tbl(device_id int, descr text);
INSERT into device_tbl select generate_series(1, 6,1), 'devicex';
INSERT into device_tbl select 8, 'device8';
analyze device_tbl;
-- table for joins ---
create table nodetime( node int,
start_time timestamp ,
stop_time timestamp );
insert into nodetime values( 4, '2018-01-06 00:00'::timestamp, '2018-12-02 12:00'::timestamp);
-- run queries on uncompressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
@@ -7458,7 +7559,7 @@ ORDER BY c.id;
-- run queries on compressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
\set ECHO none
-- diff compressed and uncompressed results
:DIFF_CMD_IDX
-- look at postgres version to decide whether we run with analyze or without
@@ -7719,6 +7820,144 @@ FROM metrics_ordered_idx d INNER JOIN LATERAL (SELECT * FROM metrics_ordered_idx
Filter: (device_id = d.device_id)
(39 rows)
--github issue 1558
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select device_id, count(*) from
(select * from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time) as subq group by device_id;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=1 loops=1)
Group Key: mt.device_id
-> Nested Loop (actual rows=4291 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 8616
-> Merge Append (actual rows=12907 loops=1)
Sort Key: mt.device_id
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_16_1443_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Index Scan using compress_hyper_16_1444_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Index Scan using compress_hyper_16_1445_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Index Scan using compress_hyper_16_1446_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Index Scan using compress_hyper_16_1447_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=12907)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(19 rows)
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Nested Loop (actual rows=4291 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=4291 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 288
-> Index Scan using compress_hyper_16_1443_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1443_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 576
-> Index Scan using compress_hyper_16_1444_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1444_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
-> Index Scan using compress_hyper_16_1445_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1445_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
-> Index Scan using compress_hyper_16_1446_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_16_1447_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1447_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_mergejoin = true;
set enable_hashjoin = false;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Merge Join (actual rows=4291 loops=1)
Merge Cond: (nd.node = mt.device_id)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 865
-> Sort (actual rows=1 loops=1)
Sort Key: nd.node
Sort Method: quicksort
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Sort (actual rows=12040 loops=1)
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=12907 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Seq Scan on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Seq Scan on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Seq Scan on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
(25 rows)
set enable_mergejoin = false;
set enable_hashjoin = true;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Hash Join (actual rows=4291 loops=1)
Hash Cond: (mt.device_id = nd.node)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 865
-> Append (actual rows=12907 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Seq Scan on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Seq Scan on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Seq Scan on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
-> Hash (actual rows=1 loops=1)
Buckets: 2048 Batches: 1
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(21 rows)
--enable all joins after the tests
set enable_mergejoin = true;
set enable_hashjoin = true;
--end github issue 1558
set enable_seqscan = true;
\ir include/transparent_decompression_constraintaware.sql
-- This file and its contents are licensed under the Timescale License.


@@ -47,10 +47,10 @@ ANALYZE metrics_space;
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
-- compress first and last chunk on the hypertable
ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
@@ -129,10 +129,10 @@ ANALYZE metrics_space;
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
-- look at postgres version to decide whether we run with analyze or without
SELECT
CASE WHEN current_setting('server_version_num')::int >= 100000
@@ -2428,8 +2428,8 @@ SET client_min_messages TO error;
CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
SET timescaledb.current_timestamp_mock = 'Wed Jan 19 15:55:00 2000 PST';
REFRESH MATERIALIZED VIEW cagg_test;
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
SELECT time FROM cagg_test ORDER BY time LIMIT 1;
time
------------------------------
@@ -2438,6 +2438,54 @@ SELECT time FROM cagg_test ORDER BY time LIMIT 1;
DROP VIEW cagg_test CASCADE;
RESET client_min_messages;
--github issue 1558. nested loop with index scan needed
--disables parallel scan
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select * from metrics, metrics_space where metrics.time > metrics_space.time and metrics.device_id = metrics_space.device_id and metrics.time < metrics_space.time;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop (actual rows=0 loops=1)
-> Append (actual rows=27360 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
-> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=2 loops=1)
-> Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk (actual rows=2016 loops=1)
-> Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk (actual rows=6048 loops=1)
-> Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk (actual rows=2016 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
-> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=3 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
-> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=9 loops=1)
-> Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk (actual rows=2016 loops=1)
-> Append (actual rows=0 loops=27360)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 1440
-> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=2 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
-> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=27360)
Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Filter: (_hyper_2_4_chunk.device_id = device_id)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 2016
-> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=3 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
(30 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = true;
set enable_mergejoin = true;
---end github issue 1558
\set TEST_TABLE 'metrics_space'
\ir :TEST_QUERY_NAME
-- This file and its contents are licensed under the Timescale License.
@@ -5742,8 +5790,8 @@ SET client_min_messages TO error;
CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
SET timescaledb.current_timestamp_mock = 'Wed Jan 19 15:55:00 2000 PST';
REFRESH MATERIALIZED VIEW cagg_test;
psql:include/transparent_decompression_query.sql:284: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:284: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
psql:include/transparent_decompression_query.sql:286: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
psql:include/transparent_decompression_query.sql:286: INFO: materializing continuous aggregate public.cagg_test: nothing to invalidate, new range up to 948067200000000
SELECT time FROM cagg_test ORDER BY time LIMIT 1;
time
------------------------------
@@ -5752,6 +5800,54 @@ SELECT time FROM cagg_test ORDER BY time LIMIT 1;
DROP VIEW cagg_test CASCADE;
RESET client_min_messages;
--github issue 1558. nested loop with index scan needed
--disables parallel scan
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select * from metrics, metrics_space where metrics.time > metrics_space.time and metrics.device_id = metrics_space.device_id and metrics.time < metrics_space.time;
QUERY PLAN
--------------------------------------------------------------------------------------------------------------------------------------------------
Nested Loop (actual rows=0 loops=1)
-> Append (actual rows=27360 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_17_chunk_c_space_index_2 on compress_hyper_6_17_chunk (actual rows=2 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
-> Index Scan using compress_hyper_6_18_chunk_c_space_index_2 on compress_hyper_6_18_chunk (actual rows=6 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_6_19_chunk_c_space_index_2 on compress_hyper_6_19_chunk (actual rows=2 loops=1)
-> Index Scan Backward using _hyper_2_7_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_7_chunk (actual rows=2016 loops=1)
-> Index Scan Backward using _hyper_2_8_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_8_chunk (actual rows=6048 loops=1)
-> Index Scan Backward using _hyper_2_9_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_9_chunk (actual rows=2016 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
-> Index Scan using compress_hyper_6_20_chunk_c_space_index_2 on compress_hyper_6_20_chunk (actual rows=3 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
-> Index Scan using compress_hyper_6_21_chunk_c_space_index_2 on compress_hyper_6_21_chunk (actual rows=9 loops=1)
-> Index Scan Backward using _hyper_2_12_chunk_device_id_device_id_peer_v0_v1_time_idx2 on _hyper_2_12_chunk (actual rows=2016 loops=1)
-> Append (actual rows=0 loops=27360)
-> Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 1440
-> Index Scan using compress_hyper_5_15_chunk_c_index_2 on compress_hyper_5_15_chunk (actual rows=2 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
-> Index Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (actual rows=0 loops=27360)
Index Cond: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Filter: (_hyper_2_4_chunk.device_id = device_id)
-> Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=0 loops=27360)
Filter: (("time" > _hyper_2_4_chunk."time") AND ("time" < _hyper_2_4_chunk."time"))
Rows Removed by Filter: 2016
-> Index Scan using compress_hyper_5_16_chunk_c_index_2 on compress_hyper_5_16_chunk (actual rows=3 loops=27360)
Index Cond: (device_id = _hyper_2_4_chunk.device_id)
(30 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = true;
set enable_mergejoin = true;
---end github issue 1558
\ir include/transparent_decompression_ordered.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
@@ -7561,6 +7657,11 @@ CREATE TABLE device_tbl(device_id int, descr text);
INSERT into device_tbl select generate_series(1, 6,1), 'devicex';
INSERT into device_tbl select 8, 'device8';
analyze device_tbl;
-- table for joins ---
create table nodetime( node int,
start_time timestamp ,
stop_time timestamp );
insert into nodetime values( 4, '2018-01-06 00:00'::timestamp, '2018-12-02 12:00'::timestamp);
-- run queries on uncompressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
@@ -7584,7 +7685,7 @@ ORDER BY c.id;
-- run queries on compressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
\set ECHO none
-- diff compressed and uncompressed results
:DIFF_CMD_IDX
-- look at postgres version to decide whether we run with analyze or without
@@ -7845,6 +7946,144 @@ FROM metrics_ordered_idx d INNER JOIN LATERAL (SELECT * FROM metrics_ordered_idx
Filter: (device_id = d.device_id)
(39 rows)
--github issue 1558
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select device_id, count(*) from
(select * from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time) as subq group by device_id;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------
GroupAggregate (actual rows=1 loops=1)
Group Key: mt.device_id
-> Nested Loop (actual rows=4291 loops=1)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time) AND (mt.device_id = nd.node))
Rows Removed by Join Filter: 8616
-> Merge Append (actual rows=12907 loops=1)
Sort Key: mt.device_id
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Index Scan using compress_hyper_16_1443_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Index Scan using compress_hyper_16_1444_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Index Scan using compress_hyper_16_1445_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Index Scan using compress_hyper_16_1446_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Index Scan using compress_hyper_16_1447_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
-> Materialize (actual rows=1 loops=12907)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(19 rows)
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
------------------------------------------------------------------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Nested Loop (actual rows=4291 loops=1)
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Append (actual rows=4291 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 288
-> Index Scan using compress_hyper_16_1443_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1443_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 576
-> Index Scan using compress_hyper_16_1444_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1444_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
-> Index Scan using compress_hyper_16_1445_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1445_chunk (actual rows=0 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
-> Index Scan using compress_hyper_16_1446_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
Index Cond: (device_id = nd.node)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=0 loops=1)
Filter: (("time" > nd.start_time) AND ("time" < nd.stop_time))
Rows Removed by Filter: 1
-> Index Scan using compress_hyper_16_1447_chunk__compressed_hypertable_16_device_i on compress_hyper_16_1447_chunk (actual rows=1 loops=1)
Index Cond: (device_id = nd.node)
(29 rows)
set enable_seqscan = true;
set enable_bitmapscan = true;
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_mergejoin = true;
set enable_hashjoin = false;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Merge Join (actual rows=4291 loops=1)
Merge Cond: (nd.node = mt.device_id)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 865
-> Sort (actual rows=1 loops=1)
Sort Key: nd.node
Sort Method: quicksort
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
-> Sort (actual rows=12040 loops=1)
Sort Key: mt.device_id
Sort Method: quicksort
-> Append (actual rows=12907 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Seq Scan on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Seq Scan on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Seq Scan on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
(25 rows)
set enable_mergejoin = false;
set enable_hashjoin = true;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
QUERY PLAN
---------------------------------------------------------------------------------------------------------
Sort (actual rows=4291 loops=1)
Sort Key: mt."time"
Sort Method: quicksort
-> Hash Join (actual rows=4291 loops=1)
Hash Cond: (mt.device_id = nd.node)
Join Filter: ((mt."time" > nd.start_time) AND (mt."time" < nd.stop_time))
Rows Removed by Join Filter: 865
-> Append (actual rows=12907 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1438_chunk mt (actual rows=1440 loops=1)
-> Seq Scan on compress_hyper_16_1443_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1439_chunk mt_1 (actual rows=2880 loops=1)
-> Seq Scan on compress_hyper_16_1444_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1440_chunk mt_2 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1445_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1441_chunk mt_3 (actual rows=4291 loops=1)
-> Seq Scan on compress_hyper_16_1446_chunk (actual rows=5 loops=1)
-> Custom Scan (DecompressChunk) on _hyper_15_1442_chunk mt_4 (actual rows=5 loops=1)
-> Seq Scan on compress_hyper_16_1447_chunk (actual rows=5 loops=1)
-> Hash (actual rows=1 loops=1)
Buckets: 2048 Batches: 1
-> Seq Scan on nodetime nd (actual rows=1 loops=1)
(21 rows)
--enable all joins after the tests
set enable_mergejoin = true;
set enable_hashjoin = true;
--end github issue 1558
set enable_seqscan = true;
\ir include/transparent_decompression_constraintaware.sql
-- This file and its contents are licensed under the Timescale License.


@@ -17,3 +17,37 @@ SET work_mem TO '50MB';
:PREFIX SELECT d.device_id, m.time, m.time
FROM metrics_ordered_idx d INNER JOIN LATERAL (SELECT * FROM metrics_ordered_idx m WHERE m.device_id=d.device_id AND m.device_id_peer = 3 ORDER BY time DESC LIMIT 1 ) m ON m.device_id_peer = d.device_id_peer;
--github issue 1558
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select device_id, count(*) from
(select * from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time) as subq group by device_id;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
set enable_seqscan = true;
set enable_bitmapscan = true;
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_mergejoin = true;
set enable_hashjoin = false;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
set enable_mergejoin = false;
set enable_hashjoin = true;
:PREFIX select nd.node, mt.* from metrics_ordered_idx mt, nodetime nd
where mt.time > nd.start_time and mt.device_id = nd.node and mt.time < nd.stop_time order by time;
--enable all joins after the tests
set enable_mergejoin = true;
set enable_hashjoin = true;
--end github issue 1558


@@ -221,8 +221,10 @@ SET enable_hashjoin TO false;
--with multiple values can get a nested loop.
:PREFIX_VERBOSE SELECT device_id_peer FROM :TEST_TABLE WHERE device_id_peer IN (VALUES (1), (2));
RESET enable_hashjoin;
:PREFIX_VERBOSE SELECT device_id_peer FROM :TEST_TABLE WHERE device_id IN (VALUES (1));
--with multiple values can get a semi-join or nested loop depending on seq_page_cost.
@@ -286,3 +288,19 @@ SELECT time FROM cagg_test ORDER BY time LIMIT 1;
DROP VIEW cagg_test CASCADE;
RESET client_min_messages;
--github issue 1558. nested loop with index scan needed
--disables parallel scan
set enable_seqscan = false;
set enable_bitmapscan = false;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = false;
set enable_mergejoin = false;
:PREFIX select * from metrics, metrics_space where metrics.time > metrics_space.time and metrics.device_id = metrics_space.device_id and metrics.time < metrics_space.time;
set enable_seqscan = true;
set enable_bitmapscan = true;
set max_parallel_workers_per_gather = 0;
set enable_hashjoin = true;
set enable_mergejoin = true;
---end github issue 1558


@@ -155,6 +155,12 @@ INSERT into device_tbl select generate_series(1, 6,1), 'devicex';
INSERT into device_tbl select 8, 'device8';
analyze device_tbl;
-- table for joins ---
create table nodetime( node int,
start_time timestamp ,
stop_time timestamp );
insert into nodetime values( 4, '2018-01-06 00:00'::timestamp, '2018-12-02 12:00'::timestamp);
-- run queries on uncompressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
@@ -178,7 +184,7 @@ ORDER BY c.id;
-- run queries on compressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
\set ECHO none
SET client_min_messages TO error;
set enable_seqscan = false;
\o :TEST_RESULTS_COMPRESSED_IDX