diff --git a/src/cross_module_fn.c b/src/cross_module_fn.c
index 01f148bda..2081d37dc 100644
--- a/src/cross_module_fn.c
+++ b/src/cross_module_fn.c
@@ -407,7 +407,8 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
 	.remove_reorder_policy = error_no_default_fn_pg_enterprise,
 	.remove_compress_chunks_policy = error_no_default_fn_pg_community,
 	.create_upper_paths_hook = NULL,
-	.set_rel_pathlist_hook = NULL,
+	.set_rel_pathlist_dml = NULL,
+	.set_rel_pathlist_query = NULL,
 	.gapfill_marker = error_no_default_fn_pg_community,
 	.gapfill_int16_time_bucket = error_no_default_fn_pg_community,
 	.gapfill_int32_time_bucket = error_no_default_fn_pg_community,
diff --git a/src/cross_module_fn.h b/src/cross_module_fn.h
index 593a5277f..c263cb08a 100644
--- a/src/cross_module_fn.h
+++ b/src/cross_module_fn.h
@@ -45,8 +45,14 @@ typedef struct CrossModuleFunctions
 	Datum (*remove_reorder_policy)(PG_FUNCTION_ARGS);
 	Datum (*remove_compress_chunks_policy)(PG_FUNCTION_ARGS);
 	void (*create_upper_paths_hook)(PlannerInfo *, UpperRelationKind, RelOptInfo *, RelOptInfo *);
-	void (*set_rel_pathlist_hook)(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *,
-								  bool isdml);
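+	/*
+	 * set_rel_pathlist_dml is called for rels when planning DML on a
+	 * hypertable; set_rel_pathlist_query is called when planning any
+	 * other query (see timescaledb_set_rel_pathlist in src/planner.c).
+	 */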
+	void (*set_rel_pathlist_dml)(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *);
+	void (*set_rel_pathlist_query)(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *,
+								   Hypertable *);
 	PGFunction gapfill_marker;
 	PGFunction gapfill_int16_time_bucket;
 	PGFunction gapfill_int32_time_bucket;
diff --git a/src/planner.c b/src/planner.c
index 93bdf1de2..9ee2fd127 100644
--- a/src/planner.c
+++ b/src/planner.c
@@ -343,15 +343,17 @@ timescaledb_set_rel_pathlist_query(PlannerInfo *root, RelOptInfo *rel, Index rti
 	if (!should_optimize_query(ht))
 		return;
 
-	if (ts_guc_optimize_non_hypertables)
-	{
-		/* if optimizing all tables, apply optimization to any table */
+	/*
+	 * Since the sort optimization adds new paths to the rel, it has
+	 * to happen before any optimizations that replace the pathlist.
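+	 * (The set_rel_pathlist_query hook invoked below is one such case:
+	 * it may swap the pathlist for DecompressChunk paths.)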
+	 */
+	if (ts_guc_optimize_non_hypertables || (ht != NULL && is_append_child(rel, rte)))
 		ts_sort_transform_optimization(root, rel);
-	}
-	else if (ht != NULL && is_append_child(rel, rte))
-	{
-		ts_sort_transform_optimization(root, rel);
-	}
+
+	if (ts_cm_functions->set_rel_pathlist_query != NULL)
+		ts_cm_functions->set_rel_pathlist_query(root, rel, rti, rte, ht);
 
 	if (
 		/*
@@ -455,12 +455,16 @@ timescaledb_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, Rang
 
 	ht = ts_hypertable_cache_get_entry(hcache, ht_reloid);
 
-	if (ts_cm_functions->set_rel_pathlist_hook != NULL)
-		ts_cm_functions->set_rel_pathlist_hook(root, rel, rti, rte, ht, is_htdml);
 	if (!is_htdml)
 	{
 		timescaledb_set_rel_pathlist_query(root, rel, rti, rte, ht);
 	}
+	else
+	{
+		if (ts_cm_functions->set_rel_pathlist_dml != NULL)
+			ts_cm_functions->set_rel_pathlist_dml(root, rel, rti, rte, ht);
+	}
+
 	ts_cache_release(hcache);
 }
diff --git a/tsl/src/init.c b/tsl/src/init.c
index 525cc03cd..9b963e5e6 100644
--- a/tsl/src/init.c
+++ b/tsl/src/init.c
@@ -79,7 +79,8 @@ CrossModuleFunctions tsl_cm_functions = {
 	.remove_reorder_policy = reorder_remove_policy,
 	.remove_compress_chunks_policy = compress_chunks_remove_policy,
 	.create_upper_paths_hook = tsl_create_upper_paths_hook,
-	.set_rel_pathlist_hook = tsl_set_rel_pathlist_hook,
+	.set_rel_pathlist_dml = tsl_set_rel_pathlist_dml,
+	.set_rel_pathlist_query = tsl_set_rel_pathlist_query,
 	.gapfill_marker = gapfill_marker,
 	.gapfill_int16_time_bucket = gapfill_int16_time_bucket,
 	.gapfill_int32_time_bucket = gapfill_int32_time_bucket,
diff --git a/tsl/src/planner.c b/tsl/src/planner.c
index 5df7ac3da..415615ea9 100644
--- a/tsl/src/planner.c
+++ b/tsl/src/planner.c
@@ -29,38 +29,43 @@ tsl_create_upper_paths_hook(PlannerInfo *root, UpperRelationKind stage, RelOptIn
 }
 
 void
-tsl_set_rel_pathlist_hook(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte,
-						  Hypertable *ht, bool isdml)
+tsl_set_rel_pathlist_query(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte,
+						   Hypertable *ht)
 {
-	if (isdml)
+	if (ts_guc_enable_transparent_decompression && ht != NULL &&
+		rel->reloptkind == RELOPT_OTHER_MEMBER_REL && ht->fd.compressed_hypertable_id > 0)
 	{
-		if (ht != NULL && TS_HYPERTABLE_HAS_COMPRESSION_ON(ht))
+		Chunk *chunk = ts_chunk_get_by_relid(rte->relid, 0, true);
+
+		if (chunk->fd.compressed_chunk_id > 0)
+			ts_decompress_chunk_generate_paths(root, rel, ht, chunk);
+	}
+}
+
+void
+tsl_set_rel_pathlist_dml(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte,
+						 Hypertable *ht)
+{
+	if (ht != NULL && TS_HYPERTABLE_HAS_COMPRESSION_ON(ht))
+	{
+		ListCell *lc;
+		/* Is this a chunk under a compressed hypertable? */
+		AppendRelInfo *appinfo = ts_get_appendrelinfo(root, rti, false);
+		Oid parent_oid = appinfo->parent_reloid;
+		Chunk *chunk = ts_chunk_get_by_relid(rte->relid, 0, true);
+		Assert(parent_oid == ht->main_table_relid && (parent_oid == chunk->hypertable_relid));
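+		/*
+		 * If the chunk is compressed, wrap each of its existing paths;
+		 * the wrapper from compress_chunk_dml_generate_paths() is assumed
+		 * to intercept DML that would touch compressed data.
+		 */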
+		if (chunk->fd.compressed_chunk_id > 0)
 		{
-			ListCell *lc;
-			/* is this a chunk under compressed hypertable ? */
-			AppendRelInfo *appinfo = ts_get_appendrelinfo(root, rti, false);
-			Oid parent_oid = appinfo->parent_reloid;
-			Chunk *chunk = ts_chunk_get_by_relid(rte->relid, 0, true);
-			Assert(parent_oid == ht->main_table_relid && (parent_oid == chunk->hypertable_relid));
-			if (chunk->fd.compressed_chunk_id > 0)
+			foreach (lc, rel->pathlist)
 			{
-				foreach (lc, rel->pathlist)
-				{
-					Path **pathptr = (Path **) &lfirst(lc);
-					*pathptr = compress_chunk_dml_generate_paths(*pathptr, chunk);
+				Path **pathptr = (Path **) &lfirst(lc);
+				*pathptr = compress_chunk_dml_generate_paths(*pathptr, chunk);
 			}
 		}
 	}
-	else
-	{
-		if (ts_guc_enable_transparent_decompression && ht != NULL &&
-			rel->reloptkind == RELOPT_OTHER_MEMBER_REL && ht->fd.compressed_hypertable_id > 0)
-		{
-			Chunk *chunk = ts_chunk_get_by_relid(rte->relid, 0, true);
-
-			if (chunk->fd.compressed_chunk_id > 0)
-				ts_decompress_chunk_generate_paths(root, rel, ht, chunk);
-		}
-	}
 }
diff --git a/tsl/src/planner.h b/tsl/src/planner.h
index 44288e0a0..5ba6d017a 100644
--- a/tsl/src/planner.h
+++ b/tsl/src/planner.h
@@ -10,7 +10,7 @@
 #include "hypertable.h"
 
 void tsl_create_upper_paths_hook(PlannerInfo *, UpperRelationKind, RelOptInfo *, RelOptInfo *);
-void tsl_set_rel_pathlist_hook(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *,
-							   bool isdml);
+void tsl_set_rel_pathlist_query(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *);
+void tsl_set_rel_pathlist_dml(PlannerInfo *, RelOptInfo *, Index, RangeTblEntry *, Hypertable *);
 
 #endif /* TIMESCALEDB_TSL_PLANNER_H */
diff --git a/tsl/test/expected/transparent_decompression-10.out b/tsl/test/expected/transparent_decompression-10.out
index 0b7016864..7579b18f2 100644
--- a/tsl/test/expected/transparent_decompression-10.out
+++ b/tsl/test/expected/transparent_decompression-10.out
@@ -43,10 +43,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- compress first and last chunk on the hypertable
 ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
 NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
@@ -125,10 +125,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- look at postgres version to decide whether we run with analyze or without
 SELECT CASE
 WHEN current_setting('server_version_num')::int >= 100000
@@ -872,6 +872,56 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
          ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
 (14 rows)
+-- test sort optimization interaction
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+                                                 QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1)
+         Order: metrics."time" DESC
+         ->  Sort (actual rows=10 loops=1)
+               Sort Key: _hyper_1_3_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+         ->  Index Only Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed)
+               Heap Fetches: 0
+         ->  Sort (never executed)
+               Sort Key: _hyper_1_1_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (never executed)
+(14 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device_id
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+               ->  Seq Scan on _hyper_1_2_chunk (actual rows=10080 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=7200 loops=1)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (actual rows=10 loops=1)
+(10 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_1_1_chunk.device_id, _hyper_1_1_chunk."time" DESC
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=7200 loops=1)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (actual rows=10 loops=1)
+               ->  Seq Scan on _hyper_1_2_chunk (actual rows=10080 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+(10 rows)
+
 --
 -- test ordered path
 --
@@ -2226,8 +2276,8 @@ DEALLOCATE param_prep;
 SET client_min_messages TO error;
 CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
 REFRESH MATERIALIZED VIEW cagg_test;
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 SELECT time FROM cagg_test ORDER BY time LIMIT 1;
              time
 ------------------------------
@@ -3363,6 +3413,128 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
    Filter: ("time" < now())
 (31 rows)
+-- test sort optimization interaction
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+                                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1)
+         Order: metrics_space."time" DESC
+         ->  Merge Append (actual rows=10 loops=1)
+               Sort Key: _hyper_2_12_chunk."time" DESC
+               ->  Index Only Scan using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=3 loops=1)
+                     Heap Fetches: 3
+               ->  Sort (actual rows=6 loops=1)
+                     Sort Key: _hyper_2_11_chunk."time" DESC
+                     Sort Method: top-N heapsort
+                     ->  Sort (actual rows=6048 loops=1)
+                           Sort Key: _hyper_2_11_chunk."time" DESC
+                           Sort Method: quicksort
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                                 ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+               ->  Sort (actual rows=3 loops=1)
+                     Sort Key: _hyper_2_10_chunk."time" DESC
+                     Sort Method: top-N heapsort
+                     ->  Sort (actual rows=2016 loops=1)
+                           Sort Key: _hyper_2_10_chunk."time" DESC
+                           Sort Method: quicksort
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                                 ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+         ->  Merge Append (never executed)
+               Sort Key: _hyper_2_9_chunk."time" DESC
+               ->  Index Only Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed)
+                     Heap Fetches: 0
+               ->  Index Only Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed)
+                     Heap Fetches: 0
+               ->  Index Only Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed)
+                     Heap Fetches: 0
+         ->  Merge Append (never executed)
+               Sort Key: _hyper_2_6_chunk."time" DESC
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_6_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_6_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_19_chunk (never executed)
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_5_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_5_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_18_chunk (never executed)
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_4_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_4_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_17_chunk (never executed)
+(51 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_2_12_chunk."time" DESC, _hyper_2_12_chunk.device_id
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Seq Scan on _hyper_2_12_chunk (actual rows=2016 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                     ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                     ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+               ->  Seq Scan on _hyper_2_9_chunk (actual rows=2016 loops=1)
+               ->  Seq Scan on _hyper_2_8_chunk (actual rows=6048 loops=1)
+               ->  Seq Scan on _hyper_2_7_chunk (actual rows=2016 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_19_chunk (actual rows=2 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
+                     ->  Seq Scan on compress_hyper_6_18_chunk (actual rows=6 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_17_chunk (actual rows=2 loops=1)
+(19 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                                            QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Merge Append (actual rows=10 loops=1)
+         Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_17_chunk (actual rows=2 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_5_chunk.device_id, _hyper_2_5_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
+                     ->  Seq Scan on compress_hyper_6_18_chunk (actual rows=6 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_6_chunk.device_id, _hyper_2_6_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_19_chunk (actual rows=2 loops=1)
+         ->  Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Sort (actual rows=10 loops=1)
+               Sort Key: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                     ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                     ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+         ->  Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_time_idx on _hyper_2_12_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+(36 rows)
+
 --
 -- test ordered path
 --
@@ -5306,8 +5478,8 @@ DEALLOCATE param_prep;
 SET client_min_messages TO error;
 CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
 REFRESH MATERIALIZED VIEW cagg_test;
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 SELECT time FROM cagg_test ORDER BY time LIMIT 1;
              time
 ------------------------------
diff --git a/tsl/test/expected/transparent_decompression-11.out b/tsl/test/expected/transparent_decompression-11.out
index f93c8450c..510bf5198 100644
--- a/tsl/test/expected/transparent_decompression-11.out
+++ b/tsl/test/expected/transparent_decompression-11.out
@@ -43,10 +43,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- compress first and last chunk on the hypertable
 ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
 NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
@@ -125,10 +125,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- look at postgres version to decide whether we run with analyze or without
 SELECT CASE
 WHEN current_setting('server_version_num')::int >= 100000
@@ -872,6 +872,56 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
          ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
 (14 rows)
+-- test sort optimization interaction
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+                                                 QUERY PLAN
+----------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Custom Scan (ChunkAppend) on metrics (actual rows=10 loops=1)
+         Order: metrics."time" DESC
+         ->  Sort (actual rows=10 loops=1)
+               Sort Key: _hyper_1_3_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+         ->  Index Only Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk (never executed)
+               Heap Fetches: 0
+         ->  Sort (never executed)
+               Sort Key: _hyper_1_1_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (never executed)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (never executed)
+(14 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device_id
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+               ->  Seq Scan on _hyper_1_2_chunk (actual rows=10080 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=7200 loops=1)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (actual rows=10 loops=1)
+(10 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_1_1_chunk.device_id, _hyper_1_1_chunk."time" DESC
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk (actual rows=7200 loops=1)
+                     ->  Seq Scan on compress_hyper_5_15_chunk (actual rows=10 loops=1)
+               ->  Seq Scan on _hyper_1_2_chunk (actual rows=10080 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk (actual rows=10080 loops=1)
+                     ->  Seq Scan on compress_hyper_5_16_chunk (actual rows=15 loops=1)
+(10 rows)
+
 --
 -- test ordered path
 --
@@ -2316,8 +2366,8 @@ DEALLOCATE param_prep;
 SET client_min_messages TO error;
 CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
 REFRESH MATERIALIZED VIEW cagg_test;
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 SELECT time FROM cagg_test ORDER BY time LIMIT 1;
              time
 ------------------------------
@@ -3453,6 +3503,128 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
    Filter: ("time" < now())
 (31 rows)
+-- test sort optimization interaction
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+                                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Custom Scan (ChunkAppend) on metrics_space (actual rows=10 loops=1)
+         Order: metrics_space."time" DESC
+         ->  Merge Append (actual rows=10 loops=1)
+               Sort Key: _hyper_2_12_chunk."time" DESC
+               ->  Index Only Scan using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk (actual rows=3 loops=1)
+                     Heap Fetches: 3
+               ->  Sort (actual rows=6 loops=1)
+                     Sort Key: _hyper_2_11_chunk."time" DESC
+                     Sort Method: top-N heapsort
+                     ->  Sort (actual rows=6048 loops=1)
+                           Sort Key: _hyper_2_11_chunk."time" DESC
+                           Sort Method: quicksort
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                                 ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+               ->  Sort (actual rows=3 loops=1)
+                     Sort Key: _hyper_2_10_chunk."time" DESC
+                     Sort Method: top-N heapsort
+                     ->  Sort (actual rows=2016 loops=1)
+                           Sort Key: _hyper_2_10_chunk."time" DESC
+                           Sort Method: quicksort
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                                 ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+         ->  Merge Append (never executed)
+               Sort Key: _hyper_2_9_chunk."time" DESC
+               ->  Index Only Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk (never executed)
+                     Heap Fetches: 0
+               ->  Index Only Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk (never executed)
+                     Heap Fetches: 0
+               ->  Index Only Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk (never executed)
+                     Heap Fetches: 0
+         ->  Merge Append (never executed)
+               Sort Key: _hyper_2_6_chunk."time" DESC
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_6_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_6_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_19_chunk (never executed)
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_5_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_5_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_18_chunk (never executed)
+               ->  Sort (never executed)
+                     Sort Key: _hyper_2_4_chunk."time" DESC
+                     ->  Sort (never executed)
+                           Sort Key: _hyper_2_4_chunk."time" DESC
+                           ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (never executed)
+                                 ->  Seq Scan on compress_hyper_6_17_chunk (never executed)
+(51 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+                                           QUERY PLAN
+-------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Sort (actual rows=10 loops=1)
+         Sort Key: _hyper_2_12_chunk."time" DESC, _hyper_2_12_chunk.device_id
+         Sort Method: top-N heapsort
+         ->  Append (actual rows=27360 loops=1)
+               ->  Seq Scan on _hyper_2_12_chunk (actual rows=2016 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                     ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                     ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+               ->  Seq Scan on _hyper_2_9_chunk (actual rows=2016 loops=1)
+               ->  Seq Scan on _hyper_2_8_chunk (actual rows=6048 loops=1)
+               ->  Seq Scan on _hyper_2_7_chunk (actual rows=2016 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_19_chunk (actual rows=2 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
+                     ->  Seq Scan on compress_hyper_6_18_chunk (actual rows=6 loops=1)
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_17_chunk (actual rows=2 loops=1)
+(19 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                                            QUERY PLAN
+-----------------------------------------------------------------------------------------------------------------------------------
+ Limit (actual rows=10 loops=1)
+   ->  Merge Append (actual rows=10 loops=1)
+         Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_17_chunk (actual rows=2 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_5_chunk.device_id, _hyper_2_5_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk (actual rows=4320 loops=1)
+                     ->  Seq Scan on compress_hyper_6_18_chunk (actual rows=6 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_6_chunk.device_id, _hyper_2_6_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk (actual rows=1440 loops=1)
+                     ->  Seq Scan on compress_hyper_6_19_chunk (actual rows=2 loops=1)
+         ->  Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_time_idx on _hyper_2_9_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+         ->  Sort (actual rows=10 loops=1)
+               Sort Key: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk (actual rows=2016 loops=1)
+                     ->  Seq Scan on compress_hyper_6_20_chunk (actual rows=3 loops=1)
+         ->  Sort (actual rows=1 loops=1)
+               Sort Key: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk."time" DESC
+               Sort Method: top-N heapsort
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk (actual rows=6048 loops=1)
+                     ->  Seq Scan on compress_hyper_6_21_chunk (actual rows=9 loops=1)
+         ->  Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_time_idx on _hyper_2_12_chunk (actual rows=1 loops=1)
+               Heap Fetches: 1
+(36 rows)
+
 --
 -- test ordered path
 --
@@ -5410,8 +5582,8 @@ DEALLOCATE param_prep;
 SET client_min_messages TO error;
 CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
 REFRESH MATERIALIZED VIEW cagg_test;
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 SELECT time FROM cagg_test ORDER BY time LIMIT 1;
              time
 ------------------------------
diff --git a/tsl/test/expected/transparent_decompression-9.6.out b/tsl/test/expected/transparent_decompression-9.6.out
index ee304a4ad..f8927da2e 100644
--- a/tsl/test/expected/transparent_decompression-9.6.out
+++ b/tsl/test/expected/transparent_decompression-9.6.out
@@ -43,10 +43,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- compress first and last chunk on the hypertable
 ALTER TABLE metrics SET (timescaledb.compress, timescaledb.compress_orderby='v0, v1 desc, time', timescaledb.compress_segmentby='device_id,device_id_peer');
 NOTICE: adding index _compressed_hypertable_5_device_id__ts_meta_sequence_num_idx ON _timescaledb_internal._compressed_hypertable_5 USING BTREE(device_id, _ts_meta_sequence_num)
@@ -125,10 +125,10 @@ ANALYZE metrics_space;
 \set PREFIX ''
 \set PREFIX_VERBOSE ''
 \set ECHO none
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 -- look at postgres version to decide whether we run with analyze or without
 SELECT CASE
 WHEN current_setting('server_version_num')::int >= 100000
@@ -782,6 +782,52 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
          ->  Seq Scan on compress_hyper_5_16_chunk
 (13 rows)
+-- test sort optimization interaction
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+                                        QUERY PLAN
+-----------------------------------------------------------------------------------------
+ Limit
+   ->  Custom Scan (ChunkAppend) on metrics
+         Order: metrics."time" DESC
+         ->  Sort
+               Sort Key: _hyper_1_3_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk
+                     ->  Seq Scan on compress_hyper_5_16_chunk
+         ->  Index Only Scan using _hyper_1_2_chunk_metrics_time_idx on _hyper_1_2_chunk
+         ->  Sort
+               Sort Key: _hyper_1_1_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk
+                     ->  Seq Scan on compress_hyper_5_15_chunk
+(12 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+                                 QUERY PLAN
+----------------------------------------------------------------------------
+ Limit
+   ->  Sort
+         Sort Key: _hyper_1_3_chunk."time" DESC, _hyper_1_3_chunk.device_id
+         ->  Append
+               ->  Custom Scan (DecompressChunk) on _hyper_1_3_chunk
+                     ->  Seq Scan on compress_hyper_5_16_chunk
+               ->  Seq Scan on _hyper_1_2_chunk
+               ->  Custom Scan (DecompressChunk) on _hyper_1_1_chunk
+                     ->  Seq Scan on compress_hyper_5_15_chunk
+(9 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                 QUERY PLAN
+----------------------------------------------------------------------------
+ Limit
+   ->  Sort
+         Sort Key: _hyper_1_1_chunk.device_id, _hyper_1_1_chunk."time" DESC
_hyper_1_1_chunk."time" DESC + -> Append + -> Custom Scan (DecompressChunk) on _hyper_1_1_chunk + -> Seq Scan on compress_hyper_5_15_chunk + -> Seq Scan on _hyper_1_2_chunk + -> Custom Scan (DecompressChunk) on _hyper_1_3_chunk + -> Seq Scan on compress_hyper_5_16_chunk +(9 rows) + -- -- test ordered path -- @@ -2005,8 +2051,8 @@ DEALLOCATE param_prep; SET client_min_messages TO error; CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2; REFRESH MATERIALIZED VIEW cagg_test; -psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics (time column time) (948067200000000) -psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000 +psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics (time column time) (948067200000000) +psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000 SELECT time FROM cagg_test ORDER BY time LIMIT 1; time ------------------------------ @@ -3011,6 +3057,110 @@ FROM :TEST_TABLE WHERE device_id IN (1,2) ORDER BY time, device_id; Filter: ("time" < now()) (30 rows) +-- test sort optimization interaction +:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Limit + -> Custom Scan (ChunkAppend) on metrics_space + Order: metrics_space."time" DESC + -> Merge Append + Sort Key: _hyper_2_12_chunk."time" DESC + -> Index Only Scan using _hyper_2_12_chunk_metrics_space_time_idx on _hyper_2_12_chunk + -> Sort + Sort Key: _hyper_2_11_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_11_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_11_chunk + -> Seq Scan on compress_hyper_6_21_chunk + -> Sort + Sort Key: _hyper_2_10_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_10_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_10_chunk + -> Seq Scan on compress_hyper_6_20_chunk + -> Merge Append + Sort Key: _hyper_2_9_chunk."time" DESC + -> Index Only Scan using _hyper_2_9_chunk_metrics_space_time_idx on _hyper_2_9_chunk + -> Index Only Scan using _hyper_2_8_chunk_metrics_space_time_idx on _hyper_2_8_chunk + -> Index Only Scan using _hyper_2_7_chunk_metrics_space_time_idx on _hyper_2_7_chunk + -> Merge Append + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_6_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_6_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_6_chunk + -> Seq Scan on compress_hyper_6_19_chunk + -> Sort + Sort Key: _hyper_2_5_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_5_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_5_chunk + -> Seq Scan on compress_hyper_6_18_chunk + -> Sort + Sort Key: _hyper_2_4_chunk."time" DESC + -> Sort + Sort Key: _hyper_2_4_chunk."time" DESC + -> Custom Scan (DecompressChunk) on _hyper_2_4_chunk + -> Seq Scan on compress_hyper_6_17_chunk +(43 rows) + +:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10; + QUERY PLAN +------------------------------------------------------------------------------ + Limit + -> Sort + Sort Key: _hyper_2_12_chunk."time" DESC, _hyper_2_12_chunk.device_id + -> Append + -> Seq Scan on 
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk
+                     ->  Seq Scan on compress_hyper_6_21_chunk
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk
+                     ->  Seq Scan on compress_hyper_6_20_chunk
+               ->  Seq Scan on _hyper_2_9_chunk
+               ->  Seq Scan on _hyper_2_8_chunk
+               ->  Seq Scan on _hyper_2_7_chunk
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk
+                     ->  Seq Scan on compress_hyper_6_19_chunk
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk
+                     ->  Seq Scan on compress_hyper_6_18_chunk
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk
+                     ->  Seq Scan on compress_hyper_6_17_chunk
+(18 rows)
+
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+                                          QUERY PLAN
+-----------------------------------------------------------------------------------------------------------
+ Limit
+   ->  Merge Append
+         Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+         ->  Sort
+               Sort Key: _hyper_2_4_chunk.device_id, _hyper_2_4_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_2_4_chunk
+                     ->  Seq Scan on compress_hyper_6_17_chunk
+         ->  Sort
+               Sort Key: _hyper_2_5_chunk.device_id, _hyper_2_5_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_2_5_chunk
+                     ->  Seq Scan on compress_hyper_6_18_chunk
+         ->  Sort
+               Sort Key: _hyper_2_6_chunk.device_id, _hyper_2_6_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_2_6_chunk
+                     ->  Seq Scan on compress_hyper_6_19_chunk
+         ->  Index Only Scan using _hyper_2_7_chunk_metrics_space_device_id_time_idx on _hyper_2_7_chunk
+         ->  Index Only Scan using _hyper_2_8_chunk_metrics_space_device_id_time_idx on _hyper_2_8_chunk
+         ->  Index Only Scan using _hyper_2_9_chunk_metrics_space_device_id_time_idx on _hyper_2_9_chunk
+         ->  Sort
+               Sort Key: _hyper_2_10_chunk.device_id, _hyper_2_10_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_2_10_chunk
+                     ->  Seq Scan on compress_hyper_6_20_chunk
+         ->  Sort
+               Sort Key: _hyper_2_11_chunk.device_id, _hyper_2_11_chunk."time" DESC
+               ->  Custom Scan (DecompressChunk) on _hyper_2_11_chunk
+                     ->  Seq Scan on compress_hyper_6_21_chunk
+         ->  Index Only Scan using _hyper_2_12_chunk_metrics_space_device_id_time_idx on _hyper_2_12_chunk
+(27 rows)
+
 --
 -- test ordered path
 --
@@ -4785,8 +4935,8 @@ DEALLOCATE param_prep;
 SET client_min_messages TO error;
 CREATE VIEW cagg_test WITH (timescaledb.continuous) AS SELECT time_bucket('1d',time) AS time, device_id, avg(v1) FROM :TEST_TABLE WHERE device_id=1 GROUP BY 1,2;
 REFRESH MATERIALIZED VIEW cagg_test;
-psql:include/transparent_decompression_query.sql:261: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
-psql:include/transparent_decompression_query.sql:261: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
+psql:include/transparent_decompression_query.sql:267: INFO: new materialization range for public.metrics_space (time column time) (948067200000000)
+psql:include/transparent_decompression_query.sql:267: INFO: materializing continuous aggregate public.cagg_test: new range up to 948067200000000
 SELECT time FROM cagg_test ORDER BY time LIMIT 1;
              time
 ------------------------------
diff --git a/tsl/test/sql/include/transparent_decompression_query.sql b/tsl/test/sql/include/transparent_decompression_query.sql
index e4b2e652a..d24bb2a9b 100644
--- a/tsl/test/sql/include/transparent_decompression_query.sql
+++ b/tsl/test/sql/include/transparent_decompression_query.sql
@@ -98,6 +98,12 @@ FROM :TEST_TABLE WHERE device_id IN (1,2)
 ORDER BY time, device_id;
 --functions not yet optimized
 :PREFIX SELECT * FROM :TEST_TABLE WHERE time < now() ORDER BY time, device_id LIMIT 10;
+-- test sort optimization interaction
+
+:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
+:PREFIX SELECT time,device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
+
 --
 -- test ordered path
 --