Support transparent decompression on individual chunks

This patch adds support for transparent decompression in queries
on individual chunks.
This is required for distributed hypertables with compression
when enable_per_data_node_queries is set to false. Without
this functionality queries on distributed hypertables with
compression would not return data for compressed chunks as
the generated FDW queries would target individual chunks.

Fixes #3714
This commit is contained in:
Sven Klemm 2021-10-18 23:54:15 +02:00 committed by Sven Klemm
parent 8862081e1c
commit acc6abee92
16 changed files with 3825 additions and 71 deletions

View File

@ -23,13 +23,13 @@ jobs:
build_type: [ Debug ]
include:
- pg: "12.8"
ignores: append-12 debug_notice transparent_decompression-12 plan_skip_scan-12 pg_dump
ignores: append-12 debug_notice transparent_decompression-12 transparent_decompress_chunk-12 plan_skip_scan-12 pg_dump
pg_major: 12
- pg: "13.4"
ignores: append-13 debug_notice transparent_decompression-13 pg_dump
ignores: append-13 debug_notice transparent_decompression-13 transparent_decompress_chunk-13 pg_dump
pg_major: 13
- pg: "14.0"
ignores: append-14 debug_notice transparent_decompression-14 pg_dump
ignores: append-14 debug_notice transparent_decompression-14 transparent_decompress_chunk-14 pg_dump
pg_major: 14
steps:

View File

@ -94,8 +94,8 @@ rte_mark_for_expansion(RangeTblEntry *rte)
rte->inh = false;
}
static bool
rte_is_marked_for_expansion(const RangeTblEntry *rte)
bool
ts_rte_is_marked_for_expansion(const RangeTblEntry *rte)
{
if (NULL == rte->ctename)
return false;
@ -240,7 +240,7 @@ preprocess_query(Node *node, Query *rootquery)
/* This lookup will warm the cache with all hypertables in the query */
ht = ts_hypertable_cache_get_entry(hcache, rte->relid, CACHE_FLAG_MISSING_OK);
if (NULL != ht)
if (ht)
{
/* Mark hypertable RTEs we'd like to expand ourselves */
if (ts_guc_enable_optimizations && ts_guc_enable_constraint_exclusion &&
@ -258,6 +258,22 @@ preprocess_query(Node *node, Query *rootquery)
Assert(ht != NULL);
}
}
else
{
/* To properly keep track of SELECT FROM ONLY <chunk> we
* have to mark the rte here because postgres will set
* rte->inh to false (when it detects the chunk has no
* children, which is true for all our chunks) before it
* reaches the set_rel_pathlist hook. But chunks from queries
* like SELECT .. FROM ONLY <chunk> have rte->inh set to
* false, while other chunks have rte->inh set to true.
* We want to distinguish between the two cases here by
* marking the chunk when rte->inh is true.
*/
Chunk *chunk = ts_chunk_get_by_relid(rte->relid, false);
if (chunk && rte->inh)
rte_mark_for_expansion(rte);
}
break;
default:
break;
@ -563,7 +579,7 @@ rte_should_expand(const RangeTblEntry *rte)
{
bool is_hypertable = ts_rte_is_hypertable(rte, NULL);
return is_hypertable && !rte->inh && rte_is_marked_for_expansion(rte);
return is_hypertable && !rte->inh && ts_rte_is_marked_for_expansion(rte);
}
static void
@ -770,7 +786,7 @@ timescaledb_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Index rti, Rang
reltype = classify_relation(root, rel, &ht);
/* Check for unexpanded hypertable */
if (!rte->inh && rte_is_marked_for_expansion(rte))
if (!rte->inh && ts_rte_is_marked_for_expansion(rte))
reenable_inheritance(root, rel, rti, rte);
/* Call other extensions. Do it after table expansion. */
@ -910,7 +926,7 @@ join_involves_hypertable(const PlannerInfo *root, const RelOptInfo *rel)
/* This might give a false positive for chunks in case of PostgreSQL
* expansion since the ctename is copied from the parent hypertable
* to the chunk */
return rte_is_marked_for_expansion(rte);
return ts_rte_is_marked_for_expansion(rte);
}
return false;
}

View File

@ -28,6 +28,7 @@ typedef struct TimescaleDBPrivate
} TimescaleDBPrivate;
extern TSDLLEXPORT bool ts_rte_is_hypertable(const RangeTblEntry *rte, bool *isdistributed);
extern TSDLLEXPORT bool ts_rte_is_marked_for_expansion(const RangeTblEntry *rte);
static inline TimescaleDBPrivate *
ts_create_private_reloptinfo(RelOptInfo *rel)

View File

@ -19,6 +19,8 @@
#include <utils/lsyscache.h>
#include <utils/typcache.h>
#include <planner.h>
#include "hypertable_compression.h"
#include "import/planner.h"
#include "compression/create.h"
@ -279,8 +281,18 @@ build_compressioninfo(PlannerInfo *root, Hypertable *ht, RelOptInfo *chunk_rel)
info->chunk_rel = chunk_rel;
info->chunk_rte = planner_rt_fetch(chunk_rel->relid, root);
appinfo = ts_get_appendrelinfo(root, chunk_rel->relid, false);
info->ht_rte = planner_rt_fetch(appinfo->parent_relid, root);
if (chunk_rel->reloptkind == RELOPT_OTHER_MEMBER_REL)
{
appinfo = ts_get_appendrelinfo(root, chunk_rel->relid, false);
info->ht_rte = planner_rt_fetch(appinfo->parent_relid, root);
}
else
{
Assert(chunk_rel->reloptkind == RELOPT_BASEREL);
info->single_chunk = true;
info->ht_rte = info->chunk_rte;
}
info->hypertable_id = ht->fd.id;
info->hypertable_compression_info = ts_hypertable_compression_get(ht->fd.id);
@ -325,12 +337,16 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
Chunk *chunk)
{
RelOptInfo *compressed_rel;
RelOptInfo *hypertable_rel;
ListCell *lc;
double new_row_estimate;
Index ht_relid = 0;
CompressionInfo *info = build_compressioninfo(root, ht, chunk_rel);
Index ht_index;
/* double check we don't end up here on single chunk queries with ONLY */
Assert(info->chunk_rel->reloptkind == RELOPT_OTHER_MEMBER_REL ||
(info->chunk_rel->reloptkind == RELOPT_BASEREL &&
ts_rte_is_marked_for_expansion(info->chunk_rte)));
/*
* since we rely on parallel coordination from the scan below
@ -338,14 +354,8 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
* than a single worker per chunk
*/
int parallel_workers = 1;
AppendRelInfo *chunk_info = ts_get_appendrelinfo(root, chunk_rel->relid, false);
SortInfo sort_info = build_sortinfo(chunk, chunk_rel, info, root->query_pathkeys);
Assert(chunk_info != NULL);
Assert(chunk_info->parent_reloid == ht->main_table_relid);
ht_index = chunk_info->parent_relid;
hypertable_rel = root->simple_rel_array[ht_index];
Assert(chunk->fd.compressed_chunk_id > 0);
chunk_rel->pathlist = NIL;
@ -360,8 +370,17 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
pushdown_quals(root, chunk_rel, compressed_rel, info->hypertable_compression_info);
set_baserel_size_estimates(root, compressed_rel);
new_row_estimate = compressed_rel->rows * DECOMPRESS_CHUNK_BATCH_SIZE;
/* adjust the parent's estimate by the diff of new and old estimate */
hypertable_rel->rows += (new_row_estimate - chunk_rel->rows);
if (!info->single_chunk)
{
/* adjust the parent's estimate by the diff of new and old estimate */
AppendRelInfo *chunk_info = ts_get_appendrelinfo(root, chunk_rel->relid, false);
Assert(chunk_info->parent_reloid == ht->main_table_relid);
ht_relid = chunk_info->parent_relid;
RelOptInfo *hypertable_rel = root->simple_rel_array[ht_relid];
hypertable_rel->rows += (new_row_estimate - chunk_rel->rows);
}
chunk_rel->rows = new_row_estimate;
create_compressed_scan_paths(root,
compressed_rel,
@ -370,7 +389,10 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
&sort_info);
/* compute parent relids of the chunk and use it to filter paths*/
Relids parent_relids = find_childrel_parents(root, chunk_rel);
Relids parent_relids = NULL;
if (!info->single_chunk)
parent_relids = find_childrel_parents(root, chunk_rel);
/* create non-parallel paths */
foreach (lc, compressed_rel->pathlist)
{
@ -455,7 +477,8 @@ ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *chunk_rel, Hyp
DecompressChunkPath *path;
if (child_path->param_info != NULL &&
(bms_is_member(chunk_rel->relid, child_path->param_info->ppi_req_outer) ||
bms_is_member(ht_index, child_path->param_info->ppi_req_outer)))
(!info->single_chunk &&
bms_is_member(ht_relid, child_path->param_info->ppi_req_outer))))
continue;
path = decompress_chunk_path_create(root, info, parallel_workers, child_path);
add_partial_path(chunk_rel, &path->cpath.path);
@ -920,20 +943,7 @@ decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, Chunk
Oid compressed_relid = compressed_chunk->table_id;
RelOptInfo *compressed_rel;
/* repalloc() does not work with NULL argument */
Assert(root->simple_rel_array);
Assert(root->simple_rte_array);
Assert(root->append_rel_array);
root->simple_rel_array_size++;
root->simple_rel_array =
repalloc(root->simple_rel_array, root->simple_rel_array_size * sizeof(RelOptInfo *));
root->simple_rte_array =
repalloc(root->simple_rte_array, root->simple_rel_array_size * sizeof(RangeTblEntry *));
root->append_rel_array =
repalloc(root->append_rel_array, root->simple_rel_array_size * sizeof(AppendRelInfo *));
root->append_rel_array[compressed_index] = NULL;
expand_planner_arrays(root, 1);
info->compressed_rte = decompress_chunk_make_rte(compressed_relid, AccessShareLock);
root->simple_rte_array[compressed_index] = info->compressed_rte;
@ -948,7 +958,7 @@ decompress_chunk_add_plannerinfo(PlannerInfo *root, CompressionInfo *info, Chunk
* in generate_join_implied_equalities (called by
* get_baserel_parampathinfo <- create_index_paths)
*/
Assert(chunk_rel->top_parent_relids != NULL);
Assert(info->single_chunk || chunk_rel->top_parent_relids != NULL);
compressed_rel->top_parent_relids = bms_copy(chunk_rel->top_parent_relids);
root->simple_rel_array[compressed_index] = compressed_rel;
@ -1132,25 +1142,6 @@ get_column_compressioninfo(List *hypertable_compression_info, char *column_name)
pg_unreachable();
}
/*
* find matching column attno for compressed chunk based on hypertable attno
*
* since we don't want aliasing to interfere we lookup directly in catalog
* instead of using RangeTblEntry
*
* info: compression planning state; ht_rte->relid and compressed_rte->relid
* must already be populated by build_compressioninfo()
* ht_attno: attribute number of the column in the hypertable
*
* Returns the attribute number of the same-named column in the compressed
* chunk relation; raises an ERROR if no column with that name exists there.
*/
AttrNumber
get_compressed_attno(CompressionInfo *info, AttrNumber ht_attno)
{
AttrNumber compressed_attno;
/* resolve attno -> column name on the hypertable side via the catalog */
char *chunk_col = get_attname(info->ht_rte->relid, ht_attno, false);
/* map the name back to an attno on the compressed chunk relation */
compressed_attno = get_attnum(info->compressed_rte->relid, chunk_col);
if (compressed_attno == InvalidAttrNumber)
elog(ERROR, "No matching column in compressed chunk found.");
return compressed_attno;
}
/*
* Find toplevel equality constraints of segmentby columns in baserestrictinfo
*

View File

@ -34,6 +34,8 @@ typedef struct CompressionInfo
/* compressed chunk attribute numbers for columns that are compressed */
Bitmapset *compressed_chunk_compressed_attnos;
bool single_chunk; /* query on explicit chunk */
} CompressionInfo;
typedef struct DecompressChunkPath
@ -57,6 +59,5 @@ void ts_decompress_chunk_generate_paths(PlannerInfo *root, RelOptInfo *rel, Hype
FormData_hypertable_compression *get_column_compressioninfo(List *hypertable_compression_info,
char *column_name);
AttrNumber get_compressed_attno(CompressionInfo *info, AttrNumber chunk_attno);
#endif /* TIMESCALEDB_DECOMPRESS_CHUNK_H */

View File

@ -70,6 +70,26 @@ make_compressed_scan_meta_targetentry(DecompressChunkPath *path, char *column_na
return makeTargetEntry((Expr *) scan_var, tle_index, NULL, false);
}
/*
 * Map a hypertable attribute number to the attribute number of the
 * same-named column in the compressed chunk relation.
 *
 * The lookup goes straight to the catalog (by relid) rather than through
 * the RangeTblEntry so that column aliasing cannot interfere.
 *
 * Raises an ERROR if the compressed chunk has no column with that name.
 */
static AttrNumber
get_compressed_attno(CompressionInfo *info, AttrNumber ht_attno)
{
	Assert(info->ht_rte);

	/* attno -> column name on the hypertable side */
	char *colname = get_attname(info->ht_rte->relid, ht_attno, false);

	/* column name -> attno on the compressed chunk side */
	AttrNumber result = get_attnum(info->compressed_rte->relid, colname);
	if (result == InvalidAttrNumber)
		elog(ERROR, "no matching column in compressed chunk found");

	return result;
}
static TargetEntry *
make_compressed_scan_targetentry(DecompressChunkPath *path, AttrNumber ht_attno, int tle_index)
{

View File

@ -93,9 +93,19 @@ void
tsl_set_rel_pathlist_query(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte,
Hypertable *ht)
{
if (ts_guc_enable_transparent_decompression && ht != NULL &&
rel->reloptkind == RELOPT_OTHER_MEMBER_REL && TS_HYPERTABLE_HAS_COMPRESSION_TABLE(ht) &&
rel->fdw_private != NULL && ((TimescaleDBPrivate *) rel->fdw_private)->compressed)
/* We can get here via a query on the hypertable, in which case reloptkind
 * will be RELOPT_OTHER_MEMBER_REL, or via a direct query on a chunk,
 * in which case reloptkind will be RELOPT_BASEREL.
 * If we get here via SELECT * FROM <chunk>, we decompress the chunk,
 * unless the query was SELECT * FROM ONLY <chunk>.
 * We check for the ONLY case by calling ts_rte_is_marked_for_expansion.
 * Respecting ONLY here is important to not break postgres tools like pg_dump.
 */
if (ts_guc_enable_transparent_decompression && ht &&
(rel->reloptkind == RELOPT_OTHER_MEMBER_REL ||
(rel->reloptkind == RELOPT_BASEREL && ts_rte_is_marked_for_expansion(rte))) &&
TS_HYPERTABLE_HAS_COMPRESSION_TABLE(ht) && rel->fdw_private != NULL &&
((TimescaleDBPrivate *) rel->fdw_private)->compressed)
{
Chunk *chunk = ts_chunk_get_by_relid(rte->relid, true);

View File

@ -878,6 +878,15 @@ SELECT count(*) from test_recomp_int;
9
(1 row)
-- check with per datanode queries disabled
SET timescaledb.enable_per_data_node_queries TO false;
SELECT count(*) from test_recomp_int;
count
-------
9
(1 row)
RESET timescaledb.enable_per_data_node_queries;
SELECT * from test_recomp_int_chunk_status ORDER BY 1;
chunk_name | chunk_status
------------------------+--------------
@ -936,9 +945,9 @@ SELECT * from test_recomp_int_chunk_status ORDER BY 1;
_dist_hyper_4_16_chunk | 3
(3 rows)
SELECT time_bucket(20, time ), count(*)
SELECT time_bucket(20, time), count(*)
FROM test_recomp_int
GROUP BY time_bucket( 20, time) ORDER BY 1;
GROUP BY time_bucket(20, time) ORDER BY 1;
time_bucket | count
-------------+-------
0 | 14
@ -946,6 +955,19 @@ GROUP BY time_bucket( 20, time) ORDER BY 1;
100 | 5
(3 rows)
-- check with per datanode queries disabled
SET timescaledb.enable_per_data_node_queries TO false;
SELECT time_bucket(20, time), count(*)
FROM test_recomp_int
GROUP BY time_bucket(20, time) ORDER BY 1;
time_bucket | count
-------------+-------
0 | 14
60 | 3
100 | 5
(3 rows)
RESET timescaledb.enable_per_data_node_queries;
--check compression_status afterwards--
SELECT recompress_chunk(chunk, true) FROM
( SELECT chunk FROM show_chunks('test_recomp_int') AS chunk ORDER BY chunk LIMIT 2)q;

View File

@ -35,10 +35,15 @@ _timescaledb_internal._hyper_1_1_chunk
(1 row)
step Cc: COMMIT;
step SC1: SELECT count(*) from _timescaledb_internal._hyper_1_1_chunk;
step SC1: SELECT count(*) AS only FROM ONLY _timescaledb_internal._hyper_1_1_chunk; SELECT count(*) FROM _timescaledb_internal._hyper_1_1_chunk;
only
----
0
(1 row)
count
-----
0
11
(1 row)
step S1: SELECT count(*) from ts_device_table;

View File

@ -33,7 +33,7 @@ step "SChunkStat" { SELECT status from _timescaledb_catalog.chunk
session "S"
step "S1" { SELECT count(*) from ts_device_table; }
step "SC1" { SELECT count(*) from _timescaledb_internal._hyper_1_1_chunk; }
step "SC1" { SELECT count(*) AS only FROM ONLY _timescaledb_internal._hyper_1_1_chunk; SELECT count(*) FROM _timescaledb_internal._hyper_1_1_chunk; }
step "SH" { SELECT total_chunks, number_compressed_chunks from hypertable_compression_stats('ts_device_table'); }
session "LCT"
@ -43,7 +43,7 @@ step "LockChunkTuple" {
WHERE id = ( select min(ch.id) FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.chunk ch WHERE ch.hypertable_id = ht.id AND ht.table_name like 'ts_device_table') FOR UPDATE;
}
step "UnlockChunkTuple" { ROLLBACK; }
session "LC"
step "LockChunk1" {
BEGIN;

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -15,8 +15,9 @@ if(CMAKE_BUILD_TYPE MATCHES Debug)
list(APPEND TEST_FILES_SHARED timestamp_limits.sql with_clause_parser.sql)
endif(CMAKE_BUILD_TYPE MATCHES Debug)
set(TEST_TEMPLATES_SHARED gapfill.sql.in generated_columns.sql.in
ordered_append.sql.in ordered_append_join.sql.in)
set(TEST_TEMPLATES_SHARED
gapfill.sql.in generated_columns.sql.in transparent_decompress_chunk.sql.in
ordered_append.sql.in ordered_append_join.sql.in)
# Regression tests that vary with PostgreSQL version. Generated test files are
# put in the original source directory since all tests must be in the same

View File

@ -0,0 +1,326 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set PREFIX 'EXPLAIN (analyze, costs off, timing off, summary off)'
\set PREFIX_VERBOSE 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)'
\set PREFIX_NO_ANALYZE 'EXPLAIN (verbose, costs off)'
SELECT show_chunks('metrics_compressed') AS "TEST_TABLE" ORDER BY 1::text LIMIT 1 \gset
-- this should use DecompressChunk node
:PREFIX_VERBOSE
SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time LIMIT 5;
-- must not use DecompressChunk node
:PREFIX_VERBOSE
SELECT * FROM ONLY :TEST_TABLE WHERE device_id = 1 ORDER BY time LIMIT 5;
-- test expressions
:PREFIX
SELECT time_bucket ('1d', time),
v1 + v2 AS "sum",
COALESCE(NULL, v1, v2) AS "coalesce",
NULL AS "NULL",
'text' AS "text",
t AS "RECORD"
FROM :TEST_TABLE t
WHERE device_id IN (1, 2)
ORDER BY time, device_id;
-- test empty targetlist
:PREFIX SELECT FROM :TEST_TABLE;
-- test empty resultset
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < 0;
-- test targetlist not referencing columns
:PREFIX SELECT 1 FROM :TEST_TABLE;
-- test constraints not present in targetlist
:PREFIX SELECT v1 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY v1;
-- test order not present in targetlist
:PREFIX SELECT v2 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY v1;
-- test column with all NULL
:PREFIX SELECT v3 FROM :TEST_TABLE WHERE device_id = 1;
--
-- test qual pushdown
--
-- v3 is not segment by or order by column so should not be pushed down
:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE v3 > 10.0 ORDER BY time, device_id;
-- device_id constraint should be pushed down
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time, device_id LIMIT 10;
-- test IS NULL / IS NOT NULL
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IS NOT NULL ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IS NULL ORDER BY time, device_id LIMIT 10;
-- test IN (Const,Const)
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id IN (1, 2) ORDER BY time, device_id LIMIT 10;
-- test cast pushdown
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = '1'::text::int ORDER BY time, device_id LIMIT 10;
--test var op var
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = v0 ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < v1 ORDER BY time, device_id LIMIT 10;
-- test expressions
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = 1 + 4 / 2 ORDER BY time, device_id LIMIT 10;
-- test function calls
-- not yet pushed down
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id = length(substring(version(), 1, 3)) ORDER BY time, device_id LIMIT 10;
--
-- test segment meta pushdown
--
-- order by column and const
:PREFIX SELECT * FROM :TEST_TABLE WHERE time = '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE time < '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE time <= '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE time >= '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE '2000-01-01 1:00:00+0' < time ORDER BY time, device_id LIMIT 10;
--pushdowns between order by and segment by columns
:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 < 1 ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 < device_id ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE device_id < v0 ORDER BY time, device_id LIMIT 10;
:PREFIX SELECT * FROM :TEST_TABLE WHERE v1 = device_id ORDER BY time, device_id LIMIT 10;
--pushdown between two order by column (not pushed down)
:PREFIX SELECT * FROM :TEST_TABLE WHERE v0 = v1 ORDER BY time, device_id LIMIT 10;
--pushdown of quals on order by and segment by cols anded together
:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' AND device_id = 1 ORDER BY time, device_id LIMIT 10;
--pushdown of quals on order by and segment by cols or together (not pushed down)
:PREFIX SELECT * FROM :TEST_TABLE WHERE time > '2000-01-01 1:00:00+0' OR device_id = 1 ORDER BY time, device_id LIMIT 10;
--functions not yet optimized
:PREFIX SELECT * FROM :TEST_TABLE WHERE time < now() ORDER BY time, device_id LIMIT 10;
-- test sort optimization interaction
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 10;
:PREFIX SELECT time, device_id FROM :TEST_TABLE ORDER BY time DESC, device_id LIMIT 10;
:PREFIX SELECT time, device_id FROM :TEST_TABLE ORDER BY device_id, time DESC LIMIT 10;
-- test aggregate
:PREFIX SELECT count(*) FROM :TEST_TABLE;
-- test aggregate with GROUP BY
:PREFIX SELECT count(*) FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id;
-- test window functions with GROUP BY
:PREFIX SELECT sum(count(*)) OVER () FROM :TEST_TABLE GROUP BY device_id ORDER BY device_id;
-- test CTE
:PREFIX WITH q AS (
SELECT v1 FROM :TEST_TABLE ORDER BY time
)
SELECT * FROM q ORDER BY v1;
-- test CTE join
:PREFIX WITH q1 AS (
SELECT time, v1 FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time
),
q2 AS (
SELECT time, v2 FROM :TEST_TABLE WHERE device_id = 2 ORDER BY time
)
SELECT * FROM q1 INNER JOIN q2 ON q1.time = q2.time ORDER BY q1.time;
--
-- test indexes
--
SET enable_seqscan TO FALSE;
-- IndexScans should work
:PREFIX_VERBOSE SELECT time, device_id FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id, time;
-- globs should not plan IndexOnlyScans
:PREFIX_VERBOSE SELECT * FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id, time;
-- whole row reference should work
:PREFIX_VERBOSE SELECT test_table FROM :TEST_TABLE AS test_table WHERE device_id = 1 ORDER BY device_id, time;
-- even when we select only a segmentby column, we still need count
:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id = 1 ORDER BY device_id;
:PREFIX_VERBOSE SELECT count(*) FROM :TEST_TABLE WHERE device_id = 1;
--ensure that we can get a nested loop
SET enable_seqscan TO TRUE;
SET enable_hashjoin TO FALSE;
:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN ( VALUES (1));
--with multiple values can get a nested loop.
:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN ( VALUES (1), (2));
RESET enable_hashjoin;
:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1));
--with multiple values can get a semi-join or nested loop depending on seq_page_cost.
:PREFIX_VERBOSE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1), (2));
SET seq_page_cost = 100;
-- loop/row counts of this query is different on windows so we run it without analyze
:PREFIX_NO_ANALYZE SELECT device_id FROM :TEST_TABLE WHERE device_id IN (VALUES (1), (2));
RESET seq_page_cost;
-- test view
CREATE OR REPLACE VIEW compressed_view AS SELECT time, device_id, v1, v2 FROM :TEST_TABLE;
:PREFIX SELECT * FROM compressed_view WHERE device_id = 1 ORDER BY time DESC LIMIT 10;
DROP VIEW compressed_view;
-- test INNER JOIN
:PREFIX
SELECT *
FROM :TEST_TABLE m1
INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time
AND m1.device_id = m2.device_id
ORDER BY m1.time,
m1.device_id
LIMIT 10;
:PREFIX
SELECT *
FROM :TEST_TABLE m1
INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time
INNER JOIN :TEST_TABLE m3 ON m2.time = m3.time
AND m1.device_id = m2.device_id
AND m3.device_id = 3
ORDER BY m1.time,
m1.device_id
LIMIT 10;
:PREFIX
SELECT *
FROM :TEST_TABLE m1
INNER JOIN :TEST_TABLE m2 ON m1.time = m2.time
AND m1.device_id = 1
AND m2.device_id = 2
ORDER BY m1.time,
m1.device_id,
m2.time,
m2.device_id
LIMIT 100;
:PREFIX
SELECT *
FROM metrics m1
INNER JOIN metrics_space m2 ON m1.time = m2.time
AND m1.device_id = 1
AND m2.device_id = 2
ORDER BY m1.time,
m1.device_id,
m2.time,
m2.device_id
LIMIT 100;
-- test OUTER JOIN
:PREFIX
SELECT *
FROM :TEST_TABLE m1
LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time
AND m1.device_id = m2.device_id
ORDER BY m1.time,
m1.device_id
LIMIT 10;
:PREFIX
SELECT *
FROM :TEST_TABLE m1
LEFT OUTER JOIN :TEST_TABLE m2 ON m1.time = m2.time
AND m1.device_id = 1
AND m2.device_id = 2
ORDER BY m1.time,
m1.device_id,
m2.time,
m2.device_id
LIMIT 100;
SET parallel_leader_participation TO false;
-- test implicit self-join
:PREFIX
SELECT *
FROM :TEST_TABLE m1,
:TEST_TABLE m2
WHERE m1.time = m2.time
ORDER BY m1.time,
m1.device_id,
m2.time,
m2.device_id
LIMIT 20;
-- test self-join with sub-query
:PREFIX
SELECT *
FROM (
SELECT *
FROM :TEST_TABLE m1) m1
INNER JOIN (
SELECT *
FROM :TEST_TABLE m2) m2 ON m1.time = m2.time
ORDER BY m1.time,
m1.device_id,
m2.device_id
LIMIT 10;
RESET parallel_leader_participation;
:PREFIX
SELECT *
FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time)
INNER JOIN LATERAL (
SELECT time
FROM :TEST_TABLE m1
WHERE m1.time = g.time
LIMIT 1) m1 ON TRUE;
-- test prepared statement
SET plan_cache_mode TO force_generic_plan;
PREPARE prep AS SELECT count(time) FROM :TEST_TABLE WHERE device_id = 1;
:PREFIX EXECUTE prep;
EXECUTE prep;
EXECUTE prep;
EXECUTE prep;
EXECUTE prep;
EXECUTE prep;
EXECUTE prep;
DEALLOCATE prep;
-- test prepared statement with params pushdown
PREPARE param_prep (int) AS
SELECT *
FROM generate_series('2000-01-01'::timestamptz, '2000-02-01'::timestamptz, '1d'::interval) g (time)
INNER JOIN LATERAL (
SELECT time
FROM :TEST_TABLE m1
WHERE m1.time = g.time
AND device_id = $1
LIMIT 1) m1 ON TRUE;
:PREFIX EXECUTE param_prep (1);
:PREFIX EXECUTE param_prep (2);
EXECUTE param_prep (1);
EXECUTE param_prep (2);
EXECUTE param_prep (1);
EXECUTE param_prep (2);
EXECUTE param_prep (1);
DEALLOCATE param_prep;
RESET plan_cache_mode;

View File

@ -348,6 +348,12 @@ SELECT * from test_recomp_int_chunk_status ORDER BY 1;
--run the compression policy job, it will recompress chunks that are unordered
CALL run_job(:compressjob_id);
SELECT count(*) from test_recomp_int;
-- check with per datanode queries disabled
SET timescaledb.enable_per_data_node_queries TO false;
SELECT count(*) from test_recomp_int;
RESET timescaledb.enable_per_data_node_queries;
SELECT * from test_recomp_int_chunk_status ORDER BY 1;
---run copy tests
@ -387,9 +393,16 @@ COPY test_recomp_int FROM STDIN WITH DELIMITER ',';
\.
SELECT * from test_recomp_int_chunk_status ORDER BY 1;
SELECT time_bucket(20, time ), count(*)
SELECT time_bucket(20, time), count(*)
FROM test_recomp_int
GROUP BY time_bucket( 20, time) ORDER BY 1;
GROUP BY time_bucket(20, time) ORDER BY 1;
-- check with per datanode queries disabled
SET timescaledb.enable_per_data_node_queries TO false;
SELECT time_bucket(20, time), count(*)
FROM test_recomp_int
GROUP BY time_bucket(20, time) ORDER BY 1;
RESET timescaledb.enable_per_data_node_queries;
--check compression_status afterwards--
SELECT recompress_chunk(chunk, true) FROM