Add test infrastructure with shared tables

This PR adds test infrastructure for running tests with shared tables.
This allows having hypertables with specific configurations that are usable by
all tests. Since these tests don't require creating a new database
for each test case, some of the overhead of the normal tests is removed.
While this makes query tests much faster, some tests will still
require their own database, but most queries could be moved to this
infrastructure to improve test coverage and speed them up.
Sven Klemm 2019-10-22 18:19:22 +02:00 committed by Matvey Arye
parent e2df62c81c
commit 819414df02
20 changed files with 17071 additions and 5279 deletions
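
As context for the diffs below: a shared test does not create its own hypertables; it targets the preconfigured ones through a psql variable. A minimal sketch of the pattern (the metrics hypertable is created by the shared setup script further down):

\set TEST_TABLE 'metrics'
SELECT count(*) FROM :TEST_TABLE;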

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

test/runner_shared.sh (new executable file, 46 lines)

@@ -0,0 +1,46 @@
#!/usr/bin/env bash
set -u
set -e
CURRENT_DIR=$(dirname $0)
EXE_DIR=${EXE_DIR:-${CURRENT_DIR}}
PG_REGRESS_PSQL=$1
PSQL=${PSQL:-$PG_REGRESS_PSQL}
PSQL="${PSQL} -X" # Prevent any .psqlrc files from being executed during the tests
TEST_PGUSER=${TEST_PGUSER:-postgres}
TEST_INPUT_DIR=${TEST_INPUT_DIR:-${EXE_DIR}}
TEST_OUTPUT_DIR=${TEST_OUTPUT_DIR:-${EXE_DIR}}
#docker doesn't set user
USER=${USER:-`whoami`}
TEST_SPINWAIT_ITERS=${TEST_SPINWAIT_ITERS:-10}
TEST_ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER:-super_user}
TEST_ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER:-default_perm_user}
TEST_ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2:-default_perm_user_2}
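# drop the psql binary path consumed into PG_REGRESS_PSQL above; the remaining arguments are passed through to psql via $@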
shift
# setup clusterwide settings on first run
if [[ ! -f ${TEST_OUTPUT_DIR}/.pg_init ]]; then
touch ${TEST_OUTPUT_DIR}/.pg_init
${PSQL} $@ -U ${USER} -d postgres -v ECHO=none -c "ALTER USER ${TEST_ROLE_SUPERUSER} WITH SUPERUSER;" >/dev/null
${PSQL} $@ -U $TEST_PGUSER -d ${TEST_DBNAME} -v ECHO=none < ${TEST_INPUT_DIR}/shared/sql/include/shared_setup.sql >/dev/null
fi
cd ${EXE_DIR}/sql
# we strip out any output between <exclude_from_test></exclude_from_test>
# and the part about memory usage in EXPLAIN ANALYZE output of Sort nodes
${PSQL} -U ${TEST_PGUSER} \
-v ON_ERROR_STOP=1 \
-v VERBOSITY=terse \
-v ECHO=all \
-v TEST_INPUT_DIR=${TEST_INPUT_DIR} \
-v TEST_OUTPUT_DIR=${TEST_OUTPUT_DIR} \
-v TEST_SPINWAIT_ITERS=${TEST_SPINWAIT_ITERS} \
-v ROLE_SUPERUSER=${TEST_ROLE_SUPERUSER} \
-v ROLE_DEFAULT_PERM_USER=${TEST_ROLE_DEFAULT_PERM_USER} \
-v ROLE_DEFAULT_PERM_USER_2=${TEST_ROLE_DEFAULT_PERM_USER_2} \
$@ -d ${TEST_DBNAME} 2>&1 | sed -e '/<exclude_from_test>/,/<\/exclude_from_test>/d' -e 's! Memory: [0-9]\{1,\}kB!!' -e 's! Memory Usage: [0-9]\{1,\}kB!!'
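
One intended use of the sed filter above, sketched with a hypothetical test snippet: since the tests run with ECHO=all, psql echoes input lines into the output, so a test can fence nondeterministic statements between the markers and both the statements and their result rows are stripped from the recorded output:

-- <exclude_from_test>
SELECT now();
-- </exclude_from_test>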

@@ -144,11 +144,6 @@ set(TEST_TEMPLATES
# be in the same directory. These files are updated when the template
# is edited, but not when the output file is deleted. If the output is
# deleted either recreate it manually, or rerun cmake on the root dir.
if (${PG_VERSION_MAJOR} GREATER "9")
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR})
else ()
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR}.${PG_VERSION_MINOR})
endif ()
foreach(TEMPLATE_FILE ${TEST_TEMPLATES})
string(LENGTH ${TEMPLATE_FILE} TEMPLATE_NAME_LEN)
math(EXPR TEMPLATE_NAME_LEN ${TEMPLATE_NAME_LEN}-7)

@@ -18,16 +18,6 @@ INSERT INTO devices VALUES
(2,'Device 2'),
(3,'Device 3');
-- create a table where we create chunks in order
CREATE TABLE ordered_append(time timestamptz NOT NULL, device_id INT, value float);
SELECT create_hypertable('ordered_append','time');
CREATE index on ordered_append(time DESC,device_id);
CREATE index on ordered_append(device_id,time DESC);
INSERT INTO ordered_append SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 1, 0.5;
INSERT INTO ordered_append SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 2, 1.5;
INSERT INTO ordered_append SELECT generate_series('2000-01-01'::timestamptz,'2000-01-18'::timestamptz,'1m'::interval), 3, 2.5;
-- create a second table where we create chunks in reverse order
CREATE TABLE ordered_append_reverse(time timestamptz NOT NULL, device_id INT, value float);
SELECT create_hypertable('ordered_append_reverse','time');
@@ -60,7 +50,6 @@ INSERT INTO dimension_only VALUES
('2000-01-07');
ANALYZE devices;
ANALYZE ordered_append;
ANALYZE ordered_append_reverse;
ANALYZE dimension_last;
ANALYZE dimension_only;
@@ -104,18 +93,6 @@ INSERT INTO ht_dropped_columns(time,device_id,value) SELECT generate_series('200
ANALYZE ht_dropped_columns;
CREATE TABLE space(time timestamptz NOT NULL, device_id int NOT NULL, value float);
SELECT create_hypertable('space','time','device_id',number_partitions:=4);
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 1, 1.5;
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 2, 2.5;
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 3, 3.5;
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 4, 4.5;
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 5, 5.5;
INSERT INTO space SELECT generate_series('2000-01-10'::timestamptz,'2000-01-01'::timestamptz,'-1m'::interval), 6, 6.5;
ANALYZE space;
CREATE TABLE space2(time timestamptz NOT NULL, device_id int NOT NULL, tag_id int NOT NULL, value float);
SELECT create_hypertable('space2','time','device_id',number_partitions:=3);
SELECT add_dimension('space2','tag_id',number_partitions:=3);

@@ -15,18 +15,6 @@ FROM
INNER JOIN _timescaledb_catalog.hypertable ht ON d.hypertable_id = ht.id
ORDER BY ht.table_name, range_start, chunk;
-- test ASC for ordered chunks
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY time ASC LIMIT 1;
-- test DESC for ordered chunks
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY time DESC LIMIT 1;
-- test ASC for reverse ordered chunks
:PREFIX SELECT
time, device_id, value
@@ -39,121 +27,6 @@ ORDER BY time ASC LIMIT 1;
FROM ordered_append_reverse
ORDER BY time DESC LIMIT 1;
-- test query with ORDER BY column not in targetlist
:PREFIX SELECT
device_id, value
FROM ordered_append
ORDER BY time ASC LIMIT 1;
-- ORDER BY may include other columns after time column
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY time DESC, device_id LIMIT 1;
-- test RECORD in targetlist
:PREFIX SELECT
(time, device_id, value)
FROM ordered_append
ORDER BY time DESC, device_id LIMIT 1;
-- test sort column not in targetlist
:PREFIX SELECT
time_bucket('1h',time)
FROM ordered_append
ORDER BY time DESC LIMIT 1;
-- queries with ORDER BY non-time column shouldn't use ordered append
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY device_id LIMIT 1;
-- time column must be primary sort order
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY device_id, time LIMIT 1;
-- queries without LIMIT should use ordered append
:PREFIX SELECT
time, device_id, value
FROM ordered_append
ORDER BY time ASC;
-- queries without ORDER BY shouldn't use ordered append
:PREFIX SELECT
time, device_id, value
FROM ordered_append
LIMIT 1;
-- test interaction with constraint exclusion
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time > '2000-01-07'
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time > '2000-01-07'
ORDER BY time DESC LIMIT 1;
-- test interaction with constraint aware append
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time > now_s()
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time < now_s()
ORDER BY time ASC LIMIT 1;
-- test constraint exclusion
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time > now_s() AND time < '2000-01-10'
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time, device_id, value
FROM ordered_append
WHERE time < now_s() AND time > '2000-01-07'
ORDER BY time ASC LIMIT 1;
-- min/max queries
:PREFIX SELECT max(time) FROM ordered_append;
:PREFIX SELECT min(time) FROM ordered_append;
-- test first/last (doesn't use ordered append yet)
:PREFIX SELECT first(time, time) FROM ordered_append;
:PREFIX SELECT last(time, time) FROM ordered_append;
-- test query with time_bucket
:PREFIX SELECT
time_bucket('1d',time), device_id, value
FROM ordered_append
ORDER BY time ASC LIMIT 1;
-- test query with ORDER BY time_bucket
:PREFIX SELECT
time_bucket('1d',time), device_id, value
FROM ordered_append
ORDER BY 1 LIMIT 1;
-- test query with ORDER BY time_bucket
:PREFIX SELECT
time_bucket('1d',time), device_id, value
FROM ordered_append
ORDER BY time_bucket('1d',time) LIMIT 1;
-- test query with ORDER BY time_bucket, device_id
-- must not use ordered append
:PREFIX SELECT
@@ -161,18 +34,6 @@ ORDER BY time_bucket('1d',time) LIMIT 1;
FROM dimension_last
ORDER BY time_bucket('1d',time), device_id LIMIT 1;
-- test query with ORDER BY date_trunc
:PREFIX SELECT
time_bucket('1d',time), device_id, value
FROM ordered_append
ORDER BY date_trunc('day', time) LIMIT 1;
-- test query with ORDER BY date_trunc
:PREFIX SELECT
date_trunc('day',time), device_id, value
FROM ordered_append
ORDER BY 1 LIMIT 1;
-- test query with ORDER BY date_trunc, device_id
-- must not use ordered append
:PREFIX SELECT
@@ -180,28 +41,6 @@ ORDER BY 1 LIMIT 1;
FROM dimension_last
ORDER BY 1,2 LIMIT 1;
-- test query with now() should result in ordered ChunkAppend
:PREFIX SELECT * FROM ordered_append WHERE time < now() + '1 month'
ORDER BY time DESC limit 1;
-- test CTE
:PREFIX WITH i AS (SELECT * FROM ordered_append WHERE time < now() ORDER BY time DESC limit 100)
SELECT * FROM i;
-- test LATERAL with ordered append in the outer query
:PREFIX SELECT * FROM ordered_append, LATERAL(SELECT * FROM (VALUES (1),(2)) v) l ORDER BY time DESC limit 2;
-- test LATERAL with ordered append in the lateral query
:PREFIX SELECT * FROM (VALUES (1),(2)) v, LATERAL(SELECT * FROM ordered_append ORDER BY time DESC limit 2) l;
-- test plan with best index is chosen
-- this should use device_id, time index
:PREFIX SELECT * FROM ordered_append WHERE device_id = 1 ORDER BY time DESC LIMIT 1;
-- test plan with best index is chosen
-- this should use time index
:PREFIX SELECT * FROM ordered_append ORDER BY time DESC LIMIT 1;
-- test with table with only dimension column
:PREFIX SELECT * FROM dimension_only ORDER BY time DESC LIMIT 1;
@@ -261,48 +100,6 @@ FROM ht_dropped_columns
WHERE device_id = 1
ORDER BY time DESC;
-- test hypertable with space partitioning
:PREFIX SELECT
time, device_id, value
FROM space
ORDER BY time;
-- test hypertable with space partitioning and exclusion in space
-- should remove 3 of 4 space partitions (2 chunks scanned)
:PREFIX SELECT
time, device_id, value
FROM space
WHERE device_id = 1
ORDER BY time;
-- test hypertable with space partitioning and exclusion in space
-- should remove 2 of 4 space partitions (2 + 2 chunks scanned)
:PREFIX SELECT
time, device_id, value
FROM space
WHERE device_id IN (1, 4)
ORDER BY time;
-- test hypertable with space partitioning and reverse order
:PREFIX SELECT
time, device_id, value
FROM space
ORDER BY time DESC;
-- test hypertable with space partitioning ORDER BY multiple columns
-- does not use ordered append
:PREFIX SELECT
time, device_id, value
FROM space
ORDER BY time, device_id LIMIT 1;
-- test hypertable with space partitioning ORDER BY non-time column
-- does not use ordered append
:PREFIX SELECT
time, device_id, value
FROM space
ORDER BY device_id, time LIMIT 1;
-- test hypertable with 2 space dimensions
:PREFIX SELECT
time, device_id, value
@@ -315,176 +112,6 @@ ORDER BY time DESC;
FROM space3
ORDER BY time DESC;
-- expressions in ORDER BY clause
:PREFIX SELECT
time_bucket('1h',time)
FROM space
ORDER BY 1 LIMIT 10;
:PREFIX SELECT
time_bucket('1h',time)
FROM space
ORDER BY 1 DESC LIMIT 10;
-- test LATERAL with correlated query
-- only last chunk should be executed
:PREFIX SELECT *
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM ordered_append o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval ORDER BY time DESC LIMIT 1
) l ON true;
-- test LATERAL with correlated query
-- only 2nd chunk should be executed
:PREFIX SELECT *
FROM generate_series('2000-01-10'::timestamptz,'2000-01-11','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM ordered_append o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval ORDER BY time LIMIT 1
) l ON true;
-- test startup and runtime exclusion together
:PREFIX SELECT *
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM ordered_append o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval AND o.time < now() ORDER BY time DESC LIMIT 1
) l ON true;
-- test startup and runtime exclusion together
-- all chunks should be filtered
:PREFIX SELECT *
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM ordered_append o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval AND o.time > now() ORDER BY time DESC LIMIT 1
) l ON true;
-- test CTE
-- no chunk exclusion for CTE because cte query is not pulled up
:PREFIX WITH cte AS (SELECT * FROM ordered_append ORDER BY time)
SELECT * FROM cte WHERE time < '2000-02-01'::timestamptz;
-- test JOIN
-- no exclusion on joined table because quals are not propagated yet
:PREFIX SELECT *
FROM ordered_append o1
INNER JOIN ordered_append o2 ON o1.time = o2.time
WHERE o1.time < '2000-02-01'
ORDER BY o1.time;
-- test JOIN
-- last chunk of o2 should not be executed
:PREFIX SELECT *
FROM ordered_append o1
INNER JOIN (SELECT * FROM ordered_append o2 ORDER BY time) o2 ON o1.time = o2.time
WHERE o1.time < '2000-01-08'
ORDER BY o1.time;
-- test subquery
-- not ChunkAppend so no chunk exclusion
:PREFIX SELECT *
FROM ordered_append WHERE time = (SELECT max(time) FROM ordered_append) ORDER BY time;
-- test join against max query
-- not ChunkAppend so no chunk exclusion
:PREFIX SELECT *
FROM ordered_append o1 INNER JOIN (SELECT max(time) AS max_time FROM ordered_append) o2 ON o1.time = o2.max_time ORDER BY time;
-- test ordered append with limit expression
:PREFIX SELECT *
FROM ordered_append ORDER BY time LIMIT (SELECT length('four'));
-- test with ordered guc disabled
SET timescaledb.enable_ordered_append TO off;
:PREFIX SELECT *
FROM ordered_append ORDER BY time LIMIT 3;
RESET timescaledb.enable_ordered_append;
:PREFIX SELECT *
FROM ordered_append ORDER BY time LIMIT 3;
-- test with chunk append disabled
SET timescaledb.enable_chunk_append TO off;
:PREFIX SELECT *
FROM ordered_append ORDER BY time LIMIT 3;
RESET timescaledb.enable_chunk_append;
:PREFIX SELECT *
FROM ordered_append ORDER BY time LIMIT 3;
-- test space partitioning with startup exclusion
:PREFIX SELECT *
FROM space WHERE time < now() ORDER BY time;
-- test runtime exclusion together with space partitioning
:PREFIX SELECT *
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM space o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval ORDER BY time DESC LIMIT 1
) l ON true;
-- test startup and runtime exclusion together with space partitioning
:PREFIX SELECT *
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM space o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval AND o.time < now() ORDER BY time DESC LIMIT 1
) l ON true;
-- test JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with USING
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 USING(time) ORDER BY o1.time LIMIT 100;
-- test NATURAL JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 NATURAL INNER JOIN ordered_append o2 ORDER BY o1.time LIMIT 100;
-- test LEFT JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 LEFT JOIN ordered_append o2 ON o1.time=o2.time ORDER BY o1.time LIMIT 100;
-- test RIGHT JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 RIGHT JOIN ordered_append o2 ON o1.time=o2.time ORDER BY o2.time LIMIT 100;
-- test JOIN on time column with ON clause expression order switched
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o2.time = o1.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with equality condition in WHERE clause
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON true WHERE o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with ORDER BY 2nd hypertable
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o1.time = o2.time ORDER BY o2.time LIMIT 100;
-- test JOIN on time column and device_id
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o1.device_id = o2.device_id AND o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on device_id
-- should not use ordered append for 2nd hypertable
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o1.device_id = o2.device_id ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with implicit join
-- should use 2 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1, ordered_append o2 WHERE o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with 3 hypertables
-- should use 3 ChunkAppend
:PREFIX SELECT * FROM ordered_append o1 INNER JOIN ordered_append o2 ON o1.time = o2.time INNER JOIN ordered_append o3 ON o1.time = o3.time ORDER BY o1.time LIMIT 100;
-- test with space partitioning
:PREFIX SELECT * FROM space s1 INNER JOIN space s2 ON s1.time = s2.time ORDER BY s1.time LIMIT 100;
-- test COLLATION
-- can't be tested in our CI because Alpine doesn't support locales
-- :PREFIX SELECT * FROM sortopt_test ORDER BY time, device COLLATE "en_US.utf8";

@@ -13,6 +13,7 @@ set(TEST_PGUSER ${TEST_ROLE_DEFAULT_PERM_USER} CACHE STRING "The PostgreSQL test
set(TEST_DBNAME single CACHE STRING "The database name to use for tests")
set(TEST_PGPORT_TEMP_INSTANCE 15432 CACHE STRING "The port to run a temporary test PostgreSQL instance on")
set(TEST_SCHEDULE ${CMAKE_CURRENT_BINARY_DIR}/test_schedule)
set(TEST_SCHEDULE_SHARED ${CMAKE_CURRENT_BINARY_DIR}/shared/test_schedule_shared)
set(ISOLATION_TEST_SCHEDULE ${CMAKE_CURRENT_BINARY_DIR}/isolation_test_schedule)
set(PG_REGRESS_OPTS_BASE
@@ -25,6 +26,11 @@ set(PG_REGRESS_OPTS_EXTRA
--dbname=${TEST_DBNAME}
--launcher=${PRIMARY_TEST_DIR}/runner.sh)
set(PG_REGRESS_SHARED_OPTS_EXTRA
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2}
--dbname=${TEST_DBNAME}
--launcher=${PRIMARY_TEST_DIR}/runner_shared.sh)
set(PG_ISOLATION_REGRESS_OPTS_EXTRA
--create-role=${TEST_ROLE_SUPERUSER},${TEST_ROLE_DEFAULT_PERM_USER},${TEST_ROLE_DEFAULT_PERM_USER_2}
--dbname=${TEST_DBNAME})
@@ -33,6 +39,11 @@ set(PG_REGRESS_OPTS_INOUT
--inputdir=${TEST_INPUT_DIR}
--outputdir=${TEST_OUTPUT_DIR})
set(PG_REGRESS_SHARED_OPTS_INOUT
--inputdir=${TEST_INPUT_DIR}/shared
--outputdir=${TEST_OUTPUT_DIR}/shared
--load-extension=timescaledb)
set(PG_ISOLATION_REGRESS_OPTS_INOUT
--inputdir=${TEST_INPUT_DIR}/isolation
--outputdir=${TEST_OUTPUT_DIR}/isolation
@@ -80,3 +91,10 @@ if(PG_ISOLATION_REGRESS)
ISOLATION_TEST_SCHEDULE=${ISOLATION_TEST_SCHEDULE}
PG_ISOLATION_REGRESS=${PG_ISOLATION_REGRESS})
endif()
if (${PG_VERSION_MAJOR} GREATER "9")
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR})
else ()
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR}.${PG_VERSION_MINOR})
endif ()
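
Hoisting the suffix computation here makes it available to every test subdirectory. Its effect, for example: the shared ordered_append.sql.in template (generated for PostgreSQL 10 and later only) materializes as ordered_append-10.sql or ordered_append-11.sql, exactly the names listed in the new .gitignore below.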

@@ -30,6 +30,38 @@ if(PG_REGRESS)
list(APPEND _local_install_checks regresschecklocal-t)
list(APPEND _install_checks regresscheck-t)
# shared tests also provide compressed hypertables which we do not support on 9.6
if(${PG_VERSION_MAJOR} GREATER "9")
add_custom_target(regresscheck-shared
COMMAND ${CMAKE_COMMAND} -E env
${PG_REGRESS_ENV}
EXE_DIR=${CMAKE_CURRENT_SOURCE_DIR}/shared
TEST_SCHEDULE=${TEST_SCHEDULE_SHARED}
${PRIMARY_TEST_DIR}/pg_regress.sh
${PG_REGRESS_OPTS_BASE}
${PG_REGRESS_SHARED_OPTS_EXTRA}
${PG_REGRESS_SHARED_OPTS_INOUT}
${PG_REGRESS_OPTS_TEMP_INSTANCE}
USES_TERMINAL)
add_custom_target(regresschecklocal-shared
COMMAND ${CMAKE_COMMAND} -E env
${PG_REGRESS_ENV}
EXE_DIR=${CMAKE_CURRENT_SOURCE_DIR}/shared
TEST_SCHEDULE=${TEST_SCHEDULE_SHARED}
${PRIMARY_TEST_DIR}/pg_regress.sh
${PG_REGRESS_OPTS_BASE}
${PG_REGRESS_SHARED_OPTS_EXTRA}
${PG_REGRESS_SHARED_OPTS_INOUT}
${PG_REGRESS_OPTS_LOCAL_INSTANCE}
USES_TERMINAL)
list(APPEND _install_checks regresscheck-shared)
list(APPEND _local_install_checks regresschecklocal-shared)
endif()
endif()
if(PG_ISOLATION_REGRESS)
@@ -61,13 +93,14 @@ if(PG_ISOLATION_REGRESS)
list(APPEND _install_checks isolationcheck-t)
endif()
add_subdirectory(shared)
add_subdirectory(sql)
add_subdirectory(isolation)
# installchecklocal tests against an existing postgres instance
add_custom_target(installchecklocal-t DEPENDS ${_local_install_checks})
add_custom_target(installcheck-t DEPENDS ${_install_checks})
add_subdirectory(sql)
add_subdirectory(isolation)
if (CMAKE_BUILD_TYPE MATCHES Debug)
add_subdirectory(src)
endif (CMAKE_BUILD_TYPE MATCHES Debug)

@@ -1,20 +1,22 @@
set(TEST_FILES
continuous_aggs_insert.spec
continuous_aggs_multi.spec)
set(TEST_FILES_GT_9
compression_ddl.spec)
continuous_aggs_multi.spec
)
set(TEST_TEMPLATES
reorder_deadlock.spec.in
reorder_vs_insert_other_chunk.spec.in)
reorder_vs_insert_other_chunk.spec.in
)
set(TEST_TEMPLATES_DEBUG
reorder_vs_insert.spec.in
reorder_vs_select.spec.in)
reorder_vs_select.spec.in
)
if (${PG_VERSION_MAJOR} GREATER "9")
list( INSERT TEST_FILES 0 ${TEST_FILES_GT_9})
list(APPEND TEST_FILES
compression_ddl.spec
)
endif ()
if (CMAKE_BUILD_TYPE MATCHES Debug)
@@ -30,7 +32,9 @@ endforeach(TEMPLATE_FILE)
file(REMOVE ${ISOLATION_TEST_SCHEDULE})
list(SORT TEST_FILES)
foreach(TEST_FILE ${TEST_FILES})
string(REGEX REPLACE "(.+)\.spec" "\\1" TESTS_TO_RUN ${TEST_FILE})
file(APPEND ${ISOLATION_TEST_SCHEDULE} "test: ${TESTS_TO_RUN}\n")
string(REGEX REPLACE "(.+)\.spec" "\\1" TESTS_TO_RUN ${TEST_FILE})
file(APPEND ${ISOLATION_TEST_SCHEDULE} "test: ${TESTS_TO_RUN}\n")
endforeach(TEST_FILE)

@@ -0,0 +1 @@
add_subdirectory(sql)

File diff suppressed because it is too large

File diff suppressed because it is too large

tsl/test/shared/sql/.gitignore (new file, 2 lines)

@@ -0,0 +1,2 @@
ordered_append-10.sql
ordered_append-11.sql

@@ -0,0 +1,33 @@
set(TEST_FILES_SHARED
)
set(TEST_TEMPLATES_SHARED
)
if (${PG_VERSION_MAJOR} EQUAL "10" OR ${PG_VERSION_MAJOR} GREATER "10")
list(APPEND TEST_TEMPLATES_SHARED
ordered_append.sql.in
)
endif()
# Regression tests that vary with PostgreSQL version. Generated test
# files are put in the original source directory since all tests must
# be in the same directory. These files are updated when the template
# is edited, but not when the output file is deleted. If the output is
# deleted either recreate it manually, or rerun cmake on the root dir.
foreach(TEMPLATE_FILE ${TEST_TEMPLATES_SHARED})
string(LENGTH ${TEMPLATE_FILE} TEMPLATE_NAME_LEN)
math(EXPR TEMPLATE_NAME_LEN ${TEMPLATE_NAME_LEN}-7)
string(SUBSTRING ${TEMPLATE_FILE} 0 ${TEMPLATE_NAME_LEN} TEMPLATE)
set(TEST_FILE ${TEMPLATE}-${TEST_VERSION_SUFFIX}.sql)
configure_file(${TEMPLATE_FILE} ${CMAKE_CURRENT_SOURCE_DIR}/${TEST_FILE} COPYONLY)
list(APPEND TEST_FILES_SHARED ${TEST_FILE})
endforeach(TEMPLATE_FILE)
list(SORT TEST_FILES_SHARED)
file(REMOVE ${TEST_SCHEDULE_SHARED})
foreach(TEST_FILE ${TEST_FILES_SHARED})
string(REGEX REPLACE "(.+)\.sql" "\\1" TESTS_TO_RUN ${TEST_FILE})
file(APPEND ${TEST_SCHEDULE_SHARED} "test: ${TESTS_TO_RUN}\n")
endforeach(TEST_FILE)
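
A worked example of the loop above, assuming PostgreSQL 11 (so TEST_VERSION_SUFFIX is 11): ordered_append.sql.in is 21 characters long, stripping the trailing 7 (".sql.in") leaves ordered_append, configure_file copies the template to ordered_append-11.sql, and the schedule loop appends

test: ordered_append-11

to test_schedule_shared for pg_regress to run.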

@@ -0,0 +1,314 @@
-- test ASC for ordered chunks
:PREFIX SELECT
time
FROM :TEST_TABLE
ORDER BY time ASC LIMIT 1;
-- test DESC for ordered chunks
:PREFIX SELECT
time
FROM :TEST_TABLE
ORDER BY time DESC LIMIT 1;
-- test query with ORDER BY column not in targetlist
:PREFIX SELECT
pg_typeof(device_id), pg_typeof(v2)
FROM :TEST_TABLE
ORDER BY time ASC LIMIT 1;
-- ORDER BY may include other columns after time column
:PREFIX SELECT
time, device_id, v0
FROM :TEST_TABLE
ORDER BY time DESC, device_id LIMIT 1;
-- test RECORD in targetlist
:PREFIX SELECT
(time, device_id, v0)
FROM :TEST_TABLE
ORDER BY time DESC, device_id LIMIT 1;
-- test sort column not in targetlist
:PREFIX SELECT
time_bucket('1h',time)
FROM :TEST_TABLE
ORDER BY time DESC LIMIT 1;
-- queries with ORDER BY non-time column shouldn't use ordered append
:PREFIX SELECT
device_id
FROM :TEST_TABLE
ORDER BY device_id LIMIT 1;
-- time column must be primary sort order
:PREFIX SELECT
time, device_id
FROM :TEST_TABLE
ORDER BY device_id, time LIMIT 1;
-- test equality constraint on ORDER BY prefix
-- currently not optimized
:PREFIX SELECT
time, device_id
FROM :TEST_TABLE
WHERE device_id = 1
ORDER BY device_id, time LIMIT 10;
-- queries without LIMIT should use ordered append
:PREFIX SELECT
time
FROM :TEST_TABLE
ORDER BY time ASC;
-- queries without ORDER BY shouldn't use ordered append
:PREFIX SELECT
pg_typeof(time)
FROM :TEST_TABLE
LIMIT 1;
-- test interaction with constraint exclusion
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time > '2000-01-07'
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time > '2000-01-07'
ORDER BY time DESC LIMIT 1;
-- test interaction with runtime exclusion
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time > '2000-01-08'::text::timestamptz
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time < '2000-01-08'::text::timestamptz
ORDER BY time ASC LIMIT 1;
-- test constraint exclusion
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time > '2000-01-08'::text::timestamptz AND time < '2000-01-10'
ORDER BY time ASC LIMIT 1;
:PREFIX SELECT
time
FROM :TEST_TABLE
WHERE time < '2000-01-08'::text::timestamptz AND time > '2000-01-07'
ORDER BY time ASC LIMIT 1;
-- min/max queries
:PREFIX SELECT max(time) FROM :TEST_TABLE;
:PREFIX SELECT min(time) FROM :TEST_TABLE;
-- test first/last (doesn't use ordered append yet)
:PREFIX SELECT first(time, time) FROM :TEST_TABLE;
:PREFIX SELECT last(time, time) FROM :TEST_TABLE;
-- test query with time_bucket
:PREFIX SELECT
time_bucket('1d',time)
FROM :TEST_TABLE
ORDER BY time ASC LIMIT 1;
-- test query with ORDER BY time_bucket
:PREFIX SELECT
time_bucket('1d',time)
FROM :TEST_TABLE
ORDER BY 1 LIMIT 1;
-- test query with ORDER BY time_bucket, device_id
-- must not use ordered append
:PREFIX SELECT
time_bucket('1d',time), device_id, v0
FROM :TEST_TABLE
ORDER BY time_bucket('1d',time), device_id LIMIT 1;
-- test query with ORDER BY date_trunc
:PREFIX SELECT
time_bucket('1d',time)
FROM :TEST_TABLE
ORDER BY date_trunc('day', time) LIMIT 1;
-- test query with ORDER BY date_trunc
:PREFIX SELECT
date_trunc('day',time), device_id, v0
FROM :TEST_TABLE
ORDER BY 1 LIMIT 1;
-- test query with ORDER BY date_trunc, device_id
-- must not use ordered append
:PREFIX SELECT
date_trunc('day',time), device_id, v0
FROM :TEST_TABLE
ORDER BY 1,2 LIMIT 1;
-- test query with now() should result in ordered ChunkAppend
:PREFIX SELECT time FROM :TEST_TABLE WHERE time < now() + '1 month'
ORDER BY time DESC limit 1;
-- test CTE
:PREFIX WITH i AS (SELECT time FROM :TEST_TABLE WHERE time < now() ORDER BY time DESC limit 100)
SELECT * FROM i;
-- test LATERAL with ordered append in the outer query
:PREFIX SELECT time, pg_typeof(l) FROM :TEST_TABLE, LATERAL(SELECT * FROM (VALUES (1),(2)) v) l ORDER BY time DESC limit 2;
-- test LATERAL with ordered append in the lateral query
:PREFIX SELECT time, pg_typeof(v) FROM (VALUES (1),(2)) v, LATERAL(SELECT * FROM :TEST_TABLE ORDER BY time DESC limit 2) l;
-- test plan with best index is chosen
-- this should use device_id, time index
:PREFIX SELECT time, device_id FROM :TEST_TABLE WHERE device_id = 1 ORDER BY time DESC LIMIT 1;
-- test plan with best index is chosen
-- this should use time index
:PREFIX SELECT time FROM :TEST_TABLE ORDER BY time DESC LIMIT 1;
-- test LATERAL with correlated query
-- only last chunk should be executed
:PREFIX SELECT g.time, l.time
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM :TEST_TABLE o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval ORDER BY time DESC LIMIT 1
) l ON true;
-- test LATERAL with correlated query
-- only 2nd chunk should be executed
:PREFIX SELECT g.time, l.time
FROM generate_series('2000-01-10'::timestamptz,'2000-01-11','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM :TEST_TABLE o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval ORDER BY time LIMIT 1
) l ON true;
-- test startup and runtime exclusion together
:PREFIX SELECT g.time, l.time
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM :TEST_TABLE o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval AND o.time < now() ORDER BY time DESC LIMIT 1
) l ON true;
-- test startup and runtime exclusion together
-- all chunks should be filtered
:PREFIX SELECT g.time, l.time
FROM generate_series('2000-01-01'::timestamptz,'2000-01-03','1d') AS g(time)
LEFT OUTER JOIN LATERAL(
SELECT * FROM :TEST_TABLE o
WHERE o.time >= g.time AND o.time < g.time + '1d'::interval AND o.time > now() ORDER BY time DESC LIMIT 1
) l ON true;
-- test CTE
-- no chunk exclusion for CTE because cte query is not pulled up
:PREFIX WITH cte AS (SELECT time FROM :TEST_TABLE ORDER BY time)
SELECT * FROM cte WHERE time < '2000-02-01'::timestamptz;
-- test JOIN
-- no exclusion on joined table because quals are not propagated yet
:PREFIX SELECT o1.time, o2.time
FROM :TEST_TABLE o1
INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time
WHERE o1.time < '2000-02-01'
ORDER BY o1.time;
-- test JOIN
-- last chunk of o2 should not be executed
:PREFIX SELECT o1.time, o2.time
FROM :TEST_TABLE o1
INNER JOIN (SELECT * FROM :TEST_TABLE o2 ORDER BY time) o2 ON o1.time = o2.time
WHERE o1.time < '2000-01-08'
ORDER BY o1.time LIMIT 10;
-- test subquery
-- not ChunkAppend so no chunk exclusion
:PREFIX SELECT time
FROM :TEST_TABLE WHERE time = (SELECT max(time) FROM :TEST_TABLE) ORDER BY time;
-- test join against max query
-- not ChunkAppend so no chunk exclusion
:PREFIX SELECT o1.time, o2.*
FROM :TEST_TABLE o1 INNER JOIN (SELECT max(time) AS max_time FROM :TEST_TABLE) o2 ON o1.time = o2.max_time ORDER BY time;
-- test ordered append with limit expression
:PREFIX SELECT time
FROM :TEST_TABLE ORDER BY time LIMIT (SELECT length('four'));
-- test with ordered guc disabled
SET timescaledb.enable_ordered_append TO off;
:PREFIX SELECT time
FROM :TEST_TABLE ORDER BY time LIMIT 3;
RESET timescaledb.enable_ordered_append;
:PREFIX SELECT time
FROM :TEST_TABLE ORDER BY time LIMIT 3;
-- test with chunk append disabled
SET timescaledb.enable_chunk_append TO off;
:PREFIX SELECT time
FROM :TEST_TABLE ORDER BY time LIMIT 3;
RESET timescaledb.enable_chunk_append;
:PREFIX SELECT time
FROM :TEST_TABLE ORDER BY time LIMIT 3;
-- test JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with USING
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 USING(time) ORDER BY o1.time LIMIT 100;
-- test NATURAL JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 NATURAL INNER JOIN :TEST_TABLE o2 ORDER BY o1.time LIMIT 100;
-- test LEFT JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 LEFT JOIN :TEST_TABLE o2 ON o1.time=o2.time ORDER BY o1.time LIMIT 100;
-- test RIGHT JOIN on time column
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 RIGHT JOIN :TEST_TABLE o2 ON o1.time=o2.time ORDER BY o2.time LIMIT 100;
-- test JOIN on time column with ON clause expression order switched
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o2.time = o1.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with equality condition in WHERE clause
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON true WHERE o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with ORDER BY 2nd hypertable
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time ORDER BY o2.time LIMIT 100;
-- test JOIN on time column and device_id
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id AND o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on device_id
-- should not use ordered append for 2nd hypertable
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o1.device_id = o2.device_id WHERE o1.device_id = 1 ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with implicit join
-- should use 2 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1, :TEST_TABLE o2 WHERE o1.time = o2.time ORDER BY o1.time LIMIT 100;
-- test JOIN on time column with 3 hypertables
-- should use 3 ChunkAppend
:PREFIX SELECT o1.time FROM :TEST_TABLE o1 INNER JOIN :TEST_TABLE o2 ON o1.time = o2.time INNER JOIN :TEST_TABLE o3 ON o1.time = o3.time ORDER BY o1.time LIMIT 100;
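
Because every query above goes through the :TEST_TABLE psql variable instead of a concrete table name, this include file can be replayed against each of the shared hypertables. The calling pattern, as used by the template further down, is simply:

\set TEST_TABLE 'metrics_compressed'
\ir include/ordered_append_query.sql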

@@ -0,0 +1,70 @@
SET client_min_messages TO ERROR;
-- create normal hypertable with dropped columns, each chunk will have different attribute numbers
CREATE TABLE metrics(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float);
CREATE INDEX ON metrics(time DESC);
CREATE INDEX ON metrics(device_id,time DESC);
SELECT create_hypertable('metrics','time',create_default_indexes:=false);
ALTER TABLE metrics DROP COLUMN filler_1;
INSERT INTO metrics(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics DROP COLUMN filler_2;
INSERT INTO metrics(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics DROP COLUMN filler_3;
INSERT INTO metrics(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ANALYZE metrics;
-- create identical hypertable with space partitioning
CREATE TABLE metrics_space(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 float, v2 float, v3 float);
CREATE INDEX ON metrics_space(time);
CREATE INDEX ON metrics_space(device_id,time);
SELECT create_hypertable('metrics_space','time','device_id',3,create_default_indexes:=false);
ALTER TABLE metrics_space DROP COLUMN filler_1;
INSERT INTO metrics_space(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_space DROP COLUMN filler_2;
INSERT INTO metrics_space(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_space DROP COLUMN filler_3;
INSERT INTO metrics_space(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ANALYZE metrics_space;
-- create hypertable with compression
CREATE TABLE metrics_compressed(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 int, v2 float, v3 float);
CREATE INDEX ON metrics_compressed(time);
CREATE INDEX ON metrics_compressed(device_id,time);
SELECT create_hypertable('metrics_compressed','time',create_default_indexes:=false);
ALTER TABLE metrics_compressed DROP COLUMN filler_1;
INSERT INTO metrics_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_compressed DROP COLUMN filler_2;
INSERT INTO metrics_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id-1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_compressed DROP COLUMN filler_3;
INSERT INTO metrics_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ANALYZE metrics_compressed;
-- compress chunks
ALTER TABLE metrics_compressed SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id');
SELECT compress_chunk(c.schema_name|| '.' || c.table_name)
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where c.hypertable_id = ht.id and ht.table_name = 'metrics_compressed' and c.compressed_chunk_id IS NULL;
-- create hypertable with space partitioning and compression
CREATE TABLE metrics_space_compressed(filler_1 int, filler_2 int, filler_3 int, time timestamptz NOT NULL, device_id int, v0 int, v1 float, v2 float, v3 float);
CREATE INDEX ON metrics_space_compressed(time);
CREATE INDEX ON metrics_space_compressed(device_id,time);
SELECT create_hypertable('metrics_space_compressed','time','device_id',3,create_default_indexes:=false);
ALTER TABLE metrics_space_compressed DROP COLUMN filler_1;
INSERT INTO metrics_space_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-01 0:00:00+0'::timestamptz,'2000-01-05 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_space_compressed DROP COLUMN filler_2;
INSERT INTO metrics_space_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-06 0:00:00+0'::timestamptz,'2000-01-12 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ALTER TABLE metrics_space_compressed DROP COLUMN filler_3;
INSERT INTO metrics_space_compressed(time,device_id,v0,v1,v2,v3) SELECT time, device_id, device_id+1, device_id + 2, device_id + 0.5, NULL FROM generate_series('2000-01-13 0:00:00+0'::timestamptz,'2000-01-19 23:55:00+0','2m') gtime(time), generate_series(1,5,1) gdevice(device_id);
ANALYZE metrics_space_compressed;
-- compress chunks
ALTER TABLE metrics_space_compressed SET (timescaledb.compress, timescaledb.compress_orderby='time DESC', timescaledb.compress_segmentby='device_id');
SELECT compress_chunk(c.schema_name|| '.' || c.table_name)
FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.hypertable ht where c.hypertable_id = ht.id and ht.table_name = 'metrics_space_compressed' and c.compressed_chunk_id IS NULL;
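
A quick sanity check for this setup (a sketch reusing the catalog tables referenced above) is to confirm that the chunks of a compressed hypertable received compressed counterparts:

SELECT count(*)
FROM _timescaledb_catalog.chunk c
INNER JOIN _timescaledb_catalog.hypertable ht ON c.hypertable_id = ht.id
WHERE ht.table_name = 'metrics_compressed' AND c.compressed_chunk_id IS NOT NULL;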

@@ -0,0 +1,72 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set TEST_BASE_NAME ordered_append
SELECT
format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME",
format('%s/shared/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED",
format('%s/shared/results/%s_results_uncompressed_idx.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED_IDX",
format('%s/shared/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED",
format('%s/shared/results/%s_results_compressed_idx.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED_IDX"
\gset
SELECT format('\! diff -u --label "Uncompressed results" --label "Compressed results" %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') as "DIFF_CMD"
\gset
-- get EXPLAIN output for all variations
-- look at postgres version to decide whether we run with analyze or without
SELECT
CASE WHEN current_setting('server_version_num')::int >= 100000
THEN 'EXPLAIN (analyze, costs off, timing off, summary off)'
ELSE 'EXPLAIN (costs off)'
END AS "PREFIX",
CASE WHEN current_setting('server_version_num')::int >= 100000
THEN 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)'
ELSE 'EXPLAIN (costs off, verbose)'
END AS "PREFIX_VERBOSE"
\gset
set work_mem to '64MB';
set max_parallel_workers_per_gather to 0;
\set TEST_TABLE 'metrics'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_compressed'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space_compressed'
\ir :TEST_QUERY_NAME
-- get results for all the queries
-- run queries on uncompressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
SET client_min_messages TO error;
-- run queries on uncompressed and compressed hypertables and store the results
\set TEST_TABLE 'metrics'
\o :TEST_RESULTS_UNCOMPRESSED
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_compressed'
\o :TEST_RESULTS_COMPRESSED
\ir :TEST_QUERY_NAME
\o
-- diff compressed and uncompressed results
:DIFF_CMD
-- do the same for space partitioned hypertable
\set TEST_TABLE 'metrics_space'
\o :TEST_RESULTS_UNCOMPRESSED
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space_compressed'
\o :TEST_RESULTS_COMPRESSED
\ir :TEST_QUERY_NAME
\o
-- diff compressed and uncompressed results
:DIFF_CMD
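
The pattern above is how the shared tests validate transparent decompression: the same query file runs once against the uncompressed hypertable and once against its compressed twin, psql's \o redirects the rows into the two result files, and the expanded :DIFF_CMD, roughly

\! diff -u --label "Uncompressed results" --label "Compressed results" <uncompressed results file> <compressed results file>

prints nothing when the row sets match, so any divergence between the two code paths surfaces directly in the recorded test output.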

@@ -0,0 +1,72 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set TEST_BASE_NAME ordered_append
SELECT
format('include/%s_query.sql', :'TEST_BASE_NAME') as "TEST_QUERY_NAME",
format('%s/shared/results/%s_results_uncompressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED",
format('%s/shared/results/%s_results_uncompressed_idx.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_UNCOMPRESSED_IDX",
format('%s/shared/results/%s_results_compressed.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED",
format('%s/shared/results/%s_results_compressed_idx.out', :'TEST_OUTPUT_DIR', :'TEST_BASE_NAME') as "TEST_RESULTS_COMPRESSED_IDX"
\gset
SELECT format('\! diff -u --label "Uncompressed results" --label "Compressed results" %s %s', :'TEST_RESULTS_UNCOMPRESSED', :'TEST_RESULTS_COMPRESSED') as "DIFF_CMD"
\gset
-- get EXPLAIN output for all variations
-- look at postgres version to decide whether we run with analyze or without
SELECT
CASE WHEN current_setting('server_version_num')::int >= 100000
THEN 'EXPLAIN (analyze, costs off, timing off, summary off)'
ELSE 'EXPLAIN (costs off)'
END AS "PREFIX",
CASE WHEN current_setting('server_version_num')::int >= 100000
THEN 'EXPLAIN (analyze, costs off, timing off, summary off, verbose)'
ELSE 'EXPLAIN (costs off, verbose)'
END AS "PREFIX_VERBOSE"
\gset
set work_mem to '64MB';
set max_parallel_workers_per_gather to 0;
\set TEST_TABLE 'metrics'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_compressed'
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space_compressed'
\ir :TEST_QUERY_NAME
-- get results for all the queries
-- run queries on uncompressed hypertable and store result
\set PREFIX ''
\set PREFIX_VERBOSE ''
\set ECHO none
SET client_min_messages TO error;
-- run queries on uncompressed and compressed hypertables and store the results
\set TEST_TABLE 'metrics'
\o :TEST_RESULTS_UNCOMPRESSED
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_compressed'
\o :TEST_RESULTS_COMPRESSED
\ir :TEST_QUERY_NAME
\o
-- diff compressed and uncompressed results
:DIFF_CMD
-- do the same for space partitioned hypertable
\set TEST_TABLE 'metrics_space'
\o :TEST_RESULTS_UNCOMPRESSED
\ir :TEST_QUERY_NAME
\set TEST_TABLE 'metrics_space_compressed'
\o :TEST_RESULTS_COMPRESSED
\ir :TEST_QUERY_NAME
\o
-- diff compressed and uncompressed results
:DIFF_CMD

@@ -1,37 +1,24 @@
set(TEST_FILES
continuous_aggs_dump.sql
continuous_aggs_errors.sql
continuous_aggs_usage.sql
continuous_aggs_watermark.sql
edition.sql
gapfill.sql
move.sql
partialize_finalize.sql
reorder.sql
continuous_aggs_dump.sql
continuous_aggs_errors.sql
continuous_aggs_usage.sql
continuous_aggs_watermark.sql
edition.sql
gapfill.sql
move.sql
partialize_finalize.sql
reorder.sql
)
set(TEST_FILES_DEBUG
bgw_policy.sql
bgw_reorder_drop_chunks.sql
continuous_aggs.sql
continuous_aggs_bgw.sql
continuous_aggs_materialize.sql
continuous_aggs_multi.sql
ddl_hook.sql
tsl_tables.sql
)
#only test for version PG10 onwards
set(TEST_FILES_DEBUG_GT_9
compress_table.sql
compression.sql
compression_algos.sql
compression_ddl.sql
compression_errors.sql
compression_hypertable.sql
compression_segment_meta.sql
compression_bgw.sql
compress_bgw_drop_chunks.sql
bgw_policy.sql
bgw_reorder_drop_chunks.sql
continuous_aggs.sql
continuous_aggs_bgw.sql
continuous_aggs_materialize.sql
continuous_aggs_multi.sql
ddl_hook.sql
tsl_tables.sql
)
set(TEST_TEMPLATES
@@ -42,15 +29,26 @@ set(TEST_TEMPLATES
)
#compression only for PG > 9
set(TEST_TEMPLATES_DEBUG_GT_9
compression_permissions.sql.in
transparent_decompression.sql.in
)
if (${PG_VERSION_MAJOR} GREATER "9")
list(APPEND TEST_FILES_DEBUG ${TEST_FILES_DEBUG_GT_9})
list(APPEND TEST_TEMPLATES ${TEST_TEMPLATES_DEBUG_GT_9})
list(APPEND TEST_FILES_DEBUG
compress_table.sql
compression.sql
compression_algos.sql
compression_ddl.sql
compression_errors.sql
compression_hypertable.sql
compression_segment_meta.sql
compression_bgw.sql
compress_bgw_drop_chunks.sql
)
list(APPEND TEST_TEMPLATES_DEBUG
compression_permissions.sql.in
transparent_decompression.sql.in
)
endif ()
if (CMAKE_BUILD_TYPE MATCHES Debug)
list(APPEND TEST_FILES ${TEST_FILES_DEBUG})
endif(CMAKE_BUILD_TYPE MATCHES Debug)
@@ -60,12 +58,6 @@ endif(CMAKE_BUILD_TYPE MATCHES Debug)
# be in the same directory. These files are updated when the template
# is edited, but not when the output file is deleted. If the output is
# deleted either recreate it manually, or rerun cmake on the root dir.
if (${PG_VERSION_MAJOR} GREATER "9")
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR})
else ()
set(TEST_VERSION_SUFFIX ${PG_VERSION_MAJOR}.${PG_VERSION_MINOR})
endif ()
foreach(TEMPLATE_FILE ${TEST_TEMPLATES})
string(LENGTH ${TEMPLATE_FILE} TEMPLATE_NAME_LEN)
math(EXPR TEMPLATE_NAME_LEN ${TEMPLATE_NAME_LEN}-7)
@@ -82,3 +74,4 @@ foreach(TEST_FILE ${TEST_FILES})
string(REGEX REPLACE "(.+)\.sql" "\\1" TESTS_TO_RUN ${TEST_FILE})
file(APPEND ${TEST_SCHEDULE} "test: ${TESTS_TO_RUN}\n")
endforeach(TEST_FILE)