mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-17 11:03:36 +08:00
Fix dist_hypertable test for parallel execution
Change database names to be unique across the test suite by prefixing each database created in the test with the name of the test database. This allows the test to run in parallel with other tests, since it no longer creates conflicting databases in the same cluster. Previously, a few fixed directories were created for tablespaces; this commit instead creates one directory prefix per test, under which each test creates a subdirectory for its tablespace. Variables for the old tablespace paths are kept around so that existing tests continue to work while transitioning to the new system.
This commit is contained in:
parent
e9cb14985e
commit
2cf3af1eb6
@ -8,9 +8,9 @@ build_script:
|
||||
|
||||
# Create directories for tablespaces
|
||||
|
||||
New-Item -ItemType directory -Path "C:\Users\$env:UserName\Documents\tablespace1"
|
||||
New-Item -ItemType directory -Path "C:\Users\$env:UserName\Documents\tablespace1\_default"
|
||||
|
||||
New-Item -ItemType directory -Path "C:\Users\$env:UserName\Documents\tablespace2"
|
||||
New-Item -ItemType directory -Path "C:\Users\$env:UserName\Documents\tablespace2\_default"
|
||||
|
||||
New-Item -ItemType directory -Path "C:\Users\$env:UserName\Documents\log"
|
||||
|
||||
@ -185,7 +185,7 @@ test_script:
|
||||
|
||||
#right now we only run timescale regression tests, others will be set up later
|
||||
|
||||
docker exec -e IGNORES="chunk_utils loader" -e TEST_TABLESPACE1_PATH="C:\Users\$env:UserName\Documents\tablespace1\" -e TEST_TABLESPACE2_PATH="C:\Users\$env:UserName\Documents\tablespace2\" -e TEST_SPINWAIT_ITERS=10000 -e USER=postgres -e PG_REGRESS_OPTS="--bindir=/usr/local/bin/" -it pgregress /bin/bash -c "cd /timescaledb/build && make regresschecklocal"
|
||||
docker exec -e IGNORES="chunk_utils loader" -e TEST_TABLESPACE1_PREFIX="C:\Users\$env:UserName\Documents\tablespace1\" -e TEST_TABLESPACE2_PREFIX="C:\Users\$env:UserName\Documents\tablespace2\" -e TEST_SPINWAIT_ITERS=10000 -e USER=postgres -e PG_REGRESS_OPTS="--bindir=/usr/local/bin/" -it pgregress /bin/bash -c "cd /timescaledb/build && make regresschecklocal"
|
||||
|
||||
$TESTS1 = $?
|
||||
|
||||
@ -197,7 +197,7 @@ test_script:
|
||||
# killer. Therefore, we need to ignore the results of the
|
||||
# remote_connection and remote_txn tests.
|
||||
|
||||
docker exec -e IGNORES="bgw_db_scheduler compression_algos continuous_aggs_bgw remote_connection remote_txn " -e SKIPS="bgw_db_scheduler" -e TEST_TABLESPACE1_PATH="C:\Users\$env:UserName\Documents\tablespace1\" -e TEST_TABLESPACE2_PATH="C:\Users\$env:UserName\Documents\tablespace2\" -e TEST_SPINWAIT_ITERS=10000 -e USER=postgres -e PG_REGRESS_OPTS="--bindir=/usr/local/bin/" -it pgregress /bin/bash -c "cd /timescaledb/build && make regresschecklocal-t"
|
||||
docker exec -e IGNORES="bgw_db_scheduler compression_algos continuous_aggs_bgw remote_connection remote_txn " -e SKIPS="bgw_db_scheduler" -e TEST_TABLESPACE1_PREFIX="C:\Users\$env:UserName\Documents\tablespace1\" -e TEST_TABLESPACE2_PREFIX="C:\Users\$env:UserName\Documents\tablespace2\" -e TEST_SPINWAIT_ITERS=10000 -e USER=postgres -e PG_REGRESS_OPTS="--bindir=/usr/local/bin/" -it pgregress /bin/bash -c "cd /timescaledb/build && make regresschecklocal-t"
|
||||
|
||||
if( -not $? -or -not $TESTS1 ) { exit 1 }
|
||||
|
||||
|
@ -102,19 +102,32 @@ fi
|
||||
|
||||
function cleanup() {
|
||||
rm -rf ${EXE_DIR}/sql/dump
|
||||
rm -rf ${TEST_TABLESPACE1_PATH}
|
||||
rm -rf ${TEST_TABLESPACE2_PATH}
|
||||
rm -rf ${TEST_TABLESPACE3_PATH}
|
||||
rm -rf ${TEST_TABLESPACE1_PREFIX}
|
||||
rm -rf ${TEST_TABLESPACE2_PREFIX}
|
||||
rm -rf ${TEST_TABLESPACE3_PREFIX}
|
||||
rm -f ${TEMP_SCHEDULE}
|
||||
rm -rf ${TEST_OUTPUT_DIR}/.pg_init
|
||||
}
|
||||
|
||||
trap cleanup EXIT
|
||||
|
||||
# Generating a prefix directory for all test tablespaces. This should
|
||||
# be used to build a full path for the tablespace. Note that we
|
||||
# terminate the prefix with the directory separator so that we can
|
||||
# easily generate paths independent of the OS.
|
||||
#
|
||||
# This mktemp line will work on both OSX and GNU systems
|
||||
TEST_TABLESPACE1_PATH=${TEST_TABLESPACE1_PATH:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')}
|
||||
TEST_TABLESPACE2_PATH=${TEST_TABLESPACE2_PATH:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')}
|
||||
TEST_TABLESPACE3_PATH=${TEST_TABLESPACE3_PATH:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')}
|
||||
TEST_TABLESPACE1_PREFIX=${TEST_TABLESPACE1_PREFIX:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')/}
|
||||
TEST_TABLESPACE2_PREFIX=${TEST_TABLESPACE2_PREFIX:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')/}
|
||||
TEST_TABLESPACE3_PREFIX=${TEST_TABLESPACE3_PREFIX:-$(mktemp -d 2>/dev/null || mktemp -d -t 'timescaledb_regress')/}
|
||||
|
||||
# Creating some defaults for transitioning tests to use the prefix.
|
||||
TEST_TABLESPACE1_PATH=${TEST_TABLESPACE1_PATH:-${TEST_TABLESPACE1_PREFIX}_default}
|
||||
TEST_TABLESPACE2_PATH=${TEST_TABLESPACE2_PATH:-${TEST_TABLESPACE2_PREFIX}_default}
|
||||
TEST_TABLESPACE3_PATH=${TEST_TABLESPACE3_PATH:-${TEST_TABLESPACE3_PREFIX}_default}
|
||||
mkdir $TEST_TABLESPACE1_PATH $TEST_TABLESPACE2_PATH $TEST_TABLESPACE3_PATH
|
||||
|
||||
export TEST_TABLESPACE1_PREFIX TEST_TABLESPACE2_PREFIX TEST_TABLESPACE3_PREFIX
|
||||
export TEST_TABLESPACE1_PATH TEST_TABLESPACE2_PATH TEST_TABLESPACE3_PATH
|
||||
|
||||
rm -rf ${TEST_OUTPUT_DIR}/.pg_init
|
||||
|
@ -15,7 +15,11 @@ TEST_SUPPORT_FILE=${CURRENT_DIR}/sql/utils/testsupport.sql
|
||||
# PGAPPNAME will be 'pg_regress/test' so we cut off the prefix
|
||||
# to get the name of the test
|
||||
CURRENT_TEST=${PGAPPNAME##pg_regress/}
|
||||
TEST_DBNAME="db_${CURRENT_TEST}"
|
||||
|
||||
# Since PG11 and PG12 tests do not run in parallel, we remove the
|
||||
# trailing "-11" (or "-12") suffix to get a good symbol that can be
|
||||
# used as identifier as well.
|
||||
TEST_DBNAME="db_${CURRENT_TEST%%-[0-9][0-9]}"
|
||||
|
||||
# Read the extension version from version.config
|
||||
read -r VERSION < ${CURRENT_DIR}/../version.config
|
||||
@ -85,6 +89,9 @@ ${PSQL} -U ${TEST_PGUSER} \
|
||||
-v VERBOSITY=terse \
|
||||
-v ECHO=all \
|
||||
-v TEST_DBNAME="${TEST_DBNAME}" \
|
||||
-v TEST_TABLESPACE1_PREFIX=${TEST_TABLESPACE1_PREFIX} \
|
||||
-v TEST_TABLESPACE2_PREFIX=${TEST_TABLESPACE2_PREFIX} \
|
||||
-v TEST_TABLESPACE3_PREFIX=${TEST_TABLESPACE3_PREFIX} \
|
||||
-v TEST_TABLESPACE1_PATH=\'${TEST_TABLESPACE1_PATH}\' \
|
||||
-v TEST_TABLESPACE2_PATH=\'${TEST_TABLESPACE2_PATH}\' \
|
||||
-v TEST_TABLESPACE3_PATH=\'${TEST_TABLESPACE3_PATH}\' \
|
||||
|
@ -280,3 +280,14 @@ BEGIN
|
||||
END
|
||||
$BODY$;
|
||||
|
||||
-- Create (if necessary) and return a per-test tablespace directory.
--
-- Builds the directory path by concatenating the given prefix
-- (presumably terminated with a directory separator by the caller --
-- TODO confirm against the runner script) with the test name, creates
-- that directory on the database server host, and returns the path.
--
-- NOTE(review): the directory is created via COPY ... TO PROGRAM,
-- which executes a shell command with the privileges of the server
-- process. This is test-support code only; it must never be shipped
-- in a production extension.
CREATE OR REPLACE FUNCTION test.make_tablespace_path(prefix TEXT, test_name TEXT)
RETURNS TEXT LANGUAGE plpgsql AS
$BODY$
DECLARE
    -- Full tablespace directory path: '<prefix><test_name>'.
    dirPath TEXT := format('%s%s', prefix, test_name);
    -- Shell command that creates the directory.
    -- NOTE(review): dirPath is not shell-quoted inside the command, so a
    -- prefix containing spaces or shell metacharacters would break; this
    -- looks acceptable for generated test paths but is worth confirming.
    createDir TEXT := format('mkdir %s', dirPath);
BEGIN
    -- COPY ... TO PROGRAM is used here as a portable way to run a shell
    -- command from SQL; the dummy 'SELECT 1' row is piped to mkdir's
    -- stdin and ignored. quote_literal() embeds the command string as a
    -- SQL literal inside the dynamically built COPY statement.
    EXECUTE format('COPY (SELECT 1) TO PROGRAM %s', quote_literal(createDir));
    RETURN dirPath;
END;
$BODY$;
|
||||
|
File diff suppressed because it is too large
Load Diff
File diff suppressed because it is too large
Load Diff
@ -125,8 +125,6 @@ set(SOLO_TESTS
|
||||
debug_notice
|
||||
dist_api_calls
|
||||
dist_commands
|
||||
dist_hypertable-11
|
||||
dist_hypertable-12
|
||||
dist_hypertable_am
|
||||
dist_hypertable_with_oids
|
||||
dist_partial_agg
|
||||
|
@ -13,9 +13,15 @@
|
||||
\o
|
||||
\set ECHO all
|
||||
|
||||
\set DATA_NODE_1 dist_hypertable_1
|
||||
\set DATA_NODE_2 dist_hypertable_2
|
||||
\set DATA_NODE_3 dist_hypertable_3
|
||||
\set DATA_NODE_1 :TEST_DBNAME _1
|
||||
\set DATA_NODE_2 :TEST_DBNAME _2
|
||||
\set DATA_NODE_3 :TEST_DBNAME _3
|
||||
\set TABLESPACE_1 :TEST_DBNAME _1
|
||||
\set TABLESPACE_2 :TEST_DBNAME _2
|
||||
SELECT
|
||||
test.make_tablespace_path(:'TEST_TABLESPACE1_PREFIX', :'TEST_DBNAME') AS spc1path,
|
||||
test.make_tablespace_path(:'TEST_TABLESPACE2_PREFIX', :'TEST_DBNAME') AS spc2path
|
||||
\gset
|
||||
|
||||
SELECT (add_data_node (name, host => 'localhost', DATABASE => name)).*
|
||||
FROM (VALUES (:'DATA_NODE_1'), (:'DATA_NODE_2'), (:'DATA_NODE_3')) v (name);
|
||||
@ -728,7 +734,7 @@ SET timescaledb.max_insert_batch_size=4;
|
||||
--
|
||||
-- Execute and filter mentioned data node name in the error message.
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT test.execute_sql_and_filter_data_node_name_on_error($$ INSERT INTO twodim VALUES ('2019-02-10 17:54', 0, 10.2) $$, 'dist_hypertable');
|
||||
SELECT test.execute_sql_and_filter_data_node_name_on_error($$ INSERT INTO twodim VALUES ('2019-02-10 17:54', 0, 10.2) $$, :'TEST_DBNAME');
|
||||
\set ON_ERROR_STOP 1
|
||||
|
||||
-- Disable batching, reverting to FDW tuple-by-tuple inserts.
|
||||
@ -893,16 +899,16 @@ $$);
|
||||
|
||||
-- Tests for using tablespaces with distributed hypertables
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
|
||||
CREATE TABLESPACE tablespace1 OWNER :ROLE_1 LOCATION :TEST_TABLESPACE1_PATH;
|
||||
CREATE TABLESPACE tablespace2 OWNER :ROLE_1 LOCATION :TEST_TABLESPACE2_PATH;
|
||||
CREATE TABLESPACE :TABLESPACE_1 OWNER :ROLE_1 LOCATION :'spc1path';
|
||||
CREATE TABLESPACE :TABLESPACE_2 OWNER :ROLE_1 LOCATION :'spc2path';
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT attach_tablespace('tablespace1', 'disttable');
|
||||
SELECT detach_tablespace('tablespace1', 'disttable');
|
||||
SELECT attach_tablespace(:'TABLESPACE_1', 'disttable');
|
||||
SELECT detach_tablespace(:'TABLESPACE_1', 'disttable');
|
||||
\set ON_ERROR_STOP 1
|
||||
SELECT detach_tablespaces('disttable');
|
||||
|
||||
-- Continue to use previously attached tablespace, but block attach/detach
|
||||
CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE tablespace1;
|
||||
CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1;
|
||||
SELECT create_distributed_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint);
|
||||
|
||||
-- Ensure that table is created on the data nodes without a tablespace
|
||||
@ -919,29 +925,29 @@ FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable2')) c
|
||||
WHERE cl.oid = ch.chunk::regclass;
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT attach_tablespace('tablespace2', 'disttable2');
|
||||
SELECT detach_tablespace('tablespace2', 'disttable2');
|
||||
SELECT attach_tablespace(:'TABLESPACE_2', 'disttable2');
|
||||
SELECT detach_tablespace(:'TABLESPACE_2', 'disttable2');
|
||||
\set ON_ERROR_STOP 1
|
||||
SELECT detach_tablespaces('disttable2');
|
||||
|
||||
SELECT * FROM show_tablespaces('disttable2');
|
||||
|
||||
-- Ensure tablespace API works for data nodes
|
||||
CALL distributed_exec($$
|
||||
SELECT attach_tablespace('tablespace2', 'disttable2');
|
||||
$$);
|
||||
CALL distributed_exec($$
|
||||
SELECT detach_tablespace('tablespace2', 'disttable2');
|
||||
$$);
|
||||
CALL distributed_exec($$
|
||||
SELECT attach_tablespace('tablespace2', 'disttable2');
|
||||
$$);
|
||||
CALL distributed_exec(format($$
|
||||
SELECT attach_tablespace(%L, 'disttable2');
|
||||
$$, :'TABLESPACE_2'));
|
||||
CALL distributed_exec(format($$
|
||||
SELECT detach_tablespace(%L, 'disttable2');
|
||||
$$, :'TABLESPACE_2'));
|
||||
CALL distributed_exec(format($$
|
||||
SELECT attach_tablespace(%L, 'disttable2');
|
||||
$$, :'TABLESPACE_2'));
|
||||
CALL distributed_exec($$
|
||||
SELECT detach_tablespaces('disttable2');
|
||||
$$);
|
||||
DROP TABLE disttable2;
|
||||
|
||||
CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE tablespace1;
|
||||
CREATE TABLE disttable2(time timestamptz, device int, temp float) TABLESPACE :TABLESPACE_1;
|
||||
SELECT create_hypertable('disttable2', 'time', chunk_time_interval => 1000000::bigint, replication_factor => 1);
|
||||
|
||||
-- Ensure that table is created on the data nodes without a tablespace
|
||||
@ -958,15 +964,15 @@ FROM pg_class cl, (SELECT show_chunks AS chunk FROM show_chunks('disttable2')) c
|
||||
WHERE cl.oid = ch.chunk::regclass;
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
SELECT attach_tablespace('tablespace2', 'disttable2');
|
||||
SELECT detach_tablespace('tablespace2', 'disttable2');
|
||||
SELECT attach_tablespace(:'TABLESPACE_2', 'disttable2');
|
||||
SELECT detach_tablespace(:'TABLESPACE_2', 'disttable2');
|
||||
\set ON_ERROR_STOP 1
|
||||
|
||||
SELECT * FROM show_tablespaces('disttable2');
|
||||
DROP TABLE disttable2;
|
||||
|
||||
DROP TABLESPACE tablespace1;
|
||||
DROP TABLESPACE tablespace2;
|
||||
DROP TABLESPACE :TABLESPACE_1;
|
||||
DROP TABLESPACE :TABLESPACE_2;
|
||||
|
||||
-- Make sure table qualified name is used in chunks_in function. Otherwise having a table name same as a column name might yield an error
|
||||
CREATE TABLE dist_device(time timestamptz, dist_device int, temp float);
|
||||
@ -1433,10 +1439,10 @@ SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id',
|
||||
if_not_exists => true, chunk_time_interval => INTERVAL '1 day');
|
||||
-- Check the hypertable sequence before and after call to ensure that
|
||||
-- the hypertable sequence was not increased with the second call.
|
||||
SELECT * FROM _timescaledb_catalog.hypertable_id_seq;
|
||||
SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq;
|
||||
SELECT * FROM create_distributed_hypertable('whatever', 'timestamp', 'user_id',
|
||||
if_not_exists => true, chunk_time_interval => INTERVAL '1 day');
|
||||
SELECT * FROM _timescaledb_catalog.hypertable_id_seq;
|
||||
SELECT last_value FROM _timescaledb_catalog.hypertable_id_seq;
|
||||
|
||||
-- Test that creating a distributed hypertable from a table with data
|
||||
-- fails, and that migrate_data blocked.
|
||||
|
Loading…
x
Reference in New Issue
Block a user