add test from postgres-kafka-consumer

Added missing files.
Olof Rensfelt 2016-11-11 15:10:43 +01:00
parent c9f76446da
commit e15da09f45
16 changed files with 370 additions and 5 deletions

sql/setup/add_cluster_user.sh Executable file

@@ -0,0 +1,34 @@
#!/bin/bash
# To avoid typing the password, add localhost:5432:*:postgres:test to ~/.pgpass
set -u
set -e
ORIG_PWD=`pwd`
DIR=`dirname $0`
POSTGRES_HOST=${POSTGRES_HOST:-localhost}
POSTGRES_USER=${POSTGRES_USER:-postgres}
INSTALL_DB=${INSTALL_DB:-meta}
if [[ "$#" -eq 0 || "$#" -gt 2 ]] ; then
    echo "usage: $0 user [pass]"
    exit 1
fi
if [ "$#" -eq 2 ] ; then
    # quote the password so it reaches add_cluster_user as a SQL string literal
    PASS="'$2'"
else
    PASS="NULL"
fi
PG_USER_TO_ADD=$1
echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER and with db $INSTALL_DB"
echo "SELECT add_cluster_user('$PG_USER_TO_ADD', $PASS);"
cd $DIR
psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB -v ON_ERROR_STOP=1 <<EOF
SELECT add_cluster_user('$PG_USER_TO_ADD', $PASS);
EOF
cd $ORIG_PWD
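Example usage (the user name and password here are hypothetical):

./add_cluster_user.sh kafka_writer secret    # password stored for the user
./add_cluster_user.sh kafka_writer           # no password; NULL is stored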

sql/setup/add_node.sh Executable file

@@ -0,0 +1,27 @@
#!/bin/bash
# To avoid typing the password, add localhost:5432:*:postgres:test to ~/.pgpass
set -u
set -e
ORIG_PWD=`pwd`
DIR=`dirname $0`
POSTGRES_HOST=${POSTGRES_HOST:-localhost}
POSTGRES_USER=${POSTGRES_USER:-postgres}
INSTALL_DB=${INSTALL_DB:-meta}
if [ "$#" -ne 2 ] ; then
    echo "usage: $0 node host"
    exit 1
fi
NODENAME=$1
NODEHOST=$2
echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER and with db $INSTALL_DB"
psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB -v ON_ERROR_STOP=1 <<EOF
SELECT add_node('$NODENAME' :: NAME, '$NODEHOST');
EOF
cd $ORIG_PWD
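Example usage, registering one of the nodes the test runner expects (the host value is illustrative):

POSTGRES_HOST=localhost ./add_node.sh Test1 localhost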

add_test_inputs.sh Executable file

@@ -0,0 +1,59 @@
#!/bin/bash
# To avoid typing the password, add localhost:5432:*:postgres:test to ~/.pgpass
set -u
set -e
ORIG_PWD=`pwd`
DIR=`dirname $0`
POSTGRES_HOST=${POSTGRES_HOST:-localhost}
POSTGRES_USER=${POSTGRES_USER:-postgres}
INSTALL_DB_META=${INSTALL_DB_META:-meta}
INSTALL_DB_MAIN=${INSTALL_DB_MAIN:-Test1}
echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER and with meta db $INSTALL_DB_META and main db $INSTALL_DB_MAIN"
cd $DIR
# TODO: read the namespace and fields from the csv/tsv files
NAMESPACE="33_testNs"
psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB_META -v ON_ERROR_STOP=1 <<EOF
SELECT add_namespace('$NAMESPACE' :: NAME);
SELECT add_field('$NAMESPACE' :: NAME, 'device_id', 'text', TRUE, TRUE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'num_1', 'double precision', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'num_2', 'double precision', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'bool_1', 'boolean', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'string_1', 'text', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'string_2', 'text', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'field_only_ref2', 'text', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
SELECT add_field('$NAMESPACE' :: NAME, 'field_only_dev2', 'double precision', FALSE, FALSE, ARRAY ['VALUE-TIME'] :: field_index_type []);
EOF
INPUT_DATA_DIR="input_data"
FILE_SUFFIX=".tsv"
DATASETS=`ls $INPUT_DATA_DIR/*$FILE_SUFFIX`
TEMPTABLENAME="copy_t"
for DS_PATH in $DATASETS; do
    DATASET=`basename $DS_PATH $FILE_SUFFIX`
    PARTITION_KEY=`echo $DATASET | cut -f2 -d_`
    echo "Setting up $DATASET with partition key $PARTITION_KEY"
    psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB_MAIN -v ON_ERROR_STOP=1 <<EOF
BEGIN;
DROP TABLE IF EXISTS $TEMPTABLENAME;
SELECT *
FROM create_temp_copy_table_one_partition('$TEMPTABLENAME'::text, get_partition_for_key('$PARTITION_KEY'::text, 10::SMALLINT), 10::SMALLINT);
\COPY $TEMPTABLENAME FROM '$DS_PATH';
CREATE SCHEMA IF NOT EXISTS test_input_data;
DROP TABLE IF EXISTS test_input_data.$DATASET;
CREATE TABLE test_input_data.$DATASET AS SELECT * FROM $TEMPTABLENAME;
COMMIT;
EOF
done
cd $ORIG_PWD
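A sketch of a typical invocation, assuming the meta and main databases were already created by the setup scripts:

INSTALL_DB_META=meta INSTALL_DB_MAIN=Test1 ./add_test_inputs.sh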

add_test_outputs.sh Executable file

@@ -0,0 +1,47 @@
#!/bin/bash
# To avoid typing the password, add localhost:5432:*:postgres:test to ~/.pgpass
set -u
set -e
ORIG_PWD=`pwd`
DIR=`dirname $0`
POSTGRES_HOST=${POSTGRES_HOST:-localhost}
POSTGRES_USER=${POSTGRES_USER:-postgres}
INSTALL_DB_META=${INSTALL_DB_META:-meta}
INSTALL_DB_MAIN=${INSTALL_DB_MAIN:-Test1}
echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER and with meta db $INSTALL_DB_META and main db $INSTALL_DB_MAIN"
cd $DIR
OUTPUT_DATA_DIR="expected_outputs"
FILE_SUFFIX="csv"
DATAFILES=`ls $OUTPUT_DATA_DIR/*.$FILE_SUFFIX`
SCHEMA_NAME="test_outputs"
DELIMITER=";"
TEMPFILE="tempfile.tmp"
for DS_PATH in $DATAFILES; do
    DATASET=`basename $DS_PATH .$FILE_SUFFIX`
    echo "Setting up output $DATASET"
    COLUMNS=`head -n 1 $DS_PATH | sed "s/$DELIMITER/,/g"`
    tail -n +2 $DS_PATH > $TEMPFILE
    psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB_MAIN -v ON_ERROR_STOP=1 <<EOF
BEGIN;
CREATE SCHEMA IF NOT EXISTS $SCHEMA_NAME;
DROP TABLE IF EXISTS $SCHEMA_NAME.$DATASET;
CREATE TABLE $SCHEMA_NAME.$DATASET ($COLUMNS);
\COPY $SCHEMA_NAME.$DATASET FROM '$TEMPFILE' DELIMITER '$DELIMITER' NULL 'null';
COMMIT;
EOF
done
rm -f $TEMPFILE
cd $ORIG_PWD
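As with the inputs script, the target database can be overridden via the environment and the result inspected afterwards; for example:

INSTALL_DB_MAIN=Test1 ./add_test_outputs.sh
psql -U postgres -h localhost -d Test1 -c "SELECT * FROM test_outputs.q1_response_fields;"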

@@ -0,0 +1,29 @@
*.csv files in this directory are imported by add_test_outputs.sh into the test_outputs schema, as tables named after the files minus the file suffix.
The column delimiter is a semicolon so that JSON values may contain commas.
The first row of each file is interpreted as column names and types.
ex:
file: example.csv
s text;i int
foo;2
bar;3
becomes a table in the test_outputs schema:
select * from test_outputs.example;
  s  | i
-----+---
 foo | 2
 bar | 3
(2 rows)
\d test_outputs.example
Table "test_outputs.example"
 Column |  Type   | Modifiers
--------+---------+-----------
 s      | text    |
 i      | integer |
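In other words, for example.csv the script above effectively runs the following (with tempfile.tmp holding everything after the header row):

psql -U postgres -h localhost -d Test1 -v ON_ERROR_STOP=1 <<EOF
BEGIN;
CREATE SCHEMA IF NOT EXISTS test_outputs;
DROP TABLE IF EXISTS test_outputs.example;
CREATE TABLE test_outputs.example (s text,i int);
\COPY test_outputs.example FROM 'tempfile.tmp' DELIMITER ';' NULL 'null';
COMMIT;
EOF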

expected_outputs/example.csv

@@ -0,0 +1,3 @@
s text;i int
foo;2
bar;3

expected_outputs/q1_response_fields.csv

@@ -0,0 +1,6 @@
time bigint; bool_1 boolean; device_id text; field_only_dev2 double precision; field_only_ref2 text; num_1 double precision; num_2 double precision; string_1 text; string_2 text
1257897600000000000;false;dev1;null;null;null;5;const;five
1257894001000000000;true;dev1;null;null;null;4;const;four
1257894000000001000;true;dev1;null;null;null;3;const;three
1257894000000000000;true;dev1;null;null;null;1;const;one
1257894000000000000;false;dev1;null;null;null;2;const;two

expected_outputs/q1_response_json.csv

@@ -0,0 +1,6 @@
r json
{"time":1257897600000000000,"bool_1":false,"device_id":"dev1","field_only_dev2":null,"field_only_ref2":null,"num_1":null,"num_2":5,"string_1":"const","string_2":"five"}
{"time":1257894001000000000,"bool_1":true,"device_id":"dev1","field_only_dev2":null,"field_only_ref2":null,"num_1":null,"num_2":4,"string_1":"const","string_2":"four"}
{"time":1257894000000001000,"bool_1":true,"device_id":"dev1","field_only_dev2":null,"field_only_ref2":null,"num_1":null,"num_2":3,"string_1":"const","string_2":"three"}
{"time":1257894000000000000,"bool_1":true,"device_id":"dev1","field_only_dev2":null,"field_only_ref2":null,"num_1":null,"num_2":1,"string_1":"const","string_2":"one"}
{"time":1257894000000000000,"bool_1":false,"device_id":"dev1","field_only_dev2":null,"field_only_ref2":null,"num_1":null,"num_2":2,"string_1":"const","string_2":"two"}

@@ -0,0 +1,4 @@
All files named *_DEVICEID.tsv in this directory will be added to the DB as tables by add_test_inputs.sh.
ex. batch1_dev1.tsv will be inserted into a table called batch1_dev1, as one
partition matching the partition key dev1.
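To check which of the 10 partitions a given device key maps to, the partitioning function used by add_test_inputs.sh can be called directly:

psql -U postgres -h localhost -d Test1 -c "SELECT get_partition_for_key('dev1'::text, 10::SMALLINT);"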

input_data/batch1_dev1.tsv

@@ -0,0 +1,5 @@
33_testNs 1257894000000000000 {"string_2":"one","device_id":"dev1","nUm_1":1.5,"num_2":1,"bool_1":true,"string_1":"const"}
33_testNs 1257894000000000000 {"string_2":"two","device_id":"dev1","nUm_1":1.5,"num_2":2,"bool_1":false,"string_1":"const"}
33_testNs 1257894000000001000 {"string_2":"three","device_id":"dev1","nUm_1":1.5,"num_2":3,"bool_1":true,"string_1":"const"}
33_testNs 1257894001000000000 {"string_2":"four","device_id":"dev1","nUm_1":1.5,"num_2":4,"bool_1":true,"string_1":"const"}
33_testNs 1257897600000000000 {"string_2":"five","device_id":"dev1","nUm_1":1.5,"num_2":5,"bool_1":false,"string_1":"const"}

@@ -0,0 +1,2 @@
33_testNs 1257894000000000000 {"bool_1":true,"string_1":"const","string_2":"one","field_only_dev2":3,"device_id":"dev2","nUm_1":1.5,"num_2":9}
33_testNs 1257894000000000000 {"bool_1":true,"string_1":"const","string_2":"two","device_id":"dev2","nUm_1":1.5,"num_2":10}

@@ -0,0 +1,3 @@
33_testNs 1257987600000000000 {"num_2":6,"bool_1":false,"string_1":"const","string_2":"one","field_only_ref2":"one","device_id":"dev1","nUm_1":1.5}
33_testNs 1257987600000000000 {"num_2":7,"bool_1":true,"string_1":"const","string_2":"two","field_only_ref2":"two","device_id":"dev1","nUm_1":1.5}
33_testNs 1257894002000000000 {"num_2":8,"bool_1":true,"string_1":"const","string_2":"three","device_id":"dev1","nUm_1":1.5}

@@ -0,0 +1,71 @@
DROP FUNCTION IF EXISTS unit_tests.test_ioql_queries_empty();
CREATE FUNCTION unit_tests.test_ioql_queries_empty()
RETURNS test_result
AS
$$
DECLARE
message test_result;
diffcount integer;
result json[];
expected json[];
expected_row json;
cursor REFCURSOR;
rowvar record;
expected_record record;
result_jsonb jsonb[];
expected_cursor REFCURSOR;
BEGIN
PERFORM insert_data_one_partition('test_input_data.batch1_dev1', get_partition_for_key('dev1'::text, 10 :: SMALLINT), 10 :: SMALLINT);
-- Test json output from query (q1_response_json.csv)
SELECT ARRAY(SELECT * FROM ioql_exec_query(new_ioql_query(namespace_name => '33_testNs'))) INTO result;
SELECT ARRAY(SELECT * FROM test_outputs.q1_response_json) INTO expected;
IF to_jsonb(result) != to_jsonb(expected) THEN
    SELECT assert.fail('Bad JSON rows returned from query.') INTO message;
    RETURN message;
END IF;
-- Test cursor based queries, compare to json table (q1_response_json.csv)
SELECT ioql_exec_query_record_cursor(new_ioql_query(namespace_name => '33_testNs'), 'cursor') into cursor;
FOREACH expected_row IN ARRAY expected
LOOP
    FETCH cursor INTO rowvar;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Expected row: % got nothing', to_jsonb(expected_row);
    END IF;
    IF to_jsonb(expected_row) != to_jsonb(rowvar) THEN
        RAISE EXCEPTION 'Expected row: % got: %', to_jsonb(expected_row), to_jsonb(rowvar);
    END IF;
END LOOP;
-- Test cursor based queries, compare to records table (q1_response_fields.csv)
CLOSE cursor;
SELECT ioql_exec_query_record_cursor(new_ioql_query(namespace_name => '33_testNs'), 'cursor') into cursor;
FOR expected_record IN SELECT * FROM test_outputs.q1_response_fields
LOOP
    FETCH cursor INTO rowvar;
    IF NOT FOUND THEN
        RAISE EXCEPTION 'Expected row: % got nothing', to_jsonb(expected_record);
    END IF;
    -- record comparison fails on columns of different types
    IF expected_record != rowvar THEN
        RAISE EXCEPTION 'Expected row: % got: %', to_jsonb(expected_record), to_jsonb(rowvar);
    END IF;
END LOOP;
SELECT assert.ok('End of test.') INTO message;
RETURN message;
END
$$
LANGUAGE plpgsql;
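Assuming plpgunit's usual entry point (installed by 1.install-unit-test.sql in the runner below), the whole suite, including this test, can then be run with:

psql -U postgres -h localhost -d Test1 -c "SELECT * FROM unit_tests.begin();"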

@@ -0,0 +1,63 @@
DROP FUNCTION IF EXISTS unit_tests.kafka_get_start_and_next_offset_test();
CREATE FUNCTION unit_tests.kafka_get_start_and_next_offset_test()
RETURNS test_result
AS
$$
DECLARE
message test_result;
start_offset_var integer;
next_offset_var integer;
DEFAULT_START_OFFSET integer;
BEGIN
DEFAULT_START_OFFSET := 42;
SELECT start_offset, next_offset FROM kafka_get_start_and_next_offset('topic'::text, 0::SMALLINT, DEFAULT_START_OFFSET)
INTO start_offset_var, next_offset_var;
IF start_offset_var != DEFAULT_START_OFFSET THEN
    SELECT assert.fail('Bad default start offset.') INTO message;
    RETURN message;
END IF;
IF next_offset_var != DEFAULT_START_OFFSET THEN
    SELECT assert.fail('Bad initial next_offset.') INTO message;
    RETURN message;
END IF;
PERFORM kafka_set_next_offset(
    topic => 'topic'::text,
    partition_number => 0::SMALLINT,
    start_offset => DEFAULT_START_OFFSET,
    next_offset => DEFAULT_START_OFFSET + 1
);
SELECT start_offset, next_offset FROM kafka_get_start_and_next_offset('topic'::text, 0::SMALLINT, DEFAULT_START_OFFSET)
INTO start_offset_var, next_offset_var;
IF start_offset_var != DEFAULT_START_OFFSET THEN
    SELECT assert.fail('Bad start offset after update.') INTO message;
    RETURN message;
END IF;
IF next_offset_var != DEFAULT_START_OFFSET + 1 THEN
    SELECT assert.fail('Bad next offset after update.') INTO message;
    RETURN message;
END IF;
BEGIN
    PERFORM kafka_set_next_offset(
        topic => 'newtopic'::text,
        partition_number => 0::SMALLINT,
        start_offset => DEFAULT_START_OFFSET,
        next_offset => DEFAULT_START_OFFSET + 1
    );
    -- setting an offset for an unknown topic should throw; fail if it did not
    SELECT assert.fail('Expected exception IO501 was not thrown.') INTO message;
    RETURN message;
EXCEPTION
    WHEN sqlstate 'IO501' THEN
        RAISE NOTICE 'right exception thrown';
END;
SELECT assert.ok('End of test.') INTO message;
RETURN message;
END
$$
LANGUAGE plpgsql;
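For context, a consumer would presumably wrap its offset bookkeeping and its inserts in one transaction using this same pair of functions; a rough sketch with a made-up topic and offsets:

psql -U postgres -h localhost -d Test1 -v ON_ERROR_STOP=1 <<EOF
BEGIN;
-- read the committed position for partition 0, defaulting to offset 0 on first run
SELECT start_offset, next_offset FROM kafka_get_start_and_next_offset('topic'::text, 0::SMALLINT, 0);
-- ... INSERT the batch of messages fetched from Kafka here ...
-- then advance the committed position (offsets are illustrative)
SELECT kafka_set_next_offset(topic => 'topic'::text, partition_number => 0::SMALLINT, start_offset => 0, next_offset => 100);
COMMIT;
EOF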

@@ -1,4 +1,3 @@
 \ir ../../main/names.sql
 DROP FUNCTION IF EXISTS unit_tests.names_tests_start_set();
@@ -23,7 +22,6 @@ END
 $$
 LANGUAGE plpgsql;
 DROP FUNCTION IF EXISTS unit_tests.names_tests_stop_set();
 CREATE FUNCTION unit_tests.names_tests_stop_set()
 RETURNS test_result

@@ -13,15 +13,23 @@ INSTALL_DB=${INSTALL_DB:-Test1}
 echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER"
 NODES="Test1 test2"
+SETUPDIR="../../setup"
 if [ $RESET_POSTGRES_DB == "true" ]; then
     echo "Cleaning up DB"
-    ../../setup/setup_meta.sh
+    $SETUPDIR/setup_meta.sh
+    $SETUPDIR/add_cluster_user.sh postgres
     for node in $NODES; do
-        ../../setup/setup_node.sh $node
-        ../../setup/setup_kafka.sh $node
+        $SETUPDIR/setup_node.sh $node
+        $SETUPDIR/setup_kafka.sh $node
+        $SETUPDIR/add_node.sh $node $POSTGRES_HOST
     done
+    ./add_test_inputs.sh
+    ./add_test_outputs.sh
     psql -U $POSTGRES_USER -h $POSTGRES_HOST -v ON_ERROR_STOP=1 -d $INSTALL_DB -f ../../plpgunit/install/1.install-unit-test.sql
 fi
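With these changes, a full reset-and-run would look roughly like this (the runner's file name is not shown in this diff; run_tests.sh is a guess):

RESET_POSTGRES_DB=true POSTGRES_HOST=localhost ./run_tests.sh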