From e15da09f4576abd9c882aeec2f9978841de09c68 Mon Sep 17 00:00:00 2001
From: Olof Rensfelt
Date: Fri, 11 Nov 2016 15:10:43 +0100
Subject: [PATCH] add test from postgres-kafka-consumer

Added missing files.
---
 sql/setup/add_cluster_user.sh                | 34 +++++++++
 sql/setup/add_node.sh                        | 27 +++++++
 sql/tests/unit/add_test_inputs.sh            | 59 +++++++++++++++
 sql/tests/unit/add_test_outputs.sh           | 47 ++++++++++++
 sql/tests/unit/expected_outputs/README.md    | 29 ++++++++
 sql/tests/unit/expected_outputs/example.csv  |  3 +
 .../expected_outputs/q1_response_fields.csv  |  6 ++
 .../expected_outputs/q1_response_json.csv    |  6 ++
 sql/tests/unit/input_data/README.md          |  4 ++
 sql/tests/unit/input_data/batch1_dev1.tsv    |  5 ++
 sql/tests/unit/input_data/batch1_dev2.tsv    |  2 +
 sql/tests/unit/input_data/batch2_dev1.tsv    |  3 +
 sql/tests/unit/ioql_test.sql                 | 71 +++++++++++++++++++
 .../unit/kafka_offset_functions_test.sql     | 63 ++++++++++++++++
 sql/tests/unit/names_test.sql                |  2 -
 sql/tests/unit/run.sh                        | 14 +++-
 16 files changed, 370 insertions(+), 5 deletions(-)
 create mode 100755 sql/setup/add_cluster_user.sh
 create mode 100755 sql/setup/add_node.sh
 create mode 100755 sql/tests/unit/add_test_inputs.sh
 create mode 100755 sql/tests/unit/add_test_outputs.sh
 create mode 100644 sql/tests/unit/expected_outputs/README.md
 create mode 100644 sql/tests/unit/expected_outputs/example.csv
 create mode 100644 sql/tests/unit/expected_outputs/q1_response_fields.csv
 create mode 100644 sql/tests/unit/expected_outputs/q1_response_json.csv
 create mode 100644 sql/tests/unit/input_data/README.md
 create mode 100644 sql/tests/unit/input_data/batch1_dev1.tsv
 create mode 100644 sql/tests/unit/input_data/batch1_dev2.tsv
 create mode 100644 sql/tests/unit/input_data/batch2_dev1.tsv
 create mode 100644 sql/tests/unit/ioql_test.sql
 create mode 100644 sql/tests/unit/kafka_offset_functions_test.sql

diff --git a/sql/setup/add_cluster_user.sh b/sql/setup/add_cluster_user.sh
new file mode 100755
index 000000000..02eadabc3
--- /dev/null
+++ b/sql/setup/add_cluster_user.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+
+# To avoid pw writing, add localhost:5432:*:postgres:test to ~/.pgpass
+set -u
+set -e
+
+PWD=`pwd`
+DIR=`dirname $0`
+
+POSTGRES_HOST=${POSTGRES_HOST:-localhost}
+POSTGRES_USER=${POSTGRES_USER:-postgres}
+INSTALL_DB=${INSTALL_DB:-meta}
+
+if [[ "$#" -eq 0 || "$#" -gt 2 ]] ; then
+    echo "usage: $0 user [pass]"
+    exit 1
+fi
+
+if [ "$#" == 2 ] ; then
+    PASS=$2
+else
+    PASS="NULL"
+fi
+
+PG_USER_TO_ADD=$1
+
+echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER and with db $INSTALL_DB"
+echo "SELECT add_cluster_user('$PG_USER_TO_ADD', $PASS);"
+cd $DIR
+psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB -v ON_ERROR_STOP=1 < $TEMPFILE
+
+psql -U $POSTGRES_USER -h $POSTGRES_HOST -d $INSTALL_DB_MAIN -v ON_ERROR_STOP=1 < '33_testNs'))) into result;
+    SELECT Array (select * FROM test_outputs.q1_response_json) into expected;
+
+    IF to_jsonb(result) != to_jsonb(expected) THEN
+        SELECT assert.fail('Bad json return from rows from query.') INTO message;
+        RETURN message;
+    END IF;
+
+    -- Test cursor based queries, compare to json table (q1_response_json.csv)
+    SELECT ioql_exec_query_record_cursor(new_ioql_query(namespace_name => '33_testNs'), 'cursor') into cursor;
+
+    FOREACH expected_row IN ARRAY expected
+    LOOP
+        FETCH cursor into rowvar;
+        IF FOUND = FALSE THEN
+            raise exception 'Expected row: %v got nothing', to_jsonb(expected_row);
+            EXIT;
+        END IF;
+
+        IF to_jsonb(expected_row) != to_jsonb(rowvar) THEN
+            raise exception 'Expected row: %v got: %v', to_jsonb(expected_row), to_jsonb(rowvar);
+        END IF;
+    END LOOP;
+
+    -- Test cursor based queries, compare to records table (q1_response_fields.csv)
+
+    CLOSE cursor;
+
+    SELECT ioql_exec_query_record_cursor(new_ioql_query(namespace_name => '33_testNs'), 'cursor') into cursor;
+
+    FOR expected_record in SELECT * FROM test_outputs.q1_response_fields
+    LOOP
+        FETCH cursor into rowvar;
+        IF FOUND = FALSE THEN
+            raise exception 'Expected row: %v got nothing', to_jsonb(expected_record);
+            EXIT;
+        END IF;
+
+        -- Record comparison fails on different types of columns
+        IF expected_record != rowvar THEN
+            raise exception 'Expected row: %v got: %v', to_jsonb(expected_record), to_jsonb(rowvar);
+        END IF;
+    END LOOP;
+
+    SELECT assert.ok('End of test.') INTO message;
+    RETURN message;
+END
+$$
+LANGUAGE plpgsql;
\ No newline at end of file
diff --git a/sql/tests/unit/kafka_offset_functions_test.sql b/sql/tests/unit/kafka_offset_functions_test.sql
new file mode 100644
index 000000000..dc32eb73e
--- /dev/null
+++ b/sql/tests/unit/kafka_offset_functions_test.sql
@@ -0,0 +1,63 @@
+
+DROP FUNCTION IF EXISTS unit_tests.kafka_get_start_and_next_offset_test();
+CREATE FUNCTION unit_tests.kafka_get_start_and_next_offset_test()
+RETURNS test_result
+AS
+$$
+DECLARE
+message test_result;
+start_offset_var integer;
+next_offset_var integer;
+DEFAULT_START_OFFSET integer;
+BEGIN
+    DEFAULT_START_OFFSET := 42;
+    SELECT start_offset, next_offset FROM kafka_get_start_and_next_offset('topic'::text, 0::SMALLINT, DEFAULT_START_OFFSET)
+    INTO start_offset_var, next_offset_var;
+
+    IF start_offset_var != DEFAULT_START_OFFSET THEN
+        SELECT assert.fail('Bad default start offset.') INTO message;
+        RETURN message;
+    END IF;
+
+    IF next_offset_var != DEFAULT_START_OFFSET THEN
+        SELECT assert.fail('Bad initial next_offset.') INTO message;
+        RETURN message;
+    END IF;
+
+    PERFORM kafka_set_next_offset(
+        topic => 'topic'::text,
+        partition_number => 0::SMALLINT,
+        start_offset => DEFAULT_START_OFFSET,
+        next_offset => DEFAULT_START_OFFSET + 1
+    );
+
+    SELECT start_offset, next_offset FROM kafka_get_start_and_next_offset('topic'::text, 0::SMALLINT, DEFAULT_START_OFFSET)
+    INTO start_offset_var, next_offset_var;
+
+    IF start_offset_var != DEFAULT_START_OFFSET THEN
+        SELECT assert.fail('Bad start offset after update.') INTO message;
+        RETURN message;
+    END IF;
+
+    IF next_offset_var != DEFAULT_START_OFFSET + 1 THEN
+        SELECT assert.fail('Bad next offset after update.') INTO message;
+        RETURN message;
+    END IF;
+
+    BEGIN
+        PERFORM kafka_set_next_offset(
+            topic => 'newtopic'::text,
+            partition_number => 0::SMALLINT,
+            start_offset => DEFAULT_START_OFFSET,
+            next_offset => DEFAULT_START_OFFSET + 1
+        );
+    EXCEPTION
+        WHEN sqlstate 'IO501' THEN
+            RAISE NOTICE 'right exception thrown';
+    END;
+
+    SELECT assert.ok('End of test.') INTO message;
+    RETURN message;
+END
+$$
+LANGUAGE plpgsql;
diff --git a/sql/tests/unit/names_test.sql b/sql/tests/unit/names_test.sql
index c9879ba70..4fed268ad 100644
--- a/sql/tests/unit/names_test.sql
+++ b/sql/tests/unit/names_test.sql
@@ -1,4 +1,3 @@
-
 \ir ../../main/names.sql
 
 DROP FUNCTION IF EXISTS unit_tests.names_tests_start_set();
@@ -23,7 +22,6 @@ END
 $$
 LANGUAGE plpgsql;
 
-
 DROP FUNCTION IF EXISTS unit_tests.names_tests_stop_set();
 CREATE FUNCTION unit_tests.names_tests_stop_set()
 RETURNS test_result
diff --git a/sql/tests/unit/run.sh b/sql/tests/unit/run.sh
index 5c96f2f80..b6f8d8fbc 100755
--- a/sql/tests/unit/run.sh
+++ b/sql/tests/unit/run.sh
@@ -13,15 +13,23 @@ INSTALL_DB=${INSTALL_DB:-Test1}
 echo "Connecting to $POSTGRES_HOST as user $POSTGRES_USER"
 
 NODES="Test1 test2"
+SETUPDIR="../../setup"
+
 if [ $RESET_POSTGRES_DB == "true" ]; then
     echo "Cleaning up DB"
-    ../../setup/setup_meta.sh
+    $SETUPDIR/setup_meta.sh
+    $SETUPDIR/add_cluster_user.sh postgres
+
     for node in $NODES; do
-        ../../setup/setup_node.sh $node
-        ../../setup/setup_kafka.sh $node
+        $SETUPDIR/setup_node.sh $node
+        $SETUPDIR/setup_kafka.sh $node
+        $SETUPDIR/add_node.sh $node $POSTGRES_HOST
     done
+    ./add_test_inputs.sh
+    ./add_test_outputs.sh
+
     psql -U $POSTGRES_USER -h $POSTGRES_HOST -v ON_ERROR_STOP=1 -d $INSTALL_DB -f ../../plpgunit/install/1.install-unit-test.sql
 fi