test and bug fixes; getting rid of jinja2

This commit is contained in:
Matvey Arye 2016-10-26 17:02:08 -04:00
parent d300471c86
commit a075899beb
25 changed files with 439 additions and 182 deletions

View File

@ -5,7 +5,7 @@ CREATE TABLE IF NOT EXISTS node (
hostname TEXT NOT NULL
);
CREATE TABLE cluster_user (
CREATE TABLE IF NOT EXISTS cluster_user (
username TEXT PRIMARY KEY NOT NULL,
password TEXT --not any more of a security hole than usual since stored in pg_user_mapping anyway
);

View File

@ -18,7 +18,8 @@ BEGIN
END LOOP;
RETURN NEW;
END
$BODY$;
$BODY$
SET SEARCH_PATH = 'public';
BEGIN;
DROP TRIGGER IF EXISTS trigger_on_create_cluster_user

View File

@ -65,7 +65,8 @@ $BODY$;
CREATE OR REPLACE FUNCTION create_remote_distinct_table(
schema_name NAME,
table_name NAME,
local_table_name NAME,
remote_table_name NAME,
cluster_table_name NAME,
server_name NAME
)
@ -74,9 +75,10 @@ $BODY$
BEGIN
EXECUTE format(
$$
CREATE FOREIGN TABLE IF NOT EXISTS %1$I.%2$I () INHERITS(%1$I.%3$I) SERVER %4$I OPTIONS (schema_name '%1$I')
CREATE FOREIGN TABLE IF NOT EXISTS %1$I.%2$I () INHERITS(%1$I.%3$I) SERVER %4$I
OPTIONS (schema_name %1$L, table_name %5$L)
$$,
schema_name, table_name, cluster_table_name, server_name);
schema_name, remote_table_name, cluster_table_name, server_name, local_table_name);
END
$BODY$;
@ -98,7 +100,8 @@ $BODY$;
CREATE OR REPLACE FUNCTION create_remote_table(
schema_name NAME,
table_name NAME,
remote_table_name NAME,
master_table_name NAME,
cluster_table_name NAME,
server_name NAME
)
@ -107,9 +110,9 @@ $BODY$
BEGIN
EXECUTE format(
$$
CREATE FOREIGN TABLE IF NOT EXISTS %1$I.%2$I () INHERITS(%1$I.%3$I) SERVER %4$I OPTIONS (schema_name '%1$I')
CREATE FOREIGN TABLE IF NOT EXISTS %1$I.%2$I () INHERITS(%1$I.%3$I) SERVER %4$I OPTIONS (schema_name %1$L, table_name %5$L)
$$,
schema_name, table_name, cluster_table_name, server_name);
schema_name, remote_table_name, cluster_table_name, server_name, master_table_name);
END
$BODY$;
@ -165,9 +168,11 @@ BEGIN
FROM node AS n
WHERE n.database_name = NEW.database_name;
PERFORM create_remote_table(namespace_row.schema_name, NEW.remote_table_name, namespace_row.cluster_table_name,
PERFORM create_remote_table(namespace_row.schema_name, NEW.remote_table_name, NEW.master_table_name,
namespace_row.cluster_table_name,
node_row.server_name);
PERFORM create_remote_distinct_table(namespace_row.schema_name, NEW.distinct_remote_table_name,
PERFORM create_remote_distinct_table(namespace_row.schema_name, NEW.distinct_local_table_name,
NEW.distinct_remote_table_name,
namespace_row.cluster_distinct_table_name,
node_row.server_name);
END IF;
@ -177,7 +182,8 @@ $BODY$
SET SEARCH_PATH = 'public';
BEGIN;
DROP TRIGGER IF EXISTS trigger_on_create_namespace_node ON namespace_node;
DROP TRIGGER IF EXISTS trigger_on_create_namespace_node
ON namespace_node;
CREATE TRIGGER trigger_on_create_namespace_node AFTER INSERT OR UPDATE OR DELETE ON namespace_node
FOR EACH ROW EXECUTE PROCEDURE on_create_namespace_node();
COMMIT;

View File

@ -1,6 +1,6 @@
CREATE OR REPLACE FUNCTION get_partition_for_key(key TEXT, num_nodes INT)
RETURNS INT LANGUAGE SQL IMMUTABLE STRICT AS $$
SELECT (public.hash_string(key, 'murmur3' :: TEXT, 1 :: INT4) & x'7fffffff' :: INTEGER) % num_nodes;
CREATE OR REPLACE FUNCTION get_partition_for_key(key TEXT, num_nodes SMALLINT)
RETURNS SMALLINT LANGUAGE SQL IMMUTABLE STRICT AS $$
SELECT ((public.hash_string(key, 'murmur3' :: TEXT, 1 :: INT4) & x'7fffffff' :: INTEGER) % num_nodes)::SMALLINT;
$$;

View File

@ -26,7 +26,7 @@ CREATE TABLE IF NOT EXISTS data_table (
);
--TODO: any constraints for when total_partitions change?
CREATE SEQUENCE data_table_index_name_prefix;
CREATE SEQUENCE IF NOT EXISTS data_table_index_name_prefix;
CREATE TABLE IF NOT EXISTS data_table_index (
table_oid REGCLASS NOT NULL REFERENCES data_table (table_oid) ON DELETE CASCADE,

View File

@ -45,6 +45,11 @@ BEGIN
INSERT INTO %I.node SELECT * from node;
$$,
NEW.schema_name);
EXECUTE format(
$$
INSERT INTO %I.cluster_user SELECT * from cluster_user;
$$,
NEW.schema_name);
FOR schema_name IN
SELECT n.schema_name

View File

@ -1,48 +0,0 @@
{% macro create_database(db) %}
DROP DATABASE IF EXISTS "{{db}}";
CREATE DATABASE "{{db}}";
{%- endmacro %}
{% macro load_scripts(db) %}
\c {{db}}
\ir ../common/extensions.sql
\ir ../common/types.sql
\ir ../common/tables.sql
\ir ../common/cluster_setup_functions.sql
{%- endmacro %}
{% macro load_scripts_main(db) %}
\c {{db}}
\ir ../main/cluster_user_triggers.sql
\ir ../main/node_triggers.sql
\ir ../main/namespace_triggers.sql
\ir ../main/field_triggers.sql
\ir ../main/partitioning.sql
\ir ../main/schema_info.sql
\ir ../main/names.sql
\ir ../main/tables.sql
\ir ../main/data_table_triggers.sql
\ir ../main/partition_table_triggers.sql
\ir ../main/data_table_constructors.sql
\ir ../main/insert.sql
{%- endmacro %}
{% macro load_scripts_main_kafka(db) %}
\c {{db}}
\ir ../main/kafka_offset_table.sql
\ir ../main/kafka_offset_node_trigger.sql
\ir ../main/kafka_offset_functions.sql
{%- endmacro %}
{% macro load_scripts_meta() %}
\c meta
\ir ../meta/names.sql
\ir ../meta/cluster.sql
\ir ../meta/node_triggers.sql
\ir ../meta/cluster_user_triggers.sql
\ir ../meta/namespace_triggers.sql
\ir ../meta/field_triggers.sql
{%- endmacro %}

View File

@ -1,28 +1,13 @@
\set ON_ERROR_STOP 1
{% set databases = ['Test1', 'test2'] %}
{% import 'admin.sql.j2' as admin %}
{{admin.create_database('meta')}}
{% for db in databases -%}
{{admin.create_database(db)}}
{%- endfor %}
{{admin.load_scripts('meta')}}
{{admin.load_scripts_meta()}}
{% for db in databases -%}
{{admin.load_scripts(db)}}
{{admin.load_scripts_main(db)}}
{%- endfor %}
\ir create_clustered_db.sql
\set ECHO ALL
\c meta
SELECT add_cluster_user('postgres', NULL);
{% for db in databases -%}
SELECT add_node('{{ db }}' :: NAME, 'localhost');
{%- endfor %}
SELECT add_node('Test1' :: NAME, 'localhost');
SELECT add_node('test2' :: NAME, 'localhost');
SELECT add_namespace('testNs' :: NAME);
SELECT add_field('testNs' :: NAME, 'Device_id', 'text', TRUE, TRUE, ARRAY['TIME-VALUE'] :: field_index_type []);

View File

@ -0,0 +1,18 @@
-- Rebuild the test cluster from scratch: one meta database plus two data
-- databases ("Test1" is quoted to preserve its mixed case).
DROP DATABASE IF EXISTS meta;
DROP DATABASE IF EXISTS "Test1";
DROP DATABASE IF EXISTS test2;
CREATE DATABASE meta;
CREATE DATABASE "Test1";
CREATE DATABASE test2;
-- meta database gets the common schema plus the meta-only scripts.
\c meta
\ir load_common.sql
\ir load_meta.sql
-- Each data node gets the common schema plus the main (data-node) scripts.
\c Test1
\ir load_common.sql
\ir load_main.sql
\c test2
\ir load_common.sql
\ir load_main.sql

View File

Can't render this file because it contains an unexpected character in line 1 and column 29.

View File

@ -0,0 +1,3 @@
testNs 1257987600000000000 {"device_id":"dev1","series_0":1.5,"series_1":1,"series_3":3,"series_4":4}
testNs 1257987600000000000 {"device_id":"dev1","series_0":1.5,"series_1":2}
testNs 1257894002000000000 {"device_id":"dev1","series_0":2.5,"series_1":3,"series_4":4}
Can't render this file because it contains an unexpected character in line 1 and column 29.

View File

@ -0,0 +1,2 @@
testNs 1257894000000000000 {"series_1":1,"series_5":5,"series_6":6,"device_id":"dev2","series_0":1.5}
testNs 1257894000000000000 {"series_1":2,"series_6":6.1,"device_id":"dev2","series_0":1.5}
Can't render this file because it contains an unexpected character in line 1 and column 29.

View File

@ -5,12 +5,13 @@ SELECT add_cluster_user('postgres', NULL);
(1 row)
SELECT add_node('Test1' :: NAME, 'localhost');SELECT add_node('test2' :: NAME, 'localhost');
SELECT add_node('Test1' :: NAME, 'localhost');
add_node
----------
(1 row)
SELECT add_node('test2' :: NAME, 'localhost');
add_node
----------
@ -259,7 +260,7 @@ Indexes:
"4-really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_" btree (really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_an, "time" DESC NULLS LAST) WHERE really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_an IS NOT NULL
"data_0_10_1477008000_pidx" btree ("time" DESC NULLS LAST, "Device_id")
Check constraints:
"partition" CHECK (get_partition_for_key("Device_id", 10) = 0)
"partition" CHECK (get_partition_for_key("Device_id", '10'::smallint) = '0'::smallint)
"time_range" CHECK ("time" >= '1477008000000000000'::bigint AND "time" <= NULL::bigint)
Inherits: "testNs".partition_0_10
@ -274,7 +275,7 @@ Inherits: "testNs".partition_0_10
latitude | bigint | | plain | |
really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_an | bigint | | plain | |
Check constraints:
"partition" CHECK (get_partition_for_key("Device_id", 10) = 0)
"partition" CHECK (get_partition_for_key("Device_id", '10'::smallint) = '0'::smallint)
Inherits: "testNs".master
Child tables: "testNs".data_0_10_1477008000
@ -301,7 +302,7 @@ Indexes:
"4-really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_" btree (really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_an, "time" DESC NULLS LAST) WHERE really_long_field_goes_on_and_on_and_on_and_on_and_on_and_on_an IS NOT NULL
"data_0_10_1477008000_pidx" btree ("time" DESC NULLS LAST, "Device_id")
Check constraints:
"partition" CHECK (get_partition_for_key("Device_id", 10) = 0)
"partition" CHECK (get_partition_for_key("Device_id", '10'::smallint) = '0'::smallint)
"time_range" CHECK ("time" >= '1477008000000000000'::bigint AND "time" <= '1477094399999999999'::bigint)
Inherits: "testNs".partition_0_10

View File

@ -0,0 +1,157 @@
\c meta
SELECT add_cluster_user('postgres', NULL);
add_cluster_user
------------------
(1 row)
SELECT add_node('Test1' :: NAME, 'localhost');
add_node
----------
(1 row)
SELECT add_node('test2' :: NAME, 'localhost');
add_node
----------
(1 row)
SELECT add_namespace('testNs' :: NAME);
add_namespace
---------------
(1 row)
SELECT add_field('testNs' :: NAME, 'device_id', 'text', TRUE, TRUE, ARRAY['VALUE-TIME'] :: field_index_type []);
add_field
-----------
(1 row)
SELECT add_field('testNs' :: NAME, 'series_0', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
add_field
-----------
(1 row)
SELECT add_field('testNs' :: NAME, 'series_1', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
add_field
-----------
(1 row)
SELECT add_field('testNs' :: NAME, 'series_2', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
add_field
-----------
(1 row)
SELECT add_field('testNs' :: NAME, 'series_bool', 'boolean', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
add_field
-----------
(1 row)
\c Test1
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
create_temp_copy_table_one_partition
--------------------------------------
copy_t
(1 row)
\COPY copy_t FROM 'data/ds1_dev1_1.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
insert_data_one_partition
---------------------------
(1 row)
COMMIT;
SELECT close_data_table_end(dt.table_oid) FROM data_table dt WHERE dt.namespace_name = 'testNs';
close_data_table_end
----------------------
(1 row)
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
create_temp_copy_table_one_partition
--------------------------------------
copy_t
(1 row)
\COPY copy_t FROM 'data/ds1_dev1_2.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
insert_data_one_partition
---------------------------
(1 row)
COMMIT;
\c test2
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev2', 10::SMALLINT),10::SMALLINT);
create_temp_copy_table_one_partition
--------------------------------------
copy_t
(1 row)
\COPY copy_t FROM 'data/ds1_dev2_1.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev2', 10::SMALLINT),10::SMALLINT);
insert_data_one_partition
---------------------------
(1 row)
COMMIT;
\c Test1
\dt "testNs".*
List of relations
Schema | Name | Type | Owner
--------+----------------------+-------+----------
testNs | cluster | table | postgres
testNs | data_4_10_1257811200 | table | postgres
testNs | data_4_10_1257984000 | table | postgres
testNs | distinct | table | postgres
testNs | local_distinct | table | postgres
testNs | master | table | postgres
testNs | partition_4_10 | table | postgres
(7 rows)
\c test2
\dt "testNs".*
List of relations
Schema | Name | Type | Owner
--------+----------------------+-------+----------
testNs | cluster | table | postgres
testNs | data_0_10_1257811200 | table | postgres
testNs | distinct | table | postgres
testNs | local_distinct | table | postgres
testNs | master | table | postgres
testNs | partition_0_10 | table | postgres
(6 rows)
SELECT * FROM "testNs".cluster;
time | device_id | series_0 | series_1 | series_2 | series_bool
---------------------+-----------+----------+----------+----------+-------------
1257894000000000000 | dev1 | 1.5 | 1 | 2 | t
1257894000000000000 | dev1 | 1.5 | 2 | |
1257894000000001000 | dev1 | 2.5 | 3 | |
1257894001000000000 | dev1 | 3.5 | 4 | |
1257897600000000000 | dev1 | 4.5 | 5 | | f
1257894002000000000 | dev1 | 2.5 | 3 | |
1257987600000000000 | dev1 | 1.5 | 1 | |
1257987600000000000 | dev1 | 1.5 | 2 | |
1257894000000000000 | dev2 | 1.5 | 1 | |
1257894000000000000 | dev2 | 1.5 | 2 | |
(10 rows)
SELECT * FROM "testNs".distinct;
field | value | last_time_approx
-----------+-------+---------------------
device_id | dev1 | 1258074000000000000
device_id | dev2 | 1257894000000000000
(2 rows)

View File

@ -0,0 +1,105 @@
\c meta
SELECT add_cluster_user('postgres', NULL);
add_cluster_user
------------------
(1 row)
SELECT add_node('Test1' :: NAME, 'localhost');
add_node
----------
(1 row)
SELECT add_node('test2' :: NAME, 'localhost');
add_node
----------
(1 row)
\c Test1
\dt public.*
List of relations
Schema | Name | Type | Owner
--------+----------------------+-------+----------
public | cluster_user | table | postgres
public | data_table | table | postgres
public | data_table_index | table | postgres
public | field | table | postgres
public | kafka_offset_cluster | table | postgres
public | kafka_offset_local | table | postgres
public | kafka_offset_node | table | postgres
public | namespace | table | postgres
public | namespace_node | table | postgres
public | node | table | postgres
public | partition_table | table | postgres
(11 rows)
\det+ public.*
List of foreign tables
Schema | Table | Server | FDW Options | Description
--------+---------------------------+--------+---------------------------------------------------------+-------------
public | kafka_offset_remote_test2 | test2 | (schema_name 'public', table_name 'kafka_offset_local') |
(1 row)
SELECT * FROM kafka_get_start_and_next_offset('topic', 0::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
0 | 0
(1 row)
SELECT * FROM kafka_get_start_and_next_offset('topic', 0::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
0 | 0
(1 row)
SELECT * FROM kafka_set_next_offset('topic', 0::SMALLINT, 0, 100);
kafka_set_next_offset
-----------------------
(1 row)
SELECT * FROM kafka_get_start_and_next_offset('topic', 0::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
0 | 100
(1 row)
SELECT * FROM kafka_get_start_and_next_offset('topic', 1::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
0 | 0
(1 row)
\c test2
SELECT * FROM kafka_get_start_and_next_offset('topic', 0::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
100 | 100
(1 row)
\set ON_ERROR_STOP 0
SELECT * FROM kafka_set_next_offset('topic', 0::SMALLINT, 0, 101);
\set ON_ERROR_STOP 1
SELECT * FROM kafka_set_next_offset('topic', 0::SMALLINT, 100, 101);
kafka_set_next_offset
-----------------------
(1 row)
\c Test1
SELECT * FROM kafka_get_start_and_next_offset('topic', 0::SMALLINT, 0);
start_offset | next_offset
--------------+-------------
101 | 101
(1 row)
SELECT * FROM kafka_offset_local;
topic | partition_number | start_offset | next_offset | database_name
-------+------------------+--------------+-------------+---------------
topic | 0 | 0 | 100 | Test1
topic | 1 | 0 | 0 | Test1
topic | 0 | 101 | 101 | Test1
(3 rows)

View File

@ -1,21 +0,0 @@
\set ON_ERROR_STOP 1
{% set databases = ['Test1', 'test2'] %}
{% import 'admin.sql.j2' as admin %}
{{admin.create_database('meta')}}
{% for db in databases -%}
{{admin.create_database(db)}}
{%- endfor %}
{{admin.load_scripts('meta')}}
{{admin.load_scripts('meta')}}
{{admin.load_scripts_meta()}}
{{admin.load_scripts_meta()}}
{% for db in databases -%}
{{admin.load_scripts(db)}}
{{admin.load_scripts(db)}}
{{admin.load_scripts_main(db)}}
{{admin.load_scripts_main(db)}}
{%- endfor %}

View File

@ -0,0 +1,15 @@
-- Cluster test driver: recreate the clustered databases, then reload the
-- common/meta/main scripts into each database.
\set ON_ERROR_STOP 1
\ir create_clustered_db.sql
\c meta
\ir load_common.sql
\ir load_meta.sql
\c Test1
\ir load_common.sql
\ir load_main.sql
\c test2
\ir load_common.sql
\ir load_main.sql

47
sql/tests/insert.sql Normal file
View File

@ -0,0 +1,47 @@
-- Insert test: set up a cluster, register a namespace with fields, copy TSV
-- fixtures into per-partition temp tables on each data node, and dump the
-- resulting cluster/distinct views for golden-file comparison.
\set ON_ERROR_STOP 1
\ir create_clustered_db.sql
\set ECHO ALL
-- Register users, nodes, the test namespace and its fields via meta.
\c meta
SELECT add_cluster_user('postgres', NULL);
SELECT add_node('Test1' :: NAME, 'localhost');
SELECT add_node('test2' :: NAME, 'localhost');
SELECT add_namespace('testNs' :: NAME);
SELECT add_field('testNs' :: NAME, 'device_id', 'text', TRUE, TRUE, ARRAY['VALUE-TIME'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_0', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_1', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_2', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_bool', 'boolean', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
-- Load dev1 data into Test1; the copy table targets the partition that
-- get_partition_for_key('dev1', 10) selects.
\c Test1
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
\COPY copy_t FROM 'data/ds1_dev1_1.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
COMMIT;
-- Close the open data table so the second batch lands in a new one.
SELECT close_data_table_end(dt.table_oid) FROM data_table dt WHERE dt.namespace_name = 'testNs';
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
\COPY copy_t FROM 'data/ds1_dev1_2.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev1', 10::SMALLINT),10::SMALLINT);
COMMIT;
-- Load dev2 data into test2 the same way.
\c test2
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',get_partition_for_key('dev2', 10::SMALLINT),10::SMALLINT);
\COPY copy_t FROM 'data/ds1_dev2_1.tsv';
SELECT * FROM insert_data_one_partition('copy_t',get_partition_for_key('dev2', 10::SMALLINT),10::SMALLINT);
COMMIT;
-- Inspect resulting tables and the cluster-wide views on both nodes.
\c Test1
\dt "testNs".*
\c test2
\dt "testNs".*
SELECT * FROM "testNs".cluster;
SELECT * FROM "testNs".distinct;

View File

@ -1,40 +0,0 @@
\set ON_ERROR_STOP 1
{% set databases = ['Test1', 'test2'] %}
{% import 'admin.sql.j2' as admin %}
{{admin.create_database('meta')}}
{% for db in databases -%}
{{admin.create_database(db)}}
{%- endfor %}
{{admin.load_scripts('meta')}}
{{admin.load_scripts_meta()}}
{% for db in databases -%}
{{admin.load_scripts(db)}}
{{admin.load_scripts_main(db)}}
{%- endfor %}
{% for db in databases -%}
{{admin.init_fdw(db, databases)}}
{%- endfor %}
\set ECHO ALL
\c meta
{% for db in databases -%}
SELECT add_node('{{ db }}' :: NAME, '{{ db }}' :: NAME);
{%- endfor %}
SELECT add_namespace('testNs' :: NAME);
SELECT add_field('testNs' :: NAME, 'device_id', 'text', TRUE, TRUE, ARRAY['VALUE-TIME'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_0', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_1', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_2', 'double precision', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
SELECT add_field('testNs' :: NAME, 'series_bool', 'boolean', FALSE, FALSE, ARRAY['TIME-VALUE'] :: field_index_type []);
\c Test1
BEGIN;
SELECT * FROM create_temp_copy_table_one_partition('copy_t',4::SMALLINT,10::SMALLINT);
\COPY copy_t FROM 'data/1.tsv';
SELECT * FROM insert_data_one_partition('copy_t',4::SMALLINT,10::SMALLINT);
COMMIT;

View File

@ -1,29 +1,18 @@
\set ON_ERROR_STOP 1
{% set databases = ['Test1', 'test2'] %}
{% import 'admin.sql.j2' as admin %}
{{admin.create_database('meta')}}
{% for db in databases -%}
{{admin.create_database(db)}}
{%- endfor %}
{{admin.load_scripts('meta')}}
{{admin.load_scripts_meta()}}
{% for db in databases -%}
{{admin.load_scripts(db)}}
{{admin.load_scripts_main(db)}}
{{admin.load_scripts_main_kafka(db)}}
{%- endfor %}
{% for db in databases -%}
{{admin.init_fdw(db, databases)}}
{%- endfor %}
\ir create_clustered_db.sql
\c Test1
\ir load_kafka.sql
\c test2
\ir load_kafka.sql
\set ECHO ALL
\c meta
{% for db in databases -%}
SELECT add_node('{{ db }}' :: NAME, '{{ db }}' :: NAME);
{%- endfor %}
SELECT add_cluster_user('postgres', NULL);
SELECT add_node('Test1' :: NAME, 'localhost');
SELECT add_node('test2' :: NAME, 'localhost');
\c Test1
\dt public.*

View File

@ -0,0 +1,4 @@
-- Shared schema loaded into every database (meta and data nodes alike).
\ir ../common/extensions.sql
\ir ../common/types.sql
\ir ../common/tables.sql
\ir ../common/cluster_setup_functions.sql

3
sql/tests/load_kafka.sql Normal file
View File

@ -0,0 +1,3 @@
-- Kafka offset tracking schema, loaded only on data nodes that run the
-- kafka tests.
\ir ../main/kafka_offset_table.sql
\ir ../main/kafka_offset_node_trigger.sql
\ir ../main/kafka_offset_functions.sql

12
sql/tests/load_main.sql Normal file
View File

@ -0,0 +1,12 @@
-- Data-node schema: triggers, partitioning helpers, table constructors and
-- the insert path. Load order matters; later scripts depend on earlier ones.
\ir ../main/cluster_user_triggers.sql
\ir ../main/node_triggers.sql
\ir ../main/namespace_triggers.sql
\ir ../main/field_triggers.sql
\ir ../main/partitioning.sql
\ir ../main/schema_info.sql
\ir ../main/names.sql
\ir ../main/tables.sql
\ir ../main/data_table_triggers.sql
\ir ../main/partition_table_triggers.sql
\ir ../main/data_table_constructors.sql
\ir ../main/insert.sql

6
sql/tests/load_meta.sql Normal file
View File

@ -0,0 +1,6 @@
-- Meta-database schema: cluster bookkeeping tables and the triggers that
-- fan changes out to the data nodes.
\ir ../meta/names.sql
\ir ../meta/cluster.sql
\ir ../meta/node_triggers.sql
\ir ../meta/cluster_user_triggers.sql
\ir ../meta/namespace_triggers.sql
\ir ../meta/field_triggers.sql

View File

@ -1,16 +1,23 @@
#!/bin/bash
UPDATE_GOLDEN=true
j2 cluster.sql.j2 | psql -h localhost -U postgres -q -X > actual/cluster.out
diff actual/cluster.out expected/cluster.out
if [ $? -eq 0 ]
then
echo "cluster matches golden file"
else
if [ $UPDATE_GOLDEN = true ]
then
echo "updating cluster golden file"
mv actual/cluster.out expected/cluster.out
else
echo "ERROR: golden file doesn't match: cluster.out"
fi
fi
# Set UPDATE=true in the environment to overwrite golden files on mismatch
# instead of reporting an error.
UPDATE=${UPDATE:-false}
# golden_test <sql script> <output file>:
# run the script against local postgres and diff the captured output
# against the expected golden file in expected/.
golden_test() {
psql -h localhost -U postgres -q -X -f $1 > actual/$2
diff actual/$2 expected/$2
if [ $? -eq 0 ]
then
echo "$2 matches golden file"
else
if [ $UPDATE = true ]
then
echo "updating $2 golden file"
mv actual/$2 expected/$2
else
echo "ERROR: golden file doesn't match: $2"
fi
fi
}
# Run each test script against its golden output.
golden_test cluster.sql cluster.out
golden_test kafka.sql kafka.out
golden_test insert.sql insert.out