Disallow hash partitioning on primary column

The new "create_hypertable" API using the dimension info inadvertently
allowed creating hypertables with hash partitioning on the primary
column. Since the rest of the machinery (policies, tiering, etc.) does
not support hash partitions on the primary column properly, we restrict it
now in the new API. The older "create_hypertable" API was already
disallowing it anyway.

Fixes #6993
This commit is contained in:
Nikhil Sontakke 2024-06-04 13:34:48 +05:30 committed by Nikhil
parent 8880139e56
commit 577b923822
6 changed files with 20 additions and 29 deletions

1
.unreleased/pr_6993 Normal file
View File

@ -0,0 +1 @@
Fixes: #6993 Disallow hash partitioning on primary column

View File

@ -1677,6 +1677,16 @@ ts_hypertable_create_general(PG_FUNCTION_ARGS)
bool if_not_exists = PG_ARGISNULL(3) ? false : PG_GETARG_BOOL(3);
bool migrate_data = PG_ARGISNULL(4) ? false : PG_GETARG_BOOL(4);
/*
* We do not support closed (hash) dimensions for the main partitioning
* column. Check that first. The behavior then becomes consistent with the
* earlier "ts_hypertable_create_time_prev" implementation.
*/
if (IS_CLOSED_DIMENSION(dim_info))
ereport(ERROR,
(errmsg("cannot partition using a closed dimension on primary column"),
errhint("Use range partitioning on the primary column.")));
/*
* Current implementation requires to provide a valid chunk sizing function
* that is being used to populate hypertable catalog information.

View File

@ -820,22 +820,3 @@ NOTICE: chunk "_hyper_17_344_chunk" is already compressed
(1 row)
ROLLBACK;
-- test segfault when compression hypertable with primary space dimension #6977
CREATE TABLE test_by_hash(id BIGINT, value float8);
SELECT create_hypertable('test_by_hash', by_hash('id', 8));
create_hypertable
-------------------
(19,t)
(1 row)
ALTER TABLE test_by_hash SET (timescaledb.compress = true);
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "test_by_hash" is set to ""
NOTICE: default order by for hypertable "test_by_hash" is set to "id DESC"
INSERT INTO test_by_hash VALUES (1, 1.0), (2, 2.0), (3, 3.0);
SELECT compress_chunk('_timescaledb_internal._hyper_19_351_chunk');
compress_chunk
-------------------------------------------
_timescaledb_internal._hyper_19_351_chunk
(1 row)

View File

@ -446,10 +446,13 @@ SELECT * FROM _timescaledb_functions.get_create_command('test_table_int');
SELECT create_hypertable('public.test_table_int', 'id', chunk_time_interval => 10, create_default_indexes=>FALSE);
(1 row)
-- Should throw an error when if_not_exists is not set
\set ON_ERROR_STOP 0
-- Should throw an error when if_not_exists is not set
SELECT create_hypertable('test_table_int', by_range('id', 10));
ERROR: table "test_table_int" is already a hypertable
-- Should error out when hash partitioning is used as the main partitioning scheme
SELECT create_hypertable('test_table_int', by_hash('device', number_partitions => 2));
ERROR: cannot partition using a closed dimension on primary column
\set ON_ERROR_STOP 1
DROP TABLE test_table_int;
-- Add dimension
@ -462,6 +465,7 @@ NOTICE: adding not-null constraint to column "id"
(1 row)
INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
-- adding a space dimension via "by_hash" should work
SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2));
add_dimension
---------------

View File

@ -307,11 +307,3 @@ BEGIN;
-- should be rolled up
SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
ROLLBACK;
-- test segfault when compression hypertable with primary space dimension #6977
CREATE TABLE test_by_hash(id BIGINT, value float8);
SELECT create_hypertable('test_by_hash', by_hash('id', 8));
ALTER TABLE test_by_hash SET (timescaledb.compress = true);
INSERT INTO test_by_hash VALUES (1, 1.0), (2, 2.0), (3, 3.0);
SELECT compress_chunk('_timescaledb_internal._hyper_19_351_chunk');

View File

@ -222,9 +222,11 @@ SELECT create_hypertable('test_table_int', by_range('id', 10));
SELECT create_hypertable('test_table_int', by_range('id', 10), if_not_exists => true);
SELECT * FROM _timescaledb_functions.get_create_command('test_table_int');
-- Should throw an error when if_not_exists is not set
\set ON_ERROR_STOP 0
-- Should throw an error when if_not_exists is not set
SELECT create_hypertable('test_table_int', by_range('id', 10));
-- Should error out when hash partitioning is used as the main partitioning scheme
SELECT create_hypertable('test_table_int', by_hash('device', number_partitions => 2));
\set ON_ERROR_STOP 1
DROP TABLE test_table_int;
@ -235,6 +237,7 @@ SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => t
INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
-- adding a space dimension via "by_hash" should work
SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2));
SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int';