diff --git a/.unreleased/pr_6993 b/.unreleased/pr_6993
new file mode 100644
index 000000000..a48483f3d
--- /dev/null
+++ b/.unreleased/pr_6993
@@ -0,0 +1 @@
+Fixes: #6993 Disallow hash partitioning on the primary column
diff --git a/src/hypertable.c b/src/hypertable.c
index fff1d1efd..25465e27b 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -1677,6 +1677,16 @@ ts_hypertable_create_general(PG_FUNCTION_ARGS)
 	bool if_not_exists = PG_ARGISNULL(3) ? false : PG_GETARG_BOOL(3);
 	bool migrate_data = PG_ARGISNULL(4) ? false : PG_GETARG_BOOL(4);
 
+	/*
+	 * Closed (hash) dimensions are not supported for the main partitioning
+	 * column, so check for that first. This keeps the behavior consistent
+	 * with the earlier "ts_hypertable_create_time_prev" implementation.
+	 */
+	if (IS_CLOSED_DIMENSION(dim_info))
+		ereport(ERROR,
+				(errmsg("cannot partition using a closed dimension on primary column"),
+				 errhint("Use range partitioning on the primary column.")));
+
 	/*
 	 * Current implementation requires to provide a valid chunk sizing function
 	 * that is being used to populate hypertable catalog information.
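For context, a minimal SQL sketch of the user-visible behavior this check enforces; the "readings" table and its columns are illustrative, and the calls mirror the regression tests updated below:

    -- Hash (closed) partitioning is rejected for the primary dimension.
    CREATE TABLE readings(id BIGINT, device INT, value FLOAT8);
    SELECT create_hypertable('readings', by_hash('id', number_partitions => 8));
    -- ERROR:  cannot partition using a closed dimension on primary column
    -- HINT:  Use range partitioning on the primary column.

    -- Range partitioning on the primary column is still accepted, and a hash
    -- dimension can still be added as a secondary (space) dimension afterwards.
    SELECT create_hypertable('readings', by_range('id', 10));
    SELECT add_dimension('readings', by_hash('device', number_partitions => 2));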
diff --git a/tsl/test/expected/compression_merge.out b/tsl/test/expected/compression_merge.out
index 8f5dfc3f0..ed873aa1d 100644
--- a/tsl/test/expected/compression_merge.out
+++ b/tsl/test/expected/compression_merge.out
@@ -820,22 +820,3 @@ NOTICE:  chunk "_hyper_17_344_chunk" is already compressed
 (1 row)
 
 ROLLBACK;
--- test segfault when compression hypertable with primary space dimension #6977
-CREATE TABLE test_by_hash(id BIGINT, value float8);
-SELECT create_hypertable('test_by_hash', by_hash('id', 8));
- create_hypertable 
--------------------
- (19,t)
-(1 row)
-
-ALTER TABLE test_by_hash SET (timescaledb.compress = true);
-WARNING:  there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
-NOTICE:  default segment by for hypertable "test_by_hash" is set to ""
-NOTICE:  default order by for hypertable "test_by_hash" is set to "id DESC"
-INSERT INTO test_by_hash VALUES (1, 1.0), (2, 2.0), (3, 3.0);
-SELECT compress_chunk('_timescaledb_internal._hyper_19_351_chunk');
-              compress_chunk               
--------------------------------------------
- _timescaledb_internal._hyper_19_351_chunk
-(1 row)
-
diff --git a/tsl/test/expected/hypertable_generalization.out b/tsl/test/expected/hypertable_generalization.out
index afd23df86..b1a2ba889 100644
--- a/tsl/test/expected/hypertable_generalization.out
+++ b/tsl/test/expected/hypertable_generalization.out
@@ -446,10 +446,13 @@ SELECT * FROM _timescaledb_functions.get_create_command('test_table_int');
  SELECT create_hypertable('public.test_table_int', 'id', chunk_time_interval => 10, create_default_indexes=>FALSE);
 (1 row)
 
--- Should throw an error when if_not_exists is not set
 \set ON_ERROR_STOP 0
+-- Should throw an error when if_not_exists is not set
 SELECT create_hypertable('test_table_int', by_range('id', 10));
 ERROR:  table "test_table_int" is already a hypertable
+-- Should error out when hash partitioning is used as the main partitioning scheme
+SELECT create_hypertable('test_table_int', by_hash('device', number_partitions => 2));
+ERROR:  cannot partition using a closed dimension on primary column
 \set ON_ERROR_STOP 1
 DROP TABLE test_table_int;
 -- Add dimension
@@ -462,6 +465,7 @@ NOTICE:  adding not-null constraint to column "id"
 (1 row)
 
 INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
+-- Adding a space dimension via "by_hash" should work
 SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2));
  add_dimension 
 ---------------
diff --git a/tsl/test/sql/compression_merge.sql b/tsl/test/sql/compression_merge.sql
index fe5c949d5..9d56fe3ba 100644
--- a/tsl/test/sql/compression_merge.sql
+++ b/tsl/test/sql/compression_merge.sql
@@ -307,11 +307,3 @@ BEGIN;
   -- should be rolled up
   SELECT hypertable_name, range_start, range_end FROM timescaledb_information.chunks WHERE hypertable_name = 'test9' ORDER BY 2;
 ROLLBACK;
-
--- test segfault when compression hypertable with primary space dimension #6977
-CREATE TABLE test_by_hash(id BIGINT, value float8);
-SELECT create_hypertable('test_by_hash', by_hash('id', 8));
-ALTER TABLE test_by_hash SET (timescaledb.compress = true);
-INSERT INTO test_by_hash VALUES (1, 1.0), (2, 2.0), (3, 3.0);
-SELECT compress_chunk('_timescaledb_internal._hyper_19_351_chunk');
-
diff --git a/tsl/test/sql/hypertable_generalization.sql b/tsl/test/sql/hypertable_generalization.sql
index 0ef91af24..05ab6f32a 100644
--- a/tsl/test/sql/hypertable_generalization.sql
+++ b/tsl/test/sql/hypertable_generalization.sql
@@ -222,9 +222,11 @@ SELECT create_hypertable('test_table_int', by_range('id', 10));
 SELECT create_hypertable('test_table_int', by_range('id', 10), if_not_exists => true);
 SELECT * FROM _timescaledb_functions.get_create_command('test_table_int');
 
--- Should throw an error when if_not_exists is not set
 \set ON_ERROR_STOP 0
+-- Should throw an error when if_not_exists is not set
 SELECT create_hypertable('test_table_int', by_range('id', 10));
+-- Should error out when hash partitioning is used as the main partitioning scheme
+SELECT create_hypertable('test_table_int', by_hash('device', number_partitions => 2));
 \set ON_ERROR_STOP 1
 
 DROP TABLE test_table_int;
@@ -235,6 +237,7 @@ SELECT create_hypertable('test_table_int', by_range('id', 10), migrate_data => t
 
 INSERT INTO test_table_int SELECT t, t%10, '01-01-2023 11:00'::TIMESTAMPTZ FROM generate_series(1, 50, 1) t;
 
+-- Adding a space dimension via "by_hash" should work
 SELECT add_dimension('test_table_int', by_hash('device', number_partitions => 2));
 
 SELECT hypertable_name, dimension_number, column_name FROM timescaledb_information.dimensions WHERE hypertable_name = 'test_table_int';