diff --git a/.github/workflows/pgspot.yaml b/.github/workflows/pgspot.yaml
index ffde182e4..9833e9927 100644
--- a/.github/workflows/pgspot.yaml
+++ b/.github/workflows/pgspot.yaml
@@ -26,7 +26,7 @@ jobs:
         --proc-without-search-path
           '_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean)'
         --proc-without-search-path
-          '_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean,amname name)'
+          '_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean,useam boolean)'
         --proc-without-search-path
           '_timescaledb_internal.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean)'
         --proc-without-search-path
diff --git a/.unreleased/pr_7411 b/.unreleased/pr_7411
new file mode 100644
index 000000000..0068b46e0
--- /dev/null
+++ b/.unreleased/pr_7411
@@ -0,0 +1 @@
+Implements: #7411 Change parameter name to enable Hypercore TAM
diff --git a/sql/maintenance_utils.sql b/sql/maintenance_utils.sql
index 931596159..430f62dd8 100644
--- a/sql/maintenance_utils.sql
+++ b/sql/maintenance_utils.sql
@@ -36,7 +36,7 @@ CREATE OR REPLACE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
     if_not_compressed BOOLEAN = true,
     recompress BOOLEAN = false,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C VOLATILE;
 
 CREATE OR REPLACE FUNCTION @extschema@.decompress_chunk(
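
For context, a minimal usage sketch of the renamed parameter (the hypertable
name "metrics" is illustrative; only the parameter itself comes from this
patch):

-- Compress chunks into the hypercore table access method.
select compress_chunk(ch, hypercore_use_access_method => true)
from show_chunks('metrics') ch;

-- Compress the old way (heap); NULL, the default, behaves the same for
-- heap chunks.
select compress_chunk(ch, hypercore_use_access_method => false)
from show_chunks('metrics') ch;
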
diff --git a/sql/policy_api.sql b/sql/policy_api.sql
index 895bdf8aa..ac352e894 100644
--- a/sql/policy_api.sql
+++ b/sql/policy_api.sql
@@ -53,7 +53,7 @@ CREATE OR REPLACE FUNCTION @extschema@.add_compression_policy(
     initial_start TIMESTAMPTZ = NULL,
     timezone TEXT = NULL,
     compress_created_before INTERVAL = NULL,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 )
 RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
@@ -95,7 +95,7 @@ CREATE OR REPLACE FUNCTION timescaledb_experimental.add_policies(
     refresh_end_offset "any" = NULL,
     compress_after "any" = NULL,
     drop_after "any" = NULL,
-    compress_using NAME = NULL)
+    hypercore_use_access_method BOOL = NULL)
 RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_policies_add'
 LANGUAGE C VOLATILE;
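
The policy entry points accept the same nullable boolean. A sketch with an
illustrative hypertable name:

-- Add a policy that compresses chunks into hypercore after 9 days.
select add_compression_policy('metrics',
                              compress_after => '9 days'::interval,
                              hypercore_use_access_method => true);
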
diff --git a/sql/policy_internal.sql b/sql/policy_internal.sql
index b5264a772..971c5896a 100644
--- a/sql/policy_internal.sql
+++ b/sql/policy_internal.sql
@@ -43,7 +43,7 @@ _timescaledb_functions.policy_compression_execute(
   verbose_log         BOOLEAN,
   recompress_enabled  BOOLEAN,
   use_creation_time   BOOLEAN,
-  amname              NAME = NULL)
+  useam               BOOLEAN = NULL)
 AS $$
 DECLARE
   htoid       REGCLASS;
@@ -109,7 +109,7 @@ BEGIN
   LOOP
     IF chunk_rec.status = 0 THEN
       BEGIN
-        PERFORM @extschema@.compress_chunk(chunk_rec.oid, compress_using => amname);
+        PERFORM @extschema@.compress_chunk(chunk_rec.oid, hypercore_use_access_method => useam);
       EXCEPTION WHEN OTHERS THEN
         GET STACKED DIAGNOSTICS
             _message = MESSAGE_TEXT,
@@ -134,7 +134,7 @@ BEGIN
           PERFORM _timescaledb_functions.recompress_chunk_segmentwise(chunk_rec.oid);
         ELSE
           PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true);
-          PERFORM @extschema@.compress_chunk(chunk_rec.oid, compress_using => amname);
+          PERFORM @extschema@.compress_chunk(chunk_rec.oid, hypercore_use_access_method => useam);
         END IF;
       EXCEPTION WHEN OTHERS THEN
         GET STACKED DIAGNOSTICS
@@ -187,7 +187,7 @@ DECLARE
   numchunks           INTEGER := 1;
   recompress_enabled  BOOL;
   use_creation_time   BOOL := FALSE;
-  compress_using      TEXT;
+  hypercore_use_access_method   BOOL;
 BEGIN
 
   -- procedures with SET clause cannot execute transaction
@@ -228,29 +228,29 @@ BEGIN
     lag_value := compress_after;
   END IF;
 
-  compress_using := jsonb_object_field_text(config, 'compress_using')::name;
+  hypercore_use_access_method := jsonb_object_field_text(config, 'hypercore_use_access_method')::bool;
 
   -- execute the properly type casts for the lag value
   CASE dimtype
     WHEN 'TIMESTAMP'::regtype, 'TIMESTAMPTZ'::regtype, 'DATE'::regtype, 'INTERVAL' ::regtype  THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::INTERVAL,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'BIGINT'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::BIGINT,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'INTEGER'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::INTEGER,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'SMALLINT'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::SMALLINT,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
   END CASE;
 END;
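
The cast chain above relies on stock PostgreSQL behavior:
jsonb_object_field_text() returns NULL for a missing key, so a policy config
that never stored the key yields a NULL boolean and compress_chunk() falls
back to its default. A standalone sanity check:

-- Key present: the text 'true' casts to boolean t.
select jsonb_object_field_text('{"hypercore_use_access_method": true}'::jsonb,
                               'hypercore_use_access_method')::bool;
-- Key absent: NULL text casts to a NULL boolean.
select jsonb_object_field_text('{"compress_after": "@ 1 day"}'::jsonb,
                               'hypercore_use_access_method')::bool;
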
diff --git a/sql/updates/latest-dev.sql b/sql/updates/latest-dev.sql
index 807dd1658..66ad63058 100644
--- a/sql/updates/latest-dev.sql
+++ b/sql/updates/latest-dev.sql
@@ -11,7 +11,7 @@ CREATE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
     if_not_compressed BOOLEAN = true,
     recompress BOOLEAN = false,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_update_placeholder' LANGUAGE C VOLATILE;
 
 DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL);
@@ -24,7 +24,7 @@ CREATE FUNCTION @extschema@.add_compression_policy(
     initial_start TIMESTAMPTZ = NULL,
     timezone TEXT = NULL,
     compress_created_before INTERVAL = NULL,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 )
 RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
@@ -39,7 +39,7 @@ CREATE FUNCTION timescaledb_experimental.add_policies(
     refresh_end_offset "any" = NULL,
     compress_after "any" = NULL,
     drop_after "any" = NULL,
-    compress_using NAME = NULL)
+    hypercore_use_access_method BOOL = NULL)
 RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
 LANGUAGE C VOLATILE;
diff --git a/sql/updates/reverse-dev.sql b/sql/updates/reverse-dev.sql
index 2a6c6dcaa..e6817621d 100644
--- a/sql/updates/reverse-dev.sql
+++ b/sql/updates/reverse-dev.sql
@@ -5,7 +5,7 @@ DROP ACCESS METHOD IF EXISTS hypercore;
 DROP FUNCTION IF EXISTS ts_hypercore_handler;
 DROP FUNCTION IF EXISTS _timescaledb_debug.is_compressed_tid;
 
-DROP FUNCTION IF EXISTS @extschema@.compress_chunk(uncompressed_chunk REGCLASS,	if_not_compressed BOOLEAN, recompress BOOLEAN, compress_using NAME);
+DROP FUNCTION IF EXISTS @extschema@.compress_chunk(uncompressed_chunk REGCLASS,	if_not_compressed BOOLEAN, recompress BOOLEAN, hypercore_use_access_method BOOL);
 
 CREATE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
@@ -13,7 +13,7 @@ CREATE FUNCTION @extschema@.compress_chunk(
     recompress BOOLEAN = false
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C STRICT VOLATILE;
 
-DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL, compress_using NAME);
+DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL, hypercore_use_access_method BOOL);
 
 CREATE FUNCTION @extschema@.add_compression_policy(
     hypertable REGCLASS,
@@ -28,7 +28,7 @@ RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
 LANGUAGE C VOLATILE;
 
-DROP FUNCTION IF EXISTS timescaledb_experimental.add_policies(relation REGCLASS, if_not_exists BOOL, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any", compress_using NAME);
+DROP FUNCTION IF EXISTS timescaledb_experimental.add_policies(relation REGCLASS, if_not_exists BOOL, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any", hypercore_use_access_method BOOL);
 
 CREATE FUNCTION timescaledb_experimental.add_policies(
     relation REGCLASS,
@@ -41,6 +41,6 @@ RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_policies_add'
 LANGUAGE C VOLATILE;
 
-DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled  BOOLEAN, use_creation_time BOOLEAN, amname NAME);
+DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled  BOOLEAN, use_creation_time BOOLEAN, useam BOOLEAN);
 
 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression(job_id INTEGER, config JSONB);
diff --git a/tsl/src/bgw_policy/compression_api.c b/tsl/src/bgw_policy/compression_api.c
index b359108d0..dbccd2ef8 100644
--- a/tsl/src/bgw_policy/compression_api.c
+++ b/tsl/src/bgw_policy/compression_api.c
@@ -6,6 +6,7 @@
 
 #include <postgres.h>
 #include <access/xact.h>
+#include <fmgr.h>
 #include <miscadmin.h>
 #include <utils/builtins.h>
 
@@ -18,6 +19,7 @@
 #include "bgw_policy/job.h"
 #include "bgw_policy/job_api.h"
 #include "bgw_policy/policies_v2.h"
+#include "compression/api.h"
 #include "errors.h"
 #include "guc.h"
 #include "hypertable.h"
@@ -158,7 +160,7 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
 								Interval *default_schedule_interval,
 								bool user_defined_schedule_interval, bool if_not_exists,
 								bool fixed_schedule, TimestampTz initial_start,
-								const char *timezone, const char *compress_using)
+								const char *timezone, UseAccessMethod use_access_method)
 {
 	NameData application_name;
 	NameData proc_name, proc_schema, check_schema, check_name, owner;
@@ -282,12 +284,6 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
 		}
 	}
 
-	if (compress_using != NULL && strcmp(compress_using, "heap") != 0 &&
-		strcmp(compress_using, TS_HYPERCORE_TAM_NAME) != 0)
-		ereport(ERROR,
-				(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-				 errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));
-
 	/* insert a new job into jobs table */
 	namestrcpy(&application_name, "Compression Policy");
 	namestrcpy(&proc_name, POLICY_COMPRESSION_PROC_NAME);
@@ -302,8 +298,10 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
 	ts_jsonb_add_int32(parse_state, POL_COMPRESSION_CONF_KEY_HYPERTABLE_ID, hypertable->fd.id);
 	validate_compress_after_type(dim, partitioning_type, compress_after_type);
 
-	if (NULL != compress_using)
-		ts_jsonb_add_str(parse_state, POL_COMPRESSION_CONF_KEY_COMPRESS_USING, compress_using);
+	if (use_access_method != USE_AM_NULL)
+		ts_jsonb_add_bool(parse_state,
+						  POL_COMPRESSION_CONF_KEY_USE_ACCESS_METHOD,
+						  use_access_method);
 
 	switch (compress_after_type)
 	{
@@ -406,7 +404,7 @@ policy_compression_add(PG_FUNCTION_ARGS)
 	text *timezone = PG_ARGISNULL(5) ? NULL : PG_GETARG_TEXT_PP(5);
 	char *valid_timezone = NULL;
 	Interval *created_before = PG_GETARG_INTERVAL_P(6);
-	Name compress_using = PG_ARGISNULL(7) ? NULL : PG_GETARG_NAME(7);
+	UseAccessMethod use_access_method = PG_ARGISNULL(7) ? USE_AM_NULL : PG_GETARG_BOOL(7);
 
 	ts_feature_flag_check(FEATURE_POLICY);
 	TS_PREVENT_FUNC_IF_READ_ONLY();
@@ -440,7 +438,7 @@ policy_compression_add(PG_FUNCTION_ARGS)
 											 fixed_schedule,
 											 initial_start,
 											 valid_timezone,
-											 compress_using ? NameStr(*compress_using) : NULL);
+											 use_access_method);
 
 	if (!TIMESTAMP_NOT_FINITE(initial_start))
 	{
diff --git a/tsl/src/bgw_policy/compression_api.h b/tsl/src/bgw_policy/compression_api.h
index b8096bea2..9d717ec71 100644
--- a/tsl/src/bgw_policy/compression_api.h
+++ b/tsl/src/bgw_policy/compression_api.h
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <postgres.h>
+#include "compression/api.h"
 #include <utils/jsonb.h>
 #include <utils/timestamp.h>
 
@@ -26,5 +27,5 @@ Datum policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_dat
 									  Interval *default_schedule_interval,
 									  bool user_defined_schedule_interval, bool if_not_exists,
 									  bool fixed_schedule, TimestampTz initial_start,
-									  const char *timezone, const char *compress_using);
+									  const char *timezone, UseAccessMethod use_access_method);
 bool policy_compression_remove_internal(Oid user_rel_oid, bool if_exists);
diff --git a/tsl/src/bgw_policy/policies_v2.c b/tsl/src/bgw_policy/policies_v2.c
index 550a5dc22..9bfbe9e3a 100644
--- a/tsl/src/bgw_policy/policies_v2.c
+++ b/tsl/src/bgw_policy/policies_v2.c
@@ -6,6 +6,7 @@
 
 #include <postgres.h>
 #include <access/xact.h>
+#include <fmgr.h>
 #include <miscadmin.h>
 #include <parser/parse_coerce.h>
 #include <utils/builtins.h>
@@ -233,7 +234,7 @@ validate_and_create_policies(policies_info all_policies, bool if_exists)
 											false,
 											DT_NOBEGIN,
 											NULL,
-											all_policies.compress->compress_using);
+											all_policies.compress->use_access_method);
 	}
 
 	if (all_policies.retention && all_policies.retention->create_policy)
@@ -310,7 +311,7 @@ policies_add(PG_FUNCTION_ARGS)
 			.create_policy = true,
 			.compress_after = PG_GETARG_DATUM(4),
 			.compress_after_type = get_fn_expr_argtype(fcinfo->flinfo, 4),
-			.compress_using = PG_ARGISNULL(6) ? NULL : NameStr(*PG_GETARG_NAME(6)),
+			.use_access_method = PG_ARGISNULL(6) ? USE_AM_NULL : PG_GETARG_BOOL(6),
 		};
 		comp = tmp;
 		all_policies.compress = &comp;
diff --git a/tsl/src/bgw_policy/policies_v2.h b/tsl/src/bgw_policy/policies_v2.h
index ee39f61b7..4a5eeb852 100644
--- a/tsl/src/bgw_policy/policies_v2.h
+++ b/tsl/src/bgw_policy/policies_v2.h
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <postgres.h>
+#include "compression/api.h"
 #include "dimension.h"
 #include <bgw_policy/compression_api.h>
 #include <bgw_policy/continuous_aggregate_api.h>
@@ -25,7 +26,7 @@
 #define POL_COMPRESSION_CONF_KEY_COMPRESS_AFTER "compress_after"
 #define POL_COMPRESSION_CONF_KEY_MAXCHUNKS_TO_COMPRESS "maxchunks_to_compress"
 #define POL_COMPRESSION_CONF_KEY_COMPRESS_CREATED_BEFORE "compress_created_before"
-#define POL_COMPRESSION_CONF_KEY_COMPRESS_USING "compress_using"
+#define POL_COMPRESSION_CONF_KEY_USE_ACCESS_METHOD "hypercore_use_access_method"
 
 #define POLICY_RECOMPRESSION_PROC_NAME "policy_recompression"
 #define POL_RECOMPRESSION_CONF_KEY_RECOMPRESS_AFTER "recompress_after"
@@ -89,7 +90,7 @@ typedef struct compression_policy
 	Datum compress_after;
 	Oid compress_after_type;
 	bool create_policy;
-	const char *compress_using;
+	UseAccessMethod use_access_method;
 } compression_policy;
 
 typedef struct retention_policy
diff --git a/tsl/src/compression/api.c b/tsl/src/compression/api.c
index 911a6a15f..029f2ade7 100644
--- a/tsl/src/compression/api.c
+++ b/tsl/src/compression/api.c
@@ -779,31 +779,6 @@ set_access_method(Oid relid, const char *amname)
 	return relid;
 }
 
-enum UseAccessMethod
-{
-	USE_AM_FALSE,
-	USE_AM_TRUE,
-	USE_AM_NULL,
-};
-
-static enum UseAccessMethod
-parse_use_access_method(const char *compress_using)
-{
-	if (compress_using == NULL)
-		return USE_AM_NULL;
-
-	if (strcmp(compress_using, "heap") == 0)
-		return USE_AM_FALSE;
-	else if (strcmp(compress_using, TS_HYPERCORE_TAM_NAME) == 0)
-		return USE_AM_TRUE;
-
-	ereport(ERROR,
-			(errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-			 errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));
-
-	pg_unreachable();
-}
-
 /*
  * When using compress_chunk() with hypercore, there are three cases to
  * handle:
@@ -815,7 +790,7 @@ parse_use_access_method(const char *compress_using)
  * 3. Recompress a hypercore
  */
 static Oid
-compress_hypercore(Chunk *chunk, bool rel_is_hypercore, enum UseAccessMethod useam,
+compress_hypercore(Chunk *chunk, bool rel_is_hypercore, UseAccessMethod useam,
 				   bool if_not_compressed, bool recompress)
 {
 	Oid relid = InvalidOid;
@@ -869,14 +844,13 @@ tsl_compress_chunk(PG_FUNCTION_ARGS)
 	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
 	bool if_not_compressed = PG_ARGISNULL(1) ? true : PG_GETARG_BOOL(1);
 	bool recompress = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);
-	const char *compress_using = PG_ARGISNULL(3) ? NULL : NameStr(*PG_GETARG_NAME(3));
+	UseAccessMethod useam = PG_ARGISNULL(3) ? USE_AM_NULL : PG_GETARG_BOOL(3);
 
 	ts_feature_flag_check(FEATURE_HYPERTABLE_COMPRESSION);
 
 	TS_PREVENT_FUNC_IF_READ_ONLY();
 	Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
 	bool rel_is_hypercore = get_table_am_oid(TS_HYPERCORE_TAM_NAME, false) == chunk->amoid;
-	enum UseAccessMethod useam = parse_use_access_method(compress_using);
 
 	if (rel_is_hypercore || useam == USE_AM_TRUE)
 		uncompressed_chunk_id =
diff --git a/tsl/src/compression/api.h b/tsl/src/compression/api.h
index 44c8660da..ab2b99a02 100644
--- a/tsl/src/compression/api.h
+++ b/tsl/src/compression/api.h
@@ -11,6 +11,21 @@
 
 #include "chunk.h"
 
+/*
+ * Tri-state deciding whether the access method should be used for
+ * compression, or whether the choice is left undefined. Used for parameter
+ * values to PostgreSQL functions, where it acts as a nullable boolean.
+ *
+ * The values USE_AM_FALSE = 0 and USE_AM_TRUE = 1 are explicit since the
+ * enum is cast to a boolean value in the code.
+ */
+typedef enum UseAccessMethod
+{
+	USE_AM_FALSE = 0,
+	USE_AM_TRUE = 1,
+	USE_AM_NULL = 2,
+} UseAccessMethod;
+
 extern Datum tsl_create_compressed_chunk(PG_FUNCTION_ARGS);
 extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
 extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
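
The three states are observable at the catalog level through the access
method of the chunk relation: USE_AM_TRUE migrates the chunk to the hypercore
table access method, while USE_AM_FALSE and USE_AM_NULL leave an uncompressed
heap chunk on heap (an existing hypercore is recompressed in place). A sketch
along the lines of the queries in the tests below; the chunk name is
illustrative:

select cl.oid::regclass as chunk, am.amname
from pg_class cl
join pg_am am on (am.oid = cl.relam)
where cl.oid = '_timescaledb_internal._hyper_1_1_chunk'::regclass;
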
diff --git a/tsl/test/expected/hypercore_copy.out b/tsl/test/expected/hypercore_copy.out
index bc812adfe..39ee9865a 100644
--- a/tsl/test/expected/hypercore_copy.out
+++ b/tsl/test/expected/hypercore_copy.out
@@ -112,7 +112,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
   left join pg_inherits inh on (inh.inhrelid = cl.oid);
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/expected/hypercore_create.out b/tsl/test/expected/hypercore_create.out
index 9e9dd944c..4e56b1486 100644
--- a/tsl/test/expected/hypercore_create.out
+++ b/tsl/test/expected/hypercore_create.out
@@ -230,7 +230,7 @@ select * from amrels where rel=:'chunk'::regclass;
 
 -- Try same thing with compress_chunk()
 alter table :chunk set access method heap;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_4_13_chunk
@@ -250,7 +250,7 @@ select relname, amname
 alter table :chunk set access method hypercore;
 -- Test recompression after changing compression settings
 alter table test3 set (timescaledb.compress_segmentby='device');
-select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);
+select compress_chunk(:'chunk', hypercore_use_access_method => true, recompress => true);
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_4_13_chunk
@@ -398,7 +398,7 @@ from compressed_rel_size_stats;
 
 -- Create hypercores again and check that compression size stats are
 -- updated showing compressed data
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test2') ch;
              compress_chunk              
 -----------------------------------------
@@ -410,7 +410,7 @@ from show_chunks('test2') ch;
  _timescaledb_internal._hyper_1_11_chunk
 (6 rows)
 
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test3') ch;
              compress_chunk              
 -----------------------------------------
@@ -457,8 +457,8 @@ from show_chunks('test2') ch;
  _timescaledb_internal._hyper_1_11_chunk
 (6 rows)
 
--- Using compress_using => NULL should be the same as "heap"
-select compress_chunk(decompress_chunk(ch), compress_using => NULL)
+-- Using hypercore_use_access_method => NULL should be the same as "heap"
+select compress_chunk(decompress_chunk(ch), hypercore_use_access_method => NULL)
 from show_chunks('test3') ch;
              compress_chunk              
 -----------------------------------------
@@ -515,11 +515,11 @@ set client_min_messages=DEBUG1;
 with chunks as (
 	 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 LOG:  statement: with chunks as (
 	 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 DEBUG:  migrating table "_hyper_1_3_chunk" to hypercore
 DEBUG:  building index "_hyper_1_3_chunk_test2_device_id_created_at_idx" on table "_hyper_1_3_chunk" serially
 DEBUG:  index "_hyper_1_3_chunk_test2_device_id_created_at_idx" can safely use deduplication
@@ -643,18 +643,13 @@ commit;
 -- Trying to convert a hypercore to a hypercore should be an error
 -- if if_not_compressed is false and the hypercore is fully
 -- compressed.
-select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
+select compress_chunk(ch, hypercore_use_access_method => true, if_not_compressed => false)
 from show_chunks('test2') ch;
 ERROR:  chunk "_hyper_1_1_chunk" is already compressed
--- Compressing using something different than "hypercore" or "heap"
--- should not be allowed
-select compress_chunk(ch, compress_using => 'non_existing_am')
-from show_chunks('test2') ch;
-ERROR:  can only compress using "heap" or "hypercore"
 \set ON_ERROR_STOP 1
--- Compressing from hypercore with compress_using=>heap should lead
--- to recompression of hypercore with a notice.
-select compress_chunk(ch, compress_using => 'heap')
+-- Compressing a hypercore without using the access method should
+-- lead to recompression of the hypercore with a notice.
+select compress_chunk(ch, hypercore_use_access_method => false)
 from show_chunks('test2') ch;
 NOTICE:  cannot compress hypercore "_hyper_1_1_chunk" using heap, recompressing instead
 NOTICE:  chunk "_hyper_1_1_chunk" is already compressed
@@ -678,8 +673,8 @@ NOTICE:  chunk "_hyper_1_11_chunk" is already compressed
  _timescaledb_internal._hyper_1_11_chunk
 (6 rows)
 
--- Compressing a hypercore without specifying compress_using should
--- lead to recompression. First check that :chunk is a hypercore.
+-- Compressing a hypercore should by default lead to
+-- recompression. First check that :chunk is a hypercore.
 select ch as chunk from show_chunks('test2') ch limit 1 \gset
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and rel = :'chunk'::regclass;
@@ -707,8 +702,8 @@ select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
  (2147484675,14)
 (1 row)
 
--- Compressing a hypercore with compress_using=>hypercore should
--- also lead to recompression
+-- Compressing a hypercore using the access method should also lead to
+-- recompression
 insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
  ctid  
@@ -716,7 +711,7 @@ select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
  (0,3)
 (1 row)
 
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -748,13 +743,13 @@ select decompress_chunk(rel) ch
 -- cleaned up between two or more commands in same transaction.
 select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
 start transaction;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
 (1 row)
 
-select compress_chunk(:'chunk2', compress_using => 'hypercore');
+select compress_chunk(:'chunk2', hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_3_chunk
@@ -770,9 +765,9 @@ order by rel;
  _timescaledb_internal._hyper_1_3_chunk | hypercore | test2     |                    2016 |                       10 |                         10
 (2 rows)
 
--- Test that we can compress old way using compress_using=>heap
+-- Test that we can compress the old way by not using the access method
 select ch as chunk3 from show_chunks('test2') ch offset 2 limit 1 \gset
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_5_chunk
@@ -787,13 +782,13 @@ order by rel;
 (1 row)
 
 \set ON_ERROR_STOP 0
--- If we call compress_chunk with compress_using=>'heap' on a
+-- If we call compress_chunk using the table access method on a
 -- heap-compressed chunk, it should lead to an error if
 -- if_not_compressed is false. The commands below are all equivalent
 -- in this case.
-select compress_chunk(:'chunk3', compress_using => 'heap', if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false, if_not_compressed=>false);
 ERROR:  chunk "_hyper_1_5_chunk" is already compressed
-select compress_chunk(:'chunk3', compress_using => NULL, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL, if_not_compressed=>false);
 ERROR:  chunk "_hyper_1_5_chunk" is already compressed
 select compress_chunk(:'chunk3', if_not_compressed=>false);
 ERROR:  chunk "_hyper_1_5_chunk" is already compressed
@@ -801,14 +796,14 @@ ERROR:  chunk "_hyper_1_5_chunk" is already compressed
 -- For a heap-compressed chunk, these should all be equivalent and
 -- should not do anything when there is nothing to recompress. A
 -- notice should be raised instead of an error.
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 NOTICE:  chunk "_hyper_1_5_chunk" is already compressed
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_5_chunk
 (1 row)
 
-select compress_chunk(:'chunk3', compress_using => NULL);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL);
 NOTICE:  chunk "_hyper_1_5_chunk" is already compressed
              compress_chunk             
 ----------------------------------------
@@ -832,7 +827,7 @@ select * from only :chunk3;
  Wed Jun 15 16:00:00 2022 PDT |           8 |         8 |    8 |        8
 (1 row)
 
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_5_chunk
@@ -892,7 +887,7 @@ insert into rides values
 (6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
 (356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
 -- Check that it is possible to compress
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('rides') ch;
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_8_44_chunk
diff --git a/tsl/test/expected/hypercore_cursor.out b/tsl/test/expected/hypercore_cursor.out
index e0609d8f6..ca4249fa3 100644
--- a/tsl/test/expected/hypercore_cursor.out
+++ b/tsl/test/expected/hypercore_cursor.out
@@ -139,7 +139,7 @@ begin
 end;
 $$
 language plpgsql;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -236,7 +236,7 @@ NOTICE:  adding not-null constraint to column "time"
 
 alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
 insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('backward_cursor') ch;
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_3_13_chunk
diff --git a/tsl/test/expected/hypercore_ddl.out b/tsl/test/expected/hypercore_ddl.out
index 9e25efb80..7626f040e 100644
--- a/tsl/test/expected/hypercore_ddl.out
+++ b/tsl/test/expected/hypercore_ddl.out
@@ -31,7 +31,7 @@ alter table readings
 insert into readings (time, location, device, temp, humidity, jdata)
 select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
 from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
-select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/expected/hypercore_index_btree.out b/tsl/test/expected/hypercore_index_btree.out
index 892531327..805b4d219 100644
--- a/tsl/test/expected/hypercore_index_btree.out
+++ b/tsl/test/expected/hypercore_index_btree.out
@@ -359,7 +359,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
  Wed Jun 01 17:00:00 2022 PDT |           1 |    2
 (1 row)
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -992,7 +992,7 @@ select * from only_nulls_null;
 (4 rows)
 
 -- Convert all chunks to hypercore and run same queries
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('nullvalues') ch;
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_5_15_chunk
diff --git a/tsl/test/expected/hypercore_index_hash.out b/tsl/test/expected/hypercore_index_hash.out
index 60a1cea15..a3a82d6b8 100644
--- a/tsl/test/expected/hypercore_index_hash.out
+++ b/tsl/test/expected/hypercore_index_hash.out
@@ -222,7 +222,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
  Wed Jun 01 17:00:00 2022 PDT |           1 |    2
 (1 row)
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/expected/hypercore_insert.out b/tsl/test/expected/hypercore_insert.out
index c6c83ecf2..87a2c660b 100644
--- a/tsl/test/expected/hypercore_insert.out
+++ b/tsl/test/expected/hypercore_insert.out
@@ -107,7 +107,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
  limit 1 offset 1 \gset
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/expected/hypercore_join.out b/tsl/test/expected/hypercore_join.out
index 9cf901eac..d901dc4c9 100644
--- a/tsl/test/expected/hypercore_join.out
+++ b/tsl/test/expected/hypercore_join.out
@@ -141,7 +141,7 @@ alter table the_hypercore set (
       timescaledb.compress_segmentby = '',
       timescaledb.compress_orderby = 'updated_at desc'
 );
-select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('the_hypercore'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_3_7_chunk
diff --git a/tsl/test/expected/hypercore_merge.out b/tsl/test/expected/hypercore_merge.out
index 954022711..c45432f2b 100644
--- a/tsl/test/expected/hypercore_merge.out
+++ b/tsl/test/expected/hypercore_merge.out
@@ -111,7 +111,7 @@ set enable_mergejoin to false;
 set enable_hashjoin to false;
 -- There are already tests to merge into uncompressed tables, so just
 -- compress all chunks using Hypercore.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -220,7 +220,7 @@ humidity    | 1
 \x off
 -- Recompress all and try to insert the same rows again. This there
 -- should be no rows inserted.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 NOTICE:  chunk "_hyper_1_2_chunk" is already compressed
 NOTICE:  chunk "_hyper_1_3_chunk" is already compressed
 NOTICE:  chunk "_hyper_1_4_chunk" is already compressed
diff --git a/tsl/test/expected/hypercore_parallel.out b/tsl/test/expected/hypercore_parallel.out
index 91a85096d..7219ebcd3 100644
--- a/tsl/test/expected/hypercore_parallel.out
+++ b/tsl/test/expected/hypercore_parallel.out
@@ -156,7 +156,7 @@ select device_id, count(*) into orig_chunk from :chunk1 group by device_id;
 -----------------------
 -- Enable hypercore --
 -----------------------
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/expected/hypercore_policy.out b/tsl/test/expected/hypercore_policy.out
index 283e23cb6..53c9e301b 100644
--- a/tsl/test/expected/hypercore_policy.out
+++ b/tsl/test/expected/hypercore_policy.out
@@ -27,16 +27,9 @@ from timescaledb_information.chunks ch
 join pg_class cl on (format('%I.%I', ch.chunk_schema, ch.chunk_name)::regclass = cl.oid)
 join pg_am am on (am.oid = cl.relam);
 set timezone to pst8pdt;
-\set ON_ERROR_STOP 0
--- Test invalid compress_using option
-select add_compression_policy('readings',
-                              compress_after => '1000 years'::interval,
-                              compress_using => 'foo');
-ERROR:  can only compress using "heap" or "hypercore"
-\set ON_ERROR_STOP 1
--- Check that compress_using is not part of the policy if not set. Use
--- a large compress_after to ensure the policy doesn't do anything at
--- this time.
+-- Check that hypercore_use_access_method is not part of the policy if
+-- not set. Use a large compress_after to ensure the policy doesn't do
+-- anything at this time.
 select add_compression_policy('readings', compress_after => '1000 years'::interval)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
@@ -51,10 +44,11 @@ select remove_compression_policy('readings');
  t
 (1 row)
 
--- Check that compress_using is not part of the policy if set to NULL
+-- Check that hypercore_use_access_method is not part of the policy if
+-- set to NULL
 select add_compression_policy('readings',
                               compress_after => '1000 years'::interval,
-                              compress_using => NULL)
+                              hypercore_use_access_method => NULL)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
                          config                         
@@ -77,15 +71,16 @@ order by chunk;
  readings   | _hyper_1_1_chunk | heap   | f
 (1 row)
 
--- Check that compress_using is part of the policy config when non-NULL
+-- Check that hypercore_use_access_method is part of the policy config
+-- when enabled.
 select add_compression_policy('readings',
                               compress_after => '1 day'::interval,
-                              compress_using => 'hypercore')
+                              hypercore_use_access_method => true)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
-                                      config                                      
-----------------------------------------------------------------------------------
- {"hypertable_id": 1, "compress_after": "@ 1 day", "compress_using": "hypercore"}
+                                         config                                         
+----------------------------------------------------------------------------------------
+ {"hypertable_id": 1, "compress_after": "@ 1 day", "hypercore_use_access_method": true}
 (1 row)
 
 -- Make sure the policy runs
@@ -120,7 +115,7 @@ where time = '2022-06-01 10:14' and device = 1;
 -- recompress hypercores.
 select add_compression_policy('readings',
                               compress_after => '1 day'::interval,
-                              compress_using => 'heap')
+                              hypercore_use_access_method => false)
 as compression_job \gset
 -- Run the policy job again to recompress
 call run_job(:'compression_job');
@@ -150,7 +145,7 @@ select * from readings where time = '2022-06-01 10:14' and device = 1;
 (1 row)
 
 -- Test recompression again with a policy that doesn't specify
--- compress_using
+-- hypercore_use_access_method
 select remove_compression_policy('readings');
  remove_compression_policy 
 ---------------------------
@@ -203,7 +198,7 @@ select timescaledb_experimental.add_policies('daily',
        refresh_start_offset => '8 days'::interval,
        refresh_end_offset => '1 day'::interval,
        compress_after => '9 days'::interval,
-       compress_using => 'hypercore');
+       hypercore_use_access_method => true);
  add_policies 
 --------------
  t
diff --git a/tsl/test/expected/hypercore_types.out b/tsl/test/expected/hypercore_types.out
index 8f812eb8e..4de32b945 100644
--- a/tsl/test/expected/hypercore_types.out
+++ b/tsl/test/expected/hypercore_types.out
@@ -97,7 +97,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -169,7 +169,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_3_7_chunk
@@ -240,7 +240,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_5_13_chunk
@@ -313,7 +313,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_7_19_chunk
@@ -386,7 +386,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_9_25_chunk
@@ -460,7 +460,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
               compress_chunk              
 ------------------------------------------
  _timescaledb_internal._hyper_11_31_chunk
@@ -534,7 +534,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
               compress_chunk              
 ------------------------------------------
  _timescaledb_internal._hyper_13_37_chunk
diff --git a/tsl/test/expected/hypercore_update.out b/tsl/test/expected/hypercore_update.out
index 1b3510de1..3426606c1 100644
--- a/tsl/test/expected/hypercore_update.out
+++ b/tsl/test/expected/hypercore_update.out
@@ -108,7 +108,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
  limit 1 offset 1 \gset
 -- TODO(#1068) Parallel sequence scan does not work
 set max_parallel_workers_per_gather to 0;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -291,7 +291,7 @@ select * from :hypertable where humidity = 200.0 order by metric_id;
 commit;
 -- Test update of a segment-by column. The selection is to make sure
 -- that we have a mix of compressed and uncompressed tuples.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -343,7 +343,7 @@ order by metric_id;
 (11 rows)
 
 -- Compress all chunks again before testing RETURNING
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
@@ -395,7 +395,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;
 
 -- Test update of a segment-by column directly on the chunk. This
 -- should fail for compressed rows even for segment-by columns.
-select compress_chunk(:'chunk1', compress_using => 'hypercore');
+select compress_chunk(:'chunk1', hypercore_use_access_method => true);
              compress_chunk             
 ----------------------------------------
  _timescaledb_internal._hyper_1_1_chunk
diff --git a/tsl/test/shared/expected/extension.out b/tsl/test/shared/expected/extension.out
index c76357849..0e70e20d9 100644
--- a/tsl/test/shared/expected/extension.out
+++ b/tsl/test/shared/expected/extension.out
@@ -98,7 +98,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  _timescaledb_functions.partialize_agg(anyelement)
  _timescaledb_functions.policy_compression(integer,jsonb)
  _timescaledb_functions.policy_compression_check(jsonb)
- _timescaledb_functions.policy_compression_execute(integer,integer,anyelement,integer,boolean,boolean,boolean,name)
+ _timescaledb_functions.policy_compression_execute(integer,integer,anyelement,integer,boolean,boolean,boolean,boolean)
  _timescaledb_functions.policy_job_stat_history_retention(integer,jsonb)
  _timescaledb_functions.policy_job_stat_history_retention_check(jsonb)
  _timescaledb_functions.policy_recompression(integer,jsonb)
@@ -210,7 +210,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  ts_hypercore_handler(internal)
  ts_hypercore_proxy_handler(internal)
  ts_now_mock()
- add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,name)
+ add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
  add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text)
  add_dimension(regclass,_timescaledb_internal.dimension_info,boolean)
  add_dimension(regclass,name,integer,anyelement,regproc,boolean)
@@ -225,7 +225,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  cagg_migrate(regclass,boolean,boolean)
  chunk_compression_stats(regclass)
  chunks_detailed_size(regclass)
- compress_chunk(regclass,boolean,boolean,name)
+ compress_chunk(regclass,boolean,boolean,boolean)
  create_hypertable(regclass,_timescaledb_internal.dimension_info,boolean,boolean,boolean)
  create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc)
  decompress_chunk(regclass,boolean)
@@ -291,7 +291,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
  time_bucket_gapfill(smallint,smallint,smallint,smallint)
  timescaledb_post_restore()
  timescaledb_pre_restore()
- timescaledb_experimental.add_policies(regclass,boolean,"any","any","any","any",name)
+ timescaledb_experimental.add_policies(regclass,boolean,"any","any","any","any",boolean)
  timescaledb_experimental.alter_policies(regclass,boolean,"any","any","any","any")
  timescaledb_experimental.remove_all_policies(regclass,boolean)
  timescaledb_experimental.remove_policies(regclass,boolean,text[])
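
The listing above is generated from the catalog; the same check can be run
interactively to confirm that the trailing argument is now a boolean:

select p.oid::regprocedure
from pg_proc p
where p.proname = 'compress_chunk';
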
diff --git a/tsl/test/sql/hypercore_copy.sql b/tsl/test/sql/hypercore_copy.sql
index 2a72fffbc..f051697ce 100644
--- a/tsl/test/sql/hypercore_copy.sql
+++ b/tsl/test/sql/hypercore_copy.sql
@@ -12,7 +12,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
 
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 select location_id, count(*) into comp from :hypertable GROUP BY location_id;
 select * from orig join comp using (location_id) where orig.count != comp.count;
 drop table orig, comp;
diff --git a/tsl/test/sql/hypercore_create.sql b/tsl/test/sql/hypercore_create.sql
index 35f7a80ab..9b0bd02ae 100644
--- a/tsl/test/sql/hypercore_create.sql
+++ b/tsl/test/sql/hypercore_create.sql
@@ -125,7 +125,7 @@ select * from amrels where rel=:'chunk'::regclass;
 
 -- Try same thing with compress_chunk()
 alter table :chunk set access method heap;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 
 -- Check that chunk is using hypercore
 select relname, amname
@@ -138,7 +138,7 @@ alter table :chunk set access method hypercore;
 
 -- Test recompression after changing compression settings
 alter table test3 set (timescaledb.compress_segmentby='device');
-select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);
+select compress_chunk(:'chunk', hypercore_use_access_method => true, recompress => true);
 
 -- Create a second chunk
 insert into test3 values ('2022-08-01', 1, 1.0);
@@ -219,9 +219,9 @@ from compressed_rel_size_stats;
 
 -- Create hypercores again and check that compression size stats are
 -- updated showing compressed data
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test2') ch;
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test3') ch;
 
 -- Save the stats for later comparison. Exclude the amname column
@@ -241,8 +241,8 @@ select * from compressed_rel_size_stats order by rel;
 -- compression size stats
 select compress_chunk(decompress_chunk(ch))
 from show_chunks('test2') ch;
--- Using compress_using => NULL should be the same as "heap"
-select compress_chunk(decompress_chunk(ch), compress_using => NULL)
+-- Using hypercore_use_access_method => NULL should be the same as "heap"
+select compress_chunk(decompress_chunk(ch), hypercore_use_access_method => NULL)
 from show_chunks('test3') ch;
 
 select * from compressed_rel_size_stats order by rel;
@@ -276,7 +276,7 @@ set client_min_messages=DEBUG1;
 with chunks as (
 	 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 
 -- Test direct migration of the remaining chunk via SET ACCESS
 -- METHOD. Add some uncompressed data to test migration with partially
@@ -316,23 +316,18 @@ commit;
 -- Trying to convert a hypercore to a hypercore should be an error
 -- if if_not_compressed is false and the hypercore is fully
 -- compressed.
-select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
-from show_chunks('test2') ch;
-
--- Compressing using something different than "hypercore" or "heap"
--- should not be allowed
-select compress_chunk(ch, compress_using => 'non_existing_am')
+select compress_chunk(ch, hypercore_use_access_method => true, if_not_compressed => false)
 from show_chunks('test2') ch;
 
 \set ON_ERROR_STOP 1
 
--- Compressing from hypercore with compress_using=>heap should lead
--- to recompression of hypercore with a notice.
-select compress_chunk(ch, compress_using => 'heap')
+-- Compressing a hypercore without using the access method should
+-- lead to recompression of the hypercore with a notice.
+select compress_chunk(ch, hypercore_use_access_method => false)
 from show_chunks('test2') ch;
 
--- Compressing a hypercore without specifying compress_using should
--- lead to recompression. First check that :chunk is a hypercore.
+-- Compressing a hypercore should by default lead to
+-- recompression. First check that :chunk is a hypercore.
 select ch as chunk from show_chunks('test2') ch limit 1 \gset
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and rel = :'chunk'::regclass;
@@ -340,11 +335,11 @@ insert into :chunk values ('2022-06-01 10:01', 6, 6, 6.0, 6.0);
 select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
 select compress_chunk(:'chunk');
 select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
--- Compressing a hypercore with compress_using=>hypercore should
--- also lead to recompression
+-- Compressing a hypercore using the access method should also lead to
+-- recompression
 insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 
 -- Convert all hypercores back to heap
@@ -358,37 +353,37 @@ select decompress_chunk(rel) ch
 -- cleaned up between two or more commands in same transaction.
 select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
 start transaction;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
-select compress_chunk(:'chunk2', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
+select compress_chunk(:'chunk2', hypercore_use_access_method => true);
 commit;
 
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and relparent = 'test2'::regclass
 order by rel;
 
--- Test that we can compress old way using compress_using=>heap
+-- Test that we can compress the old way by not using the access method
 select ch as chunk3 from show_chunks('test2') ch offset 2 limit 1 \gset
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 
 select * from compressed_rel_size_stats
 where amname = 'heap' and relparent = 'test2'::regclass
 order by rel;
 
 \set ON_ERROR_STOP 0
--- If we call compress_chunk with compress_using=>'heap' on a
+-- If we call compress_chunk without using the access method on a
 -- heap-compressed chunk, it should lead to an error if
 -- if_not_compressed is false. The commands below are all equivalent
 -- in this case.
-select compress_chunk(:'chunk3', compress_using => 'heap', if_not_compressed=>false);
-select compress_chunk(:'chunk3', compress_using => NULL, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL, if_not_compressed=>false);
 select compress_chunk(:'chunk3', if_not_compressed=>false);
 \set ON_ERROR_STOP 1
 
 -- For a heap-compressed chunk, these should all be equivalent and
 -- should not do anything when there is nothing to recompress. A
 -- notice should be raised instead of an error.
-select compress_chunk(:'chunk3', compress_using => 'heap');
-select compress_chunk(:'chunk3', compress_using => NULL);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL);
 select compress_chunk(:'chunk3');
 
 -- Insert new data to create a "partially compressed" chunk. Note that
@@ -396,7 +391,7 @@ select compress_chunk(:'chunk3');
 -- doesn't properly update the partially compressed state.
 insert into test2 values ('2022-06-15 16:00', 8, 8, 8.0, 8.0);
 select * from only :chunk3;
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 -- The tuple should no longer be in the non-compressed chunk
 select * from only :chunk3;
 -- But the tuple is returned in a query without ONLY
@@ -439,7 +434,7 @@ insert into rides values
 (6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
 (356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
 -- Check that it is possible to compress
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('rides') ch;
 select rel, amname from compressed_rel_size_stats
 where relparent::regclass = 'rides'::regclass;
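The hunks above exercise all three states of the new boolean argument: true compresses into the Hypercore table access method, false compresses the classic way into a heap-backed chunk, and NULL (or omitting the argument) falls back to the default, which for an already-compressed chunk simply recompresses it. A minimal sketch of the three calls side by side, assuming a hypothetical hypertable named metrics with compression enabled:

    -- metrics is a hypothetical hypertable; any compressible hypertable works.
    select compress_chunk(ch, hypercore_use_access_method => true)   -- use the Hypercore TAM
    from show_chunks('metrics') ch;
    select compress_chunk(ch, hypercore_use_access_method => false)  -- classic heap-backed compression
    from show_chunks('metrics') ch;
    select compress_chunk(ch, hypercore_use_access_method => NULL)   -- default behavior
    from show_chunks('metrics') ch;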
 
diff --git a/tsl/test/sql/hypercore_cursor.sql b/tsl/test/sql/hypercore_cursor.sql
index 4025b77cf..a1a493bf9 100644
--- a/tsl/test/sql/hypercore_cursor.sql
+++ b/tsl/test/sql/hypercore_cursor.sql
@@ -40,7 +40,7 @@ end;
 $$
 language plpgsql;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Compare executing the function with a cursor with a query fetching
 -- the same data directly from the hypertable.
@@ -107,7 +107,7 @@ create table backward_cursor (time timestamptz, location_id bigint, temp float8)
 select create_hypertable('backward_cursor', 'time', create_default_indexes=>false);
 alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
 insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('backward_cursor') ch;
 insert into backward_cursor values ('2024-01-01 05:00', 3, 5.0), ('2024-01-01 06:00', 3, 6.0);
 
 begin;
diff --git a/tsl/test/sql/hypercore_ddl.sql b/tsl/test/sql/hypercore_ddl.sql
index 63f0556d3..14d622797 100644
--- a/tsl/test/sql/hypercore_ddl.sql
+++ b/tsl/test/sql/hypercore_ddl.sql
@@ -46,7 +46,7 @@ insert into readings (time, location, device, temp, humidity, jdata)
 select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
 from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 
-select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
 
 -- Insert some extra data to get some non-compressed data as well.
 insert into readings (time, location, device, temp, humidity, jdata)
diff --git a/tsl/test/sql/hypercore_index_btree.sql b/tsl/test/sql/hypercore_index_btree.sql
index daf79eafc..bb32489df 100644
--- a/tsl/test/sql/hypercore_index_btree.sql
+++ b/tsl/test/sql/hypercore_index_btree.sql
@@ -135,7 +135,7 @@ select explain_anonymize(format($$
 $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 vacuum analyze :hypertable;
 
@@ -378,7 +378,7 @@ select * from nullvalues where only_nulls is null;
 select * from only_nulls_null;
 
 -- Convert all chunks to hypercore and run same queries
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('nullvalues') ch;
 
 select c.relname, a.amname FROM pg_class c
 join pg_am a on (c.relam = a.oid)
diff --git a/tsl/test/sql/hypercore_index_hash.sql b/tsl/test/sql/hypercore_index_hash.sql
index 5b35f28a8..d095af2d9 100644
--- a/tsl/test/sql/hypercore_index_hash.sql
+++ b/tsl/test/sql/hypercore_index_hash.sql
@@ -67,7 +67,7 @@ select explain_anonymize(format($$
 $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 vacuum analyze :hypertable;
 
diff --git a/tsl/test/sql/hypercore_insert.sql b/tsl/test/sql/hypercore_insert.sql
index 93ef61af1..a8ba60cf1 100644
--- a/tsl/test/sql/hypercore_insert.sql
+++ b/tsl/test/sql/hypercore_insert.sql
@@ -6,7 +6,7 @@
 
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 select location_id, count(*) into comp from :hypertable GROUP BY location_id;
 select * from orig join comp using (location_id) where orig.count != comp.count;
 drop table orig, comp;
diff --git a/tsl/test/sql/hypercore_join.sql b/tsl/test/sql/hypercore_join.sql
index 0877e5420..33338c1e0 100644
--- a/tsl/test/sql/hypercore_join.sql
+++ b/tsl/test/sql/hypercore_join.sql
@@ -43,7 +43,7 @@ alter table the_hypercore set (
       timescaledb.compress_segmentby = '',
       timescaledb.compress_orderby = 'updated_at desc'
 );
-select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('the_hypercore'), hypercore_use_access_method => true);
 
 vacuum analyze the_hypercore;
 
diff --git a/tsl/test/sql/hypercore_merge.sql b/tsl/test/sql/hypercore_merge.sql
index ffada9a5e..d6d0be0cb 100644
--- a/tsl/test/sql/hypercore_merge.sql
+++ b/tsl/test/sql/hypercore_merge.sql
@@ -12,7 +12,7 @@ set enable_hashjoin to false;
 
 -- There are already tests to merge into uncompressed tables, so just
 -- compress all chunks using Hypercore.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 create table source_data (
        created_at timestamptz not null,
@@ -62,7 +62,7 @@ select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);
 
 -- Recompress all and try to insert the same rows again. This time
 -- there should be no rows inserted.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 \x on
 select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);
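The check above distinguishes compressed from uncompressed tuples with _timescaledb_debug.is_compressed_tid(ctid): rows inserted or merged after compression land in the uncompressed region of the chunk until the next recompression. A minimal sketch of that idiom, again assuming a hypothetical hypertable named metrics:

    -- Count tuples on each side of the compression boundary; a nonzero
    -- count for compressed = false means the chunk is partially compressed.
    select _timescaledb_debug.is_compressed_tid(ctid) as compressed, count(*)
    from metrics
    group by 1;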
diff --git a/tsl/test/sql/hypercore_parallel.sql b/tsl/test/sql/hypercore_parallel.sql
index 2137c1e8f..1125605c5 100644
--- a/tsl/test/sql/hypercore_parallel.sql
+++ b/tsl/test/sql/hypercore_parallel.sql
@@ -29,7 +29,7 @@ select device_id, count(*) into orig_chunk from :chunk1 group by device_id;
 -----------------------
 -- Enable hypercore --
 -----------------------
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Show count without parallel plan and without ColumnarScan
 set timescaledb.enable_columnarscan=false;
diff --git a/tsl/test/sql/hypercore_policy.sql b/tsl/test/sql/hypercore_policy.sql
index 2980ebb82..0e1038a9f 100644
--- a/tsl/test/sql/hypercore_policy.sql
+++ b/tsl/test/sql/hypercore_policy.sql
@@ -23,25 +23,19 @@ join pg_am am on (am.oid = cl.relam);
 
 set timezone to pst8pdt;
 
-\set ON_ERROR_STOP 0
--- Test invalid compress_using option
-select add_compression_policy('readings',
-                              compress_after => '1000 years'::interval,
-                              compress_using => 'foo');
-\set ON_ERROR_STOP 1
-
--- Check that compress_using is not part of the policy if not set. Use
--- a large compress_after to ensure the policy doesn't do anything at
--- this time.
+-- Check that hypercore_use_access_method is not part of the policy if
+-- not set. Use a large compress_after to ensure the policy doesn't do
+-- anything at this time.
 select add_compression_policy('readings', compress_after => '1000 years'::interval)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 select remove_compression_policy('readings');
 
--- Check that compress_using is not part of the policy if set to NULL
+-- Check that hypercore_use_access_method is not part of the policy if
+-- set to NULL.
 select add_compression_policy('readings',
                               compress_after => '1000 years'::interval,
-                              compress_using => NULL)
+                              hypercore_use_access_method => NULL)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 select remove_compression_policy('readings');
@@ -51,10 +45,11 @@ select * from chunk_info
 where hypertable = 'readings'
 order by chunk;
 
--- Check that compress_using is part of the policy config when non-NULL
+-- Check that hypercore_use_access_method is part of the policy config
+-- when enabled.
 select add_compression_policy('readings',
                               compress_after => '1 day'::interval,
-                              compress_using => 'hypercore')
+                              hypercore_use_access_method => true)
 as compression_job \gset
 
 select config from timescaledb_information.jobs where job_id = :compression_job;
@@ -81,7 +76,7 @@ where time = '2022-06-01 10:14' and device = 1;
 -- recompress hypercores.
 select add_compression_policy('readings',
                               compress_after => '1 day'::interval,
-                              compress_using => 'heap')
+                              hypercore_use_access_method => false)
 as compression_job \gset
 
 -- Run the policy job again to recompress
@@ -98,7 +93,7 @@ select * from readings where time = '2022-06-01 10:14' and device = 1;
 select * from readings where time = '2022-06-01 10:14' and device = 1;
 
 -- Test recompression again with a policy that doesn't specify
--- compress_using
+-- hypercore_use_access_method
 select remove_compression_policy('readings');
 -- Insert one value into existing hypercore, also create a new non-hypercore chunk
 insert into readings values ('2022-06-01 10:14', 1, 1.0), ('2022-07-01 10:14', 2, 2.0);
@@ -134,7 +129,7 @@ select timescaledb_experimental.add_policies('daily',
        refresh_start_offset => '8 days'::interval,
        refresh_end_offset => '1 day'::interval,
        compress_after => '9 days'::interval,
-       compress_using => 'hypercore');
+       hypercore_use_access_method => true);
 
 select job_id as cagg_compression_job, materialization_hypertable_name as mathyper
 from timescaledb_information.jobs j
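As the policy test verifies, hypercore_use_access_method is only serialized into the job's config when passed a non-NULL value. A minimal sketch of that round trip, assuming a hypothetical hypertable named metrics (the \gset pattern mirrors the tests above):

    select add_compression_policy('metrics',
                                  compress_after => '7 days'::interval,
                                  hypercore_use_access_method => true)
    as compression_job \gset
    -- The stored config should now include the flag next to compress_after.
    select config from timescaledb_information.jobs where job_id = :compression_job;
    select remove_compression_policy('metrics');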
diff --git a/tsl/test/sql/hypercore_update.sql b/tsl/test/sql/hypercore_update.sql
index f3f425cc4..081270d5c 100644
--- a/tsl/test/sql/hypercore_update.sql
+++ b/tsl/test/sql/hypercore_update.sql
@@ -9,7 +9,7 @@
 -- TODO(#1068) Parallel sequence scan does not work
 set max_parallel_workers_per_gather to 0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Check that all chunks are compressed
 select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
@@ -71,7 +71,7 @@ commit;
 
 -- Test update of a segment-by column. The selection is to make sure
 -- that we have a mix of compressed and uncompressed tuples.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
 from :hypertable
@@ -87,7 +87,7 @@ where (created_at, metric_id) in (select created_at, metric_id from to_update)
 order by metric_id;
 
 -- Compress all chunks again before testing RETURNING
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
 from :hypertable
@@ -102,7 +102,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;
 
 -- Test update of a segment-by column directly on the chunk. This
 -- should fail for compressed rows even for segment-by columns.
-select compress_chunk(:'chunk1', compress_using => 'hypercore');
+select compress_chunk(:'chunk1', hypercore_use_access_method => true);
 
 select metric_id from :chunk1 limit 1 \gset
 
diff --git a/tsl/test/sql/include/hypercore_type_table.sql b/tsl/test/sql/include/hypercore_type_table.sql
index c8a90eebc..72f669e06 100644
--- a/tsl/test/sql/include/hypercore_type_table.sql
+++ b/tsl/test/sql/include/hypercore_type_table.sql
@@ -30,7 +30,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 create table :saved_table as select * from :the_table;
 
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 
 -- This part of the include file will run a query with the aggregate
 -- provided by the including file and test that using a hypercore