Change parameter name to enable Hypercore TAM
Replace the `compress_using` parameter, which took a table access method name, with the boolean parameter `hypercore_use_access_method`. This avoids having to provide an access method name when using the table access method for compression.
This commit is contained in:
parent fdce4439e6
commit e5e94960d0
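
For reference, the user-visible change in SQL (a minimal sketch based on the signatures and tests below; the chunk name is illustrative):

-- Before: name the access method
select compress_chunk('_timescaledb_internal._hyper_1_1_chunk', compress_using => 'hypercore');
-- After: a nullable boolean; NULL behaves as if the parameter was omitted
select compress_chunk('_timescaledb_internal._hyper_1_1_chunk', hypercore_use_access_method => true);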
.github/workflows/pgspot.yaml (vendored, 2 changed lines)
@@ -26,7 +26,7 @@ jobs:
 --proc-without-search-path
 '_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean)'
 --proc-without-search-path
-'_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean,amname name)'
+'_timescaledb_functions.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean,use_creation_time boolean,useam boolean)'
 --proc-without-search-path
 '_timescaledb_internal.policy_compression_execute(job_id integer,htid integer,lag anyelement,maxchunks integer,verbose_log boolean,recompress_enabled boolean)'
 --proc-without-search-path
.unreleased/pr_7411 (new file, 1 line)
@@ -0,0 +1 @@
+Implements: #7411 Change parameter name to enable Hypercore TAM
@@ -36,7 +36,7 @@ CREATE OR REPLACE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
     if_not_compressed BOOLEAN = true,
     recompress BOOLEAN = false,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C VOLATILE;

 CREATE OR REPLACE FUNCTION @extschema@.decompress_chunk(
@@ -53,7 +53,7 @@ CREATE OR REPLACE FUNCTION @extschema@.add_compression_policy(
     initial_start TIMESTAMPTZ = NULL,
     timezone TEXT = NULL,
     compress_created_before INTERVAL = NULL,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 )
 RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
@@ -95,7 +95,7 @@ CREATE OR REPLACE FUNCTION timescaledb_experimental.add_policies(
     refresh_end_offset "any" = NULL,
     compress_after "any" = NULL,
     drop_after "any" = NULL,
-    compress_using NAME = NULL)
+    hypercore_use_access_method BOOL = NULL)
 RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_policies_add'
 LANGUAGE C VOLATILE;
@@ -43,7 +43,7 @@ _timescaledb_functions.policy_compression_execute(
     verbose_log BOOLEAN,
     recompress_enabled BOOLEAN,
     use_creation_time BOOLEAN,
-    amname NAME = NULL)
+    useam BOOLEAN = NULL)
 AS $$
 DECLARE
   htoid REGCLASS;
@@ -109,7 +109,7 @@ BEGIN
   LOOP
     IF chunk_rec.status = 0 THEN
       BEGIN
-        PERFORM @extschema@.compress_chunk(chunk_rec.oid, compress_using => amname);
+        PERFORM @extschema@.compress_chunk(chunk_rec.oid, hypercore_use_access_method => useam);
       EXCEPTION WHEN OTHERS THEN
         GET STACKED DIAGNOSTICS
           _message = MESSAGE_TEXT,
@@ -134,7 +134,7 @@ BEGIN
         PERFORM _timescaledb_functions.recompress_chunk_segmentwise(chunk_rec.oid);
       ELSE
         PERFORM @extschema@.decompress_chunk(chunk_rec.oid, if_compressed => true);
-        PERFORM @extschema@.compress_chunk(chunk_rec.oid, compress_using => amname);
+        PERFORM @extschema@.compress_chunk(chunk_rec.oid, hypercore_use_access_method => useam);
       END IF;
     EXCEPTION WHEN OTHERS THEN
       GET STACKED DIAGNOSTICS
@@ -187,7 +187,7 @@ DECLARE
   numchunks INTEGER := 1;
   recompress_enabled BOOL;
   use_creation_time BOOL := FALSE;
-  compress_using TEXT;
+  hypercore_use_access_method BOOL;
 BEGIN

   -- procedures with SET clause cannot execute transaction
@@ -228,29 +228,29 @@ BEGIN
     lag_value := compress_after;
   END IF;

-  compress_using := jsonb_object_field_text(config, 'compress_using')::name;
+  hypercore_use_access_method := jsonb_object_field_text(config, 'hypercore_use_access_method')::bool;

   -- execute the properly type casts for the lag value
   CASE dimtype
     WHEN 'TIMESTAMP'::regtype, 'TIMESTAMPTZ'::regtype, 'DATE'::regtype, 'INTERVAL' ::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::INTERVAL,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'BIGINT'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::BIGINT,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'INTEGER'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::INTEGER,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
     WHEN 'SMALLINT'::regtype THEN
       CALL _timescaledb_functions.policy_compression_execute(
         job_id, htid, lag_value::SMALLINT,
-        maxchunks, verbose_log, recompress_enabled, use_creation_time, compress_using
+        maxchunks, verbose_log, recompress_enabled, use_creation_time, hypercore_use_access_method
       );
   END CASE;
 END;
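As the hunk above shows, the policy now stores the setting as a boolean in the job's JSONB config and reads it back with a ::bool cast. A minimal sketch of the round trip (the config value shown matches the test output further down):

select add_compression_policy('readings',
       compress_after => '1 day'::interval,
       hypercore_use_access_method => true) as compression_job \gset
select config from timescaledb_information.jobs where job_id = :compression_job;
-- {"hypertable_id": 1, "compress_after": "@ 1 day", "hypercore_use_access_method": true}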
@@ -11,7 +11,7 @@ CREATE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
     if_not_compressed BOOLEAN = true,
     recompress BOOLEAN = false,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_update_placeholder' LANGUAGE C VOLATILE;

 DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL);
@@ -24,7 +24,7 @@ CREATE FUNCTION @extschema@.add_compression_policy(
     initial_start TIMESTAMPTZ = NULL,
     timezone TEXT = NULL,
     compress_created_before INTERVAL = NULL,
-    compress_using NAME = NULL
+    hypercore_use_access_method BOOL = NULL
 )
 RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
@@ -39,7 +39,7 @@ CREATE FUNCTION timescaledb_experimental.add_policies(
     refresh_end_offset "any" = NULL,
     compress_after "any" = NULL,
     drop_after "any" = NULL,
-    compress_using NAME = NULL)
+    hypercore_use_access_method BOOL = NULL)
 RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
 LANGUAGE C VOLATILE;
@@ -5,7 +5,7 @@ DROP ACCESS METHOD IF EXISTS hypercore;
 DROP FUNCTION IF EXISTS ts_hypercore_handler;
 DROP FUNCTION IF EXISTS _timescaledb_debug.is_compressed_tid;

-DROP FUNCTION IF EXISTS @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN, recompress BOOLEAN, compress_using NAME);
+DROP FUNCTION IF EXISTS @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN, recompress BOOLEAN, hypercore_use_access_method BOOL);

 CREATE FUNCTION @extschema@.compress_chunk(
     uncompressed_chunk REGCLASS,
@@ -13,7 +13,7 @@ CREATE FUNCTION @extschema@.compress_chunk(
     recompress BOOLEAN = false
 ) RETURNS REGCLASS AS '@MODULE_PATHNAME@', 'ts_compress_chunk' LANGUAGE C STRICT VOLATILE;

-DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL, compress_using NAME);
+DROP FUNCTION IF EXISTS @extschema@.add_compression_policy(hypertable REGCLASS, compress_after "any", if_not_exists BOOL, schedule_interval INTERVAL, initial_start TIMESTAMPTZ, timezone TEXT, compress_created_before INTERVAL, hypercore_use_access_method BOOL);

 CREATE FUNCTION @extschema@.add_compression_policy(
     hypertable REGCLASS,
@@ -28,7 +28,7 @@ RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_compression_add'
 LANGUAGE C VOLATILE;

-DROP FUNCTION IF EXISTS timescaledb_experimental.add_policies(relation REGCLASS, if_not_exists BOOL, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any", compress_using NAME);
+DROP FUNCTION IF EXISTS timescaledb_experimental.add_policies(relation REGCLASS, if_not_exists BOOL, refresh_start_offset "any", refresh_end_offset "any", compress_after "any", drop_after "any", hypercore_use_access_method BOOL);

 CREATE FUNCTION timescaledb_experimental.add_policies(
     relation REGCLASS,
@@ -41,6 +41,6 @@ RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_policies_add'
 LANGUAGE C VOLATILE;

-DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled BOOLEAN, use_creation_time BOOLEAN, amname NAME);
+DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression_execute(job_id INTEGER, htid INTEGER, lag ANYELEMENT, maxchunks INTEGER, verbose_log BOOLEAN, recompress_enabled BOOLEAN, use_creation_time BOOLEAN, useam BOOLEAN);

 DROP PROCEDURE IF EXISTS _timescaledb_functions.policy_compression(job_id INTEGER, config JSONB);
@@ -6,6 +6,7 @@

 #include <postgres.h>
 #include <access/xact.h>
+#include <fmgr.h>
 #include <miscadmin.h>
 #include <utils/builtins.h>

@@ -18,6 +19,7 @@
 #include "bgw_policy/job.h"
 #include "bgw_policy/job_api.h"
 #include "bgw_policy/policies_v2.h"
+#include "compression/api.h"
 #include "errors.h"
 #include "guc.h"
 #include "hypertable.h"
@@ -158,7 +160,7 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
                                 Interval *default_schedule_interval,
                                 bool user_defined_schedule_interval, bool if_not_exists,
                                 bool fixed_schedule, TimestampTz initial_start,
-                                const char *timezone, const char *compress_using)
+                                const char *timezone, UseAccessMethod use_access_method)
 {
     NameData application_name;
     NameData proc_name, proc_schema, check_schema, check_name, owner;
@@ -282,12 +284,6 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
         }
     }

-    if (compress_using != NULL && strcmp(compress_using, "heap") != 0 &&
-        strcmp(compress_using, TS_HYPERCORE_TAM_NAME) != 0)
-        ereport(ERROR,
-                (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-                 errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));
-
     /* insert a new job into jobs table */
     namestrcpy(&application_name, "Compression Policy");
     namestrcpy(&proc_name, POLICY_COMPRESSION_PROC_NAME);
@@ -302,8 +298,10 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
     ts_jsonb_add_int32(parse_state, POL_COMPRESSION_CONF_KEY_HYPERTABLE_ID, hypertable->fd.id);
     validate_compress_after_type(dim, partitioning_type, compress_after_type);

-    if (NULL != compress_using)
-        ts_jsonb_add_str(parse_state, POL_COMPRESSION_CONF_KEY_COMPRESS_USING, compress_using);
+    if (use_access_method != USE_AM_NULL)
+        ts_jsonb_add_bool(parse_state,
+                          POL_COMPRESSION_CONF_KEY_USE_ACCESS_METHOD,
+                          use_access_method);

     switch (compress_after_type)
     {
@@ -406,7 +404,7 @@ policy_compression_add(PG_FUNCTION_ARGS)
     text *timezone = PG_ARGISNULL(5) ? NULL : PG_GETARG_TEXT_PP(5);
     char *valid_timezone = NULL;
     Interval *created_before = PG_GETARG_INTERVAL_P(6);
-    Name compress_using = PG_ARGISNULL(7) ? NULL : PG_GETARG_NAME(7);
+    UseAccessMethod use_access_method = PG_ARGISNULL(7) ? USE_AM_NULL : PG_GETARG_BOOL(7);

     ts_feature_flag_check(FEATURE_POLICY);
     TS_PREVENT_FUNC_IF_READ_ONLY();
@@ -440,7 +438,7 @@ policy_compression_add(PG_FUNCTION_ARGS)
                                            fixed_schedule,
                                            initial_start,
                                            valid_timezone,
-                                           compress_using ? NameStr(*compress_using) : NULL);
+                                           use_access_method);

     if (!TIMESTAMP_NOT_FINITE(initial_start))
     {
@@ -6,6 +6,7 @@
 #pragma once

 #include <postgres.h>
+#include "compression/api.h"
 #include <utils/jsonb.h>
 #include <utils/timestamp.h>

@@ -26,5 +27,5 @@ Datum policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_dat
                                       Interval *default_schedule_interval,
                                       bool user_defined_schedule_interval, bool if_not_exists,
                                       bool fixed_schedule, TimestampTz initial_start,
-                                      const char *timezone, const char *compress_using);
+                                      const char *timezone, UseAccessMethod use_access_method);
 bool policy_compression_remove_internal(Oid user_rel_oid, bool if_exists);
@@ -6,6 +6,7 @@

 #include <postgres.h>
 #include <access/xact.h>
+#include <fmgr.h>
 #include <miscadmin.h>
 #include <parser/parse_coerce.h>
 #include <utils/builtins.h>
@@ -233,7 +234,7 @@ validate_and_create_policies(policies_info all_policies, bool if_exists)
                                             false,
                                             DT_NOBEGIN,
                                             NULL,
-                                            all_policies.compress->compress_using);
+                                            all_policies.compress->use_access_method);
     }

     if (all_policies.retention && all_policies.retention->create_policy)
@@ -310,7 +311,7 @@ policies_add(PG_FUNCTION_ARGS)
             .create_policy = true,
             .compress_after = PG_GETARG_DATUM(4),
             .compress_after_type = get_fn_expr_argtype(fcinfo->flinfo, 4),
-            .compress_using = PG_ARGISNULL(6) ? NULL : NameStr(*PG_GETARG_NAME(6)),
+            .use_access_method = PG_ARGISNULL(6) ? USE_AM_NULL : PG_GETARG_BOOL(6),
         };
         comp = tmp;
         all_policies.compress = &comp;
@@ -6,6 +6,7 @@
 #pragma once

 #include <postgres.h>
+#include "compression/api.h"
 #include "dimension.h"
 #include <bgw_policy/compression_api.h>
 #include <bgw_policy/continuous_aggregate_api.h>
@@ -25,7 +26,7 @@
 #define POL_COMPRESSION_CONF_KEY_COMPRESS_AFTER "compress_after"
 #define POL_COMPRESSION_CONF_KEY_MAXCHUNKS_TO_COMPRESS "maxchunks_to_compress"
 #define POL_COMPRESSION_CONF_KEY_COMPRESS_CREATED_BEFORE "compress_created_before"
-#define POL_COMPRESSION_CONF_KEY_COMPRESS_USING "compress_using"
+#define POL_COMPRESSION_CONF_KEY_USE_ACCESS_METHOD "hypercore_use_access_method"

 #define POLICY_RECOMPRESSION_PROC_NAME "policy_recompression"
 #define POL_RECOMPRESSION_CONF_KEY_RECOMPRESS_AFTER "recompress_after"
@@ -89,7 +90,7 @@ typedef struct compression_policy
     Datum compress_after;
     Oid compress_after_type;
     bool create_policy;
-    const char *compress_using;
+    UseAccessMethod use_access_method;
 } compression_policy;

 typedef struct retention_policy
@@ -779,31 +779,6 @@ set_access_method(Oid relid, const char *amname)
     return relid;
 }

-enum UseAccessMethod
-{
-    USE_AM_FALSE,
-    USE_AM_TRUE,
-    USE_AM_NULL,
-};
-
-static enum UseAccessMethod
-parse_use_access_method(const char *compress_using)
-{
-    if (compress_using == NULL)
-        return USE_AM_NULL;
-
-    if (strcmp(compress_using, "heap") == 0)
-        return USE_AM_FALSE;
-    else if (strcmp(compress_using, TS_HYPERCORE_TAM_NAME) == 0)
-        return USE_AM_TRUE;
-
-    ereport(ERROR,
-            (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-             errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));
-
-    pg_unreachable();
-}
-
 /*
  * When using compress_chunk() with hypercore, there are three cases to
  * handle:
@@ -815,7 +790,7 @@ parse_use_access_method(const char *compress_using)
  * 3. Recompress a hypercore
  */
 static Oid
-compress_hypercore(Chunk *chunk, bool rel_is_hypercore, enum UseAccessMethod useam,
+compress_hypercore(Chunk *chunk, bool rel_is_hypercore, UseAccessMethod useam,
                    bool if_not_compressed, bool recompress)
 {
     Oid relid = InvalidOid;
@@ -869,14 +844,13 @@ tsl_compress_chunk(PG_FUNCTION_ARGS)
     Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
     bool if_not_compressed = PG_ARGISNULL(1) ? true : PG_GETARG_BOOL(1);
     bool recompress = PG_ARGISNULL(2) ? false : PG_GETARG_BOOL(2);
-    const char *compress_using = PG_ARGISNULL(3) ? NULL : NameStr(*PG_GETARG_NAME(3));
+    UseAccessMethod useam = PG_ARGISNULL(3) ? USE_AM_NULL : PG_GETARG_BOOL(3);

     ts_feature_flag_check(FEATURE_HYPERTABLE_COMPRESSION);

     TS_PREVENT_FUNC_IF_READ_ONLY();
     Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
     bool rel_is_hypercore = get_table_am_oid(TS_HYPERCORE_TAM_NAME, false) == chunk->amoid;
-    enum UseAccessMethod useam = parse_use_access_method(compress_using);

     if (rel_is_hypercore || useam == USE_AM_TRUE)
         uncompressed_chunk_id =
|
@ -11,6 +11,21 @@
|
|||||||
|
|
||||||
#include "chunk.h"
|
#include "chunk.h"
|
||||||
|
|
||||||
|
/*
|
||||||
|
* Decide if the access method should be used for compression, or if it is
|
||||||
|
* undefined. Used for parameter values to PostgreSQL functions and is a
|
||||||
|
* nullable boolean.
|
||||||
|
*
|
||||||
|
* Using explicit values of TRUE = 1 and FALSE = 0 since this enum is cast to
|
||||||
|
* boolean value in the code.
|
||||||
|
*/
|
||||||
|
typedef enum UseAccessMethod
|
||||||
|
{
|
||||||
|
USE_AM_FALSE = 0,
|
||||||
|
USE_AM_TRUE = 1,
|
||||||
|
USE_AM_NULL = 2,
|
||||||
|
} UseAccessMethod;
|
||||||
|
|
||||||
extern Datum tsl_create_compressed_chunk(PG_FUNCTION_ARGS);
|
extern Datum tsl_create_compressed_chunk(PG_FUNCTION_ARGS);
|
||||||
extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
|
extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
|
||||||
extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
|
extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
|
||||||
|
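The new parameter is a nullable boolean mapped onto the three-valued UseAccessMethod enum added above, so at the SQL level there are three cases (all exercised by the test changes that follow; the :'chunk' variable is illustrative):

select compress_chunk(:'chunk', hypercore_use_access_method => true);   -- compress into the hypercore TAM
select compress_chunk(:'chunk', hypercore_use_access_method => false);  -- regular (heap) compression
select compress_chunk(:'chunk', hypercore_use_access_method => NULL);   -- same as omitting the parameter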
@@ -112,7 +112,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
 left join pg_inherits inh on (inh.inhrelid = cl.oid);
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -230,7 +230,7 @@ select * from amrels where rel=:'chunk'::regclass;

 -- Try same thing with compress_chunk()
 alter table :chunk set access method heap;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_4_13_chunk
@@ -250,7 +250,7 @@ select relname, amname
 alter table :chunk set access method hypercore;
 -- Test recompression after changing compression settings
 alter table test3 set (timescaledb.compress_segmentby='device');
-select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);
+select compress_chunk(:'chunk', hypercore_use_access_method => true, recompress => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_4_13_chunk
@@ -398,7 +398,7 @@ from compressed_rel_size_stats;

 -- Create hypercores again and check that compression size stats are
 -- updated showing compressed data
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test2') ch;
 compress_chunk
 -----------------------------------------
@@ -410,7 +410,7 @@ from show_chunks('test2') ch;
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test3') ch;
 compress_chunk
 -----------------------------------------
@@ -457,8 +457,8 @@ from show_chunks('test2') ch;
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

--- Using compress_using => NULL should be the same as "heap"
-select compress_chunk(decompress_chunk(ch), compress_using => NULL)
+-- Using hypercore_use_access_method => NULL should be the same as "heap"
+select compress_chunk(decompress_chunk(ch), hypercore_use_access_method => NULL)
 from show_chunks('test3') ch;
 compress_chunk
 -----------------------------------------
@@ -515,11 +515,11 @@ set client_min_messages=DEBUG1;
 with chunks as (
     select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 LOG: statement: with chunks as (
     select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 DEBUG: migrating table "_hyper_1_3_chunk" to hypercore
 DEBUG: building index "_hyper_1_3_chunk_test2_device_id_created_at_idx" on table "_hyper_1_3_chunk" serially
 DEBUG: index "_hyper_1_3_chunk_test2_device_id_created_at_idx" can safely use deduplication
@@ -643,18 +643,13 @@ commit;
 -- Trying to convert a hypercore to a hypercore should be an error
 -- if if_not_compressed is false and the hypercore is fully
 -- compressed.
-select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
+select compress_chunk(ch, hypercore_use_access_method => true, if_not_compressed => false)
 from show_chunks('test2') ch;
 ERROR: chunk "_hyper_1_1_chunk" is already compressed
--- Compressing using something different than "hypercore" or "heap"
--- should not be allowed
-select compress_chunk(ch, compress_using => 'non_existing_am')
-from show_chunks('test2') ch;
-ERROR: can only compress using "heap" or "hypercore"
 \set ON_ERROR_STOP 1
--- Compressing from hypercore with compress_using=>heap should lead
--- to recompression of hypercore with a notice.
-select compress_chunk(ch, compress_using => 'heap')
+-- Compressing from hypercore not using access method should lead to
+-- recompression of hypercore with a notice.
+select compress_chunk(ch, hypercore_use_access_method => false)
 from show_chunks('test2') ch;
 NOTICE: cannot compress hypercore "_hyper_1_1_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_1_chunk" is already compressed
@@ -678,8 +673,8 @@ NOTICE: chunk "_hyper_1_11_chunk" is already compressed
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

--- Compressing a hypercore without specifying compress_using should
--- lead to recompression. First check that :chunk is a hypercore.
+-- Compressing a hypercore should by default lead to
+-- recompression. First check that :chunk is a hypercore.
 select ch as chunk from show_chunks('test2') ch limit 1 \gset
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and rel = :'chunk'::regclass;
@@ -707,8 +702,8 @@ select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
 (2147484675,14)
 (1 row)

--- Compressing a hypercore with compress_using=>hypercore should
--- also lead to recompression
+-- Compressing a hypercore using the access method should also lead to
+-- recompression
 insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 ctid
@@ -716,7 +711,7 @@ select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 (0,3)
 (1 row)

-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -748,13 +743,13 @@ select decompress_chunk(rel) ch
 -- cleaned up between two or more commands in same transaction.
 select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
 start transaction;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
 (1 row)

-select compress_chunk(:'chunk2', compress_using => 'hypercore');
+select compress_chunk(:'chunk2', hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_3_chunk
@@ -770,9 +765,9 @@ order by rel;
 _timescaledb_internal._hyper_1_3_chunk | hypercore | test2 | 2016 | 10 | 10
 (2 rows)

--- Test that we can compress old way using compress_using=>heap
+-- Test that we can compress old way by not using the access method
 select ch as chunk3 from show_chunks('test2') ch offset 2 limit 1 \gset
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_5_chunk
@@ -787,13 +782,13 @@ order by rel;
 (1 row)

 \set ON_ERROR_STOP 0
--- If we call compress_chunk with compress_using=>'heap' on a
+-- If we call compress_chunk using the table access method on a
 -- heap-compressed chunk, it should lead to an error if
 -- if_not_compressed is false. The commands below are all equivalent
 -- in this case.
-select compress_chunk(:'chunk3', compress_using => 'heap', if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false, if_not_compressed=>false);
 ERROR: chunk "_hyper_1_5_chunk" is already compressed
-select compress_chunk(:'chunk3', compress_using => NULL, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL, if_not_compressed=>false);
 ERROR: chunk "_hyper_1_5_chunk" is already compressed
 select compress_chunk(:'chunk3', if_not_compressed=>false);
 ERROR: chunk "_hyper_1_5_chunk" is already compressed
@@ -801,14 +796,14 @@ ERROR: chunk "_hyper_1_5_chunk" is already compressed
 -- For a heap-compressed chunk, these should all be equivalent and
 -- should not do anything when there is nothing to recompress. A
 -- notice should be raised instead of an error.
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 NOTICE: chunk "_hyper_1_5_chunk" is already compressed
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_5_chunk
 (1 row)

-select compress_chunk(:'chunk3', compress_using => NULL);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL);
 NOTICE: chunk "_hyper_1_5_chunk" is already compressed
 compress_chunk
 ----------------------------------------
@@ -832,7 +827,7 @@ select * from only :chunk3;
 Wed Jun 15 16:00:00 2022 PDT | 8 | 8 | 8 | 8
 (1 row)

-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_5_chunk
@@ -892,7 +887,7 @@ insert into rides values
 (6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
 (356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
 -- Check that it is possible to compress
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('rides') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_8_44_chunk
@@ -139,7 +139,7 @@ begin
 end;
 $$
 language plpgsql;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -236,7 +236,7 @@ NOTICE: adding not-null constraint to column "time"

 alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
 insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('backward_cursor') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_3_13_chunk
|
@ -31,7 +31,7 @@ alter table readings
|
|||||||
insert into readings (time, location, device, temp, humidity, jdata)
|
insert into readings (time, location, device, temp, humidity, jdata)
|
||||||
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
|
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
|
||||||
from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
|
from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
|
||||||
select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');
|
select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
|
||||||
compress_chunk
|
compress_chunk
|
||||||
----------------------------------------
|
----------------------------------------
|
||||||
_timescaledb_internal._hyper_1_1_chunk
|
_timescaledb_internal._hyper_1_1_chunk
|
||||||
|
@@ -359,7 +359,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
 Wed Jun 01 17:00:00 2022 PDT | 1 | 2
 (1 row)

-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -992,7 +992,7 @@ select * from only_nulls_null;
 (4 rows)

 -- Convert all chunks to hypercore and run same queries
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('nullvalues') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_5_15_chunk
@@ -222,7 +222,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
 Wed Jun 01 17:00:00 2022 PDT | 1 | 2
 (1 row)

-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -107,7 +107,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
 limit 1 offset 1 \gset
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -141,7 +141,7 @@ alter table the_hypercore set (
     timescaledb.compress_segmentby = '',
     timescaledb.compress_orderby = 'updated_at desc'
 );
-select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('the_hypercore'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_3_7_chunk
@@ -111,7 +111,7 @@ set enable_mergejoin to false;
 set enable_hashjoin to false;
 -- There are already tests to merge into uncompressed tables, so just
 -- compress all chunks using Hypercore.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -220,7 +220,7 @@ humidity | 1
 \x off
 -- Recompress all and try to insert the same rows again. This there
 -- should be no rows inserted.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 NOTICE: chunk "_hyper_1_2_chunk" is already compressed
 NOTICE: chunk "_hyper_1_3_chunk" is already compressed
 NOTICE: chunk "_hyper_1_4_chunk" is already compressed
@@ -156,7 +156,7 @@ select device_id, count(*) into orig_chunk from :chunk1 group by device_id;
 -----------------------
 -- Enable hypercore --
 -----------------------
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -27,16 +27,9 @@ from timescaledb_information.chunks ch
 join pg_class cl on (format('%I.%I', ch.chunk_schema, ch.chunk_name)::regclass = cl.oid)
 join pg_am am on (am.oid = cl.relam);
 set timezone to pst8pdt;
-\set ON_ERROR_STOP 0
--- Test invalid compress_using option
-select add_compression_policy('readings',
-       compress_after => '1000 years'::interval,
-       compress_using => 'foo');
-ERROR: can only compress using "heap" or "hypercore"
-\set ON_ERROR_STOP 1
--- Check that compress_using is not part of the policy if not set. Use
--- a large compress_after to ensure the policy doesn't do anything at
--- this time.
+-- Check that hypercore_use_access_method is not part of the policy if
+-- not set. Use a large compress_after to ensure the policy doesn't do
+-- anything at this time.
 select add_compression_policy('readings', compress_after => '1000 years'::interval)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
@@ -51,10 +44,11 @@ select remove_compression_policy('readings');
 t
 (1 row)

--- Check that compress_using is not part of the policy if set to NULL
+-- Check that hypercore_use_access_method is not part of the policy if
+-- set to NULL
 select add_compression_policy('readings',
        compress_after => '1000 years'::interval,
-       compress_using => NULL)
+       hypercore_use_access_method => NULL)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 config
@@ -77,15 +71,16 @@ order by chunk;
 readings | _hyper_1_1_chunk | heap | f
 (1 row)

--- Check that compress_using is part of the policy config when non-NULL
+-- Check that hypercore_use_access_method is part of the policy config
+-- when enabled.
 select add_compression_policy('readings',
        compress_after => '1 day'::interval,
-       compress_using => 'hypercore')
+       hypercore_use_access_method => true)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 config
-----------------------------------------------------------------------------------
-{"hypertable_id": 1, "compress_after": "@ 1 day", "compress_using": "hypercore"}
+----------------------------------------------------------------------------------------
+{"hypertable_id": 1, "compress_after": "@ 1 day", "hypercore_use_access_method": true}
 (1 row)

 -- Make sure the policy runs
@@ -120,7 +115,7 @@ where time = '2022-06-01 10:14' and device = 1;
 -- recompress hypercores.
 select add_compression_policy('readings',
        compress_after => '1 day'::interval,
-       compress_using => 'heap')
+       hypercore_use_access_method => false)
 as compression_job \gset
 -- Run the policy job again to recompress
 call run_job(:'compression_job');
@@ -150,7 +145,7 @@ select * from readings where time = '2022-06-01 10:14' and device = 1;
 (1 row)

 -- Test recompression again with a policy that doesn't specify
--- compress_using
+-- hypercore_use_access_method
 select remove_compression_policy('readings');
 remove_compression_policy
 ---------------------------
@@ -203,7 +198,7 @@ select timescaledb_experimental.add_policies('daily',
 refresh_start_offset => '8 days'::interval,
 refresh_end_offset => '1 day'::interval,
 compress_after => '9 days'::interval,
-compress_using => 'hypercore');
+hypercore_use_access_method => true);
 add_policies
 --------------
 t

@@ -97,7 +97,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -169,7 +169,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_3_7_chunk
@@ -240,7 +240,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_5_13_chunk
@@ -313,7 +313,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_7_19_chunk
@@ -386,7 +386,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_9_25_chunk
@@ -460,7 +460,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 ------------------------------------------
 _timescaledb_internal._hyper_11_31_chunk
@@ -534,7 +534,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 -- table and a heap table produce the same result.
 create table :saved_table as select * from :the_table;
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 compress_chunk
 ------------------------------------------
 _timescaledb_internal._hyper_13_37_chunk
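
The seven hunks above are the same mechanical substitution across the include-file driven tests. The pattern, as a standalone sketch against any compressed hypertable (the table name metrics here is a placeholder):

    -- true selects the Hypercore table access method; false or NULL keeps
    -- plain heap-based compression
    select compress_chunk(show_chunks('metrics'), hypercore_use_access_method => true);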

@@ -108,7 +108,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
 limit 1 offset 1 \gset
 -- TODO(#1068) Parallel sequence scan does not work
 set max_parallel_workers_per_gather to 0;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -291,7 +291,7 @@ select * from :hypertable where humidity = 200.0 order by metric_id;
 commit;
 -- Test update of a segment-by column. The selection is to make sure
 -- that we have a mix of compressed and uncompressed tuples.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -343,7 +343,7 @@ order by metric_id;
 (11 rows)
 
 -- Compress all chunks again before testing RETURNING
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -395,7 +395,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;
 
 -- Test update of a segment-by column directly on the chunk. This
 -- should fail for compressed rows even for segment-by columns.
-select compress_chunk(:'chunk1', compress_using => 'hypercore');
+select compress_chunk(:'chunk1', hypercore_use_access_method => true);
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk

@@ -98,7 +98,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
 _timescaledb_functions.partialize_agg(anyelement)
 _timescaledb_functions.policy_compression(integer,jsonb)
 _timescaledb_functions.policy_compression_check(jsonb)
-_timescaledb_functions.policy_compression_execute(integer,integer,anyelement,integer,boolean,boolean,boolean,name)
+_timescaledb_functions.policy_compression_execute(integer,integer,anyelement,integer,boolean,boolean,boolean,boolean)
 _timescaledb_functions.policy_job_stat_history_retention(integer,jsonb)
 _timescaledb_functions.policy_job_stat_history_retention_check(jsonb)
 _timescaledb_functions.policy_recompression(integer,jsonb)
@@ -210,7 +210,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
 ts_hypercore_handler(internal)
 ts_hypercore_proxy_handler(internal)
 ts_now_mock()
-add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,name)
+add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
 add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text)
 add_dimension(regclass,_timescaledb_internal.dimension_info,boolean)
 add_dimension(regclass,name,integer,anyelement,regproc,boolean)
@@ -225,7 +225,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
 cagg_migrate(regclass,boolean,boolean)
 chunk_compression_stats(regclass)
 chunks_detailed_size(regclass)
-compress_chunk(regclass,boolean,boolean,name)
+compress_chunk(regclass,boolean,boolean,boolean)
 create_hypertable(regclass,_timescaledb_internal.dimension_info,boolean,boolean,boolean)
 create_hypertable(regclass,name,name,integer,name,name,anyelement,boolean,boolean,regproc,boolean,text,regproc,regproc)
 decompress_chunk(regclass,boolean)
@@ -291,7 +291,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
 time_bucket_gapfill(smallint,smallint,smallint,smallint)
 timescaledb_post_restore()
 timescaledb_pre_restore()
-timescaledb_experimental.add_policies(regclass,boolean,"any","any","any","any",name)
+timescaledb_experimental.add_policies(regclass,boolean,"any","any","any","any",boolean)
 timescaledb_experimental.alter_policies(regclass,boolean,"any","any","any","any")
 timescaledb_experimental.remove_all_policies(regclass,boolean)
 timescaledb_experimental.remove_policies(regclass,boolean,text[])
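
The three catalog hunks above only track the signature change: the trailing name argument becomes boolean for policy_compression_execute, add_compression_policy, compress_chunk, and timescaledb_experimental.add_policies. A quick sanity check against the installed extension, sketched as a plain catalog query:

    select p.oid::regprocedure
    from pg_proc p
    where p.proname in ('compress_chunk', 'add_compression_policy');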

@@ -12,7 +12,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
 
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 select location_id, count(*) into comp from :hypertable GROUP BY location_id;
 select * from orig join comp using (location_id) where orig.count != comp.count;
 drop table orig, comp;

@@ -125,7 +125,7 @@ select * from amrels where rel=:'chunk'::regclass;
 
 -- Try same thing with compress_chunk()
 alter table :chunk set access method heap;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 
 -- Check that chunk is using hypercore
 select relname, amname
@@ -138,7 +138,7 @@ alter table :chunk set access method hypercore;
 
 -- Test recompression after changing compression settings
 alter table test3 set (timescaledb.compress_segmentby='device');
-select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);
+select compress_chunk(:'chunk', hypercore_use_access_method => true, recompress => true);
 
 -- Create a second chunk
 insert into test3 values ('2022-08-01', 1, 1.0);
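
The two hunks above exercise both routes into the access method, which this commit leaves intact apart from the parameter rename. A sketch, using the :chunk psql variable from the test:

    -- Route 1: direct DDL migration
    alter table :chunk set access method hypercore;

    -- Route 2: compress_chunk with the boolean; recompress => true forces
    -- recompression after the segmentby settings changed
    select compress_chunk(:'chunk', hypercore_use_access_method => true, recompress => true);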
@@ -219,9 +219,9 @@ from compressed_rel_size_stats;
 
 -- Create hypercores again and check that compression size stats are
 -- updated showing compressed data
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test2') ch;
-select compress_chunk(ch, compress_using => 'hypercore')
+select compress_chunk(ch, hypercore_use_access_method => true)
 from show_chunks('test3') ch;
 
 -- Save the stats for later comparison. Exclude the amname column
@@ -241,8 +241,8 @@ select * from compressed_rel_size_stats order by rel;
 -- compression size stats
 select compress_chunk(decompress_chunk(ch))
 from show_chunks('test2') ch;
--- Using compress_using => NULL should be the same as "heap"
-select compress_chunk(decompress_chunk(ch), compress_using => NULL)
+-- Using hypercore_use_access_method => NULL should be the same as "heap"
+select compress_chunk(decompress_chunk(ch), hypercore_use_access_method => NULL)
 from show_chunks('test3') ch;
 
 select * from compressed_rel_size_stats order by rel;
@@ -276,7 +276,7 @@ set client_min_messages=DEBUG1;
 with chunks as (
 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+select compress_chunk(ch, hypercore_use_access_method => true) from chunks;
 
 -- Test direct migration of the remaining chunk via SET ACCESS
 -- METHOD. Add some uncompressed data to test migration with partially
@@ -316,23 +316,18 @@ commit;
 -- Trying to convert a hypercore to a hypercore should be an error
 -- if if_not_compressed is false and the hypercore is fully
 -- compressed.
-select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
-from show_chunks('test2') ch;
-
--- Compressing using something different than "hypercore" or "heap"
--- should not be allowed
-select compress_chunk(ch, compress_using => 'non_existing_am')
+select compress_chunk(ch, hypercore_use_access_method => true, if_not_compressed => false)
 from show_chunks('test2') ch;
 
 \set ON_ERROR_STOP 1
 
--- Compressing from hypercore with compress_using=>heap should lead
--- to recompression of hypercore with a notice.
-select compress_chunk(ch, compress_using => 'heap')
+-- Compressing from hypercore not using access method should lead to
+-- recompression of hypercore with a notice.
+select compress_chunk(ch, hypercore_use_access_method => false)
 from show_chunks('test2') ch;
 
--- Compressing a hypercore without specifying compress_using should
--- lead to recompression. First check that :chunk is a hypercore.
+-- Compressing a hypercore should by default lead to
+-- recompression. First check that :chunk is a hypercore.
 select ch as chunk from show_chunks('test2') ch limit 1 \gset
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and rel = :'chunk'::regclass;
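
The hunk above also pins down what the boolean means on an already-compressed hypercore: every call recompresses rather than errors, and the old negative test for an invalid access method name disappears, since a boolean has no invalid spelling. A condensed sketch of the three recompression calls on a hypercore chunk:

    select compress_chunk(:'chunk');                                        -- default: recompress
    select compress_chunk(:'chunk', hypercore_use_access_method => true);   -- recompress
    select compress_chunk(:'chunk', hypercore_use_access_method => false);  -- recompress with a notice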
@@ -340,11 +335,11 @@ insert into :chunk values ('2022-06-01 10:01', 6, 6, 6.0, 6.0);
 select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
 select compress_chunk(:'chunk');
 select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
--- Compressing a hypercore with compress_using=>hypercore should
--- also lead to recompression
+-- Compressing a hypercore using the access method should also lead to
+-- recompression
 insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 
 -- Convert all hypercores back to heap
@@ -358,37 +353,37 @@ select decompress_chunk(rel) ch
 -- cleaned up between two or more commands in same transaction.
 select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
 start transaction;
-select compress_chunk(:'chunk', compress_using => 'hypercore');
-select compress_chunk(:'chunk2', compress_using => 'hypercore');
+select compress_chunk(:'chunk', hypercore_use_access_method => true);
+select compress_chunk(:'chunk2', hypercore_use_access_method => true);
 commit;
 
 select * from compressed_rel_size_stats
 where amname = 'hypercore' and relparent = 'test2'::regclass
 order by rel;
 
--- Test that we can compress old way using compress_using=>heap
+-- Test that we can compress old way by not using the access method
 select ch as chunk3 from show_chunks('test2') ch offset 2 limit 1 \gset
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 
 select * from compressed_rel_size_stats
 where amname = 'heap' and relparent = 'test2'::regclass
 order by rel;
 
 \set ON_ERROR_STOP 0
--- If we call compress_chunk with compress_using=>'heap' on a
+-- If we call compress_chunk using the table access method on a
 -- heap-compressed chunk, it should lead to an error if
 -- if_not_compressed is false. The commands below are all equivalent
 -- in this case.
-select compress_chunk(:'chunk3', compress_using => 'heap', if_not_compressed=>false);
-select compress_chunk(:'chunk3', compress_using => NULL, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false, if_not_compressed=>false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL, if_not_compressed=>false);
 select compress_chunk(:'chunk3', if_not_compressed=>false);
 \set ON_ERROR_STOP 1
 
 -- For a heap-compressed chunk, these should all be equivalent and
 -- should not do anything when there is nothing to recompress. A
 -- notice should be raised instead of an error.
-select compress_chunk(:'chunk3', compress_using => 'heap');
-select compress_chunk(:'chunk3', compress_using => NULL);
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
+select compress_chunk(:'chunk3', hypercore_use_access_method => NULL);
 select compress_chunk(:'chunk3');
 
 -- Insert new data to create a "partially compressed" chunk. Note that
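
As the large hunk above spells out, on a heap-compressed chunk the three spellings are interchangeable: false, NULL, and omitting the argument all mean heap-based compression. Sketched:

    -- Equivalent on a heap-compressed chunk; with nothing to recompress they
    -- raise a notice, or an error when if_not_compressed => false
    select compress_chunk(:'chunk3', hypercore_use_access_method => false);
    select compress_chunk(:'chunk3', hypercore_use_access_method => NULL);
    select compress_chunk(:'chunk3');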
@@ -396,7 +391,7 @@ select compress_chunk(:'chunk3');
 -- doesn't properly update the partially compressed state.
 insert into test2 values ('2022-06-15 16:00', 8, 8, 8.0, 8.0);
 select * from only :chunk3;
-select compress_chunk(:'chunk3', compress_using => 'heap');
+select compress_chunk(:'chunk3', hypercore_use_access_method => false);
 -- The tuple should no longer be in the non-compressed chunk
 select * from only :chunk3;
 -- But the tuple is returned in a query without ONLY
@@ -439,7 +434,7 @@ insert into rides values
 (6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
 (356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
 -- Check that it is possible to compress
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('rides') ch;
 select rel, amname from compressed_rel_size_stats
 where relparent::regclass = 'rides'::regclass;
 

@@ -40,7 +40,7 @@ end;
 $$
 language plpgsql;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Compare executing the function with a cursor with a query fetching
 -- the same data directly from the hypertable.
@@ -107,7 +107,7 @@ create table backward_cursor (time timestamptz, location_id bigint, temp float8)
 select create_hypertable('backward_cursor', 'time', create_default_indexes=>false);
 alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
 insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('backward_cursor') ch;
 insert into backward_cursor values ('2024-01-01 05:00', 3, 5.0), ('2024-01-01 06:00', 3, 6.0);
 
 begin;

@@ -46,7 +46,7 @@ insert into readings (time, location, device, temp, humidity, jdata)
 select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
 from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 
-select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('readings'), hypercore_use_access_method => true);
 
 -- Insert some extra data to get some non-compressed data as well.
 insert into readings (time, location, device, temp, humidity, jdata)

@@ -135,7 +135,7 @@ select explain_anonymize(format($$
 $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 vacuum analyze :hypertable;
 
@@ -378,7 +378,7 @@ select * from nullvalues where only_nulls is null;
 select * from only_nulls_null;
 
 -- Convert all chunks to hypercore and run same queries
-select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;
+select compress_chunk(ch, hypercore_use_access_method => true) from show_chunks('nullvalues') ch;
 
 select c.relname, a.amname FROM pg_class c
 join pg_am a on (c.relam = a.oid)

@@ -67,7 +67,7 @@ select explain_anonymize(format($$
 $$, :'chunk2'));
 select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 vacuum analyze :hypertable;
 

@@ -6,7 +6,7 @@
 
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 select location_id, count(*) into comp from :hypertable GROUP BY location_id;
 select * from orig join comp using (location_id) where orig.count != comp.count;
 drop table orig, comp;

@@ -43,7 +43,7 @@ alter table the_hypercore set (
 timescaledb.compress_segmentby = '',
 timescaledb.compress_orderby = 'updated_at desc'
 );
-select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');
+select compress_chunk(show_chunks('the_hypercore'), hypercore_use_access_method => true);
 
 vacuum analyze the_hypercore;
 

@@ -12,7 +12,7 @@ set enable_hashjoin to false;
 
 -- There are already tests to merge into uncompressed tables, so just
 -- compress all chunks using Hypercore.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 create table source_data (
 created_at timestamptz not null,
@@ -62,7 +62,7 @@ select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);
 
 -- Recompress all and try to insert the same rows again. This there
 -- should be no rows inserted.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 \x on
 select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);

@@ -29,7 +29,7 @@ select device_id, count(*) into orig_chunk from :chunk1 group by device_id;
 -----------------------
 -- Enable hypercore --
 -----------------------
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Show count without parallel plan and without ColumnarScan
 set timescaledb.enable_columnarscan=false;

@@ -23,25 +23,19 @@ join pg_am am on (am.oid = cl.relam);
 
 set timezone to pst8pdt;
 
-\set ON_ERROR_STOP 0
--- Test invalid compress_using option
-select add_compression_policy('readings',
-compress_after => '1000 years'::interval,
-compress_using => 'foo');
-\set ON_ERROR_STOP 1
-
--- Check that compress_using is not part of the policy if not set. Use
--- a large compress_after to ensure the policy doesn't do anything at
--- this time.
+-- Check that hypercore_use_access_method is not part of the policy if
+-- not set. Use a large compress_after to ensure the policy doesn't do
+-- anything at this time.
 select add_compression_policy('readings', compress_after => '1000 years'::interval)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 select remove_compression_policy('readings');
 
--- Check that compress_using is not part of the policy if set to NULL
+-- Check that hypercore_use_access_method is not part of the policy if
+-- set to NULL
 select add_compression_policy('readings',
 compress_after => '1000 years'::interval,
-compress_using => NULL)
+hypercore_use_access_method => NULL)
 as compression_job \gset
 select config from timescaledb_information.jobs where job_id = :compression_job;
 select remove_compression_policy('readings');
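
Note what the rewritten .sql hunk above drops: the old negative test for compress_using => 'foo'. With a boolean parameter there is no invalid name left to reject, so only the unset-parameter check remains. Sketched:

    -- No hypercore_use_access_method argument: the key must stay out of the
    -- stored job config
    select add_compression_policy('readings', compress_after => '1000 years'::interval)
    as compression_job \gset
    select config from timescaledb_information.jobs where job_id = :compression_job;
    select remove_compression_policy('readings');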
@@ -51,10 +45,11 @@ select * from chunk_info
 where hypertable = 'readings'
 order by chunk;
 
--- Check that compress_using is part of the policy config when non-NULL
+-- Check that hypercore_use_access_method is part of the policy config
+-- when enabled.
 select add_compression_policy('readings',
 compress_after => '1 day'::interval,
-compress_using => 'hypercore')
+hypercore_use_access_method => true)
 as compression_job \gset
 
 select config from timescaledb_information.jobs where job_id = :compression_job;
@@ -81,7 +76,7 @@ where time = '2022-06-01 10:14' and device = 1;
 -- recompress hypercores.
 select add_compression_policy('readings',
 compress_after => '1 day'::interval,
-compress_using => 'heap')
+hypercore_use_access_method => false)
 as compression_job \gset
 
 -- Run the policy job again to recompress
@@ -98,7 +93,7 @@ select * from readings where time = '2022-06-01 10:14' and device = 1;
 select * from readings where time = '2022-06-01 10:14' and device = 1;
 
 -- Test recompression again with a policy that doesn't specify
--- compress_using
+-- hypercore_use_access_method
 select remove_compression_policy('readings');
 -- Insert one value into existing hypercore, also create a new non-hypercore chunk
 insert into readings values ('2022-06-01 10:14', 1, 1.0), ('2022-07-01 10:14', 2, 2.0);
@@ -134,7 +129,7 @@ select timescaledb_experimental.add_policies('daily',
 refresh_start_offset => '8 days'::interval,
 refresh_end_offset => '1 day'::interval,
 compress_after => '9 days'::interval,
-compress_using => 'hypercore');
+hypercore_use_access_method => true);
 
 select job_id as cagg_compression_job, materialization_hypertable_name as mathyper
 from timescaledb_information.jobs j
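
The experimental policies API follows the same rename. A sketch of the new call against the continuous aggregate named daily from the test above:

    select timescaledb_experimental.add_policies('daily',
        refresh_start_offset => '8 days'::interval,
        refresh_end_offset => '1 day'::interval,
        compress_after => '9 days'::interval,
        hypercore_use_access_method => true);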

@@ -9,7 +9,7 @@
 -- TODO(#1068) Parallel sequence scan does not work
 set max_parallel_workers_per_gather to 0;
 
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 -- Check that all chunks are compressed
 select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
@@ -71,7 +71,7 @@ commit;
 
 -- Test update of a segment-by column. The selection is to make sure
 -- that we have a mix of compressed and uncompressed tuples.
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
 from :hypertable
@@ -87,7 +87,7 @@ where (created_at, metric_id) in (select created_at, metric_id from to_update)
 order by metric_id;
 
 -- Compress all chunks again before testing RETURNING
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'hypertable'), hypercore_use_access_method => true);
 
 select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
 from :hypertable
@@ -102,7 +102,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;
 
 -- Test update of a segment-by column directly on the chunk. This
 -- should fail for compressed rows even for segment-by columns.
-select compress_chunk(:'chunk1', compress_using => 'hypercore');
+select compress_chunk(:'chunk1', hypercore_use_access_method => true);
 
 select metric_id from :chunk1 limit 1 \gset
 

@@ -30,7 +30,7 @@ from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
 create table :saved_table as select * from :the_table;
 
 -- Compress the rows in the hypercore.
-select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
+select compress_chunk(show_chunks(:'the_table'), hypercore_use_access_method => true);
 
 -- This part of the include file will run a query with the aggregate
 -- provided by the including file and test that using a hypercore