Add include_tiered_data option to cagg policy
The `include_tiered_data` option allows users to override the value of `timescaledb.enable_tiered_reads`, defined at the instance level, for a particular continuous aggregate policy.
This commit is contained in timescale/timescaledb (https://github.com/timescale/timescaledb):
commit 60ecd46052, parent 3e021795b5.
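For illustration, this is how the new option is exercised by the regression test added in this commit; the `ht_try_weekly` continuous aggregate is defined in that test, and passing `include_tiered_data => NULL` (the default) falls back to the instance-wide GUC:

```sql
-- Refresh policy that always reads tiered data, overriding the
-- timescaledb.enable_tiered_reads setting for this policy only.
SELECT add_continuous_aggregate_policy(
    'ht_try_weekly',
    start_offset        => NULL,
    end_offset          => INTERVAL '1 hour',
    schedule_interval   => INTERVAL '1 hour',
    include_tiered_data => true
);
```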
.unreleased/pr_7587 (new file, +1 line)
@@ -0,0 +1 @@
+Implements: #7587 Add `include_tiered_data` parameter to `add_continuous_aggregate_policy` API
@@ -85,7 +85,8 @@ CREATE OR REPLACE FUNCTION @extschema@.add_continuous_aggregate_policy(
     end_offset "any", schedule_interval INTERVAL,
     if_not_exists BOOL = false,
     initial_start TIMESTAMPTZ = NULL,
-    timezone TEXT = NULL
+    timezone TEXT = NULL,
+    include_tiered_data BOOL = NULL
 )
 RETURNS INTEGER
 AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_add'
@@ -125,3 +125,22 @@ CREATE PROCEDURE @extschema@.refresh_continuous_aggregate(
     force BOOLEAN = FALSE
 ) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_update_placeholder';
 
+-- Add `include_tiered_data` argument to `add_continuous_aggregate_policy`
+DROP FUNCTION @extschema@.add_continuous_aggregate_policy(
+    continuous_aggregate REGCLASS, start_offset "any",
+    end_offset "any", schedule_interval INTERVAL,
+    if_not_exists BOOL,
+    initial_start TIMESTAMPTZ,
+    timezone TEXT
+);
+CREATE FUNCTION @extschema@.add_continuous_aggregate_policy(
+    continuous_aggregate REGCLASS, start_offset "any",
+    end_offset "any", schedule_interval INTERVAL,
+    if_not_exists BOOL = false,
+    initial_start TIMESTAMPTZ = NULL,
+    timezone TEXT = NULL,
+    include_tiered_data BOOL = NULL
+)
+RETURNS INTEGER
+AS '@MODULE_PATHNAME@', 'ts_update_placeholder'
+LANGUAGE C VOLATILE;
@@ -67,3 +67,23 @@ CREATE PROCEDURE @extschema@.refresh_continuous_aggregate(
     window_start "any",
     window_end "any"
 ) LANGUAGE C AS '@MODULE_PATHNAME@', 'ts_continuous_agg_refresh';
+
+-- Remove `include_tiered_data` argument from `add_continuous_aggregate_policy`
+DROP FUNCTION @extschema@.add_continuous_aggregate_policy(
+    continuous_aggregate REGCLASS, start_offset "any",
+    end_offset "any", schedule_interval INTERVAL,
+    if_not_exists BOOL,
+    initial_start TIMESTAMPTZ,
+    timezone TEXT,
+    include_tiered_data BOOL
+);
+CREATE FUNCTION @extschema@.add_continuous_aggregate_policy(
+    continuous_aggregate REGCLASS, start_offset "any",
+    end_offset "any", schedule_interval INTERVAL,
+    if_not_exists BOOL = false,
+    initial_start TIMESTAMPTZ = NULL,
+    timezone TEXT = NULL
+)
+RETURNS INTEGER
+AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_add'
+LANGUAGE C VOLATILE;
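A note on the pattern above: both the update and the downgrade script drop and re-create the function rather than using `CREATE OR REPLACE`, because PostgreSQL cannot change an existing function's argument list in place; a `CREATE OR REPLACE` with the extra parameter would register a second overload instead. After an upgrade, the active signature can be inspected from psql:

```sql
\df add_continuous_aggregate_policy
```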
@@ -44,7 +44,7 @@ extern bool ts_guc_enable_cagg_reorder_groupby;
 extern TSDLLEXPORT int ts_guc_cagg_max_individual_materializations;
 extern bool ts_guc_enable_now_constify;
 extern bool ts_guc_enable_foreign_key_propagation;
-extern bool ts_guc_enable_osm_reads;
+extern TSDLLEXPORT bool ts_guc_enable_osm_reads;
 #if PG16_GE
 extern TSDLLEXPORT bool ts_guc_enable_cagg_sort_pushdown;
 #endif
@@ -136,6 +136,16 @@ policy_refresh_cagg_get_refresh_end(const Dimension *dim, const Jsonb *config, b
     return res;
 }
 
+bool
+policy_refresh_cagg_get_include_tiered_data(const Jsonb *config, bool *isnull)
+{
+    bool found;
+    bool res = ts_jsonb_get_bool_field(config, POL_REFRESH_CONF_KEY_INCLUDE_TIERED_DATA, &found);
+
+    *isnull = !found;
+    return res;
+}
+
 /* returns false if a policy could not be found */
 bool
 policy_refresh_cagg_exists(int32 materialization_id)
@@ -519,7 +529,8 @@ Datum
 policy_refresh_cagg_add_internal(Oid cagg_oid, Oid start_offset_type, NullableDatum start_offset,
                                  Oid end_offset_type, NullableDatum end_offset,
                                  Interval refresh_interval, bool if_not_exists, bool fixed_schedule,
-                                 TimestampTz initial_start, const char *timezone)
+                                 TimestampTz initial_start, const char *timezone,
+                                 NullableDatum include_tiered_data)
 {
     NameData application_name;
     NameData proc_name, proc_schema, check_name, check_schema, owner;
@@ -630,6 +641,10 @@ policy_refresh_cagg_add_internal(Oid cagg_oid, Oid start_offset_type, NullableDa
                           policyconf.offset_end.value);
     else
         ts_jsonb_add_null(parse_state, POL_REFRESH_CONF_KEY_END_OFFSET);
+    if (!include_tiered_data.isnull)
+        ts_jsonb_add_bool(parse_state,
+                          POL_REFRESH_CONF_KEY_INCLUDE_TIERED_DATA,
+                          include_tiered_data.value);
     JsonbValue *result = pushJsonbValue(&parse_state, WJB_END_OBJECT, NULL);
     Jsonb *config = JsonbValueToJsonb(result);
 
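The config key is only written when the argument is non-NULL, so an unset option is simply absent from the stored job config. The regression test below prints the resulting config with a `NOTICE`; for a policy created with `include_tiered_data => true` it looks like this (the query and `:job_id` psql variable mirror the test's `\gset` usage):

```sql
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :job_id;
-- {"end_offset": "@ 1 hour", "start_offset": null,
--  "mat_hypertable_id": 7, "include_tiered_data": true}
```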
@@ -660,6 +675,7 @@ policy_refresh_cagg_add(PG_FUNCTION_ARGS)
     Interval refresh_interval;
     bool if_not_exists;
     NullableDatum start_offset, end_offset;
+    NullableDatum include_tiered_data;
 
     ts_feature_flag_check(FEATURE_POLICY);
 
@@ -682,6 +698,8 @@ policy_refresh_cagg_add(PG_FUNCTION_ARGS)
     bool fixed_schedule = !PG_ARGISNULL(5);
     text *timezone = PG_ARGISNULL(6) ? NULL : PG_GETARG_TEXT_PP(6);
     char *valid_timezone = NULL;
+    include_tiered_data.value = PG_GETARG_DATUM(7);
+    include_tiered_data.isnull = PG_ARGISNULL(7);
 
     Datum retval;
     /* if users pass in -infinity for initial_start, then use the current_timestamp instead */
@@ -704,7 +722,8 @@ policy_refresh_cagg_add(PG_FUNCTION_ARGS)
                                           if_not_exists,
                                           fixed_schedule,
                                           initial_start,
-                                          valid_timezone);
+                                          valid_timezone,
+                                          include_tiered_data);
     if (!TIMESTAMP_NOT_FINITE(initial_start))
     {
         int32 job_id = DatumGetInt32(retval);
@@ -20,6 +20,7 @@ int64 policy_refresh_cagg_get_refresh_start(const ContinuousAgg *cagg, const Dim
                                             const Jsonb *config, bool *start_isnull);
 int64 policy_refresh_cagg_get_refresh_end(const Dimension *dim, const Jsonb *config,
                                           bool *end_isnull);
+bool policy_refresh_cagg_get_include_tiered_data(const Jsonb *config, bool *isnull);
 bool policy_refresh_cagg_refresh_start_lt(int32 materialization_id, Oid cmp_type,
                                           Datum cmp_interval);
 bool policy_refresh_cagg_exists(int32 materialization_id);
@@ -28,5 +29,6 @@ Datum policy_refresh_cagg_add_internal(Oid cagg_oid, Oid start_offset_type,
                                        NullableDatum start_offset, Oid end_offset_type,
                                        NullableDatum end_offset, Interval refresh_interval,
                                        bool if_not_exists, bool fixed_schedule,
-                                       TimestampTz initial_start, const char *timezone);
+                                       TimestampTz initial_start, const char *timezone,
+                                       NullableDatum include_tiered_data);
 Datum policy_refresh_cagg_remove_internal(Oid cagg_oid, bool if_exists);
@@ -19,6 +19,7 @@
 #include <parser/parser.h>
 #include <tcop/pquery.h>
 #include <utils/builtins.h>
+#include <utils/guc.h>
 #include <utils/lsyscache.h>
 #include <utils/portal.h>
 #include <utils/snapmgr.h>
@@ -51,6 +52,7 @@
 #include "dimension_slice.h"
 #include "dimension_vector.h"
 #include "errors.h"
+#include "guc.h"
 #include "job.h"
 #include "reorder.h"
 #include "utils.h"
@@ -372,7 +374,21 @@ policy_refresh_cagg_execute(int32 job_id, Jsonb *config)
 {
     PolicyContinuousAggData policy_data;
 
+    StringInfo str = makeStringInfo();
+    JsonbToCStringIndent(str, &config->root, VARSIZE(config));
+
     policy_refresh_cagg_read_and_validate_config(config, &policy_data);
+
+    bool enable_osm_reads_old = ts_guc_enable_osm_reads;
+
+    if (!policy_data.include_tiered_data_isnull)
+    {
+        SetConfigOption("timescaledb.enable_tiered_reads",
+                        policy_data.include_tiered_data ? "on" : "off",
+                        PGC_USERSET,
+                        PGC_S_SESSION);
+    }
+
     continuous_agg_refresh_internal(policy_data.cagg,
                                     &policy_data.refresh_window,
                                     CAGG_REFRESH_POLICY,
@@ -380,6 +396,14 @@ policy_refresh_cagg_execute(int32 job_id, Jsonb *config)
                                     policy_data.end_is_null,
                                     false);
 
+    if (!policy_data.include_tiered_data_isnull)
+    {
+        SetConfigOption("timescaledb.enable_tiered_reads",
+                        enable_osm_reads_old ? "on" : "off",
+                        PGC_USERSET,
+                        PGC_S_SESSION);
+    }
+
     return true;
 }
 
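Design note: instead of threading a new flag through the refresh machinery, the job temporarily overrides the `timescaledb.enable_tiered_reads` GUC around the refresh with `SetConfigOption` and restores the previously captured value afterwards; when the policy option is NULL, the GUC is never touched and the instance-wide setting applies, e.g.:

```sql
-- Instance-wide default that a policy's include_tiered_data overrides:
SET timescaledb.enable_tiered_reads = off;
```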
@@ -392,6 +416,7 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
     Oid dim_type;
     int64 refresh_start, refresh_end;
     bool start_isnull, end_isnull;
+    bool include_tiered_data, include_tiered_data_isnull;
 
     materialization_id = policy_continuous_aggregate_get_mat_hypertable_id(config);
     mat_ht = ts_hypertable_get_by_id(materialization_id);
@@ -418,6 +443,9 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
                              ts_internal_to_time_string(refresh_end, dim_type)),
                      errhint("The start of the window must be before the end.")));
+
+    include_tiered_data =
+        policy_refresh_cagg_get_include_tiered_data(config, &include_tiered_data_isnull);
 
     if (policy_data)
     {
         policy_data->refresh_window.type = dim_type;
@@ -426,6 +454,8 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
         policy_data->cagg = cagg;
         policy_data->start_is_null = start_isnull;
         policy_data->end_is_null = end_isnull;
+        policy_data->include_tiered_data = include_tiered_data;
+        policy_data->include_tiered_data_isnull = include_tiered_data_isnull;
     }
 }
 
@@ -36,7 +36,8 @@ typedef struct PolicyContinuousAggData
 {
     InternalTimeRange refresh_window;
     ContinuousAgg *cagg;
-    bool start_is_null, end_is_null;
+    bool include_tiered_data;
+    bool start_is_null, end_is_null, include_tiered_data_isnull;
 } PolicyContinuousAggData;
 
 typedef struct PolicyCompressionData
@@ -206,6 +206,8 @@ validate_and_create_policies(policies_info all_policies, bool if_exists)
     /* Create policies as required, delete the old ones if coming from alter */
     if (all_policies.refresh && all_policies.refresh->create_policy)
     {
+        NullableDatum include_tiered_data = { .isnull = true };
+
         if (all_policies.is_alter_policy)
             policy_refresh_cagg_remove_internal(all_policies.rel_oid, if_exists);
         refresh_job_id = policy_refresh_cagg_add_internal(all_policies.rel_oid,
@@ -217,7 +219,8 @@ validate_and_create_policies(policies_info all_policies, bool if_exists)
                                                           false,
                                                           false,
                                                           DT_NOBEGIN,
-                                                          NULL);
+                                                          NULL,
+                                                          include_tiered_data);
     }
     if (all_policies.compress && all_policies.compress->create_policy)
     {
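Note that this caller always passes `include_tiered_data` with `.isnull = true`, so policies created or altered through this path keep the default behavior of following the instance-wide setting.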
@@ -19,6 +19,7 @@
 #define POL_REFRESH_CONF_KEY_MAT_HYPERTABLE_ID "mat_hypertable_id"
 #define POL_REFRESH_CONF_KEY_START_OFFSET "start_offset"
 #define POL_REFRESH_CONF_KEY_END_OFFSET "end_offset"
+#define POL_REFRESH_CONF_KEY_INCLUDE_TIERED_DATA "include_tiered_data"
 
 #define POLICY_COMPRESSION_PROC_NAME "policy_compression"
 #define POLICY_COMPRESSION_CHECK_NAME "policy_compression_check"
@@ -819,6 +819,87 @@ SELECT * FROM ht_try_weekly;
 
 DROP MATERIALIZED VIEW ht_try_weekly;
 NOTICE: drop cascades to table _timescaledb_internal._hyper_6_12_chunk
+-- Test refresh policy with different settings of `include_tiered_data` parameter
+CREATE FUNCTION create_test_cagg(include_tiered_data BOOL)
+RETURNS INTEGER AS
+$$
+DECLARE
+    cfg jsonb;
+    job_id INTEGER;
+BEGIN
+    CREATE MATERIALIZED VIEW ht_try_weekly
+    WITH (timescaledb.continuous) AS
+    SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value)
+    FROM ht_try
+    GROUP BY 1
+    WITH NO DATA;
+
+    job_id := add_continuous_aggregate_policy(
+        'ht_try_weekly',
+        start_offset => NULL,
+        end_offset => INTERVAL '1 hour',
+        schedule_interval => INTERVAL '1 hour',
+        include_tiered_data => include_tiered_data
+    );
+
+    cfg := config FROM _timescaledb_config.bgw_job WHERE id = job_id;
+    RAISE NOTICE 'config: %', jsonb_pretty(cfg);
+
+    RETURN job_id;
+END
+$$ LANGUAGE plpgsql;
+-- include tiered data
+SELECT create_test_cagg(true) AS job_id \gset
+NOTICE: config: {
+    "end_offset": "@ 1 hour",
+    "start_offset": null,
+    "mat_hypertable_id": 7,
+    "include_tiered_data": true
+}
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+          ts_bucket           |          avg          
+------------------------------+-----------------------
+ Sun Dec 29 16:00:00 2019 PST | 1000.0000000000000000
+ Sun May 01 17:00:00 2022 PDT | 222.0000000000000000
+(2 rows)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE: drop cascades to 2 other objects
+-- exclude tiered data
+SELECT create_test_cagg(false) AS job_id \gset
+NOTICE: config: {
+    "end_offset": "@ 1 hour",
+    "start_offset": null,
+    "mat_hypertable_id": 8,
+    "include_tiered_data": false
+}
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+          ts_bucket           |         avg          
+------------------------------+----------------------
+ Sun May 01 17:00:00 2022 PDT | 222.0000000000000000
+(1 row)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE: drop cascades to table _timescaledb_internal._hyper_8_15_chunk
+-- default behavior: use instance-wide GUC value
+SELECT create_test_cagg(null) AS job_id \gset
+NOTICE: config: {
+    "end_offset": "@ 1 hour",
+    "start_offset": null,
+    "mat_hypertable_id": 9
+}
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+          ts_bucket           |          avg          
+------------------------------+-----------------------
+ Sun Dec 29 16:00:00 2019 PST | 1000.0000000000000000
+ Sun May 01 17:00:00 2022 PDT | 222.0000000000000000
+(2 rows)
+
+DROP MATERIALIZED VIEW ht_try_weekly;
+NOTICE: drop cascades to 2 other objects
 -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted
 -- in the chunks' targetlists on DELETE/UPDATE works (including partially
 -- compressed chunks)
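The remaining hunks in this expected-output file are mechanical renumbering: the new test creates additional hypertables and chunks, so hypertable, chunk, and dimension-slice ids later in the file shift accordingly (e.g. `_hyper_8_16_chunk` becomes `_hyper_11_21_chunk`).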
@@ -831,7 +912,7 @@ SELECT compress_chunk(show_chunks('ht_try', newer_than => '2021-01-01'::timestam
              compress_chunk              
 -----------------------------------------
  _timescaledb_internal._hyper_5_10_chunk
- _timescaledb_internal._hyper_5_13_chunk
+ _timescaledb_internal._hyper_5_18_chunk
 (2 rows)
 
 INSERT INTO ht_try VALUES ('2021-06-05 01:00', 10, 222);
@@ -934,7 +1015,7 @@ Indexes:
 Triggers:
     ts_insert_blocker BEFORE INSERT ON ht_try FOR EACH ROW EXECUTE FUNCTION _timescaledb_functions.insert_blocker()
 Child tables: _timescaledb_internal._hyper_5_10_chunk,
-              _timescaledb_internal._hyper_5_13_chunk
+              _timescaledb_internal._hyper_5_18_chunk
 
 -- verify that still can read from the table after catalog manipulations
 EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try;
@@ -942,10 +1023,10 @@ EXPLAIN (ANALYZE, COSTS OFF, TIMING OFF, SUMMARY OFF) SELECT * FROM ht_try;
 ----------------------------------------------------------------------------------
  Append (actual rows=3 loops=1)
    ->  Custom Scan (DecompressChunk) on _hyper_5_10_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_7_14_chunk (actual rows=1 loops=1)
-   ->  Custom Scan (DecompressChunk) on _hyper_5_13_chunk (actual rows=1 loops=1)
-         ->  Seq Scan on compress_hyper_7_15_chunk (actual rows=1 loops=1)
-   ->  Seq Scan on _hyper_5_13_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_10_19_chunk (actual rows=1 loops=1)
+   ->  Custom Scan (DecompressChunk) on _hyper_5_18_chunk (actual rows=1 loops=1)
+         ->  Seq Scan on compress_hyper_10_20_chunk (actual rows=1 loops=1)
+   ->  Seq Scan on _hyper_5_18_chunk (actual rows=1 loops=1)
 (6 rows)
 
 ROLLBACK;
@@ -996,9 +1077,9 @@ RESTRICT
 ,CHECK ( temp > 10)
 );
 SELECT create_hypertable('hyper_constr', 'time', chunk_time_interval => 10);
 create_hypertable
----------------------------
- (8,public,hyper_constr,t)
+----------------------------
+ (11,public,hyper_constr,t)
 (1 row)
 
 INSERT INTO hyper_constr VALUES( 10, 200, 22, 1, 111, 44);
@@ -1031,7 +1112,7 @@ WHERE hypertable_id IN (SELECT id from _timescaledb_catalog.hypertable
 ORDER BY table_name;
 table_name | status | osm_chunk
 --------------------+--------+-----------
- _hyper_8_16_chunk | 0 | f
+ _hyper_11_21_chunk | 0 | f
 child_hyper_constr | 0 | t
 (2 rows)
 
@@ -1119,15 +1200,15 @@ where hypertable_id = (Select id from _timescaledb_catalog.hypertable where tabl
 ORDER BY id;
 id | table_name
 ----+--------------------
- 16 | _hyper_8_16_chunk
- 17 | child_hyper_constr
+ 21 | _hyper_11_21_chunk
+ 22 | child_hyper_constr
 (2 rows)
 
 -- show_chunks will not show the OSM chunk which is visible via the above query
 SELECT show_chunks('hyper_constr');
 show_chunks
------------------------------------------
- _timescaledb_internal._hyper_8_16_chunk
+------------------------------------------
+ _timescaledb_internal._hyper_11_21_chunk
 (1 row)
 
 ROLLBACK;
@@ -1157,9 +1238,9 @@ CREATE TABLE test1.copy_test (
     "value" double precision NOT NULL
 );
 SELECT create_hypertable('test1.copy_test', 'time', chunk_time_interval => interval '1 day');
 create_hypertable
------------------------
- (9,test1,copy_test,t)
+------------------------
+ (12,test1,copy_test,t)
 (1 row)
 
 COPY test1.copy_test FROM STDIN DELIMITER ',';
@@ -1178,15 +1259,15 @@ SELECT _timescaledb_functions.freeze_chunk( :'COPY_CHNAME');
 -- Check state
 SELECT table_name, status
 FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
 table_name | status
--------------------+--------
- _hyper_9_18_chunk | 4
+--------------------+--------
+ _hyper_12_23_chunk | 4
 (1 row)
 
 \set ON_ERROR_STOP 0
 -- Copy should fail because one of che chunks is frozen
 COPY test1.copy_test FROM STDIN DELIMITER ',';
-ERROR: cannot INSERT into frozen chunk "_hyper_9_18_chunk"
+ERROR: cannot INSERT into frozen chunk "_hyper_12_23_chunk"
 \set ON_ERROR_STOP 1
 -- Count existing rows
 SELECT COUNT(*) FROM test1.copy_test;
@@ -1198,15 +1279,15 @@ SELECT COUNT(*) FROM test1.copy_test;
 -- Check state
 SELECT table_name, status
 FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
 table_name | status
--------------------+--------
- _hyper_9_18_chunk | 4
+--------------------+--------
+ _hyper_12_23_chunk | 4
 (1 row)
 
 \set ON_ERROR_STOP 0
 -- Copy should fail because one of che chunks is frozen
 COPY test1.copy_test FROM STDIN DELIMITER ',';
-ERROR: cannot INSERT into frozen chunk "_hyper_9_18_chunk"
+ERROR: cannot INSERT into frozen chunk "_hyper_12_23_chunk"
 \set ON_ERROR_STOP 1
 -- Count existing rows
 SELECT COUNT(*) FROM test1.copy_test;
@@ -1225,9 +1306,9 @@ SELECT _timescaledb_functions.unfreeze_chunk( :'COPY_CHNAME');
 -- Check state
 SELECT table_name, status
 FROM _timescaledb_catalog.chunk WHERE table_name = :'COPY_CHUNK_NAME';
 table_name | status
--------------------+--------
- _hyper_9_18_chunk | 0
+--------------------+--------
+ _hyper_12_23_chunk | 0
 (1 row)
 
 -- Copy should work now
@@ -1330,12 +1411,12 @@ WHERE ht.table_name LIKE 'osm%'
 ORDER BY 2,3;
 table_name | id | dimension_id | range_start | range_end
 ------------+----+--------------+---------------------+---------------------
- osm_int2 | 17 | 8 | 9223372036854775806 | 9223372036854775807
- osm_int4 | 18 | 9 | 9223372036854775806 | 9223372036854775807
- osm_int8 | 19 | 10 | 9223372036854775806 | 9223372036854775807
- osm_date | 20 | 11 | 9223372036854775806 | 9223372036854775807
- osm_ts | 21 | 12 | 9223372036854775806 | 9223372036854775807
- osm_tstz | 22 | 13 | 9223372036854775806 | 9223372036854775807
+ osm_int2 | 22 | 11 | 9223372036854775806 | 9223372036854775807
+ osm_int4 | 23 | 12 | 9223372036854775806 | 9223372036854775807
+ osm_int8 | 24 | 13 | 9223372036854775806 | 9223372036854775807
+ osm_date | 25 | 14 | 9223372036854775806 | 9223372036854775807
+ osm_ts | 26 | 15 | 9223372036854775806 | 9223372036854775807
+ osm_tstz | 27 | 16 | 9223372036854775806 | 9223372036854775807
 (6 rows)
 
 -- test that correct slice is found and updated for table with multiple chunk constraints
@@ -1348,8 +1429,8 @@ _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc WHERE c.h
 AND c.id = cc.chunk_id;
 id | hypertable_id | schema_name | table_name | compressed_chunk_id | dropped | status | osm_chunk | chunk_id | dimension_slice_id | constraint_name | hypertable_constraint_name
 ----+---------------+-----------------------+--------------------+---------------------+---------+--------+-----------+----------+--------------------+-----------------------------+----------------------------
- 26 | 16 | _timescaledb_internal | _hyper_16_26_chunk | | f | 0 | f | 26 | | 26_5_test_multicon_time_key | test_multicon_time_key
- 26 | 16 | _timescaledb_internal | _hyper_16_26_chunk | | f | 0 | f | 26 | 23 | constraint_23 |
+ 31 | 19 | _timescaledb_internal | _hyper_19_31_chunk | | f | 0 | f | 31 | | 31_5_test_multicon_time_key | test_multicon_time_key
+ 31 | 19 | _timescaledb_internal | _hyper_19_31_chunk | | f | 0 | f | 31 | 28 | constraint_28 |
 (2 rows)
 
 \c :TEST_DBNAME :ROLE_SUPERUSER ;
@@ -1367,7 +1448,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id;
 chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end
 ----------+--------------------+--------+-----------+--------------------+------------------+------------------
- 26 | _hyper_16_26_chunk | 0 | t | 23 | 1577955600000000 | 1578128400000000
+ 31 | _hyper_19_31_chunk | 0 | t | 28 | 1577955600000000 | 1578128400000000
 (1 row)
 
 -- check that range was reset to default - infinity
@@ -1395,7 +1476,7 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end
 ----------+--------------------+--------+-----------+--------------------+---------------------+---------------------
- 26 | _hyper_16_26_chunk | 0 | t | 23 | 9223372036854775806 | 9223372036854775807
+ 31 | _hyper_19_31_chunk | 0 | t | 28 | 9223372036854775806 | 9223372036854775807
 (1 row)
 
 -- TEST for orderedappend that depends on hypertable_osm_range_update functionality
@@ -1420,9 +1501,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end
 ----------+-------------------------+--------+-----------+--------------------+---------------------+---------------------
- 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000
- 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000
- 29 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807
+ 32 | _hyper_20_32_chunk | 0 | f | 29 | 1577836800000000 | 1577923200000000
+ 33 | _hyper_20_33_chunk | 0 | f | 30 | 1577923200000000 | 1578009600000000
+ 34 | test_chunkapp_fdw_child | 0 | t | 31 | 9223372036854775806 | 9223372036854775807
 (3 rows)
 
 -- attempt to update overlapping range, should fail
@@ -1443,9 +1524,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end
 ----------+-------------------------+--------+-----------+--------------------+------------------+------------------
- 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000
- 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000
- 29 | test_chunkapp_fdw_child | 0 | t | 26 | 1578038400000000 | 1578124800000000
+ 32 | _hyper_20_32_chunk | 0 | f | 29 | 1577836800000000 | 1577923200000000
+ 33 | _hyper_20_33_chunk | 0 | f | 30 | 1577923200000000 | 1578009600000000
+ 34 | test_chunkapp_fdw_child | 0 | t | 31 | 1578038400000000 | 1578124800000000
 (3 rows)
 
 -- ordered append should be possible as ranges do not overlap
@@ -1454,8 +1535,8 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1496,9 +1577,9 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp',empty:
 QUERY PLAN
 -------------------------------------------------------------------------------------------------
  Merge Append
-   Sort Key: _hyper_17_27_chunk."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   Sort Key: _hyper_20_32_chunk."time"
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1515,9 +1596,9 @@ FROM _timescaledb_catalog.chunk c, _timescaledb_catalog.chunk_constraint cc, _ti
 WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_slice_id ORDER BY cc.chunk_id;
 chunk_id | table_name | status | osm_chunk | dimension_slice_id | range_start | range_end
 ----------+-------------------------+--------+-----------+--------------------+---------------------+---------------------
- 27 | _hyper_17_27_chunk | 0 | f | 24 | 1577836800000000 | 1577923200000000
- 28 | _hyper_17_28_chunk | 0 | f | 25 | 1577923200000000 | 1578009600000000
- 29 | test_chunkapp_fdw_child | 0 | t | 26 | 9223372036854775806 | 9223372036854775807
+ 32 | _hyper_20_32_chunk | 0 | f | 29 | 1577836800000000 | 1577923200000000
+ 33 | _hyper_20_33_chunk | 0 | f | 30 | 1577923200000000 | 1578009600000000
+ 34 | test_chunkapp_fdw_child | 0 | t | 31 | 9223372036854775806 | 9223372036854775807
 (3 rows)
 
 -- but also, OSM chunk should be included in the scan, since range is invalid and chunk is not empty
@@ -1525,10 +1606,10 @@ WHERE c.hypertable_id = :htid AND cc.chunk_id = c.id AND ds.id = cc.dimension_sl
 QUERY PLAN
 -------------------------------------------------------------------------------------------------
  Merge Append
-   Sort Key: _hyper_17_27_chunk."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
+   Sort Key: _hyper_20_32_chunk."time"
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
    ->  Foreign Scan on test_chunkapp_fdw_child
 (7 rows)
@@ -1556,8 +1637,8 @@ SELECT _timescaledb_functions.hypertable_osm_range_update('test_chunkapp', NULL:
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
    ->  Foreign Scan on test_chunkapp_fdw_child
 (5 rows)
 
@@ -1574,9 +1655,9 @@ SELECT * FROM test_chunkapp ORDER BY 1;
 -------------------------------------------------------------------------------------------------
  Custom Scan (ChunkAppend) on test_chunkapp
    Order: test_chunkapp."time"
-   ->  Index Scan Backward using _hyper_17_27_chunk_test_chunkapp_time_idx on _hyper_17_27_chunk
+   ->  Index Scan Backward using _hyper_20_32_chunk_test_chunkapp_time_idx on _hyper_20_32_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
-   ->  Index Scan Backward using _hyper_17_28_chunk_test_chunkapp_time_idx on _hyper_17_28_chunk
+   ->  Index Scan Backward using _hyper_20_33_chunk_test_chunkapp_time_idx on _hyper_20_33_chunk
         Index Cond: ("time" < 'Sun Jan 01 00:00:00 2023 PST'::timestamp with time zone)
 (6 rows)
 
@@ -1613,7 +1694,7 @@ CREATE TABLE test2(time timestamptz not null, a int);
 SELECT create_hypertable('test2', 'time');
 create_hypertable
 ---------------------
- (18,public,test2,t)
+ (21,public,test2,t)
 (1 row)
 
 INSERT INTO test2 VALUES ('2020-01-01'::timestamptz, 1);
@@ -1624,7 +1705,7 @@ psql:include/chunk_utils_internal_orderedappend.sql:138: NOTICE: default order
 SELECT compress_chunk(show_chunks('test2'));
 compress_chunk
 ------------------------------------------
- _timescaledb_internal._hyper_18_30_chunk
+ _timescaledb_internal._hyper_21_35_chunk
 (1 row)
 
 -- find internal compression table, call API function on it
@@ -1633,7 +1714,7 @@ FROM _timescaledb_catalog.hypertable ht, _timescaledb_catalog.hypertable cht
 WHERE ht.table_name = 'test2' and cht.id = ht.compressed_hypertable_id \gset
 \set ON_ERROR_STOP 0
 SELECT _timescaledb_functions.hypertable_osm_range_update(:'COMPRESSION_TBLNM'::regclass, '2020-01-01'::timestamptz);
-psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_19
+psql:include/chunk_utils_internal_orderedappend.sql:145: ERROR: could not find time dimension for hypertable _timescaledb_internal._compressed_hypertable_22
 \set ON_ERROR_STOP 1
 -- test wrong/incompatible data types with hypertable time dimension
 -- update range of int2 with int4
@@ -213,7 +213,7 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
 ts_now_mock()
 add_columnstore_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
 add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,boolean)
-add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text)
+add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text,boolean)
 add_dimension(regclass,_timescaledb_internal.dimension_info,boolean)
 add_dimension(regclass,name,integer,anyelement,regproc,boolean)
 add_job(regproc,interval,jsonb,timestamp with time zone,boolean,regproc,boolean,text)
@@ -440,6 +440,54 @@ CALL refresh_continuous_aggregate('ht_try_weekly', '2019-12-29', '2020-01-10', f
 SELECT * FROM ht_try_weekly;
 DROP MATERIALIZED VIEW ht_try_weekly;
 
+-- Test refresh policy with different settings of `include_tiered_data` parameter
+CREATE FUNCTION create_test_cagg(include_tiered_data BOOL)
+RETURNS INTEGER AS
+$$
+DECLARE
+    cfg jsonb;
+    job_id INTEGER;
+BEGIN
+    CREATE MATERIALIZED VIEW ht_try_weekly
+    WITH (timescaledb.continuous) AS
+    SELECT time_bucket(interval '1 week', timec) AS ts_bucket, avg(value)
+    FROM ht_try
+    GROUP BY 1
+    WITH NO DATA;
+
+    job_id := add_continuous_aggregate_policy(
+        'ht_try_weekly',
+        start_offset => NULL,
+        end_offset => INTERVAL '1 hour',
+        schedule_interval => INTERVAL '1 hour',
+        include_tiered_data => include_tiered_data
+    );
+
+    cfg := config FROM _timescaledb_config.bgw_job WHERE id = job_id;
+    RAISE NOTICE 'config: %', jsonb_pretty(cfg);
+
+    RETURN job_id;
+END
+$$ LANGUAGE plpgsql;
+
+-- include tiered data
+SELECT create_test_cagg(true) AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
+-- exclude tiered data
+SELECT create_test_cagg(false) AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
+-- default behavior: use instance-wide GUC value
+SELECT create_test_cagg(null) AS job_id \gset
+CALL run_job(:job_id);
+SELECT * FROM ht_try_weekly ORDER BY 1;
+DROP MATERIALIZED VIEW ht_try_weekly;
+
 -- This test verifies that a bugfix regarding the way `ROWID_VAR`s are adjusted
 -- in the chunks' targetlists on DELETE/UPDATE works (including partially
 -- compressed chunks)