Use plpgsql procedure for executing compression policy
This PR removes the C code that executes the compression policy and replaces it with a PL/pgSQL procedure. PG 13.4 and PG 12.8 introduced changes that require a PortalContext while executing transactions, and the compression policy procedure compresses chunks across multiple transactions. Due to those changes we have seen issues with snapshot and portal management in the policy code. The SPI API already provides transaction-portal management, but the compression policy code did not go through SPI. It is far simpler to convert the policy into a PL/pgSQL procedure, which calls SPI, than to replicate the portal management code in C in order to run multiple transactions inside the policy.

This PR also disallows decompress_chunk, compress_chunk and recompress_chunk in a read-only transaction.

Fixes #3656
commit fffd6c2350 (parent c55cbb9350)
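For illustration, the new read-only restriction behaves as follows (a hypothetical session; the chunk name is made up, but the error text matches the regression tests further down):

SET default_transaction_read_only TO on;
SELECT compress_chunk('_timescaledb_internal._hyper_1_1_chunk');
-- ERROR: cannot execute compress_chunk() in a read-only transaction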
@@ -10,10 +10,6 @@ CREATE OR REPLACE PROCEDURE _timescaledb_internal.policy_reorder(job_id INTEGER,
AS '@MODULE_PATHNAME@', 'ts_policy_reorder_proc'
LANGUAGE C;

-CREATE OR REPLACE PROCEDURE _timescaledb_internal.policy_compression(job_id INTEGER, config JSONB)
-AS '@MODULE_PATHNAME@', 'ts_policy_compression_proc'
-LANGUAGE C;
-
CREATE OR REPLACE PROCEDURE _timescaledb_internal.policy_recompression(job_id INTEGER, config JSONB)
AS '@MODULE_PATHNAME@', 'ts_policy_recompression_proc'
LANGUAGE C;
@@ -21,3 +17,165 @@ LANGUAGE C;
CREATE OR REPLACE PROCEDURE _timescaledb_internal.policy_refresh_continuous_aggregate(job_id INTEGER, config JSONB)
AS '@MODULE_PATHNAME@', 'ts_policy_refresh_cagg_proc'
LANGUAGE C;

CREATE OR REPLACE PROCEDURE
_timescaledb_internal.policy_compression_interval(job_id INTEGER,
                                                  htid INTEGER,
                                                  lag INTERVAL,
                                                  maxchunks INTEGER,
                                                  verbose_log BOOLEAN,
                                                  recompress_enabled BOOLEAN)
AS $$
DECLARE
  htoid regclass;
  chunk_rec record;
  numchunks integer := 0;
BEGIN

  SELECT format('%I.%I', schema_name, table_name) INTO htoid
  FROM _timescaledb_catalog.hypertable
  WHERE id = htid;

  FOR chunk_rec IN
    SELECT show.oid, ch.schema_name, ch.table_name, ch.status
    FROM show_chunks(htoid, older_than => lag) AS show(oid)
    INNER JOIN pg_class pgc ON pgc.oid = show.oid
    INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid
    INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid
    WHERE ch.dropped IS FALSE AND (ch.status = 0 OR ch.status = 3)
  LOOP
    IF chunk_rec.status = 0 THEN
      PERFORM compress_chunk(chunk_rec.oid);
    ELSIF chunk_rec.status = 3 AND recompress_enabled THEN
      PERFORM recompress_chunk(chunk_rec.oid);
    END IF;
    COMMIT;
    IF verbose_log THEN
      RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name;
    END IF;
    numchunks := numchunks + 1;
    IF maxchunks > 0 AND numchunks >= maxchunks THEN
      EXIT;
    END IF;
  END LOOP;
END;
$$ LANGUAGE PLPGSQL;

CREATE OR REPLACE PROCEDURE
_timescaledb_internal.policy_compression_integer(job_id INTEGER,
                                                 htid INTEGER,
                                                 lag BIGINT,
                                                 maxchunks INTEGER,
                                                 verbose_log BOOLEAN,
                                                 recompress_enabled BOOLEAN)
AS $$
DECLARE
  htoid regclass;
  chunk_rec record;
  numchunks integer := 0;
  lag_integer BIGINT;
BEGIN

  SELECT format('%I.%I', schema_name, table_name) INTO htoid
  FROM _timescaledb_catalog.hypertable
  WHERE id = htid;

  -- For the integer case, we have to compute the lag w.r.t.
  -- the integer_now function and then pass it on to show_chunks.
  lag_integer := _timescaledb_internal.subtract_integer_from_now(htoid, lag);

  FOR chunk_rec IN
    SELECT show.oid, ch.schema_name, ch.table_name, ch.status
    FROM show_chunks(htoid, older_than => lag_integer) AS show(oid)
    INNER JOIN pg_class pgc ON pgc.oid = show.oid
    INNER JOIN pg_namespace pgns ON pgc.relnamespace = pgns.oid
    INNER JOIN _timescaledb_catalog.chunk ch ON ch.table_name = pgc.relname AND ch.schema_name = pgns.nspname AND ch.hypertable_id = htid
    WHERE ch.dropped IS FALSE AND (ch.status = 0 OR ch.status = 3)
  LOOP
    IF chunk_rec.status = 0 THEN
      PERFORM compress_chunk(chunk_rec.oid);
    ELSIF chunk_rec.status = 3 AND recompress_enabled THEN
      PERFORM recompress_chunk(chunk_rec.oid);
    END IF;
    COMMIT;
    IF verbose_log THEN
      RAISE LOG 'job % completed processing chunk %.%', job_id, chunk_rec.schema_name, chunk_rec.table_name;
    END IF;

    numchunks := numchunks + 1;
    IF maxchunks > 0 AND numchunks >= maxchunks THEN
      EXIT;
    END IF;
  END LOOP;
END;
$$ LANGUAGE PLPGSQL;

CREATE OR REPLACE PROCEDURE
_timescaledb_internal.policy_compression(job_id INTEGER, config JSONB)
AS $$
DECLARE
  dimtype regtype;
  compress_after text;
  lag_interval interval;
  lag_integer bigint;
  htid integer;
  htoid regclass;
  chunk_rec record;
  verbose_log bool;
  maxchunks integer := 0;
  numchunks integer := 1;
  recompress_enabled bool;
BEGIN
  IF config IS NULL THEN
    RAISE EXCEPTION 'job % has null config', job_id;
  END IF;

  htid := jsonb_object_field_text(config, 'hypertable_id')::integer;
  IF htid IS NULL THEN
    RAISE EXCEPTION 'job % config must have hypertable_id', job_id;
  END IF;

  verbose_log := jsonb_object_field_text(config, 'verbose_log')::boolean;
  IF verbose_log IS NULL THEN
    verbose_log := false;
  END IF;

  maxchunks := jsonb_object_field_text(config, 'maxchunks_to_compress')::integer;
  IF maxchunks IS NULL THEN
    maxchunks := 0;
  END IF;

  recompress_enabled := jsonb_object_field_text(config, 'recompress')::boolean;
  IF recompress_enabled IS NULL THEN
    recompress_enabled := true;
  END IF;

  compress_after := jsonb_object_field_text(config, 'compress_after');
  IF compress_after IS NULL THEN
    RAISE EXCEPTION 'job % config must have compress_after', job_id;
  END IF;

  -- find primary dimension type --
  SELECT column_type INTO STRICT dimtype
  FROM ( SELECT ht.schema_name, ht.table_name, dim.column_name, dim.column_type,
                row_number() OVER (PARTITION BY ht.id ORDER BY dim.id) AS rn
         FROM _timescaledb_catalog.hypertable ht,
              _timescaledb_catalog.dimension dim
         WHERE ht.id = dim.hypertable_id AND ht.id = htid ) q
  WHERE rn = 1;

  CASE WHEN (dimtype = 'TIMESTAMP'::regtype
             OR dimtype = 'TIMESTAMPTZ'::regtype
             OR dimtype = 'DATE'::regtype) THEN
    lag_interval := jsonb_object_field_text(config, 'compress_after')::interval;
    CALL _timescaledb_internal.policy_compression_interval(
        job_id, htid, lag_interval,
        maxchunks, verbose_log, recompress_enabled);
  ELSE
    lag_integer := jsonb_object_field_text(config, 'compress_after')::bigint;
    CALL _timescaledb_internal.policy_compression_integer(
        job_id, htid, lag_integer,
        maxchunks, verbose_log, recompress_enabled);
  END CASE;
END;
$$ LANGUAGE PLPGSQL;
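A usage sketch of the new entry point (the job id and config values here are illustrative; hypertable_id and compress_after are the required keys validated above):

CALL _timescaledb_internal.policy_compression(
    1000, '{"hypertable_id": 1, "compress_after": "30 days"}');

For an integer time dimension, compress_after is an integer instead, and the CASE statement above dispatches to policy_compression_integer.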
@@ -1,2 +1,5 @@
DROP FUNCTION IF EXISTS _timescaledb_internal.time_col_name_for_chunk(name,name);
DROP FUNCTION IF EXISTS _timescaledb_internal.time_col_type_for_chunk(name,name);

CREATE OR REPLACE FUNCTION _timescaledb_internal.subtract_integer_from_now( hypertable_relid REGCLASS, lag INT8 )
RETURNS INT8 AS '@MODULE_PATHNAME@', 'ts_subtract_integer_from_now' LANGUAGE C STABLE STRICT;
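Assuming an integer_now function like the dummy_now() used in the tests below, which always returns 5, the new helper computes integer_now() - lag:

SELECT _timescaledb_internal.subtract_integer_from_now('test_table_int', 2);
-- returns 3, i.e. dummy_now() - 2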
@@ -1,2 +1,9 @@
DROP FUNCTION IF EXISTS timescaledb_experimental.time_bucket_ng(bucket_width INTERVAL, ts TIMESTAMPTZ, timezone TEXT);
DROP FUNCTION IF EXISTS timescaledb_experimental.time_bucket_ng(bucket_width INTERVAL, ts TIMESTAMPTZ, origin TIMESTAMPTZ, timezone TEXT);

DROP FUNCTION IF EXISTS _timescaledb_internal.subtract_integer_from_now;

-- changes for compression policy --
DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression;
DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_interval;
DROP PROCEDURE IF EXISTS _timescaledb_internal.policy_compression_integer;
@@ -22,7 +22,6 @@

/* bgw policy functions */
CROSSMODULE_WRAPPER(policy_compression_add);
-CROSSMODULE_WRAPPER(policy_compression_proc);
CROSSMODULE_WRAPPER(policy_compression_remove);
CROSSMODULE_WRAPPER(policy_recompression_proc);
CROSSMODULE_WRAPPER(policy_refresh_cagg_add);
@@ -315,7 +314,6 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {

	/* bgw policies */
	.policy_compression_add = error_no_default_fn_pg_community,
-	.policy_compression_proc = error_no_default_fn_pg_community,
	.policy_compression_remove = error_no_default_fn_pg_community,
	.policy_recompression_proc = error_no_default_fn_pg_community,
	.policy_refresh_cagg_add = error_no_default_fn_pg_community,
@@ -42,7 +42,6 @@ typedef struct CrossModuleFunctions
	void (*add_tsl_telemetry_info)(JsonbParseState **parse_state);

	PGFunction policy_compression_add;
-	PGFunction policy_compression_proc;
	PGFunction policy_compression_remove;
	PGFunction policy_recompression_proc;
	PGFunction policy_refresh_cagg_add;
src/utils.c
@@ -825,8 +825,15 @@ ts_get_integer_now_func(const Dimension *open_dim)
	return now_func;
}

+/* Subtract the passed-in interval from now.
+ * Arguments:
+ *   now_func : function used to compute now.
+ *   interval : integer value
+ * Returns:
+ *   now_func() - interval
+ */
int64
-subtract_integer_from_now(int64 interval, Oid time_dim_type, Oid now_func)
+ts_sub_integer_from_now(int64 interval, Oid time_dim_type, Oid now_func)
{
	Datum now;
	int64 res;
@@ -882,7 +889,7 @@ ts_subtract_integer_from_now(PG_FUNCTION_ARGS)
	if (now_func == InvalidOid)
		elog(ERROR, "could not find valid integer_now function for hypertable");
	Assert(IS_INTEGER_TYPE(partitioning_type));
-	int64 res = subtract_integer_from_now(lag, partitioning_type, now_func);
+	int64 res = ts_sub_integer_from_now(lag, partitioning_type, now_func);
	ts_cache_release(hcache);
	return Int64GetDatum(res);
}
@@ -81,7 +81,7 @@ extern TSDLLEXPORT Oid ts_get_cast_func(Oid source, Oid target);
typedef struct Dimension Dimension;

extern TSDLLEXPORT Oid ts_get_integer_now_func(const Dimension *open_dim);
-extern TSDLLEXPORT int64 subtract_integer_from_now(int64 interval, Oid time_dim_type, Oid now_func);
+extern TSDLLEXPORT int64 ts_sub_integer_from_now(int64 interval, Oid time_dim_type, Oid now_func);

extern TSDLLEXPORT void *ts_create_struct_from_slot(TupleTableSlot *slot, MemoryContext mctx,
													size_t alloc_size, size_t copy_size);
@@ -138,19 +138,6 @@ policy_recompression_get_recompress_after_interval(const Jsonb *config)
	return interval;
}

-Datum
-policy_compression_proc(PG_FUNCTION_ARGS)
-{
-	if (PG_NARGS() != 2 || PG_ARGISNULL(0) || PG_ARGISNULL(1))
-		PG_RETURN_VOID();
-
-	TS_PREVENT_FUNC_IF_READ_ONLY();
-
-	policy_compression_execute(PG_GETARG_INT32(0), PG_GETARG_JSONB_P(1));
-
-	PG_RETURN_VOID();
-}
-
Datum
policy_recompression_proc(PG_FUNCTION_ARGS)
{
@@ -121,7 +121,7 @@ get_window_boundary(const Dimension *dim, const Jsonb *config, int64 (*int_gette

		Assert(now_func);

-		res = subtract_integer_from_now(lag, partitioning_type, now_func);
+		res = ts_sub_integer_from_now(lag, partitioning_type, now_func);
		return Int64GetDatum(res);
	}
	else
@@ -131,33 +131,6 @@ get_window_boundary(const Dimension *dim, const Jsonb *config, int64 (*int_gette
	}
}

-static List *
-get_chunk_to_compress(const Dimension *dim, const Jsonb *config)
-{
-	Oid partitioning_type = ts_dimension_get_partition_type(dim);
-	StrategyNumber end_strategy = BTLessStrategyNumber;
-	bool recompress = policy_compression_get_recompress(config);
-	/* numchunks = 0 if the config does not specify it. This means there is no
-	 * limit.
-	 */
-	int32 numchunks = policy_compression_get_maxchunks_per_job(config);
-
-	Datum boundary = get_window_boundary(dim,
-										 config,
-										 policy_compression_get_compress_after_int,
-										 policy_compression_get_compress_after_interval);
-
-	return ts_dimension_slice_get_chunkids_to_compress(dim->fd.id,
-													   InvalidStrategy, /*start_strategy*/
-													   -1, /*start_value*/
-													   end_strategy,
-													   ts_time_value_to_internal(boundary,
-																				 partitioning_type),
-													   true,
-													   recompress,
-													   numchunks);
-}
-
static List *
get_chunk_to_recompress(const Dimension *dim, const Jsonb *config)
{
@@ -407,63 +380,6 @@ policy_refresh_cagg_read_and_validate_config(Jsonb *config, PolicyContinuousAggD
	}
}

-/*
- * Invoke compress_chunk via fmgr so that the call can be deparsed and sent to
- * remote data nodes.
- */
-static void
-policy_invoke_compress_chunk(Chunk *chunk)
-{
-	EState *estate;
-	ExprContext *econtext;
-	FuncExpr *fexpr;
-	Oid relid = chunk->table_id;
-	Oid restype;
-	Oid func_oid;
-	List *args = NIL;
-	int i;
-	bool isnull;
-	Const *argarr[COMPRESS_CHUNK_NARGS] = {
-		makeConst(REGCLASSOID,
-				  -1,
-				  InvalidOid,
-				  sizeof(relid),
-				  ObjectIdGetDatum(relid),
-				  false,
-				  false),
-		castNode(Const, makeBoolConst(true, false)),
-	};
-	Oid type_id[COMPRESS_CHUNK_NARGS] = { REGCLASSOID, BOOLOID };
-	char *schema_name = ts_extension_schema_name();
-	List *fqn = list_make2(makeString(schema_name), makeString(COMPRESS_CHUNK_FUNCNAME));
-
-	StaticAssertStmt(lengthof(type_id) == lengthof(argarr),
-					 "argarr and type_id should have matching lengths");
-
-	func_oid = LookupFuncName(fqn, lengthof(type_id), type_id, false);
-	Assert(func_oid); /* LookupFuncName should not return an invalid OID */
-
-	/* Prepare the function expr with argument list */
-	get_func_result_type(func_oid, &restype, NULL);
-
-	for (i = 0; i < lengthof(argarr); i++)
-		args = lappend(args, argarr[i]);
-
-	fexpr = makeFuncExpr(func_oid, restype, args, InvalidOid, InvalidOid, COERCE_EXPLICIT_CALL);
-	fexpr->funcretset = false;
-
-	estate = CreateExecutorState();
-	econtext = CreateExprContext(estate);
-
-	ExprState *exprstate = ExecInitExpr(&fexpr->xpr, NULL);
-
-	ExecEvalExprSwitchContext(exprstate, econtext, &isnull);
-
-	/* Cleanup */
-	FreeExprContext(econtext, false);
-	FreeExecutorState(estate);
-}
-
/*
 * Invoke recompress_chunk via fmgr so that the call can be deparsed and sent to
 * remote data nodes.
@@ -521,110 +437,6 @@ policy_invoke_recompress_chunk(Chunk *chunk)
	FreeExecutorState(estate);
}

-bool
-policy_compression_execute(int32 job_id, Jsonb *config)
-{
-	List *chunkid_lst;
-	ListCell *lc;
-	const Dimension *dim;
-	PolicyCompressionData policy_data;
-	bool distributed, used_portalcxt = false, verbose_log;
-	MemoryContext saved_cxt, multitxn_cxt;
-
-	policy_compression_read_and_validate_config(config, &policy_data);
-	dim = hyperspace_get_open_dimension(policy_data.hypertable->space, 0);
-	distributed = hypertable_is_distributed(policy_data.hypertable);
-	verbose_log = policy_compression_get_verbose_log(config);
-	/* we want the chunk id list to survive across transactions. So alloc in
-	 * a different context
-	 */
-	if (PortalContext)
-	{
-		/* if we have a portal context use that - it will get freed automatically */
-		multitxn_cxt = PortalContext;
-		used_portalcxt = true;
-	}
-	else
-	{
-		/* background worker job does not go via usual CALL path, so we do
-		 * not have a PortalContext */
-		multitxn_cxt =
-			AllocSetContextCreate(TopMemoryContext, "CompressionJobCxt", ALLOCSET_DEFAULT_SIZES);
-	}
-	saved_cxt = MemoryContextSwitchTo(multitxn_cxt);
-	chunkid_lst = get_chunk_to_compress(dim, config);
-	MemoryContextSwitchTo(saved_cxt);
-
-	if (!chunkid_lst)
-	{
-		elog(NOTICE,
-			 "no chunks for hypertable %s.%s that satisfy compress chunk policy",
-			 policy_data.hypertable->fd.schema_name.data,
-			 policy_data.hypertable->fd.table_name.data);
-		ts_cache_release(policy_data.hcache);
-		if (!used_portalcxt)
-			MemoryContextDelete(multitxn_cxt);
-		return true;
-	}
-	ts_cache_release(policy_data.hcache);
-	if (ActiveSnapshotSet())
-	{
-		/* we have at least 1 chunk that needs processing and will commit the
-		 * current txn (below) and start a new one to process the chunk. Any
-		 * active snapshot has to be popped before we can commit
-		 */
-		PopActiveSnapshot();
-	}
-	/* process each chunk in a new transaction */
-	int total_chunks = list_length(chunkid_lst), num_chunks = 0;
-	foreach (lc, chunkid_lst)
-	{
-		CommitTransactionCommand();
-		StartTransactionCommand();
-		int32 chunkid = lfirst_int(lc);
-		Chunk *chunk = ts_chunk_get_by_id(chunkid, true);
-		num_chunks++;
-		if (!chunk || !ts_chunk_is_uncompressed_or_unordered(chunk))
-		{
-			continue;
-		}
-		StringInfo query = makeStringInfo();
-		appendStringInfo(query,
-						 "compressing chunk %s.%s() , completed %d out of %d",
-						 quote_identifier(NameStr(chunk->fd.schema_name)),
-						 quote_identifier(NameStr(chunk->fd.table_name)),
-						 (num_chunks - 1),
-						 total_chunks);
-		pgstat_report_activity(STATE_RUNNING, query->data);
-
-		if (distributed)
-		{
-			if (ts_chunk_is_unordered(chunk))
-				policy_invoke_recompress_chunk(chunk);
-			else
-				policy_invoke_compress_chunk(chunk);
-		}
-		else
-		{
-			if (ts_chunk_is_unordered(chunk))
-				tsl_recompress_chunk_wrapper(chunk);
-			else
-				tsl_compress_chunk_wrapper(chunk, true);
-		}
-		if (verbose_log)
-			elog(LOG,
-				 "job %d completed compressing chunk %s.%s",
-				 job_id,
-				 NameStr(chunk->fd.schema_name),
-				 NameStr(chunk->fd.table_name));
-	}
-
-	if (!used_portalcxt)
-		MemoryContextDelete(multitxn_cxt);
-	elog(DEBUG1, "job %d completed compressing chunk", job_id);
-	return true;
-}
-
/* Read configuration for compression job from config object. */
void
policy_compression_read_and_validate_config(Jsonb *config, PolicyCompressionData *policy_data)
@@ -51,7 +51,6 @@ typedef void (*reorder_func)(Oid tableOid, Oid indexOid, bool verbose, Oid wait_
extern bool policy_reorder_execute(int32 job_id, Jsonb *config);
extern bool policy_retention_execute(int32 job_id, Jsonb *config);
extern bool policy_refresh_cagg_execute(int32 job_id, Jsonb *config);
-extern bool policy_compression_execute(int32 job_id, Jsonb *config);
extern bool policy_recompression_execute(int32 job_id, Jsonb *config);
extern void policy_reorder_read_and_validate_config(Jsonb *config, PolicyReorderData *policy_data);
extern void policy_retention_read_and_validate_config(Jsonb *config,
@@ -439,6 +439,7 @@ tsl_compress_chunk(PG_FUNCTION_ARGS)
{
	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
	bool if_not_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
+	TS_PREVENT_FUNC_IF_READ_ONLY();
	Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);

	if (chunk->relkind == RELKIND_FOREIGN_TABLE)
@@ -468,6 +469,7 @@ tsl_decompress_chunk(PG_FUNCTION_ARGS)
{
	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
	bool if_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
+	TS_PREVENT_FUNC_IF_READ_ONLY();
	Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);

	if (NULL == uncompressed_chunk)
@@ -589,6 +591,8 @@ tsl_recompress_chunk(PG_FUNCTION_ARGS)
{
	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
	bool if_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
+	TS_PREVENT_FUNC_IF_READ_ONLY();
+
	Chunk *uncompressed_chunk =
		ts_chunk_get_by_relid(uncompressed_chunk_id, true /* fail_if_not_found */);
	if (!ts_chunk_is_unordered(uncompressed_chunk))
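The TS_PREVENT_FUNC_IF_READ_ONLY() guard added in the three functions above is what raises the "cannot execute ... in a read-only transaction" errors exercised by the tests further down. Its definition is not part of this diff; a plausible sketch on top of PostgreSQL's standard guard would be:

/* Hypothetical sketch only -- not the actual TimescaleDB macro definition. */
#include "postgres.h"
#include "tcop/utility.h"    /* PreventCommandIfReadOnly() */
#include "utils/lsyscache.h" /* get_func_name() */

/* Abort with a read-only-transaction error before any chunk is touched,
 * reporting the calling SQL function's own name. */
#define TS_PREVENT_FUNC_IF_READ_ONLY() \
	PreventCommandIfReadOnly(psprintf("%s()", get_func_name(fcinfo->flinfo->fn_oid)))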
@@ -90,7 +90,6 @@ CrossModuleFunctions tsl_cm_functions = {

	/* bgw policies */
	.policy_compression_add = policy_compression_add,
-	.policy_compression_proc = policy_compression_proc,
	.policy_compression_remove = policy_compression_remove,
	.policy_recompression_proc = policy_recompression_proc,
	.policy_refresh_cagg_add = policy_refresh_cagg_add,
@@ -453,6 +453,44 @@ SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name;
 public | conditions | _timescaledb_internal | _hyper_1_3_chunk | Compressed | 8192 | 16384 | 8192 | 32768 | 8192 | 16384 | 8192 | 32768
(3 rows)

--TEST compression job after inserting data into previously compressed chunk
INSERT INTO conditions
SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40;
SELECT id, table_name, status from _timescaledb_catalog.chunk
where hypertable_id = (select id from _timescaledb_catalog.hypertable
                       where table_name = 'conditions')
order by id;
 id |    table_name    | status
----+------------------+--------
  1 | _hyper_1_1_chunk |      3
  2 | _hyper_1_2_chunk |      3
  3 | _hyper_1_3_chunk |      3
(3 rows)

--running job second time, wait for it to complete
select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t;
 schedule_interval
-------------------
 @ 7 days 12 hours
(1 row)

SELECT wait_for_job_to_run(:job_id_4, 2);
 wait_for_job_to_run
---------------------
 t
(1 row)

SELECT id, table_name, status from _timescaledb_catalog.chunk
where hypertable_id = (select id from _timescaledb_catalog.hypertable
                       where table_name = 'conditions')
order by id;
 id |    table_name    | status
----+------------------+--------
  1 | _hyper_1_1_chunk |      1
  2 | _hyper_1_2_chunk |      1
  3 | _hyper_1_3_chunk |      1
(3 rows)

-- Decompress chunks before creating the cagg
SELECT decompress_chunk(c) FROM show_chunks('conditions') c;
 decompress_chunk

@@ -462,7 +500,7 @@ SELECT decompress_chunk(c) FROM show_chunks('conditions') c;
 _timescaledb_internal._hyper_1_3_chunk
(3 rows)

--- Continuous Aggregate
+-- TEST Continuous Aggregate job
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous) AS
SELECT location,

@@ -481,6 +519,12 @@ SELECT wait_for_job_to_run(:job_id_5, 1);
 t
(1 row)

SELECT count(*) FROM conditions_summary_daily;
 count
-------
    62
(1 row)

-- Stop Background Workers
SELECT _timescaledb_internal.stop_background_workers();
 stop_background_workers
@@ -254,7 +254,7 @@ SELECT alter_job(id,config:=jsonb_set(config,'{verbose_log}', 'true'))

set client_min_messages TO LOG;
CALL run_job(:job_id);
-LOG: job 1002 completed compressing chunk _timescaledb_internal._hyper_7_26_chunk
+LOG: job 1002 completed processing chunk _timescaledb_internal._hyper_7_26_chunk
set client_min_messages TO NOTICE;
SELECT count(*) FROM timescaledb_information.chunks
WHERE hypertable_name = 'conditions' and is_compressed = true;
@@ -386,7 +386,6 @@ SELECT add_compression_policy AS job_id
 FROM add_compression_policy('test2', '30d'::interval) \gset
CALL run_job(:job_id);
CALL run_job(:job_id);
-psql:include/recompress_basic.sql:85: NOTICE: no chunks for hypertable public.test2 that satisfy compress chunk policy
-- status should be compressed ---
SELECT chunk_status,
chunk_name as "CHUNK_NAME"
@@ -436,7 +435,6 @@ SELECT add_job('_timescaledb_internal.policy_compression','1w','{"hypertable_id"
CALL run_job(:JOB_COMPRESS);
-- 2nd call should do nothing
CALL run_job(:JOB_COMPRESS);
-psql:include/recompress_basic.sql:117: NOTICE: no chunks for hypertable public.metrics that satisfy compress chunk policy
---- status should be 1
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
 chunk_status
@@ -471,7 +469,6 @@ SELECT alter_job(id,config:=jsonb_set(config,'{recompress}','false')) FROM _time

-- nothing to do
CALL run_job(:JOB_COMPRESS);
-psql:include/recompress_basic.sql:138: NOTICE: no chunks for hypertable public.metrics that satisfy compress chunk policy
---- status should be 1
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
 chunk_status
|
||||
|
||||
-- still nothing to do since we disabled recompress
|
||||
CALL run_job(:JOB_COMPRESS);
|
||||
psql:include/recompress_basic.sql:150: NOTICE: no chunks for hypertable public.metrics that satisfy compress chunk policy
|
||||
---- status should be 3
|
||||
SELECT chunk_status FROM compressed_chunk_info_view WHERE hypertable_name = 'metrics';
|
||||
chunk_status
|
||||
|
@@ -424,3 +424,74 @@ SELECT decompress_chunk(:'CHUNK_NAME');
(1 row)

ALTER TABLE table_constr2 SET (timescaledb.compress=false);
-- TEST compression policy
-- modify the config to trigger errors at runtime
CREATE TABLE test_table_int(time bigint, val int);
SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1);
NOTICE: adding not-null constraint to column "time"
DETAIL: Time dimensions cannot have NULL values.
      create_hypertable
------------------------------
 (21,public,test_table_int,t)
(1 row)

CREATE OR REPLACE function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT';
SELECT set_integer_now_func('test_table_int', 'dummy_now');
 set_integer_now_func
----------------------

(1 row)

INSERT INTO test_table_int SELECT generate_series(1,5), 10;
ALTER TABLE test_table_int set (timescaledb.compress);
SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id
\gset
\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_config.bgw_job
SET config = config - 'compress_after'
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
        config
-----------------------
 {"hypertable_id": 21}
(1 row)

--should fail
CALL run_job(:compressjob_id);
ERROR: job 1000 config must have compress_after
CONTEXT: PL/pgSQL function _timescaledb_internal.policy_compression(integer,jsonb) line 41 at RAISE
SELECT remove_compression_policy('test_table_int');
 remove_compression_policy
---------------------------
 t
(1 row)

--again add a new policy that we'll tamper with
SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id
\gset
UPDATE _timescaledb_config.bgw_job
SET config = config - 'hypertable_id'
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
        config
-----------------------
 {"compress_after": 2}
(1 row)

--should fail
CALL run_job(:compressjob_id);
ERROR: job 1001 config must have hypertable_id
CONTEXT: PL/pgSQL function _timescaledb_internal.policy_compression(integer,jsonb) line 21 at RAISE
UPDATE _timescaledb_config.bgw_job
SET config = NULL
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
 config
--------

(1 row)

--should fail
CALL run_job(:compressjob_id);
ERROR: job 1001 has null config
CONTEXT: PL/pgSQL function _timescaledb_internal.policy_compression(integer,jsonb) line 16 at RAISE
@@ -767,7 +767,6 @@ select * from _timescaledb_config.bgw_job where id=:compressjob_id;
\gset
CALL run_job(:compressjob_id);
CALL run_job(:compressjob_id);
-NOTICE: no chunks for hypertable public.test_table_int that satisfy compress chunk policy
select chunk_name, node_name, before_compression_total_bytes, after_compression_total_bytes
from chunk_compression_stats('test_table_int') where compression_status like 'Compressed' order by chunk_name;
 chunk_name | node_name | before_compression_total_bytes | after_compression_total_bytes

@@ -976,7 +975,6 @@ SELECT * from test_recomp_int_chunk_status ORDER BY 1;
--verify that there are no errors if the policy/recompress_chunk is executed again
--on previously compressed chunks
CALL run_job(:compressjob_id);
-NOTICE: no chunks for hypertable public.test_recomp_int that satisfy compress chunk policy
SELECT recompress_chunk(chunk, true) FROM
( SELECT chunk FROM show_chunks('test_recomp_int') AS chunk ORDER BY chunk )q;
NOTICE: nothing to recompress in chunk "_dist_hyper_4_14_chunk"
@@ -280,8 +280,44 @@ FROM
GROUP BY bucket, device_id WITH NO DATA;
ERROR: cannot execute CREATE MATERIALIZED VIEW in a read-only transaction
-- policy API
CALL _timescaledb_internal.policy_compression(1,'{}');
ERROR: cannot execute policy_compression() in a read-only transaction
-- compression policy will throw an error only if it attempts to compress
-- at least 1 chunk
SET default_transaction_read_only TO off;
CREATE TABLE test_table_int(time bigint NOT NULL, device int);
SELECT create_hypertable('test_table_int', 'time', chunk_time_interval=>'1'::bigint);
      create_hypertable
-----------------------------
 (5,public,test_table_int,t)
(1 row)

create or replace function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT';
select set_integer_now_func('test_table_int', 'dummy_now');
 set_integer_now_func
----------------------

(1 row)

ALTER TABLE test_table_int SET (timescaledb.compress);
INSERT INTO test_table_int VALUES (0, 1), (10,10);
SELECT add_compression_policy('test_table_int', '1'::integer) as comp_job_id \gset
SELECT config as comp_job_config
FROM _timescaledb_config.bgw_job WHERE id = :comp_job_id \gset
SET default_transaction_read_only TO on;
CALL _timescaledb_internal.policy_compression(:comp_job_id, :'comp_job_config');
ERROR: cannot execute compress_chunk() in a read-only transaction
SET default_transaction_read_only TO off;
--verify chunks are not compressed
SELECT count(*) , count(*) FILTER ( WHERE is_compressed is true)
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_table_int';
 count | count
-------+-------
     2 |     0
(1 row)

--cleanup
DROP TABLE test_table_int;
SET default_transaction_read_only TO on;
CALL _timescaledb_internal.policy_refresh_continuous_aggregate(1,'{}');
ERROR: cannot execute policy_refresh_continuous_aggregate() in a read-only transaction
CALL _timescaledb_internal.policy_reorder(1,'{}');
@@ -251,10 +251,29 @@ SELECT wait_for_job_to_run(:job_id_4, 1);
-- Chunk compress stats
SELECT * FROM _timescaledb_internal.compressed_chunk_stats ORDER BY chunk_name;

--TEST compression job after inserting data into previously compressed chunk
INSERT INTO conditions
SELECT generate_series('2021-08-01 00:00'::timestamp, '2021-08-31 00:00'::timestamp, '1 day'), 'NYC', 'nycity', 40, 40;

SELECT id, table_name, status from _timescaledb_catalog.chunk
where hypertable_id = (select id from _timescaledb_catalog.hypertable
                       where table_name = 'conditions')
order by id;

--running job second time, wait for it to complete
select t.schedule_interval FROM alter_job(:job_id_4, next_start=> now() ) t;
SELECT wait_for_job_to_run(:job_id_4, 2);

SELECT id, table_name, status from _timescaledb_catalog.chunk
where hypertable_id = (select id from _timescaledb_catalog.hypertable
                       where table_name = 'conditions')
order by id;


-- Decompress chunks before creating the cagg
SELECT decompress_chunk(c) FROM show_chunks('conditions') c;

--- Continuous Aggregate
+-- TEST Continuous Aggregate job
CREATE MATERIALIZED VIEW conditions_summary_daily
WITH (timescaledb.continuous) AS
SELECT location,

@@ -269,6 +288,7 @@ WITH NO DATA;
-- Refresh Continuous Aggregate by Job
SELECT add_job('custom_proc5', '1h', config := '{"type":"procedure"}'::jsonb, initial_start := now()) AS job_id_5 \gset
SELECT wait_for_job_to_run(:job_id_5, 1);
SELECT count(*) FROM conditions_summary_daily;

-- Stop Background Workers
SELECT _timescaledb_internal.stop_background_workers();
@@ -243,3 +243,43 @@ ALTER TABLE table_constr2 set (timescaledb.compress=false);
SELECT decompress_chunk(:'CHUNK_NAME');
ALTER TABLE table_constr2 SET (timescaledb.compress=false);

-- TEST compression policy
-- modify the config to trigger errors at runtime
CREATE TABLE test_table_int(time bigint, val int);
SELECT create_hypertable('test_table_int', 'time', chunk_time_interval => 1);

CREATE OR REPLACE function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT';
SELECT set_integer_now_func('test_table_int', 'dummy_now');
INSERT INTO test_table_int SELECT generate_series(1,5), 10;
ALTER TABLE test_table_int set (timescaledb.compress);
SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id
\gset

\c :TEST_DBNAME :ROLE_SUPERUSER
UPDATE _timescaledb_config.bgw_job
SET config = config - 'compress_after'
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;
--should fail
CALL run_job(:compressjob_id);

SELECT remove_compression_policy('test_table_int');

--again add a new policy that we'll tamper with
SELECT add_compression_policy('test_table_int', 2::int) AS compressjob_id
\gset
UPDATE _timescaledb_config.bgw_job
SET config = config - 'hypertable_id'
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;

--should fail
CALL run_job(:compressjob_id);

UPDATE _timescaledb_config.bgw_job
SET config = NULL
WHERE id = :compressjob_id;
SELECT config FROM _timescaledb_config.bgw_job WHERE id = :compressjob_id;

--should fail
CALL run_job(:compressjob_id);
@@ -245,7 +245,29 @@ FROM
GROUP BY bucket, device_id WITH NO DATA;

-- policy API
CALL _timescaledb_internal.policy_compression(1,'{}');
-- compression policy will throw an error only if it attempts to compress
-- at least 1 chunk
SET default_transaction_read_only TO off;
CREATE TABLE test_table_int(time bigint NOT NULL, device int);
SELECT create_hypertable('test_table_int', 'time', chunk_time_interval=>'1'::bigint);
create or replace function dummy_now() returns BIGINT LANGUAGE SQL IMMUTABLE as 'SELECT 5::BIGINT';
select set_integer_now_func('test_table_int', 'dummy_now');
ALTER TABLE test_table_int SET (timescaledb.compress);
INSERT INTO test_table_int VALUES (0, 1), (10,10);
SELECT add_compression_policy('test_table_int', '1'::integer) as comp_job_id \gset
SELECT config as comp_job_config
FROM _timescaledb_config.bgw_job WHERE id = :comp_job_id \gset
SET default_transaction_read_only TO on;
CALL _timescaledb_internal.policy_compression(:comp_job_id, :'comp_job_config');
SET default_transaction_read_only TO off;
--verify chunks are not compressed
SELECT count(*) , count(*) FILTER ( WHERE is_compressed is true)
FROM timescaledb_information.chunks
WHERE hypertable_name = 'test_table_int';
--cleanup
DROP TABLE test_table_int;

SET default_transaction_read_only TO on;
CALL _timescaledb_internal.policy_refresh_continuous_aggregate(1,'{}');
CALL _timescaledb_internal.policy_reorder(1,'{}');
CALL _timescaledb_internal.policy_retention(1,'{}');