Support compression on distributed hypertables
Initial support for compression on distributed hypertables. This _only_ includes the ability to run `compress_chunk` and `decompress_chunk` on a distributed hypertable. There is no support for automation, at least not beyond what one can do individually on each data node. Note that an access node keeps no local metadata about which distributed hypertables have compressed chunks. This information needs to be fetched directly from data nodes, although such functionality is not yet implemented. For example, informational views on the access nodes will not yet report the correct compression states for distributed hypertables.
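Example of the intended usage, taken from the dist_compression test added in this commit (the "compressed" table and the LIMIT 1 chunk selection are simply what that test happens to use, not a requirement):

SELECT compress_chunk(chunk)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;

-- Passing if_compressed => true (or if_not_compressed for compress_chunk)
-- downgrades the error to a NOTICE when the chunk is already in the
-- requested state.
SELECT decompress_chunk(chunk, if_compressed => true)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;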
This commit is contained in:
parent bf343d7718
commit 686860ea23
src/chunk.c | 16
@@ -860,6 +860,22 @@ chunk_assign_data_nodes(Chunk *chunk, Hypertable *ht)
 	return chunk_data_nodes;
 }
 
+List *
+ts_chunk_get_data_node_name_list(const Chunk *chunk)
+{
+	List *datanodes = NULL;
+	ListCell *lc;
+
+	foreach (lc, chunk->data_nodes)
+	{
+		ChunkDataNode *cdn = lfirst(lc);
+
+		datanodes = lappend(datanodes, NameStr(cdn->fd.node_name));
+	}
+
+	return datanodes;
+}
+
 static inline const char *
 get_chunk_name_suffix(const char relkind)
 {
@@ -170,6 +170,7 @@ extern TSDLLEXPORT bool ts_chunk_contains_compressed_data(Chunk *chunk);
 extern TSDLLEXPORT bool ts_chunk_can_be_compressed(int32 chunk_id);
 extern TSDLLEXPORT Datum ts_chunk_id_from_relid(PG_FUNCTION_ARGS);
 extern TSDLLEXPORT List *ts_chunk_get_chunk_ids_by_hypertable_id(int32 hypertable_id);
+extern TSDLLEXPORT List *ts_chunk_get_data_node_name_list(const Chunk *chunk);
 extern List *ts_chunk_data_nodes_copy(Chunk *chunk);
 
 #define chunk_get_by_name(schema_name, table_name, fail_if_not_found) \
@@ -349,7 +349,8 @@ execute_compress_chunks_policy(BgwJob *job)
 	else
 	{
 		chunk = ts_chunk_get_by_id(chunkid, true);
-		tsl_compress_chunk_wrapper(chunk->table_id, false);
+		tsl_compress_chunk_wrapper(chunk, false);
+
 		elog(LOG,
 			 "completed compressing chunk %s.%s",
 			 NameStr(chunk->fd.schema_name),
@@ -13,11 +13,16 @@
 #include <miscadmin.h>
 #include <nodes/makefuncs.h>
+#include <nodes/pg_list.h>
 #include <nodes/parsenodes.h>
 #include <storage/lmgr.h>
 #include <trigger.h>
+#include <utils/elog.h>
 #include <utils/builtins.h>
+#include <libpq-fe.h>
+
+#include <remote/dist_commands.h>
 #include "compat.h"
 #include "cache.h"
 #include "chunk.h"
 #include "errors.h"
 #include "hypertable.h"
@@ -311,28 +316,112 @@ decompress_chunk_impl(Oid uncompressed_hypertable_relid, Oid uncompressed_chunk_
 }
 
 bool
-tsl_compress_chunk_wrapper(Oid chunk_relid, bool if_not_compressed)
+tsl_compress_chunk_wrapper(Chunk *chunk, bool if_not_compressed)
 {
-	Chunk *srcchunk = ts_chunk_get_by_relid(chunk_relid, true);
-	if (srcchunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
+	if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID)
 	{
 		ereport((if_not_compressed ? NOTICE : ERROR),
 				(errcode(ERRCODE_DUPLICATE_OBJECT),
-				 errmsg("chunk \"%s\" is already compressed", get_rel_name(chunk_relid))));
+				 errmsg("chunk \"%s\" is already compressed", get_rel_name(chunk->table_id))));
 		return false;
 	}
 
-	compress_chunk_impl(srcchunk->hypertable_relid, chunk_relid);
+	compress_chunk_impl(chunk->hypertable_relid, chunk->table_id);
 	return true;
 }
 
+#if PG_VERSION_SUPPORTS_MULTINODE
+
+/*
+ * Helper for remote invocation of chunk compression and decompression.
+ */
+static bool
+invoke_compression_func_remotely(FunctionCallInfo fcinfo, const Chunk *chunk)
+{
+	List *datanodes;
+	DistCmdResult *distres;
+	bool isnull_result = true;
+	Size i;
+
+	Assert(chunk->relkind == RELKIND_FOREIGN_TABLE);
+	Assert(chunk->data_nodes != NIL);
+	datanodes = ts_chunk_get_data_node_name_list(chunk);
+	distres = ts_dist_cmd_invoke_func_call_on_data_nodes(fcinfo, datanodes);
+
+	for (i = 0; i < ts_dist_cmd_response_count(distres); i++)
+	{
+		const char *node_name;
+		bool isnull;
+		Datum PG_USED_FOR_ASSERTS_ONLY d;
+
+		d = ts_dist_cmd_get_single_scalar_result_by_index(distres, i, &isnull, &node_name);
+
+		/* Make sure data nodes either (1) all return NULL, or (2) all return
+		 * a non-null result. */
+		if (i > 0 && isnull_result != isnull)
+			elog(ERROR, "inconsistent result from data node \"%s\"", node_name);
+
+		isnull_result = isnull;
+
+		if (!isnull)
+		{
+			Assert(OidIsValid(DatumGetObjectId(d)));
+		}
+	}
+
+	ts_dist_cmd_close_response(distres);
+
+	return !isnull_result;
+}
+
+static bool
+compress_remote_chunk(FunctionCallInfo fcinfo, const Chunk *chunk, bool if_not_compressed)
+{
+	bool success = invoke_compression_func_remotely(fcinfo, chunk);
+
+	if (!success)
+		ereport((if_not_compressed ? NOTICE : ERROR),
+				(errcode(ERRCODE_DUPLICATE_OBJECT),
+				 errmsg("chunk \"%s\" is already compressed", get_rel_name(chunk->table_id))));
+
+	return success;
+}
+
+static bool
+decompress_remote_chunk(FunctionCallInfo fcinfo, const Chunk *chunk, bool if_compressed)
+{
+	bool success = invoke_compression_func_remotely(fcinfo, chunk);
+
+	if (!success)
+		ereport((if_compressed ? NOTICE : ERROR),
+				(errcode(ERRCODE_DUPLICATE_OBJECT),
+				 errmsg("chunk \"%s\" is not compressed", get_rel_name(chunk->table_id))));
+
+	return success;
+}
+
+#endif /* PG_VERSION_SUPPORTS_MULTINODE */
+
 Datum
 tsl_compress_chunk(PG_FUNCTION_ARGS)
 {
 	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
 	bool if_not_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
-	if (!tsl_compress_chunk_wrapper(uncompressed_chunk_id, if_not_compressed))
+	Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
+
+#if PG_VERSION_SUPPORTS_MULTINODE
+	if (chunk->relkind == RELKIND_FOREIGN_TABLE)
+	{
+		if (!compress_remote_chunk(fcinfo, chunk, if_not_compressed))
+			PG_RETURN_NULL();
+
+		PG_RETURN_OID(uncompressed_chunk_id);
+	}
+#endif
+
+	if (!tsl_compress_chunk_wrapper(chunk, if_not_compressed))
 		PG_RETURN_NULL();
 
 	PG_RETURN_OID(uncompressed_chunk_id);
 }
@@ -342,12 +431,24 @@ tsl_decompress_chunk(PG_FUNCTION_ARGS)
 	Oid uncompressed_chunk_id = PG_ARGISNULL(0) ? InvalidOid : PG_GETARG_OID(0);
 	bool if_compressed = PG_ARGISNULL(1) ? false : PG_GETARG_BOOL(1);
 	Chunk *uncompressed_chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
 
+	if (NULL == uncompressed_chunk)
+		elog(ERROR, "unknown chunk id %d", uncompressed_chunk_id);
+
+#if PG_VERSION_SUPPORTS_MULTINODE
+	if (uncompressed_chunk->relkind == RELKIND_FOREIGN_TABLE)
+	{
+		if (!decompress_remote_chunk(fcinfo, uncompressed_chunk, if_compressed))
+			PG_RETURN_NULL();
+
+		PG_RETURN_OID(uncompressed_chunk_id);
+	}
+#endif
+
 	if (!decompress_chunk_impl(uncompressed_chunk->hypertable_relid,
 							   uncompressed_chunk_id,
 							   if_compressed))
 		PG_RETURN_NULL();
 
 	PG_RETURN_OID(uncompressed_chunk_id);
 }
@@ -6,8 +6,11 @@
 #ifndef TIMESCALEDB_TSL_COMPRESSION_UTILS_H
 #define TIMESCALEDB_TSL_COMPRESSION_UTILS_H
 
+#include <postgres.h>
+#include <fmgr.h>
+
 extern Datum tsl_compress_chunk(PG_FUNCTION_ARGS);
 extern Datum tsl_decompress_chunk(PG_FUNCTION_ARGS);
-extern bool tsl_compress_chunk_wrapper(Oid chunk_relid, bool if_not_compressed);
+extern bool tsl_compress_chunk_wrapper(Chunk *chunk, bool if_not_compressed);
 
-#endif // TIMESCALEDB_TSL_COMPRESSION_UTILS_H
+#endif /* TIMESCALEDB_TSL_COMPRESSION_UTILS_H */
@@ -912,9 +912,10 @@ disable_compression(Hypertable *ht, WithClauseResult *with_clause_options)
 /*
  * enables compression for the passed in table by
  * creating a compression hypertable with special properties
- Note:
-  caller should check security permissions
- */
+ * Note: caller should check security permissions
+ *
+ * Return true if compression was enabled, false otherwise.
+ */
 bool
 tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
 						   WithClauseResult *with_clause_options)
@@ -981,6 +982,14 @@ tsl_process_compress_table(AlterTableCmd *cmd, Hypertable *ht,
 		drop_existing_compression_table(ht);
 	}
 
+	if (hypertable_is_distributed(ht))
+	{
+		/* On a distributed hypertable, there's no data locally, so don't
+		 * create local compression tables and data but let the DDL pass on to
+		 * data nodes. */
+		return true;
+	}
+
 	compress_htid = create_compression_table(ownerid, &compress_cols);
 	ts_hypertable_set_compressed_id(ht, compress_htid);
 
@@ -7,12 +7,13 @@
 #include <utils/builtins.h>
 #include <utils/guc.h>
 #include <catalog/namespace.h>
 
+#include <funcapi.h>
+#include <libpq-fe.h>
+
-#include "remote/dist_commands.h"
-#include "remote/dist_txn.h"
-#include "remote/connection_cache.h"
+#include "dist_commands.h"
+#include "dist_txn.h"
+#include "connection_cache.h"
 #include "async.h"
 #include "data_node.h"
 #include "dist_util.h"
 #include "miscadmin.h"
@@ -34,6 +35,11 @@ typedef struct DistCmdResponse
 typedef struct DistCmdResult
 {
 	Size num_responses;
+	TypeFuncClass funcclass; /* Function class of invoked function, if any */
+	Oid typeid;              /* Expected result type, or InvalidOid */
+	TupleDesc tupdesc;       /* Tuple descriptor of invoked function
+	                          * result. Set if typeid is valid and has a
+	                          * composite return value */
 	DistCmdResponse responses[FLEXIBLE_ARRAY_MEMBER];
 } DistCmdResult;
 
@@ -44,7 +50,7 @@ ts_dist_cmd_collect_responses(List *requests)
 	AsyncResponseResult *ar;
 	ListCell *lc;
 	DistCmdResult *results =
-		palloc(sizeof(DistCmdResult) + requests->length * sizeof(DistCmdResponse));
+		palloc0(sizeof(DistCmdResult) + requests->length * sizeof(DistCmdResponse));
 	int i = 0;
 
 	foreach (lc, requests)
@@ -110,6 +116,7 @@ ts_dist_cmd_invoke_on_data_nodes(const char *sql, List *data_nodes, bool transac
 
 	results = ts_dist_cmd_collect_responses(requests);
 	list_free(requests);
+	Assert(ts_dist_cmd_response_count(results) == list_length(data_nodes));
 
 	return results;
 }
@@ -161,10 +168,18 @@ ts_dist_cmd_invoke_on_all_data_nodes(const char *sql)
 DistCmdResult *
 ts_dist_cmd_invoke_func_call_on_data_nodes(FunctionCallInfo fcinfo, List *data_nodes)
 {
+	DistCmdResult *result;
+
 	if (NIL == data_nodes)
 		data_nodes = data_node_get_node_name_list();
 
-	return ts_dist_cmd_invoke_on_data_nodes(deparse_func_call(fcinfo), data_nodes, true);
+	result = ts_dist_cmd_invoke_on_data_nodes(deparse_func_call(fcinfo), data_nodes, true);
+
+	/* Initialize result conversion info in case caller wants to convert the
+	 * result to datums. */
+	result->funcclass = get_call_result_type(fcinfo, &result->typeid, &result->tupdesc);
+
+	return result;
 }
 
 DistCmdResult *
@@ -228,6 +243,66 @@ ts_dist_cmd_get_result_by_index(DistCmdResult *response, Size index, const char
 	return async_response_result_get_pg_result(rsp->result);
 }
 
+/*
+ * Get the number of responses in a distributed command result.
+ */
+Size
+ts_dist_cmd_response_count(DistCmdResult *result)
+{
+	return result->num_responses;
+}
+
+/*
+ * Convert an expected scalar return value.
+ *
+ * Convert the result of a remote function invokation returning a single
+ * scalar value. For example, a function returning a bool.
+ */
+Datum
+ts_dist_cmd_get_single_scalar_result_by_index(DistCmdResult *result, Size index, bool *isnull,
+											  const char **node_name_out)
+{
+	PGresult *pgres;
+	Oid typioparam;
+	Oid typinfunc;
+	const char *node_name;
+
+	if (!OidIsValid(result->typeid))
+		elog(ERROR, "invalid result type of distributed command");
+
+	if (result->funcclass != TYPEFUNC_SCALAR)
+		elog(ERROR, "distributed command result is not scalar");
+
+	pgres = ts_dist_cmd_get_result_by_index(result, index, &node_name);
+
+	if (NULL == pgres)
+		elog(ERROR, "invalid index for distributed command result");
+
+	if (node_name_out)
+		*node_name_out = node_name;
+
+	if (PQresultStatus(pgres) != PGRES_TUPLES_OK || PQntuples(pgres) != 1 || PQnfields(pgres) != 1)
+		ereport(ERROR,
+				(errcode(ERRCODE_TS_UNEXPECTED),
+				 errmsg("unexpected response from data node \"%s\"", node_name)));
+
+	if (PQgetisnull(pgres, 0, 0))
+	{
+		if (isnull)
+			*isnull = true;
+
+		return (Datum) 0;
+	}
+
+	if (isnull)
+		*isnull = false;
+
+	getTypeInputInfo(result->typeid, &typinfunc, &typioparam);
+	Assert(OidIsValid(typinfunc));
+
+	return OidInputFunctionCall(typinfunc, PQgetvalue(pgres, 0, 0), typioparam, -1);
+}
+
 void
 ts_dist_cmd_close_response(DistCmdResult *response)
 {
@@ -22,11 +22,14 @@ extern DistCmdResult *ts_dist_cmd_invoke_on_all_data_nodes(const char *sql);
 extern DistCmdResult *ts_dist_cmd_invoke_func_call_on_all_data_nodes(FunctionCallInfo fcinfo);
 extern DistCmdResult *ts_dist_cmd_invoke_func_call_on_data_nodes(FunctionCallInfo fcinfo,
																  List *data_nodes);
+extern Datum ts_dist_cmd_get_single_scalar_result_by_index(DistCmdResult *result, Size index,
+															bool *isnull, const char **node_name);
 extern void ts_dist_cmd_func_call_on_data_nodes(FunctionCallInfo fcinfo, List *data_nodes);
 extern PGresult *ts_dist_cmd_get_result_by_node_name(DistCmdResult *response,
													  const char *node_name);
 extern PGresult *ts_dist_cmd_get_result_by_index(DistCmdResult *response, Size index,
												  const char **node_name);
+extern Size ts_dist_cmd_response_count(DistCmdResult *result);
 
 extern void ts_dist_cmd_close_response(DistCmdResult *response);
 
@@ -8,6 +8,7 @@
 #include <utils/guc.h>
 #include <catalog/pg_trigger.h>
 #include <catalog/namespace.h>
+#include <nodes/parsenodes.h>
 
 #include <guc.h>
 #include "hypertable_data_node.h"
@@ -51,6 +52,38 @@ static DistDDLState dist_ddl_state;
 
 #define dist_ddl_scheduled_for_execution() (dist_ddl_state.exec_type != DIST_DDL_EXEC_NONE)
 
+/*
+ * Set the exec type for a distributed command, i.e., whether to forward the
+ * DDL statement before or after PostgreSQL has processed it locally.
+ *
+ * In multi-command statements (e.g., ALTER), it should not be possible to
+ * have a mix of sub-commands that require both START and END processing. Such
+ * mixing would require splitting the original ALTER across both START and END
+ * processing, which would prevent simply forwarding the original statement to
+ * the data nodes. For instance, consider:
+ *
+ * ALTER TABLE foo SET (newoption = true), ADD CONSTRAINT mycheck CHECK (count > 0);
+ *
+ * which contains two sub-commands (SET and ADD CONSTRAINT), where the first
+ * command (SET) is handled at START, while the latter is handled at
+ * END. While we could always distribute commands at START, this would prevent
+ * local validation by PostgreSQL.
+ */
+static void
+set_dist_exec_type(DistDDLExecType type)
+{
+	if (dist_ddl_state.exec_type != DIST_DDL_EXEC_NONE && dist_ddl_state.exec_type != type)
+		ereport(ERROR,
+				(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+				 errmsg("incompatible sub-commands in single statement"),
+				 errdetail("The statement contains sub-commands that require different "
+						   "handling to distribute to data nodes and can therefore not "
+						   "be mixed in a single statement."),
+				 errhint("Try executing the sub-commands in separate statements.")));
+
+	dist_ddl_state.exec_type = type;
+}
+
 void
 dist_ddl_init(void)
 {
@@ -202,7 +235,7 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 			DropStmt *stmt = (DropStmt *) args->parsetree;
 
 			if (stmt->removeType == OBJECT_TABLE || stmt->removeType == OBJECT_SCHEMA)
-				dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_END;
+				set_dist_exec_type(DIST_DDL_EXEC_ON_END);
 		}
 
 		return;
@@ -222,7 +255,7 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 		 */
 		case T_AlterObjectSchemaStmt:
 		case T_RenameStmt:
-			dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_END;
+			set_dist_exec_type(DIST_DDL_EXEC_ON_END);
 			dist_ddl_state.relid = relid;
 			return;
 
@@ -295,7 +328,20 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 				case AT_DropConstraint:
 				case AT_DropConstraintRecurse:
 				case AT_AddIndex:
-					/* supported commands */
+				case AT_ReplaceRelOptions:
+				case AT_ResetRelOptions:
+					set_dist_exec_type(DIST_DDL_EXEC_ON_END);
 					break;
+				case AT_SetRelOptions:
+					/* Custom TimescaleDB options (e.g.,
+					 * compression-related options) are not recognized by
+					 * PostgreSQL and thus cannot mix with other (PG)
+					 * options. As a consequence, custom reloptions are
+					 * not forwarded/handled by PostgreSQL and thus never
+					 * reach END processing. Therefore, to distributed
+					 * SetRelOptions to other nodes, it needs to happen at
+					 * START. */
+					set_dist_exec_type(DIST_DDL_EXEC_ON_START);
+					break;
 				default:
 					dist_ddl_error_raise_unsupported();
@@ -303,7 +349,6 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 				}
 			}
 
-			dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_END;
 			break;
 		}
 		case T_DropStmt:
@@ -319,14 +364,13 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 			 * combination with table or schema drop.
 			 */
 			Assert(((DropStmt *) args->parsetree)->removeType == OBJECT_INDEX);
-
-			dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_END;
+			set_dist_exec_type(DIST_DDL_EXEC_ON_END);
 			break;
 
 		case T_IndexStmt:
 			/* Since we have custom CREATE INDEX implementation, currently it
 			 * does not support ddl_command_end trigger. */
-			dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_START;
+			set_dist_exec_type(DIST_DDL_EXEC_ON_START);
 			break;
 
 		case T_CreateTrigStmt:
@@ -342,9 +386,8 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 		case T_GrantStmt:
 			/* If there is one or more distributed hypertables, we need to do a 2PC. */
 			if (num_dist_hypertables > 0)
-				dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_START;
+				set_dist_exec_type(DIST_DDL_EXEC_ON_START);
 			break;
 
 		case T_TruncateStmt:
 		{
 			TruncateStmt *stmt = (TruncateStmt *) args->parsetree;
@@ -362,7 +405,7 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 			 * sets of nodes. */
 			if (num_dist_hypertables == 1 && num_regular_tables == 0 && num_hypertables == 0 &&
 				num_dist_hypertable_members == 0)
-				dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_START;
+				set_dist_exec_type(DIST_DDL_EXEC_ON_START);
 			else
 				dist_ddl_error_raise_unsupported();
 			break;
@@ -373,7 +416,7 @@ dist_ddl_preprocess(ProcessUtilityArgs *args)
 		case T_ClusterStmt:
 			/* Those commands are also targets for execute_on_start in since they
 			 * are not supported by event triggers. */
-			dist_ddl_state.exec_type = DIST_DDL_EXEC_ON_START;
+			set_dist_exec_type(DIST_DDL_EXEC_ON_START);
 
 			/* fall through */
 		default:
tsl/test/expected/dist_compression.out | 396 (new file)
@@ -0,0 +1,396 @@
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
---------------------------------------------------
|
||||
-- Test compression on a distributed hypertable
|
||||
---------------------------------------------------
|
||||
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER
|
||||
SET client_min_messages TO ERROR;
|
||||
DROP DATABASE IF EXISTS data_node_1;
|
||||
DROP DATABASE IF EXISTS data_node_2;
|
||||
DROP DATABASE IF EXISTS data_node_3;
|
||||
\ir include/remote_exec.sql
|
||||
-- This file and its contents are licensed under the Timescale License.
|
||||
-- Please see the included NOTICE for copyright information and
|
||||
-- LICENSE-TIMESCALE for a copy of the license.
|
||||
CREATE SCHEMA IF NOT EXISTS test;
|
||||
GRANT USAGE ON SCHEMA test TO PUBLIC;
|
||||
CREATE OR REPLACE FUNCTION test.remote_exec(srv_name name[], command text)
|
||||
RETURNS VOID
|
||||
AS :TSL_MODULE_PATHNAME, 'ts_remote_exec'
|
||||
LANGUAGE C;
|
||||
SELECT * FROM add_data_node('data_node_1', host => 'localhost',
|
||||
database => 'data_node_1');
|
||||
node_name | host | port | database | node_created | database_created | extension_created
|
||||
-------------+-----------+-------+-------------+--------------+------------------+-------------------
|
||||
data_node_1 | localhost | 15432 | data_node_1 | t | t | t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
|
||||
database => 'data_node_2');
|
||||
node_name | host | port | database | node_created | database_created | extension_created
|
||||
-------------+-----------+-------+-------------+--------------+------------------+-------------------
|
||||
data_node_2 | localhost | 15432 | data_node_2 | t | t | t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
|
||||
database => 'data_node_3');
|
||||
node_name | host | port | database | node_created | database_created | extension_created
|
||||
-------------+-----------+-------+-------------+--------------+------------------+-------------------
|
||||
data_node_3 | localhost | 15432 | data_node_3 | t | t | t
|
||||
(1 row)
|
||||
|
||||
GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO :ROLE_1;
|
||||
SET client_min_messages TO NOTICE;
|
||||
SET ROLE :ROLE_1;
|
||||
SELECT setseed(1);
|
||||
setseed
|
||||
---------
|
||||
|
||||
(1 row)
|
||||
|
||||
CREATE TABLE compressed(time timestamptz, device int, temp float);
|
||||
-- Replicate twice to see that compress_chunk compresses all replica chunks
|
||||
SELECT create_distributed_hypertable('compressed', 'time', 'device', replication_factor => 2);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
create_distributed_hypertable
|
||||
-------------------------------
|
||||
(1,public,compressed,t)
|
||||
(1 row)
|
||||
|
||||
INSERT INTO compressed SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, random()*80
|
||||
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-04 1:00', '1 hour') t;
|
||||
ALTER TABLE compressed SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT table_name, compressed_hypertable_id
|
||||
FROM _timescaledb_catalog.hypertable
|
||||
WHERE table_name = 'compressed';
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT table_name, compressed_hypertable_id
|
||||
FROM _timescaledb_catalog.hypertable
|
||||
WHERE table_name = 'compressed'
|
||||
NOTICE: [data_node_1]:
|
||||
table_name|compressed_hypertable_id
|
||||
----------+------------------------
|
||||
compressed| 2
|
||||
(1 row)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT table_name, compressed_hypertable_id
|
||||
FROM _timescaledb_catalog.hypertable
|
||||
WHERE table_name = 'compressed'
|
||||
NOTICE: [data_node_2]:
|
||||
table_name|compressed_hypertable_id
|
||||
----------+------------------------
|
||||
compressed| 2
|
||||
(1 row)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT table_name, compressed_hypertable_id
|
||||
FROM _timescaledb_catalog.hypertable
|
||||
WHERE table_name = 'compressed'
|
||||
NOTICE: [data_node_3]:
|
||||
table_name|compressed_hypertable_id
|
||||
----------+------------------------
|
||||
compressed| 2
|
||||
(1 row)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- There should be no compressed chunks
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_1]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_2]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_3]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Test that compression is rolled back on aborted transaction
|
||||
BEGIN;
|
||||
SELECT compress_chunk(chunk)
|
||||
FROM show_chunks('compressed') AS chunk
|
||||
ORDER BY chunk
|
||||
LIMIT 1;
|
||||
compress_chunk
|
||||
---------------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
-- Data nodes should now report compressed chunks
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_1]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Compressed |8192 bytes |32 kB |0 bytes |40 kB |8192 bytes |16 kB |8192 bytes |32 kB
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_2]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Compressed |8192 bytes |32 kB |0 bytes |40 kB |8192 bytes |16 kB |8192 bytes |32 kB
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_3]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Abort the transaction
|
||||
ROLLBACK;
|
||||
-- No compressed chunks since we rolled back
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_1]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_2]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_3]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Compress for real this time
|
||||
SELECT compress_chunk(chunk)
|
||||
FROM show_chunks('compressed') AS chunk
|
||||
ORDER BY chunk
|
||||
LIMIT 1;
|
||||
compress_chunk
|
||||
---------------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
-- Check that one chunk, and its replica, is compressed
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_1]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Compressed |8192 bytes |32 kB |0 bytes |40 kB |8192 bytes |16 kB |8192 bytes |32 kB
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_2]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Compressed |8192 bytes |32 kB |0 bytes |40 kB |8192 bytes |16 kB |8192 bytes |32 kB
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_3]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Compress twice to generate NOTICE that the chunk is already compressed
|
||||
SELECT compress_chunk(chunk, if_not_compressed => true)
|
||||
FROM show_chunks('compressed') AS chunk
|
||||
ORDER BY chunk
|
||||
LIMIT 1;
|
||||
NOTICE: chunk "_hyper_1_1_dist_chunk" is already compressed
|
||||
compress_chunk
|
||||
----------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Decompress the chunk and replica
|
||||
SELECT decompress_chunk(chunk)
|
||||
FROM show_chunks('compressed') AS chunk
|
||||
ORDER BY chunk
|
||||
LIMIT 1;
|
||||
decompress_chunk
|
||||
---------------------------------------------
|
||||
_timescaledb_internal._hyper_1_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
-- Should now be decompressed
|
||||
SELECT test.remote_exec(NULL, $$
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_1]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_2]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_1_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM timescaledb_information.compressed_chunk_stats
|
||||
WHERE hypertable_name = 'compressed'::regclass
|
||||
ORDER BY hypertable_name, chunk_name
|
||||
NOTICE: [data_node_3]:
|
||||
hypertable_name|chunk_name |compression_status|uncompressed_heap_bytes|uncompressed_index_bytes|uncompressed_toast_bytes|uncompressed_total_bytes|compressed_heap_bytes|compressed_index_bytes|compressed_toast_bytes|compressed_total_bytes
|
||||
---------------+-------------------------------------------+------------------+-----------------------+------------------------+------------------------+------------------------+---------------------+----------------------+----------------------+----------------------
|
||||
compressed |_timescaledb_internal._hyper_1_2_dist_chunk|Uncompressed | | | | | | | |
|
||||
compressed |_timescaledb_internal._hyper_1_3_dist_chunk|Uncompressed | | | | | | | |
|
||||
(2 rows)
|
||||
|
||||
|
||||
remote_exec
|
||||
-------------
|
||||
|
||||
(1 row)
|
||||
|
||||
-- Decompress twice to generate NOTICE that the chunk is already decompressed
|
||||
SELECT decompress_chunk(chunk, if_compressed => true)
|
||||
FROM show_chunks('compressed') AS chunk
|
||||
ORDER BY chunk
|
||||
LIMIT 1;
|
||||
NOTICE: chunk "_hyper_1_1_dist_chunk" is not compressed
|
||||
decompress_chunk
|
||||
------------------
|
||||
|
||||
(1 row)
|
||||
|
@ -874,6 +874,25 @@ some_dist_table_time_idx|{time} | |f |f |f |
|
||||
(1 row)
|
||||
|
||||
DROP TABLE some_dist_table;
|
||||
-- DDL with multiple sub-commands (ALTER)
|
||||
BEGIN;
|
||||
CREATE TABLE some_dist_table(time timestamptz, device int);
|
||||
SELECT * FROM create_distributed_hypertable('some_dist_table', 'time');
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
9 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
\set ON_ERROR_STOP 0
|
||||
-- Mixing SET and other options not supported. This is to protect
|
||||
-- against mixing custom (compression) options with other
|
||||
-- sub-commands.
|
||||
ALTER TABLE some_dist_table SET (fillfactor = 10),
|
||||
ADD CONSTRAINT device_check CHECK (device > 0);
|
||||
ERROR: ALTER TABLE <hypertable> SET does not support multiple clauses
|
||||
\set ON_ERROR_STOP 1
|
||||
ROLLBACK;
|
||||
-- Multi-statement transactions
|
||||
-- BEGIN/COMMIT
|
||||
CREATE TABLE some_dist_table(time timestamptz, device int);
|
||||
@ -881,7 +900,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
9 | public | some_dist_table | t
|
||||
10 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -974,7 +993,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
10 | public | some_dist_table | t
|
||||
11 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1060,7 +1079,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
11 | public | some_dist_table | t
|
||||
12 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1155,7 +1174,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
12 | public | some_dist_table | t
|
||||
13 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1247,7 +1266,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
13 | public | some_dist_table | t
|
||||
14 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1335,7 +1354,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
14 | public | some_dist_table | t
|
||||
15 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1424,7 +1443,7 @@ SELECT * FROM create_hypertable('some_dist_table', 'time', replication_factor =>
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+-----------------+---------
|
||||
15 | public | some_dist_table | t
|
||||
16 | public | some_dist_table | t
|
||||
(1 row)
|
||||
|
||||
BEGIN;
|
||||
@ -1518,14 +1537,14 @@ SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 3);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+------------+---------
|
||||
16 | public | disttable | t
|
||||
17 | public | disttable | t
|
||||
(1 row)
|
||||
|
||||
INSERT INTO disttable VALUES ('2017-01-01 06:01', 0, 1, 0.0);
|
||||
SELECT show_chunks('disttable');
|
||||
show_chunks
|
||||
----------------------------------------------
|
||||
_timescaledb_internal._hyper_16_1_dist_chunk
|
||||
_timescaledb_internal._hyper_17_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM test.show_constraints('disttable');
|
||||
@ -1534,7 +1553,8 @@ SELECT * FROM test.show_constraints('disttable');
|
||||
color_check | c | {color} | - | (color > 0) | f | f | t
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk;
|
||||
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
|
||||
--------------+------+---------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+-----------
|
||||
color_check | c | {color} | - | (color > 0) | f | f | t
|
||||
@ -1547,7 +1567,8 @@ SELECT * FROM test.show_constraints('disttable');
|
||||
------------+------+---------+-------+------+------------+----------+-----------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk;
|
||||
Constraint | Type | Columns | Index | Expr | Deferrable | Deferred | Validated
|
||||
--------------+------+---------+-------+------------------------------------------------------------------------------------------------------------------------------------------------+------------+----------+-----------
|
||||
constraint_1 | c | {time} | - | (("time" >= 'Wed Dec 28 16:00:00 2016 PST'::timestamp with time zone) AND ("time" < 'Wed Jan 04 16:00:00 2017 PST'::timestamp with time zone)) | f | f | t
|
||||
@ -1556,14 +1577,15 @@ SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chun
|
||||
SELECT * FROM test.remote_exec(NULL, $$
|
||||
SELECT show_chunks('disttable');
|
||||
SELECT * FROM test.show_constraints('disttable');
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk;
|
||||
$$);
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT show_chunks('disttable')
|
||||
NOTICE: [data_node_1]:
|
||||
show_chunks
|
||||
--------------------------------------------
|
||||
_timescaledb_internal._hyper_16_1_dist_chunk
|
||||
_timescaledb_internal._hyper_17_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
|
||||
@ -1576,7 +1598,8 @@ Constraint|Type|Columns|Index|Expr|Deferrable|Deferred|Validated
|
||||
|
||||
|
||||
NOTICE: [data_node_1]:
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk')
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk
|
||||
NOTICE: [data_node_1]:
|
||||
Constraint |Type|Columns|Index|Expr |Deferrable|Deferred|Validated
|
||||
------------+----+-------+-----+----------------------------------------------------------------------------------------------------------------------------------------------+----------+--------+---------
|
||||
@ -1589,7 +1612,7 @@ SELECT show_chunks('disttable')
|
||||
NOTICE: [data_node_2]:
|
||||
show_chunks
|
||||
--------------------------------------------
|
||||
_timescaledb_internal._hyper_16_1_dist_chunk
|
||||
_timescaledb_internal._hyper_17_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
|
||||
@ -1602,7 +1625,8 @@ Constraint|Type|Columns|Index|Expr|Deferrable|Deferred|Validated
|
||||
|
||||
|
||||
NOTICE: [data_node_2]:
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk')
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk
|
||||
NOTICE: [data_node_2]:
|
||||
Constraint |Type|Columns|Index|Expr |Deferrable|Deferred|Validated
|
||||
------------+----+-------+-----+----------------------------------------------------------------------------------------------------------------------------------------------+----------+--------+---------
|
||||
@ -1615,7 +1639,7 @@ SELECT show_chunks('disttable')
|
||||
NOTICE: [data_node_3]:
|
||||
show_chunks
|
||||
--------------------------------------------
|
||||
_timescaledb_internal._hyper_16_1_dist_chunk
|
||||
_timescaledb_internal._hyper_17_1_dist_chunk
|
||||
(1 row)
|
||||
|
||||
|
||||
@ -1628,7 +1652,8 @@ Constraint|Type|Columns|Index|Expr|Deferrable|Deferred|Validated
|
||||
|
||||
|
||||
NOTICE: [data_node_3]:
|
||||
SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk')
|
||||
SELECT (test.show_constraints(chunk)).*
|
||||
FROM show_chunks('disttable') AS chunk
|
||||
NOTICE: [data_node_3]:
|
||||
Constraint |Type|Columns|Index|Expr |Deferrable|Deferred|Validated
|
||||
------------+----+-------+-----+----------------------------------------------------------------------------------------------------------------------------------------------+----------+--------+---------
|
||||
@ -1671,7 +1696,7 @@ SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 3);
|
||||
NOTICE: adding not-null constraint to column "time"
|
||||
hypertable_id | schema_name | table_name | created
|
||||
---------------+-------------+------------+---------
|
||||
17 | public | disttable | t
|
||||
18 | public | disttable | t
|
||||
(1 row)
|
||||
|
||||
CREATE INDEX disttable_device_idx ON disttable (device);
|
||||
|
@@ -88,6 +88,7 @@ if (PG_VERSION_SUPPORTS_MULTINODE)
     deparse.sql
     deparse_fail.sql
     dist_commands.sql
+    dist_compression.sql
     dist_ddl.sql
     dist_grant.sql
     dist_partial_agg.sql
tsl/test/sql/dist_compression.sql | 108 (new file)
@@ -0,0 +1,108 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

---------------------------------------------------
-- Test compression on a distributed hypertable
---------------------------------------------------
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER

SET client_min_messages TO ERROR;
DROP DATABASE IF EXISTS data_node_1;
DROP DATABASE IF EXISTS data_node_2;
DROP DATABASE IF EXISTS data_node_3;
\ir include/remote_exec.sql

SELECT * FROM add_data_node('data_node_1', host => 'localhost',
                            database => 'data_node_1');
SELECT * FROM add_data_node('data_node_2', host => 'localhost',
                            database => 'data_node_2');
SELECT * FROM add_data_node('data_node_3', host => 'localhost',
                            database => 'data_node_3');

GRANT USAGE ON FOREIGN SERVER data_node_1, data_node_2, data_node_3 TO :ROLE_1;
SET client_min_messages TO NOTICE;
SET ROLE :ROLE_1;
SELECT setseed(1);

CREATE TABLE compressed(time timestamptz, device int, temp float);
-- Replicate twice to see that compress_chunk compresses all replica chunks
SELECT create_distributed_hypertable('compressed', 'time', 'device', replication_factor => 2);
INSERT INTO compressed SELECT t, (abs(timestamp_hash(t::timestamp)) % 10) + 1, random()*80
FROM generate_series('2018-03-02 1:00'::TIMESTAMPTZ, '2018-03-04 1:00', '1 hour') t;
ALTER TABLE compressed SET (timescaledb.compress, timescaledb.compress_segmentby='device', timescaledb.compress_orderby = 'time DESC');

SELECT test.remote_exec(NULL, $$
SELECT table_name, compressed_hypertable_id
FROM _timescaledb_catalog.hypertable
WHERE table_name = 'compressed';
$$);

-- There should be no compressed chunks
SELECT test.remote_exec(NULL, $$
SELECT * FROM timescaledb_information.compressed_chunk_stats
WHERE hypertable_name = 'compressed'::regclass
ORDER BY hypertable_name, chunk_name;
$$);

-- Test that compression is rolled back on aborted transaction
BEGIN;
SELECT compress_chunk(chunk)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;

-- Data nodes should now report compressed chunks
SELECT test.remote_exec(NULL, $$
SELECT * FROM timescaledb_information.compressed_chunk_stats
WHERE hypertable_name = 'compressed'::regclass
ORDER BY hypertable_name, chunk_name;
$$);
-- Abort the transaction
ROLLBACK;

-- No compressed chunks since we rolled back
SELECT test.remote_exec(NULL, $$
SELECT * FROM timescaledb_information.compressed_chunk_stats
WHERE hypertable_name = 'compressed'::regclass
ORDER BY hypertable_name, chunk_name;
$$);

-- Compress for real this time
SELECT compress_chunk(chunk)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;


-- Check that one chunk, and its replica, is compressed
SELECT test.remote_exec(NULL, $$
SELECT * FROM timescaledb_information.compressed_chunk_stats
WHERE hypertable_name = 'compressed'::regclass
ORDER BY hypertable_name, chunk_name;
$$);

-- Compress twice to generate NOTICE that the chunk is already compressed
SELECT compress_chunk(chunk, if_not_compressed => true)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;

-- Decompress the chunk and replica
SELECT decompress_chunk(chunk)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;

-- Should now be decompressed
SELECT test.remote_exec(NULL, $$
SELECT * FROM timescaledb_information.compressed_chunk_stats
WHERE hypertable_name = 'compressed'::regclass
ORDER BY hypertable_name, chunk_name;
$$);

-- Decompress twice to generate NOTICE that the chunk is already decompressed
SELECT decompress_chunk(chunk, if_compressed => true)
FROM show_chunks('compressed') AS chunk
ORDER BY chunk
LIMIT 1;
@@ -130,7 +130,6 @@ ALTER INDEX disttable_description_idx RENAME to disttable_descr_idx;
 ALTER TABLE disttable SET SCHEMA some_unexist_schema;
 ALTER TABLE disttable SET SCHEMA some_schema;
 
-
 DROP TABLE non_disttable1, disttable;
 DROP TABLE disttable, non_disttable2;
 DROP TABLE disttable, disttable;
@@ -239,6 +238,19 @@ SELECT * FROM test.show_indexes('some_dist_table');
 SELECT * FROM test.remote_exec(NULL, $$ SELECT * FROM test.show_indexes('some_dist_table') $$);
 DROP TABLE some_dist_table;
 
+-- DDL with multiple sub-commands (ALTER)
+BEGIN;
+CREATE TABLE some_dist_table(time timestamptz, device int);
+SELECT * FROM create_distributed_hypertable('some_dist_table', 'time');
+\set ON_ERROR_STOP 0
+-- Mixing SET and other options not supported. This is to protect
+-- against mixing custom (compression) options with other
+-- sub-commands.
+ALTER TABLE some_dist_table SET (fillfactor = 10),
+	ADD CONSTRAINT device_check CHECK (device > 0);
+\set ON_ERROR_STOP 1
+ROLLBACK;
+
 -- Multi-statement transactions
 
 -- BEGIN/COMMIT
@@ -375,14 +387,19 @@ SELECT * FROM create_hypertable('disttable', 'time', replication_factor => 3);
 INSERT INTO disttable VALUES ('2017-01-01 06:01', 0, 1, 0.0);
 SELECT show_chunks('disttable');
 SELECT * FROM test.show_constraints('disttable');
-SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
+SELECT (test.show_constraints(chunk)).*
+FROM show_chunks('disttable') AS chunk;
 
 ALTER TABLE disttable DROP CONSTRAINT color_check;
 SELECT * FROM test.show_constraints('disttable');
-SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
+SELECT (test.show_constraints(chunk)).*
+FROM show_chunks('disttable') AS chunk;
 
 SELECT * FROM test.remote_exec(NULL, $$
 SELECT show_chunks('disttable');
 SELECT * FROM test.show_constraints('disttable');
-SELECT * FROM test.show_constraints('_timescaledb_internal._hyper_16_1_dist_chunk');
+SELECT (test.show_constraints(chunk)).*
+FROM show_chunks('disttable') AS chunk;
 $$);
 DROP TABLE disttable;
 