Hyperstore renamed to hypercore
This changes the names of all symbols, comments, files, and functions to use "hypercore" rather than "hyperstore".
This commit is contained in:
Parent: 406901d838
Commit: e0a7a6f6e1
Changed files:
- .github/workflows
- cmake
- sql
- src
- tsl
  - src
    - CMakeLists.txt
    - bgw_policy
    - compression
    - hypercore: CMakeLists.txt, arrow_tts.c, arrow_tts.h, attr_capture.c, hypercore_handler.c, hypercore_handler.h, hypercore_proxy.c, hypercore_proxy.h, utils.c, utils.h
    - init.c, nodes/columnar_scan, planner.c, process_utility.c
  - test
    - expected: hypercore.out, hypercore_columnar.out, hypercore_copy.out, hypercore_create.out, hypercore_cursor.out, hypercore_ddl.out, hypercore_delete.out, hypercore_index_btree.out, hypercore_index_hash.out, hypercore_insert.out, hypercore_join.out, hypercore_merge.out, hypercore_parallel.out, hypercore_policy.out, hypercore_scans.out, hypercore_stats.out, hypercore_types.out, hypercore_update.out, hypercore_vacuum.out, hypercore_vacuum_full.out
    - isolation/specs
    - shared/expected
    - sql: CMakeLists.txt, hypercore.sql, hypercore_columnar.sql, hypercore_copy.sql, hypercore_create.sql, hypercore_cursor.sql, hypercore_ddl.sql, hypercore_delete.sql, hypercore_index_btree.sql, hypercore_index_hash.sql, hypercore_insert.sql, hypercore_join.sql, hypercore_merge.sql, hypercore_parallel.sql, hypercore_policy.sql, hypercore_scans.sql, hypercore_stats.sql, hypercore_types.sql, hypercore_update.sql, hypercore_vacuum.sql, hypercore_vacuum_full.sql
- include
@@ -47,7 +47,7 @@ jobs:
 CC: clang-14
 CXX: clang++-14
 DEBIAN_FRONTEND: noninteractive
-IGNORES: "append-* transparent_decompression-* transparent_decompress_chunk-* pg_dump telemetry bgw_db_scheduler* hyperstore_vacuum"
+IGNORES: "append-* transparent_decompression-* transparent_decompress_chunk-* pg_dump telemetry bgw_db_scheduler* hypercore_vacuum"
 SKIPS: chunk_adaptive histogram_test-*
 EXTENSIONS: "postgres_fdw test_decoding pageinspect pgstattuple"
 strategy:
@@ -27,7 +27,7 @@ set(PRE_INSTALL_FUNCTION_FILES
 set(SOURCE_FILES
 hypertable.sql
 chunk.sql
-hyperstore.sql
+hypercore.sql
 ddl_internal.sql
 util_time.sql
 util_internal_table_ddl.sql
@@ -2,25 +2,25 @@
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-APACHE for a copy of the license.

-CREATE FUNCTION ts_hyperstore_handler(internal) RETURNS table_am_handler
-AS '@MODULE_PATHNAME@', 'ts_hyperstore_handler' LANGUAGE C;
+CREATE FUNCTION ts_hypercore_handler(internal) RETURNS table_am_handler
+AS '@MODULE_PATHNAME@', 'ts_hypercore_handler' LANGUAGE C;

-CREATE ACCESS METHOD hyperstore TYPE TABLE HANDLER ts_hyperstore_handler;
-COMMENT ON ACCESS METHOD hyperstore IS 'Storage engine using hybrid row/columnar compression';
+CREATE ACCESS METHOD hypercore TYPE TABLE HANDLER ts_hypercore_handler;
+COMMENT ON ACCESS METHOD hypercore IS 'Storage engine using hybrid row/columnar compression';

-CREATE FUNCTION ts_hsproxy_handler(internal) RETURNS index_am_handler
-AS '@MODULE_PATHNAME@', 'ts_hsproxy_handler' LANGUAGE C;
+CREATE FUNCTION ts_hypercore_proxy_handler(internal) RETURNS index_am_handler
+AS '@MODULE_PATHNAME@', 'ts_hypercore_proxy_handler' LANGUAGE C;

-CREATE ACCESS METHOD hsproxy TYPE INDEX HANDLER ts_hsproxy_handler;
-COMMENT ON ACCESS METHOD hsproxy IS 'Hyperstore proxy index access method';
+CREATE ACCESS METHOD hypercore_proxy TYPE INDEX HANDLER ts_hypercore_proxy_handler;
+COMMENT ON ACCESS METHOD hypercore_proxy IS 'Hypercore proxy index access method';

 -- An index AM needs at least one operator class for the column type
 -- that the index will be defined on. To create the index, at least
--- one column needs to be defined. For "hsproxy", the "count" column
--- on the hyperstore's internal compressed relation is used since it
+-- one column needs to be defined. For "hypercore_proxy", the "count" column
+-- on the hypercore's internal compressed relation is used since it
 -- is always present. Since "count" has type int, we need a
 -- corresponding operator class.
 CREATE OPERATOR CLASS int4_ops
-DEFAULT FOR TYPE int4 USING hsproxy AS
+DEFAULT FOR TYPE int4 USING hypercore_proxy AS
 OPERATOR 1 = (int4, int4),
 FUNCTION 1 hashint4(int4);
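On an installed build, the renamed access methods show up in the catalog; a quick sanity check (a sketch, not part of the commit):

    SELECT amname, amtype FROM pg_am WHERE amname IN ('hypercore', 'hypercore_proxy');
    -- expected: hypercore is a table AM (amtype 't'), hypercore_proxy an index AM (amtype 'i')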
@@ -1,4 +1,4 @@
--- Hyperstore updates
+-- Hypercore updates
 CREATE FUNCTION _timescaledb_debug.is_compressed_tid(tid) RETURNS BOOL
 AS '@MODULE_PATHNAME@', 'ts_update_placeholder' LANGUAGE C STRICT;
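The _timescaledb_debug.is_compressed_tid(tid) function declared here reports whether a row's TID points into a compressed batch; a sketch of using it to inspect a hypercore chunk (chunk name hypothetical):

    SELECT ctid, _timescaledb_debug.is_compressed_tid(ctid) AS compressed
    FROM _timescaledb_internal._hyper_1_1_chunk
    LIMIT 5;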
@@ -1,8 +1,8 @@
--- Hyperstore AM
-DROP ACCESS METHOD IF EXISTS hsproxy;
-DROP FUNCTION IF EXISTS ts_hsproxy_handler;
-DROP ACCESS METHOD IF EXISTS hyperstore;
-DROP FUNCTION IF EXISTS ts_hyperstore_handler;
+-- Hypercore AM
+DROP ACCESS METHOD IF EXISTS hypercore_proxy;
+DROP FUNCTION IF EXISTS ts_hypercore_proxy_handler;
+DROP ACCESS METHOD IF EXISTS hypercore;
+DROP FUNCTION IF EXISTS ts_hypercore_handler;
 DROP FUNCTION IF EXISTS _timescaledb_debug.is_compressed_tid;

 DROP FUNCTION IF EXISTS @extschema@.compress_chunk(uncompressed_chunk REGCLASS, if_not_compressed BOOLEAN, recompress BOOLEAN, compress_using NAME);
@@ -79,8 +79,8 @@ CROSSMODULE_WRAPPER(array_compressor_finish);
 CROSSMODULE_WRAPPER(create_compressed_chunk);
 CROSSMODULE_WRAPPER(compress_chunk);
 CROSSMODULE_WRAPPER(decompress_chunk);
-CROSSMODULE_WRAPPER(hyperstore_handler);
-CROSSMODULE_WRAPPER(hsproxy_handler);
+CROSSMODULE_WRAPPER(hypercore_handler);
+CROSSMODULE_WRAPPER(hypercore_proxy_handler);

 /* continuous aggregate */
 CROSSMODULE_WRAPPER(continuous_agg_invalidation_trigger);
@@ -99,7 +99,7 @@ CROSSMODULE_WRAPPER(chunk_create_empty_table);
 CROSSMODULE_WRAPPER(recompress_chunk_segmentwise);
 CROSSMODULE_WRAPPER(get_compressed_chunk_index_for_recompression);

-/* hyperstore */
+/* hypercore */
 CROSSMODULE_WRAPPER(is_compressed_tid);

 /*
@@ -120,7 +120,7 @@ error_no_default_fn_community(void)
 }

 static bytea *
-error_hsproxy_index_options(Datum reloptions, bool validate)
+error_hypercore_proxy_index_options(Datum reloptions, bool validate)
 {
 error_no_default_fn_community();
 return NULL;
@@ -133,14 +133,14 @@ error_hsproxy_index_options(Datum reloptions, bool validate)
 * parsing index options instead.
 */
 static Datum
-error_pg_community_hsproxy_handler(PG_FUNCTION_ARGS)
+error_pg_community_hypercore_proxy_handler(PG_FUNCTION_ARGS)
 {
 IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

 amroutine->amstrategies = 0;
 amroutine->amsupport = 1;
 amroutine->amoptsprocnum = 0;
-amroutine->amoptions = error_hsproxy_index_options;
+amroutine->amoptions = error_hypercore_proxy_index_options;

 PG_RETURN_POINTER(amroutine);
 }
@@ -395,8 +395,8 @@ TSDLLEXPORT CrossModuleFunctions ts_cm_functions_default = {
 .dictionary_compressor_finish = error_no_default_fn_pg_community,
 .array_compressor_append = error_no_default_fn_pg_community,
 .array_compressor_finish = error_no_default_fn_pg_community,
-.hyperstore_handler = error_no_default_fn_pg_community,
-.hsproxy_handler = error_pg_community_hsproxy_handler,
+.hypercore_handler = error_no_default_fn_pg_community,
+.hypercore_proxy_handler = error_pg_community_hypercore_proxy_handler,
 .is_compressed_tid = error_no_default_fn_pg_community,

 .show_chunk = error_no_default_fn_pg_community,
@@ -147,8 +147,8 @@ typedef struct CrossModuleFunctions
 PGFunction dictionary_compressor_finish;
 PGFunction array_compressor_append;
 PGFunction array_compressor_finish;
-PGFunction hyperstore_handler;
-PGFunction hsproxy_handler;
+PGFunction hypercore_handler;
+PGFunction hypercore_proxy_handler;
 PGFunction is_compressed_tid;

 PGFunction create_chunk;
@@ -15,6 +15,7 @@
 #define TS_LIBDIR "$libdir/"
 #define EXTENSION_SO TS_LIBDIR "" EXTENSION_NAME
 #define EXTENSION_TSL_SO TS_LIBDIR TSL_LIBRARY_NAME "-" TIMESCALEDB_VERSION_MOD
+#define TS_HYPERCORE_TAM_NAME "hypercore"

 #define MAKE_EXTOPTION(NAME) (EXTENSION_NAMESPACE "." NAME)
src/guc.c (26 changed lines)
@@ -41,7 +41,7 @@ bool
 ts_is_whitelisted_indexam(const char *amname)
 {
 ListCell *cell;
-char *rawname = pstrdup(ts_guc_hyperstore_indexam_whitelist);
+char *rawname = pstrdup(ts_guc_hypercore_indexam_whitelist);

 List *namelist;
 if (!SplitIdentifierString(rawname, ',', &namelist))
@@ -85,21 +85,25 @@ static const struct config_enum_entry loglevel_options[] = {
 * Setting to enable or disable transparent decompression plans.
 *
 * The setting is an integer instead of boolean because it is possible to
- * enable transparent decompression plans also when using the Hyperstore table
+ * enable transparent decompression plans also when using the Hypercore table
 * access method. But this is not enabled by default. The options are as
 * follows:
 *
 * (0) = off, disabled completely.
 *
- * (1) = on, enabled for compressed tables but not tables using Hyperstore
+ * (1) = on, enabled for compressed tables but not tables using Hypercore
 * TAM. This is the default setting.
 *
- * (2) = hyperstore, enabled for compressed tables and those using Hyperstore
+ * (2) = hypercore, enabled for compressed tables and those using Hypercore
 * TAM. This is useful mostly for debugging/testing and as a fallback.
 */
 static const struct config_enum_entry transparent_decompression_options[] = {
-{ "on", 1, false }, { "true", 1, false }, { "off", 0, false },
-{ "false", 0, false }, { "hyperstore", 2, false }, { NULL, 0, false }
+{ "on", 1, false },
+{ "true", 1, false },
+{ "off", 0, false },
+{ "false", 0, false },
+{ TS_HYPERCORE_TAM_NAME, 2, false },
+{ NULL, 0, false }
 };

 bool ts_guc_enable_deprecation_warnings = true;
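With the renamed enum value, the GUC accepts 'hypercore' in place of the old 'hyperstore'; a sketch of the three modes described in the comment above (setting name also appears in the test output later in this diff):

    SET timescaledb.enable_transparent_decompression = 'off';        -- (0) disabled completely
    SET timescaledb.enable_transparent_decompression = 'on';         -- (1) default: compressed tables only
    SET timescaledb.enable_transparent_decompression = 'hypercore';  -- (2) also for Hypercore TAM tables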
@@ -142,7 +146,7 @@ TSDLLEXPORT bool ts_guc_enable_job_execution_logging = false;
 bool ts_guc_enable_tss_callbacks = true;
 TSDLLEXPORT bool ts_guc_enable_delete_after_compression = false;
 TSDLLEXPORT bool ts_guc_enable_merge_on_cagg_refresh = false;
-TSDLLEXPORT char *ts_guc_hyperstore_indexam_whitelist;
+TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;

 /* default value of ts_guc_max_open_chunks_per_insert and
 * ts_guc_max_cached_chunks_per_hypertable will be set as their respective boot-value when the
@@ -956,12 +960,12 @@ _guc_init(void)
 /* show_hook= */ NULL);
 #endif

-DefineCustomStringVariable(MAKE_EXTOPTION("hyperstore_indexam_whitelist"),
+DefineCustomStringVariable(MAKE_EXTOPTION("hypercore_indexam_whitelist"),
 gettext_noop(
-"Whitelist for index access methods supported by hyperstore."),
+"Whitelist for index access methods supported by hypercore."),
 gettext_noop(
-"List of index access method names supported by hyperstore."),
-/* valueAddr= */ &ts_guc_hyperstore_indexam_whitelist,
+"List of index access method names supported by hypercore."),
+/* valueAddr= */ &ts_guc_hypercore_indexam_whitelist,
 /* Value= */ "btree,hash",
 /* context= */ PGC_SIGHUP,
 /* flags= */ GUC_LIST_INPUT | GUC_SUPERUSER_ONLY,
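Since the whitelist GUC is registered with PGC_SIGHUP and GUC_SUPERUSER_ONLY, it is changed at the server level and picked up on configuration reload rather than per session; a sketch:

    ALTER SYSTEM SET timescaledb.hypercore_indexam_whitelist = 'btree,hash';
    SELECT pg_reload_conf();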
@@ -98,7 +98,7 @@ extern TSDLLEXPORT bool ts_guc_enable_rowlevel_compression_locking;
 extern TSDLLEXPORT bool ts_guc_debug_require_batch_sorted_merge;

 extern TSDLLEXPORT bool ts_guc_debug_allow_cagg_with_deprecated_funcs;
-extern TSDLLEXPORT char *ts_guc_hyperstore_indexam_whitelist;
+extern TSDLLEXPORT char *ts_guc_hypercore_indexam_whitelist;

 void _guc_init(void);
@@ -727,7 +727,7 @@ add_chunk_to_vacuum(Hypertable *ht, Oid chunk_relid, void *arg)
 makeVacuumRelation(chunk_range_var, chunk_relid, ctx->ht_vacuum_rel->va_cols);
 ctx->chunk_rels = lappend(ctx->chunk_rels, chunk_vacuum_rel);

-/* If we have a compressed chunk and the chunk is not using hyperstore
+/* If we have a compressed chunk and the chunk is not using hypercore
 * access method, make sure to analyze it as well */
 if (chunk->fd.compressed_chunk_id != INVALID_CHUNK_ID && !ts_is_hypercore_am(chunk->amoid))
 {
@@ -2531,11 +2531,11 @@ process_index_chunk(Hypertable *ht, Oid chunk_relid, void *arg)
 hypertable_index_rel = index_open(info->obj.objectId, AccessShareLock);
 indexinfo = BuildIndexInfo(hypertable_index_rel);

-/* Hyperstore does not support arbitrary index, so abort if a non-approved
+/* Hypercore does not support arbitrary index, so abort if a non-approved
 * index type is used.
 *
 * We are using a whitelist rather than a blacklist because supporting
- * indexes on Hyperstore requires special considerations given its
+ * indexes on Hypercore requires special considerations given its
 * dual-heap implementation. */
 if (ts_is_hypercore_am(chunk->amoid))
 {
@@ -2545,7 +2545,7 @@ process_index_chunk(Hypertable *ht, Oid chunk_relid, void *arg)
 ereport(ERROR,
 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
 errmsg("index access method \"%s\" not supported", amname),
-errdetail("Available candidates: %s", ts_guc_hyperstore_indexam_whitelist));
+errdetail("Available candidates: %s", ts_guc_hypercore_indexam_whitelist));
 }

 if (chunk_index_columns_changed(info->extended_options.n_ht_atts, RelationGetDescr(chunk_rel)))
@@ -3495,7 +3495,7 @@ process_set_access_method(AlterTableCmd *cmd, ProcessUtilityArgs *args)
 Oid relid = AlterTableLookupRelation(stmt, NoLock);
 Cache *hcache;
 Hypertable *ht = ts_hypertable_cache_get_cache_and_entry(relid, CACHE_FLAG_MISSING_OK, &hcache);
-if (ht && (strcmp(cmd->name, "hyperstore") == 0))
+if (ht && (strcmp(cmd->name, TS_HYPERCORE_TAM_NAME) == 0))
 {
 /* For hypertables, we automatically add command to set the
 * compression flag if we are setting the access method to be a
@@ -4369,21 +4369,23 @@ process_create_stmt(ProcessUtilityArgs *args)
 {
 CreateStmt *stmt = castNode(CreateStmt, args->parsetree);

-if (stmt->accessMethod && strcmp(stmt->accessMethod, "hyperstore") == 0)
+if (stmt->accessMethod && strcmp(stmt->accessMethod, TS_HYPERCORE_TAM_NAME) == 0)
 ereport(ERROR,
 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("hyperstore access method not supported on \"%s\"", stmt->relation->relname),
-errdetail("The hyperstore access method is only supported for hypertables."),
+errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
+errdetail("The hypercore access method is only supported for hypertables."),
 errhint("Create a hypertable from a table using another access method (e.g., heap),"
-" then use \"ALTER TABLE\" to set the access method to hyperstore."));
+" then use \"ALTER TABLE\" to set the access method to hypercore."));

-if (default_table_access_method && strcmp(default_table_access_method, "hyperstore") == 0)
+if (default_table_access_method &&
+strcmp(default_table_access_method, TS_HYPERCORE_TAM_NAME) == 0)
 ereport(ERROR,
 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("hyperstore access method not supported on \"%s\"", stmt->relation->relname),
-errdetail("The hyperstore access method is only supported for hypertables."),
+errmsg("hypercore access method not supported on \"%s\"", stmt->relation->relname),
+errdetail("The hypercore access method is only supported for hypertables."),
 errhint("It does not make sense to set the default access method for all tables "
-"to \"hyperstore\" since it is only supported for hypertables."));
+"to \"%s\" since it is only supported for hypertables.",
+TS_HYPERCORE_TAM_NAME));

 return DDL_CONTINUE;
 }
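The error hint spells out the supported path: hypercore cannot be chosen at CREATE TABLE time but is set afterwards on a hypertable or chunk; a sketch (table definition hypothetical, assuming default compression settings suffice):

    CREATE TABLE readings (time timestamptz NOT NULL, device int, temp float);
    SELECT create_hypertable('readings', 'time');
    ALTER TABLE readings SET ACCESS METHOD hypercore;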
@@ -1824,7 +1824,7 @@ bool
 ts_is_hypercore_am(Oid amoid)
 {
 if (!OidIsValid(hypercore_amoid))
-hypercore_amoid = get_table_am_oid("hyperstore", true);
+hypercore_amoid = get_table_am_oid(TS_HYPERCORE_TAM_NAME, true);

 if (!OidIsValid(amoid) || !OidIsValid(hypercore_amoid))
 return false;
@@ -49,5 +49,5 @@ add_subdirectory(bgw_policy)
 add_subdirectory(compression)
 add_subdirectory(continuous_aggs)
 add_subdirectory(import)
-add_subdirectory(hyperstore)
+add_subdirectory(hypercore)
 add_subdirectory(nodes)
@@ -283,10 +283,10 @@ policy_compression_add_internal(Oid user_rel_oid, Datum compress_after_datum,
 }

 if (compress_using != NULL && strcmp(compress_using, "heap") != 0 &&
-strcmp(compress_using, "hyperstore") != 0)
+strcmp(compress_using, TS_HYPERCORE_TAM_NAME) != 0)
 ereport(ERROR,
 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-errmsg("can only compress using \"heap\" or \"hyperstore\"")));
+errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));

 /* insert a new job into jobs table */
 namestrcpy(&application_name, "Compression Policy");
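With this check, a compression policy accepts the new AM name through a compress_using argument; a hedged sketch (the SQL-level argument name is inferred from the compress_chunk signature dropped earlier in this diff, not confirmed here):

    SELECT add_compression_policy('readings', INTERVAL '7 days',
                                  compress_using => 'hypercore');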
@@ -44,9 +44,9 @@
 #include "debug_point.h"
 #include "error_utils.h"
 #include "errors.h"
+#include "hypercore/hypercore_handler.h"
+#include "hypercore/utils.h"
 #include "hypercube.h"
-#include "hyperstore/hyperstore_handler.h"
-#include "hyperstore/utils.h"
 #include "hypertable.h"
 #include "hypertable_cache.h"
 #include "scan_iterator.h"
@@ -448,7 +448,7 @@ compress_chunk_impl(Oid hypertable_relid, Oid chunk_relid)
 EventTriggerAlterTableStart(create_dummy_query());
 /* create compressed chunk and a new table */
 compress_ht_chunk = create_compress_chunk(cxt.compress_ht, cxt.srcht_chunk, InvalidOid);
-/* Associate compressed chunk with main chunk. Needed for Hyperstore
+/* Associate compressed chunk with main chunk. Needed for Hypercore
 * TAM to not recreate the compressed chunk again when the main chunk
 * rel is opened. */
 ts_chunk_set_compressed_chunk(cxt.srcht_chunk, compress_ht_chunk->fd.id);
@@ -759,21 +759,21 @@ set_access_method(Oid relid, const char *amname)
 .subtype = AT_SetAccessMethod,
 .name = pstrdup(amname),
 };
-bool to_hyperstore = strcmp(amname, "hyperstore") == 0;
+bool to_hypercore = strcmp(amname, TS_HYPERCORE_TAM_NAME) == 0;
 Oid amoid = ts_get_rel_am(relid);

 /* Setting the same access method is a no-op */
 if (amoid == get_am_oid(amname, false))
 return relid;

-hyperstore_alter_access_method_begin(relid, !to_hyperstore);
+hypercore_alter_access_method_begin(relid, !to_hypercore);
 AlterTableInternal(relid, list_make1(&cmd), false);
-hyperstore_alter_access_method_finish(relid, !to_hyperstore);
+hypercore_alter_access_method_finish(relid, !to_hypercore);

 #else
 ereport(ERROR,
 (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("compression using hyperstore is not supported")));
+errmsg("compression using hypercore is not supported")));
 #endif
 return relid;
 }
@@ -793,45 +793,45 @@ parse_use_access_method(const char *compress_using)

 if (strcmp(compress_using, "heap") == 0)
 return USE_AM_FALSE;
-else if (strcmp(compress_using, "hyperstore") == 0)
+else if (strcmp(compress_using, TS_HYPERCORE_TAM_NAME) == 0)
 return USE_AM_TRUE;

 ereport(ERROR,
 (errcode(ERRCODE_INVALID_PARAMETER_VALUE),
-errmsg("can only compress using \"heap\" or \"hyperstore\"")));
+errmsg("can only compress using \"heap\" or \"%s\"", TS_HYPERCORE_TAM_NAME)));

 pg_unreachable();
 }

 /*
- * When using compress_chunk() with hyperstore, there are three cases to
+ * When using compress_chunk() with hypercore, there are three cases to
 * handle:
 *
- * 1. Convert from (uncompressed) heap to hyperstore
+ * 1. Convert from (uncompressed) heap to hypercore
 *
- * 2. Convert from compressed heap to hyperstore
+ * 2. Convert from compressed heap to hypercore
 *
- * 3. Recompress a hyperstore
+ * 3. Recompress a hypercore
 */
 static Oid
-compress_hyperstore(Chunk *chunk, bool rel_is_hyperstore, enum UseAccessMethod useam,
-bool if_not_compressed, bool recompress)
+compress_hypercore(Chunk *chunk, bool rel_is_hypercore, enum UseAccessMethod useam,
+bool if_not_compressed, bool recompress)
 {
 Oid relid = InvalidOid;

-/* Either the chunk is already a hyperstore (and in that case recompress),
+/* Either the chunk is already a hypercore (and in that case recompress),
 * or it is being converted to one */
-Assert(rel_is_hyperstore || useam == USE_AM_TRUE);
+Assert(rel_is_hypercore || useam == USE_AM_TRUE);

-if (ts_chunk_is_compressed(chunk) && !rel_is_hyperstore)
+if (ts_chunk_is_compressed(chunk) && !rel_is_hypercore)
 {
 Assert(useam == USE_AM_TRUE);
 char *relname = get_rel_name(chunk->table_id);
 char *relschema = get_namespace_name(get_rel_namespace(chunk->table_id));
 const RangeVar *rv = makeRangeVar(relschema, relname, -1);
-/* Do quick migration to hyperstore of already compressed data by
- * simply changing the access method to hyperstore in pg_am. */
-hyperstore_set_am(rv);
+/* Do quick migration to hypercore of already compressed data by
+ * simply changing the access method to hypercore in pg_am. */
+hypercore_set_am(rv);
 return chunk->table_id;
 }
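All three cases above are reachable from SQL via compress_chunk(); a sketch of case 1, converting heap chunks directly to hypercore (hypertable name hypothetical):

    SELECT compress_chunk(ch, compress_using => 'hypercore')
    FROM show_chunks('readings') ch;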
@@ -839,21 +839,21 @@ compress_hyperstore(Chunk *chunk, bool rel_is_hyperstore, enum UseAccessMethod u
 {
 case USE_AM_FALSE:
 elog(NOTICE,
-"cannot compress hyperstore \"%s\" using heap, recompressing instead",
+"cannot compress hypercore \"%s\" using heap, recompressing instead",
 get_rel_name(chunk->table_id));
 TS_FALLTHROUGH;
 case USE_AM_NULL:
-Assert(rel_is_hyperstore);
+Assert(rel_is_hypercore);
 relid = tsl_compress_chunk_wrapper(chunk, if_not_compressed, recompress);
 break;
 case USE_AM_TRUE:
-if (rel_is_hyperstore)
+if (rel_is_hypercore)
 relid = tsl_compress_chunk_wrapper(chunk, if_not_compressed, recompress);
 else
 {
-/* Convert to a compressed hyperstore by simply calling ALTER TABLE
- * <chunk> SET ACCESS METHOD hyperstore */
-set_access_method(chunk->table_id, "hyperstore");
+/* Convert to a compressed hypercore by simply calling ALTER TABLE
+ * <chunk> SET ACCESS METHOD hypercore */
+set_access_method(chunk->table_id, TS_HYPERCORE_TAM_NAME);
 relid = chunk->table_id;
 }
 break;
@@ -874,12 +874,12 @@ tsl_compress_chunk(PG_FUNCTION_ARGS)

 TS_PREVENT_FUNC_IF_READ_ONLY();
 Chunk *chunk = ts_chunk_get_by_relid(uncompressed_chunk_id, true);
-bool rel_is_hyperstore = get_table_am_oid("hyperstore", false) == chunk->amoid;
+bool rel_is_hypercore = get_table_am_oid(TS_HYPERCORE_TAM_NAME, false) == chunk->amoid;
 enum UseAccessMethod useam = parse_use_access_method(compress_using);

-if (rel_is_hyperstore || useam == USE_AM_TRUE)
+if (rel_is_hypercore || useam == USE_AM_TRUE)
 uncompressed_chunk_id =
-compress_hyperstore(chunk, rel_is_hyperstore, useam, if_not_compressed, recompress);
+compress_hypercore(chunk, rel_is_hypercore, useam, if_not_compressed, recompress);
 else
 uncompressed_chunk_id = tsl_compress_chunk_wrapper(chunk, if_not_compressed, recompress);

@@ -1567,10 +1567,10 @@ recompress_chunk_segmentwise_impl(Chunk *uncompressed_chunk)
 /* changed chunk status, so invalidate any plans involving this chunk */
 CacheInvalidateRelcacheByRelid(uncompressed_chunk_id);

-/* Need to rebuild indexes if the relation is using hyperstore
+/* Need to rebuild indexes if the relation is using hypercore
 * TAM. Alternatively, we could insert into indexes when inserting into
 * the compressed rel. */
-if (uncompressed_chunk_rel->rd_tableam == hyperstore_routine())
+if (uncompressed_chunk_rel->rd_tableam == hypercore_routine())
 {
 ReindexParams params = {
 .options = 0,
@@ -4,8 +4,8 @@ set(SOURCES
 ${CMAKE_CURRENT_SOURCE_DIR}/arrow_array.c
 ${CMAKE_CURRENT_SOURCE_DIR}/arrow_cache_explain.c
 ${CMAKE_CURRENT_SOURCE_DIR}/attr_capture.c
-${CMAKE_CURRENT_SOURCE_DIR}/hyperstore_handler.c
-${CMAKE_CURRENT_SOURCE_DIR}/hsproxy.c
+${CMAKE_CURRENT_SOURCE_DIR}/hypercore_handler.c
+${CMAKE_CURRENT_SOURCE_DIR}/hypercore_proxy.c
 ${CMAKE_CURRENT_SOURCE_DIR}/relstats.c
 ${CMAKE_CURRENT_SOURCE_DIR}/utils.c)
 if(PG_VERSION VERSION_GREATER_EQUAL "17.0")
@@ -17,7 +17,7 @@
 #include "compression/compression.h"
 #include "compression/create.h"
 #include "custom_type_cache.h"
-#include "hyperstore_handler.h"
+#include "hypercore_handler.h"
 #include "utils/palloc.h"

 Datum
@@ -53,7 +53,7 @@ arrow_slot_get_attribute_offset_map_slow(TupleTableSlot *slot)
 /* Get the mappings from the relation cache, but put them in the slot's
 * memory context since the cache might become invalidated and rebuilt. */
 const Relation rel = RelationIdGetRelation(relid);
-const HyperstoreInfo *hsinfo = RelationGetHyperstoreInfo(rel);
+const HypercoreInfo *hsinfo = RelationGetHypercoreInfo(rel);

 for (int i = 0; i < hsinfo->num_columns; i++)
 {
@@ -302,14 +302,14 @@ tts_arrow_store_tuple(TupleTableSlot *slot, TupleTableSlot *child_slot, uint16 t
 * compressed tuple again, just with a new tuple index */
 ItemPointerData decoded_tid;

-hyperstore_tid_decode(&decoded_tid, &slot->tts_tid);
+hypercore_tid_decode(&decoded_tid, &slot->tts_tid);

 if (!ItemPointerEquals(&decoded_tid, &child_slot->tts_tid))
 clear_arrow_parent(slot);
 }
 }

-hyperstore_tid_encode(&slot->tts_tid, &child_slot->tts_tid, tuple_index);
+hypercore_tid_encode(&slot->tts_tid, &child_slot->tts_tid, tuple_index);

 /* Stored a compressed tuple so clear the non-compressed slot */
 ExecClearTuple(aslot->noncompressed_slot);
@@ -95,7 +95,7 @@ extern TupleTableSlot *ExecStoreArrowTuple(TupleTableSlot *slot, uint16 tuple_in
 #define TTS_IS_ARROWTUPLE(slot) ((slot)->tts_ops == &TTSOpsArrowTuple)

 /*
- * The encoded Hyperstore TID can address a specific value in a compressed tuple by
+ * The encoded Hypercore TID can address a specific value in a compressed tuple by
 * adding an extra "tuple index" to the TID, which is the index into the array of values
 * in the compressed tuple. The new encoding consists of the block number (CBLOCK) and offset
 * number (COFFSET) of the TID for the compressed row as block number and the
@@ -127,7 +127,7 @@ extern TupleTableSlot *ExecStoreArrowTuple(TupleTableSlot *slot, uint16 tuple_in
 #define OFFSET_MASK (OFFSET_LIMIT - 1)

 static inline void
-hyperstore_tid_encode(ItemPointerData *out_tid, const ItemPointerData *in_tid, uint16 tuple_index)
+hypercore_tid_encode(ItemPointerData *out_tid, const ItemPointerData *in_tid, uint16 tuple_index)
 {
 const BlockNumber block = ItemPointerGetBlockNumber(in_tid);
 const OffsetNumber offset = ItemPointerGetOffsetNumber(in_tid);
@@ -143,7 +143,7 @@ hyperstore_tid_encode(ItemPointerData *out_tid, const ItemPointerData *in_tid, u
 }

 static inline uint16
-hyperstore_tid_decode(ItemPointerData *out_tid, const ItemPointerData *in_tid)
+hypercore_tid_decode(ItemPointerData *out_tid, const ItemPointerData *in_tid)
 {
 const uint64 encoded_tid = ~COMPRESSED_FLAG & ItemPointerGetBlockNumber(in_tid);
 const uint16 tuple_index = ItemPointerGetOffsetNumber(in_tid);
@@ -158,7 +158,7 @@ hyperstore_tid_decode(ItemPointerData *out_tid, const ItemPointerData *in_tid)
 }

 static inline void
-hyperstore_tid_set_tuple_index(ItemPointerData *tid, uint32 tuple_index)
+hypercore_tid_set_tuple_index(ItemPointerData *tid, uint32 tuple_index)
 {
 /* Assert that we do not overflow the increment: we only have 10 bits for the tuple index */
 Assert(tuple_index < 1024);
@@ -166,10 +166,10 @@ hyperstore_tid_set_tuple_index(ItemPointerData *tid, uint32 tuple_index)
 }

 static inline void
-hyperstore_tid_increment(ItemPointerData *tid, uint16 increment)
+hypercore_tid_increment(ItemPointerData *tid, uint16 increment)
 {
 /* Assert that we do not overflow the increment: we only have 10 bits for the tuple index */
-hyperstore_tid_set_tuple_index(tid, ItemPointerGetOffsetNumber(tid) + increment);
+hypercore_tid_set_tuple_index(tid, ItemPointerGetOffsetNumber(tid) + increment);
 }

 static inline bool
@@ -304,7 +304,7 @@ ExecIncrOrDecrArrowTuple(TupleTableSlot *slot, int32 amount)
 }

 Assert(tuple_index > 0 && tuple_index <= aslot->total_row_count);
-hyperstore_tid_set_tuple_index(&slot->tts_tid, tuple_index);
+hypercore_tid_set_tuple_index(&slot->tts_tid, tuple_index);
 aslot->tuple_index = (uint16) tuple_index;
 slot->tts_flags &= ~TTS_FLAG_EMPTY;
 slot->tts_nvalid = 0;
@@ -99,10 +99,10 @@ collect_targets(List *targetlist, struct CaptureAttributesContext *context)
 /*
 * Capture index attributes.
 *
- * The attributes referenced by an index is captured so that the hyperstore
+ * The attributes referenced by an index is captured so that the hypercore
 * TAM can later identify the index as a segmentby index (one that only
 * indexes compressed segments/tuples). When a segmentby index is identified
- * by hyperstore, it will "unwrap" the compressed tuples on-the-fly into
+ * by hypercore, it will "unwrap" the compressed tuples on-the-fly into
 * individual (uncompressed) tuples even though the index only references the
 * compressed segments.
 */
(File diff suppressed because it is too large.)
@@ -18,13 +18,13 @@
 * individual access methods, so use bit 16. */
 #define SK_NO_COMPRESSED 0x8000

-extern void hyperstore_set_analyze_relid(Oid relid);
-extern const TableAmRoutine *hyperstore_routine(void);
-extern void hyperstore_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht);
-extern void hyperstore_alter_access_method_begin(Oid relid, bool to_other_am);
-extern void hyperstore_alter_access_method_finish(Oid relid, bool to_other_am);
-extern Datum hyperstore_handler(PG_FUNCTION_ARGS);
-extern void hyperstore_xact_event(XactEvent event, void *arg);
+extern void hypercore_set_analyze_relid(Oid relid);
+extern const TableAmRoutine *hypercore_routine(void);
+extern void hypercore_set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, Hypertable *ht);
+extern void hypercore_alter_access_method_begin(Oid relid, bool to_other_am);
+extern void hypercore_alter_access_method_finish(Oid relid, bool to_other_am);
+extern Datum hypercore_handler(PG_FUNCTION_ARGS);
+extern void hypercore_xact_event(XactEvent event, void *arg);

 typedef struct ColumnCompressionSettings
 {
@@ -43,7 +43,7 @@ typedef struct ColumnCompressionSettings
 * This struct is cached in a relcache entry's rd_amcache pointer and needs to
 * have a structure that can be palloc'ed in a single memory chunk.
 */
-typedef struct HyperstoreInfo
+typedef struct HypercoreInfo
 {
 int32 hypertable_id; /* TimescaleDB ID of parent hypertable */
 int32 relation_id; /* TimescaleDB ID of relation (chunk ID) */
@@ -54,6 +54,6 @@ typedef struct HyperstoreInfo
 * compressed rel */
 /* Per-column information follows. */
 ColumnCompressionSettings columns[FLEXIBLE_ARRAY_MEMBER];
-} HyperstoreInfo;
+} HypercoreInfo;

-extern HyperstoreInfo *RelationGetHyperstoreInfo(Relation rel);
+extern HypercoreInfo *RelationGetHypercoreInfo(Relation rel);
@@ -22,61 +22,61 @@
 #include <utils/regproc.h>

 #include <compat/compat.h>
-#include "hyperstore/arrow_tts.h"
-#include "hyperstore/hsproxy.h"
+#include "hypercore/arrow_tts.h"
+#include "hypercore/hypercore_proxy.h"
 #include <chunk.h>

 /**
- * Hyperstore proxy index AM (hsproxy).
+ * Hypercore proxy index AM (hypercore_proxy).
 *
- * The hsproxy index AM doesn't provide any indexing functionality itself. It
- * is only used to "proxy" vacuum calls between a hyperstore's internal
+ * The hypercore_proxy index AM doesn't provide any indexing functionality itself. It
+ * is only used to "proxy" vacuum calls between a hypercore's internal
 * compressed relation (holding compressed data) and the indexes defined on
- * the user-visible hyperstore relation (holding non-compressed data).
+ * the user-visible hypercore relation (holding non-compressed data).
 *
- * A hyperstore consists of two relations internally: the user-visible
- * "hyperstore" relation and the internal compressed relation and indexes on a
- * hyperstore encompass data from both these relations. This creates a
+ * A hypercore consists of two relations internally: the user-visible
+ * "hypercore" relation and the internal compressed relation and indexes on a
+ * hypercore encompass data from both these relations. This creates a
 * complication when vacuuming a relation because only he indexes defined on
- * the relation are vacuumed. Therefore, a vacuum on a hyperstore's
+ * the relation are vacuumed. Therefore, a vacuum on a hypercore's
 * non-compressed relation will vacuum pointers to non-compressed tuples from
 * the indexes, but not pointers to compressed tuples. A vacuum on the
 * compressed relation, on the other hand, will not vacuum anything from the
- * hyperstore indexes because they are defined on the non-compressed relation
+ * hypercore indexes because they are defined on the non-compressed relation
 * and only indexes defined directly on the internal compressed relation will
 * be vacuumed.
 *
- * The hsproxy index fixes this issue by relaying vacuum (bulkdelete calls)
+ * The hypercore_proxy index fixes this issue by relaying vacuum (bulkdelete calls)
 * from the compressed relation to all indexes defined on the non-compressed
- * relation. There needs to be only one hsproxy index defined on a compressed
+ * relation. There needs to be only one hypercore_proxy index defined on a compressed
 * relation to vacuum all indexes.
 *
- * The hsproxy index needs to be defined on at least one column on the
+ * The hypercore_proxy index needs to be defined on at least one column on the
 * compressed relation (it does not really matter which one). By default it
 * uses the "count" column of the compressed relation and when a set of
 * compressed tuples are vacuumed, its bulkdelete callback is called with
- * those tuples. The callback relays that call to the hyperstore indexes and
+ * those tuples. The callback relays that call to the hypercore indexes and
 * also decodes TIDs from the indexes to match the TIDs in the compressed
 * relation.
 */

 /*
- * Given the internal compressed relid, lookup the corresponding hyperstore
+ * Given the internal compressed relid, lookup the corresponding hypercore
 * relid.
 *
 * Currently, this relies on information in the "chunk" metadata
 * table. Ideally, the lookup should not have any dependencies on chunks and,
- * instead, the hyperstore mappings should be self-contained in compression
- * settings or a dedicated hyperstore settings table. Another idea is to keep
+ * instead, the hypercore mappings should be self-contained in compression
+ * settings or a dedicated hypercore settings table. Another idea is to keep
 * the mappings in index reloptions, but this does not handle relation name
 * changes well.
 */
 static Oid
-get_hyperstore_relid(Oid compress_relid)
+get_hypercore_relid(Oid compress_relid)
 {
 Datum datid = DirectFunctionCall1(ts_chunk_id_from_relid, ObjectIdGetDatum(compress_relid));
 ScanIterator iterator = ts_scan_iterator_create(CHUNK, AccessShareLock, CurrentMemoryContext);
-Oid hyperstore_relid = InvalidOid;
+Oid hypercore_relid = InvalidOid;

 iterator.ctx.index =
 catalog_get_index(ts_catalog_get(), CHUNK, CHUNK_COMPRESSED_CHUNK_ID_INDEX);
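Because hypercore_proxy is registered as an ordinary index AM in pg_am, its proxy indexes can be listed from the catalogs; a sketch:

    SELECT i.indexrelid::regclass AS proxy_index, i.indrelid::regclass AS compressed_rel
    FROM pg_index i
    JOIN pg_class c ON c.oid = i.indexrelid
    JOIN pg_am a ON a.oid = c.relam
    WHERE a.amname = 'hypercore_proxy';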
@@ -96,18 +96,18 @@ get_hyperstore_relid(Oid compress_relid)

 if (!isnull)
 {
-hyperstore_relid = ts_chunk_get_relid(DatumGetInt32(datum), true);
+hypercore_relid = ts_chunk_get_relid(DatumGetInt32(datum), true);
 break;
 }
 }

 ts_scan_iterator_close(&iterator);

-return hyperstore_relid;
+return hypercore_relid;
 }

 static IndexBuildResult *
-hsproxy_build(Relation rel, Relation index, struct IndexInfo *indexInfo)
+hypercore_proxy_build(Relation rel, Relation index, struct IndexInfo *indexInfo)
 {
 IndexBuildResult *result = palloc0(sizeof(IndexBuildResult));
 result->heap_tuples = 0;
@@ -119,7 +119,7 @@ hsproxy_build(Relation rel, Relation index, struct IndexInfo *indexInfo)
 * HSProxy doesn't store any data, so buildempty() is a dummy.
 */
 static void
-hsproxy_buildempty(Relation index)
+hypercore_proxy_buildempty(Relation index)
 {
 }

@@ -132,13 +132,13 @@ typedef struct HSProxyCallbackState
 } HSProxyCallbackState;

 /*
- * IndexBulkDeleteCallback for determining if a hyperstore index entry (TID)
+ * IndexBulkDeleteCallback for determining if a hypercore index entry (TID)
 * can be deleted.
 *
 * The state pointer contains to original callback and state.
 */
 static bool
-hsproxy_can_delete_tid(ItemPointer tid, void *state)
+hypercore_proxy_can_delete_tid(ItemPointer tid, void *state)
 {
 HSProxyCallbackState *delstate = state;
 ItemPointerData decoded_tid;
@@ -149,7 +149,7 @@ hsproxy_can_delete_tid(ItemPointer tid, void *state)
 return false;

 /* Decode the TID into the original compressed relation TID */
-hyperstore_tid_decode(&decoded_tid, tid);
+hypercore_tid_decode(&decoded_tid, tid);

 /* Check if this is the same TID as in the last call. This is a simple
 * optimization for when we are just traversing "compressed" TIDs that all
@@ -185,7 +185,7 @@ bulkdelete_one_index(Relation hsrel, Relation indexrel, IndexBulkDeleteResult *i
 ivinfo.strategy = strategy;

 IndexBulkDeleteResult *result =
-index_bulk_delete(&ivinfo, istat, hsproxy_can_delete_tid, delstate);
+index_bulk_delete(&ivinfo, istat, hypercore_proxy_can_delete_tid, delstate);

 return result;
 }
@@ -208,15 +208,15 @@ typedef struct HSProxyVacuumState
 * calling the IndexBulkDeleteCallback function for every TID in the index to
 * ask whether it should be removed or not.
 *
- * In the hsproxy case, this call is simply relayed to all indexes on the
- * user-visible hyperstore relation, calling our own callback instead.
+ * In the hypercore_proxy case, this call is simply relayed to all indexes on the
+ * user-visible hypercore relation, calling our own callback instead.
 */
 static IndexBulkDeleteResult *
-hsproxy_bulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
-IndexBulkDeleteCallback callback, void *callback_state)
+hypercore_proxy_bulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
+IndexBulkDeleteCallback callback, void *callback_state)
 {
-Oid hyperstore_relid = get_hyperstore_relid(info->index->rd_index->indrelid);
-Relation hsrel = table_open(hyperstore_relid, ShareUpdateExclusiveLock);
+Oid hypercore_relid = get_hypercore_relid(info->index->rd_index->indrelid);
+Relation hsrel = table_open(hypercore_relid, ShareUpdateExclusiveLock);
 HSProxyCallbackState delstate = {
 .orig_callback = callback,
 .orig_state = callback_state,
@@ -242,8 +242,8 @@ hsproxy_bulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,

 for (int i = 0; i < nindexes; i++)
 {
-/* There should never be any hsproxy indexes that we proxy */
-Assert(indrels[i]->rd_indam->ambuildempty != hsproxy_buildempty);
+/* There should never be any hypercore_proxy indexes that we proxy */
+Assert(indrels[i]->rd_indam->ambuildempty != hypercore_proxy_buildempty);
 bulkdelete_one_index(hsrel, indrels[i], &vacstate->indstats[i], info->strategy, &delstate);
 }

@@ -302,10 +302,10 @@ vacuumcleanup_one_index(Relation hsrel, Relation indexrel, IndexBulkDeleteResult
 * first. Therefore, we cannot always assume that vacstate has been created.
 */
 static IndexBulkDeleteResult *
-hsproxy_vacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
+hypercore_proxy_vacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 {
-Oid hyperstore_relid = get_hyperstore_relid(info->index->rd_index->indrelid);
-Relation hsrel = table_open(hyperstore_relid, ShareUpdateExclusiveLock);
+Oid hypercore_relid = get_hypercore_relid(info->index->rd_index->indrelid);
+Relation hsrel = table_open(hypercore_relid, ShareUpdateExclusiveLock);
 HSProxyVacuumState *vacstate = (HSProxyVacuumState *) stats;
 Relation *indrels;
 int nindexes = 0;
@@ -324,8 +324,8 @@ hsproxy_vacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)

 for (int i = 0; i < nindexes; i++)
 {
-/* There should never be any hsproxy indexes that we proxy */
-Assert(indrels[i]->rd_indam->ambuildempty != hsproxy_buildempty);
+/* There should never be any hypercore_proxy indexes that we proxy */
+Assert(indrels[i]->rd_indam->ambuildempty != hypercore_proxy_buildempty);
 IndexBulkDeleteResult *result = vacuumcleanup_one_index(hsrel,
 indrels[i],
 &vacstate->indstats[i],
@@ -356,9 +356,10 @@ hsproxy_vacuumcleanup(IndexVacuumInfo *info, IndexBulkDeleteResult *stats)
 * make the cost so high that the index is effectively never used in a query.
 */
 static void
-hsproxy_costestimate(struct PlannerInfo *root, struct IndexPath *path, double loop_count,
-Cost *indexStartupCost, Cost *indexTotalCost, Selectivity *indexSelectivity,
-double *indexCorrelation, double *indexPages)
+hypercore_proxy_costestimate(struct PlannerInfo *root, struct IndexPath *path, double loop_count,
+Cost *indexStartupCost, Cost *indexTotalCost,
+Selectivity *indexSelectivity, double *indexCorrelation,
+double *indexPages)
 {
 *indexTotalCost = *indexStartupCost = *indexCorrelation = INFINITY;
 *indexSelectivity = 1;
@@ -367,13 +368,13 @@ hsproxy_costestimate(struct PlannerInfo *root, struct IndexPath *path, double lo

 /* parse index reloptions */
 static bytea *
-hsproxy_options(Datum reloptions, bool validate)
+hypercore_proxy_options(Datum reloptions, bool validate)
 {
 return NULL;
 }

 static bool
-hsproxy_validate(Oid opclassoid)
+hypercore_proxy_validate(Oid opclassoid)
 {
 /* Not really using opclass, so simply return true */
 return true;
@@ -383,22 +384,22 @@ hsproxy_validate(Oid opclassoid)
 * Index insert.
 *
 * Currently needed as a dummy. Could be used to insert into all indexes on
- * the hyperstore rel when inserting data into the compressed rel during,
+ * the hypercore rel when inserting data into the compressed rel during,
 * e.g., recompression.
 */
 static bool
-hsproxy_insert(Relation indexRelation, Datum *values, bool *isnull, ItemPointer heap_tid,
-Relation heapRelation, IndexUniqueCheck checkUnique,
+hypercore_proxy_insert(Relation indexRelation, Datum *values, bool *isnull, ItemPointer heap_tid,
+Relation heapRelation, IndexUniqueCheck checkUnique,
 #if PG14_GE
-bool indexUnchanged,
+bool indexUnchanged,
 #endif
-struct IndexInfo *indexInfo)
+struct IndexInfo *indexInfo)
 {
 return true;
 }

 Datum
-hsproxy_handler(PG_FUNCTION_ARGS)
+hypercore_proxy_handler(PG_FUNCTION_ARGS)
 {
 IndexAmRoutine *amroutine = makeNode(IndexAmRoutine);

@@ -426,19 +427,19 @@ hsproxy_handler(PG_FUNCTION_ARGS)
 amroutine->amkeytype = InvalidOid;

 /* Callbacks */
-amroutine->ambuild = hsproxy_build;
-amroutine->ambuildempty = hsproxy_buildempty;
-amroutine->ambulkdelete = hsproxy_bulkdelete;
-amroutine->amvacuumcleanup = hsproxy_vacuumcleanup;
-amroutine->amcostestimate = hsproxy_costestimate;
-amroutine->amoptions = hsproxy_options;
+amroutine->ambuild = hypercore_proxy_build;
+amroutine->ambuildempty = hypercore_proxy_buildempty;
+amroutine->ambulkdelete = hypercore_proxy_bulkdelete;
+amroutine->amvacuumcleanup = hypercore_proxy_vacuumcleanup;
+amroutine->amcostestimate = hypercore_proxy_costestimate;
+amroutine->amoptions = hypercore_proxy_options;

 /* Optional callbacks */
-amroutine->aminsert = hsproxy_insert;
+amroutine->aminsert = hypercore_proxy_insert;
 amroutine->amcanreturn = NULL;
 amroutine->amproperty = NULL;
 amroutine->ambuildphasename = NULL;
-amroutine->amvalidate = hsproxy_validate;
+amroutine->amvalidate = hypercore_proxy_validate;
 #if PG14_GE
 amroutine->amadjustmembers = NULL;
 #endif
@@ -8,5 +8,5 @@
 #include <postgres.h>
 #include <fmgr.h>

-extern void _hsproxy_init(void);
-extern Datum hsproxy_handler(PG_FUNCTION_ARGS);
+extern void _hypercore_proxy_init(void);
+extern Datum hypercore_proxy_handler(PG_FUNCTION_ARGS);
@@ -16,16 +16,17 @@
 #include <utils/lsyscache.h>
 #include <utils/syscache.h>

+#include "extension_constants.h"
 #include "utils.h"
 #include <src/utils.h>

 /*
- * Make a relation use hyperstore without rewriting any data, simply by
+ * Make a relation use hypercore without rewriting any data, simply by
 * updating the AM in pg_class. This only works if the relation is already
- * using (non-hyperstore) compression.
+ * using (non-hypercore) compression.
 */
 void
-hyperstore_set_am(const RangeVar *rv)
+hypercore_set_am(const RangeVar *rv)
 {
 HeapTuple tp;
 Oid relid = RangeVarGetRelid(rv, NoLock, false);
@@ -34,12 +35,12 @@ hyperstore_set_am(const RangeVar *rv)
 if (HeapTupleIsValid(tp))
 {
 Form_pg_class reltup = (Form_pg_class) GETSTRUCT(tp);
-Oid hyperstore_amoid = get_table_am_oid("hyperstore", false);
+Oid hypercore_amoid = get_table_am_oid(TS_HYPERCORE_TAM_NAME, false);
 Relation class_rel = table_open(RelationRelationId, RowExclusiveLock);

-elog(DEBUG1, "migrating table \"%s\" to hyperstore", get_rel_name(relid));
+elog(DEBUG1, "migrating table \"%s\" to hypercore", get_rel_name(relid));

-reltup->relam = hyperstore_amoid;
+reltup->relam = hypercore_amoid;
 /* Set the new table access method */
 CatalogTupleUpdate(class_rel, &tp->t_self, tp);
 /* Also update pg_am dependency for the relation */
@@ -49,7 +50,7 @@ hyperstore_set_am(const RangeVar *rv)
 };
 ObjectAddress referenced = {
 .classId = AccessMethodRelationId,
-.objectId = hyperstore_amoid,
+.objectId = hypercore_amoid,
 };

 recordDependencyOn(&depender, &referenced, DEPENDENCY_NORMAL);
@@ -7,4 +7,4 @@

 #include <postgres.h>

-extern void hyperstore_set_am(const RangeVar *rv);
+extern void hypercore_set_am(const RangeVar *rv);
@@ -34,11 +34,11 @@
 #include "continuous_aggs/utils.h"
 #include "cross_module_fn.h"
 #include "export.h"
-#include "hyperstore/arrow_cache_explain.h"
-#include "hyperstore/arrow_tts.h"
-#include "hyperstore/attr_capture.h"
-#include "hyperstore/hsproxy.h"
-#include "hyperstore/hyperstore_handler.h"
+#include "hypercore/arrow_cache_explain.h"
+#include "hypercore/arrow_tts.h"
+#include "hypercore/attr_capture.h"
+#include "hypercore/hypercore_handler.h"
+#include "hypercore/hypercore_proxy.h"
 #include "hypertable.h"
 #include "license_guc.h"
 #include "nodes/columnar_scan/columnar_scan.h"
@@ -66,7 +66,7 @@ extern void PGDLLEXPORT _PG_init(void);
 static void
 tsl_xact_event(XactEvent event, void *arg)
 {
-hyperstore_xact_event(event, arg);
+hypercore_xact_event(event, arg);
 }

 /*
@@ -173,8 +173,8 @@ CrossModuleFunctions tsl_cm_functions = {
 .decompress_chunk = tsl_decompress_chunk,
 .decompress_batches_for_insert = decompress_batches_for_insert,
 .decompress_target_segments = decompress_target_segments,
-.hyperstore_handler = hyperstore_handler,
-.hsproxy_handler = hsproxy_handler,
+.hypercore_handler = hypercore_handler,
+.hypercore_proxy_handler = hypercore_proxy_handler,
 .is_compressed_tid = tsl_is_compressed_tid,
 .ddl_command_start = tsl_ddl_command_start,
 .ddl_command_end = tsl_ddl_command_end,
@@ -29,8 +29,8 @@
 #include "columnar_scan.h"
 #include "compression/arrow_c_data_interface.h"
 #include "compression/compression.h"
-#include "hyperstore/arrow_tts.h"
-#include "hyperstore/hyperstore_handler.h"
+#include "hypercore/arrow_tts.h"
+#include "hypercore/hypercore_handler.h"
 #include "import/ts_explain.h"
 #include "nodes/decompress_chunk/vector_quals.h"

@@ -140,7 +140,7 @@ vector_qual_state_init(VectorQualState *vqstate, ExprContext *econtext)
 * The scankey quals returned in pass 1 is used for EXPLAIN.
 */
 static List *
-process_scan_key_quals(const HyperstoreInfo *hsinfo, Index relid, const List *quals,
+process_scan_key_quals(const HypercoreInfo *hsinfo, Index relid, const List *quals,
 List **remaining_quals, ScanKey scankeys, unsigned scankeys_capacity)
 {
 List *scankey_quals = NIL;
@@ -269,14 +269,14 @@ process_scan_key_quals(const HyperstoreInfo *hsinfo, Index relid, const List *qu
 }

 static List *
-extract_scankey_quals(const HyperstoreInfo *hsinfo, Index relid, const List *quals,
+extract_scankey_quals(const HypercoreInfo *hsinfo, Index relid, const List *quals,
 List **remaining_quals)
 {
 return process_scan_key_quals(hsinfo, relid, quals, remaining_quals, NULL, 0);
 }

 static ScanKey
-create_scankeys_from_quals(const HyperstoreInfo *hsinfo, Index relid, const List *quals)
+create_scankeys_from_quals(const HypercoreInfo *hsinfo, Index relid, const List *quals)
 {
 unsigned capacity = list_length(quals);
 ScanKey scankeys = palloc0(sizeof(ScanKeyData) * capacity);
@@ -631,7 +631,7 @@ columnar_scan_begin(CustomScanState *state, EState *estate, int eflags)

 if (cstate->nscankeys > 0)
 {
-const HyperstoreInfo *hsinfo = RelationGetHyperstoreInfo(state->ss.ss_currentRelation);
+const HypercoreInfo *hsinfo = RelationGetHypercoreInfo(state->ss.ss_currentRelation);
 Scan *scan = (Scan *) state->ss.ps.plan;
 cstate->scankeys =
 create_scankeys_from_quals(hsinfo, scan->scanrelid, cstate->scankey_quals);
@@ -845,11 +845,11 @@ static CustomScanMethods columnar_scan_plan_methods = {
 .CreateCustomScanState = columnar_scan_state_create,
 };

-typedef struct VectorQualInfoHyperstore
+typedef struct VectorQualInfoHypercore
 {
 VectorQualInfo vqinfo;
-const HyperstoreInfo *hsinfo;
-} VectorQualInfoHyperstore;
+const HypercoreInfo *hsinfo;
+} VectorQualInfoHypercore;

 static bool *
 columnar_scan_build_vector_attrs(const ColumnCompressionSettings *columns, int numcolumns)
@@ -877,14 +877,14 @@ columnar_scan_plan_create(PlannerInfo *root, RelOptInfo *rel, CustomPath *best_p
 CustomScan *columnar_scan_plan = makeNode(CustomScan);
 RangeTblEntry *rte = planner_rt_fetch(rel->relid, root);
 Relation relation = RelationIdGetRelation(rte->relid);
-HyperstoreInfo *hsinfo = RelationGetHyperstoreInfo(relation);
+HypercoreInfo *hsinfo = RelationGetHypercoreInfo(relation);
 List *vectorized_quals = NIL;
 List *nonvectorized_quals = NIL;
 List *scankey_quals = NIL;
 List *remaining_quals = NIL;
 ListCell *lc;

-VectorQualInfoHyperstore vqih = {
+VectorQualInfoHypercore vqih = {
 .vqinfo = {
 .rti = rel->relid,
 .vector_attrs = columnar_scan_build_vector_attrs(hsinfo->columns, hsinfo->num_columns),
@@ -18,7 +18,7 @@
 #include "chunkwise_agg.h"
 #include "continuous_aggs/planner.h"
 #include "guc.h"
-#include "hyperstore/hyperstore_handler.h"
+#include "hypercore/hypercore_handler.h"
 #include "hypertable.h"
 #include "nodes/columnar_scan/columnar_scan.h"
 #include "nodes/decompress_chunk/decompress_chunk.h"
@@ -164,7 +164,7 @@ tsl_set_rel_pathlist_query(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeT
 if (ts_guc_enable_columnarscan)
 columnar_scan_set_rel_pathlist(root, rel, ht);

-hyperstore_set_rel_pathlist(root, rel, ht);
+hypercore_set_rel_pathlist(root, rel, ht);
 }
 }
@@ -16,8 +16,8 @@

 #include "compression/create.h"
 #include "continuous_aggs/create.h"
-#include "hyperstore/hyperstore_handler.h"
-#include "hyperstore/utils.h"
+#include "hypercore/hypercore_handler.h"
+#include "hypercore/utils.h"
 #include "hypertable_cache.h"
 #include "process_utility.h"
 #include "ts_catalog/continuous_agg.h"
@@ -44,29 +44,29 @@ tsl_ddl_command_start(ProcessUtilityArgs *args)
 case AT_SetAccessMethod:
 {
 Oid relid = AlterTableLookupRelation(stmt, NoLock);
-bool to_hyperstore = (strcmp(cmd->name, "hyperstore") == 0);
+bool to_hypercore = (strcmp(cmd->name, TS_HYPERCORE_TAM_NAME) == 0);
 Relation rel = RelationIdGetRelation(relid);
-bool is_hyperstore = rel->rd_tableam == hyperstore_routine();
+bool is_hypercore = rel->rd_tableam == hypercore_routine();
 RelationClose(rel);

 /* If neither the current tableam nor the desired
-* tableam is hyperstore, we do nothing. We also do
-* nothing if the table is already using hyperstore
-* and we are trying to convert to hyperstore
+* tableam is hypercore, we do nothing. We also do
+* nothing if the table is already using hypercore
+* and we are trying to convert to hypercore
 * again. */
-if (is_hyperstore == to_hyperstore)
+if (is_hypercore == to_hypercore)
 break;
 /* Here we know that we are either moving to or from a
-* hyperstore. Check that it is on a chunk or
+* hypercore. Check that it is on a chunk or
 * hypertable. */
 Chunk *chunk = ts_chunk_get_by_relid(relid, false);

 if (chunk)
 {
 /* Check if we can do quick migration */
-if (!is_hyperstore && ts_chunk_is_compressed(chunk))
+if (!is_hypercore && ts_chunk_is_compressed(chunk))
 {
-hyperstore_set_am(stmt->relation);
+hypercore_set_am(stmt->relation);
 /* Skip this command in the alter table
 * statement since we process it via quick
 * migration */
@@ -74,14 +74,14 @@ tsl_ddl_command_start(ProcessUtilityArgs *args)
 continue;
 }

-hyperstore_alter_access_method_begin(relid, !to_hyperstore);
+hypercore_alter_access_method_begin(relid, !to_hypercore);
 }
 else if (!ts_is_hypertable(relid))
 ereport(ERROR,
 errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
-errmsg("hyperstore access method not supported on \"%s\"",
+errmsg("hypercore access method not supported on \"%s\"",
 stmt->relation->relname),
-errdetail("Hyperstore access method is only supported on "
+errdetail("Hypercore access method is only supported on "
 "hypertables and chunks."));

 break;
@@ -187,8 +187,8 @@ tsl_ddl_command_end(EventTriggerData *command)
 case AT_SetAccessMethod:
 {
 Oid relid = AlterTableLookupRelation(stmt, NoLock);
-bool to_hyperstore = (strcmp(cmd->name, "hyperstore") == 0);
-hyperstore_alter_access_method_finish(relid, !to_hyperstore);
+bool to_hypercore = (strcmp(cmd->name, TS_HYPERCORE_TAM_NAME) == 0);
+hypercore_alter_access_method_finish(relid, !to_hypercore);
 break;
 }
 #endif

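In SQL terms, the paths this handler distinguishes come down to the following (a minimal sketch; the chunk name is illustrative and not taken from the commit):

-- Convert a chunk to the hypercore table access method. If the chunk is
-- already compressed, the quick-migration path above is taken instead of
-- a full rewrite.
ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET ACCESS METHOD hypercore;

-- Convert back to the default heap access method.
ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET ACCESS METHOD heap;

-- On a relation that is neither a hypertable nor a chunk, the command is
-- rejected with "hypercore access method not supported on ...".
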
@@ -2,9 +2,9 @@
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
 \c :TEST_DBNAME :ROLE_SUPERUSER
-show timescaledb.hyperstore_indexam_whitelist;
-timescaledb.hyperstore_indexam_whitelist
-------------------------------------------
+show timescaledb.hypercore_indexam_whitelist;
+timescaledb.hypercore_indexam_whitelist
+-----------------------------------------
 btree,hash
 (1 row)

@@ -89,16 +89,16 @@ WHERE location = 1;

 -- We should be able to set the table access method for a chunk, which
 -- will automatically compress the chunk.
-ALTER TABLE :chunk SET ACCESS METHOD hyperstore;
+ALTER TABLE :chunk SET ACCESS METHOD hypercore;
 SET timescaledb.enable_transparent_decompression TO false;
 vacuum analyze readings;
 -- Show access method used on chunk
 SELECT c.relname, a.amname FROM pg_class c
 INNER JOIN pg_am a ON (c.relam = a.oid)
 WHERE c.oid = :'chunk'::regclass;
-relname | amname
-------------------+------------
-_hyper_1_1_chunk | hyperstore
+relname | amname
+------------------+-----------
+_hyper_1_1_chunk | hypercore
 (1 row)

 -- This should show the chunk as compressed
@@ -228,7 +228,7 @@ SELECT * FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5;
 SET enable_indexscan = false;
 -- Compare the output to transparent decompression. Heap output is
 -- shown further down.
-SET timescaledb.enable_transparent_decompression TO 'hyperstore';
+SET timescaledb.enable_transparent_decompression TO 'hypercore';
 EXPLAIN (costs off, timing off, summary off)
 SELECT * FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5;
 QUERY PLAN
@@ -305,7 +305,7 @@ SET enable_indexscan = false;
 SET enable_seqscan = true;
 SET timescaledb.enable_columnarscan = true;
 -- With transparent decompression
-SET timescaledb.enable_transparent_decompression TO 'hyperstore';
+SET timescaledb.enable_transparent_decompression TO 'hypercore';
 SELECT * FROM :chunk WHERE location < 4 ORDER BY time, device LIMIT 5;
 time | location | device | temp | humidity | jdata
 ------------------------------+----------+--------+------+------------------+------------------
@@ -338,7 +338,7 @@ SELECT * FROM :chunk ORDER BY location ASC LIMIT 5;
 (5 rows)

 -- Show with transparent decompression
-SET timescaledb.enable_transparent_decompression TO 'hyperstore';
+SET timescaledb.enable_transparent_decompression TO 'hypercore';
 SELECT * FROM :chunk ORDER BY location ASC LIMIT 5;
 time | location | device | temp | humidity | jdata
 ------------------------------+----------+--------+------+------------------+------------------
@@ -371,7 +371,7 @@ ON (c1.compressed_chunk_id = c2.id);
 (1 row)

 ALTER TABLE :chunk SET ACCESS METHOD heap;
-SET timescaledb.enable_transparent_decompression TO 'hyperstore';
+SET timescaledb.enable_transparent_decompression TO 'hypercore';
 -- The compressed chunk should no longer exist
 SELECT format('%I.%I', c2.schema_name, c2.table_name)::regclass AS cchunk
 FROM _timescaledb_catalog.chunk c1
@@ -444,9 +444,9 @@ FROM orig JOIN decomp USING (device) WHERE orig.count != decomp.count;
 --------+------------+--------------+------
 (0 rows)

--- Convert back to hyperstore to check that metadata was cleaned up
--- from last time this table used hyperstore
-ALTER TABLE :chunk SET ACCESS METHOD hyperstore;
+-- Convert back to hypercore to check that metadata was cleaned up
+-- from last time this table used hypercore
+ALTER TABLE :chunk SET ACCESS METHOD hypercore;
 SET timescaledb.enable_transparent_decompression TO false;
 -- Get the chunk's corresponding compressed chunk
 SELECT format('%I.%I', c2.schema_name, c2.table_name)::regclass AS cchunk

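Condensed, the round trip this expected output exercises is (a sketch; the chunk name is illustrative):

-- Setting the hypercore access method on a chunk compresses it as a side
-- effect.
ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET ACCESS METHOD hypercore;

-- The access method in use can be read from the catalog.
SELECT c.relname, a.amname
FROM pg_class c
INNER JOIN pg_am a ON (c.relam = a.oid)
WHERE c.oid = '_timescaledb_internal._hyper_1_1_chunk'::regclass;

-- Switching back to heap decompresses the chunk; its internal compressed
-- chunk no longer exists afterwards, as the output above verifies.
ALTER TABLE _timescaledb_internal._hyper_1_1_chunk SET ACCESS METHOD heap;
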
@@ -1,7 +1,7 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
-\ir include/hyperstore_helpers.sql
+\ir include/hypercore_helpers.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
@@ -88,7 +88,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk
 from timescaledb_information.chunks
 where format('%I.%I', hypertable_schema, hypertable_name)::regclass = 'readings'::regclass
 limit 1 \gset
-alter table :chunk set access method hyperstore;
+alter table :chunk set access method hypercore;
 -- Test that filtering is not removed on ColumnarScan when it includes
 -- columns that cannot be scankeys.
 select explain_analyze_anonymize(format($$

@@ -1,12 +1,12 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
-\ir include/setup_hyperstore.sql
+\ir include/setup_hypercore.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
 \set hypertable readings
-\ir hyperstore_helpers.sql
+\ir hypercore_helpers.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
@@ -112,7 +112,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
 left join pg_inherits inh on (inh.inhrelid = cl.oid);
 -- Compress the chunks and check that the counts are the same
 select location_id, count(*) into orig from :hypertable GROUP BY location_id;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
+select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -188,7 +188,7 @@ NOTICE: adding not-null constraint to column "created_at"
 (3,public,copy_test1,t)
 (1 row)

-alter table copy_test1 set access method hyperstore;
+alter table copy_test1 set access method hypercore;
 WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
 NOTICE: default segment by for hypertable "copy_test1" is set to ""
 NOTICE: default order by for hypertable "copy_test1" is set to "created_at DESC"
@@ -245,7 +245,7 @@ select * from amrels where relparent = 'test1'::regclass;
 _timescaledb_internal._hyper_5_15_chunk | heap | test1
 (1 row)

-alter table test1 set access method hyperstore;
+alter table test1 set access method hypercore;
 copy test1 from stdin delimiter ',';
 select count(*) from test1;
 count

@@ -1,7 +1,7 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
-\ir include/hyperstore_helpers.sql
+\ir include/hypercore_helpers.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
@@ -58,26 +58,26 @@ select setseed(0.3);

 (1 row)

--- Testing the basic API for creating a hyperstore
+-- Testing the basic API for creating a hypercore
 -- This should just fail because you cannot create a plain table with
--- hyperstore (yet).
+-- hypercore (yet).
 \set ON_ERROR_STOP 0
 \set VERBOSITY default
 create table test2(
 created_at timestamp with time zone not null,
 location_id int
-) using hyperstore;
-ERROR: hyperstore access method not supported on "test2"
-DETAIL: The hyperstore access method is only supported for hypertables.
-HINT: Create a hypertable from a table using another access method (e.g., heap), then use "ALTER TABLE" to set the access method to hyperstore.
-set default_table_access_method to 'hyperstore';
+) using hypercore;
+ERROR: hypercore access method not supported on "test2"
+DETAIL: The hypercore access method is only supported for hypertables.
+HINT: Create a hypertable from a table using another access method (e.g., heap), then use "ALTER TABLE" to set the access method to hypercore.
+set default_table_access_method to 'hypercore';
 create table test2(
 created_at timestamp with time zone not null,
 location_id int
 );
-ERROR: hyperstore access method not supported on "test2"
-DETAIL: The hyperstore access method is only supported for hypertables.
-HINT: It does not make sense to set the default access method for all tables to "hyperstore" since it is only supported for hypertables.
+ERROR: hypercore access method not supported on "test2"
+DETAIL: The hypercore access method is only supported for hypertables.
+HINT: It does not make sense to set the default access method for all tables to "hypercore" since it is only supported for hypertables.
 reset default_table_access_method;
 \set VERBOSITY terse
 \set ON_ERROR_STOP 1
@@ -90,8 +90,8 @@ CREATE TABLE test2(
 );
 create index on test2(device_id, created_at);
 \set ON_ERROR_STOP 0
-alter table test2 set access method hyperstore;
-ERROR: hyperstore access method not supported on "test2"
+alter table test2 set access method hypercore;
+ERROR: hypercore access method not supported on "test2"
 \set ON_ERROR_STOP 1
 select create_hypertable('test2', 'created_at');
 create_hypertable
@@ -102,7 +102,7 @@ select create_hypertable('test2', 'created_at');
 \set ON_ERROR_STOP 0
 -- Should show error since there is no namespace.
 alter table test2
-set access method hyperstore,
+set access method hypercore,
 set (compress_segmentby = 'location_id');
 WARNING: there was some uncertainty picking the default segment by for the hypertable: Please make sure device_id is not a unique column and appropriate for a segment by
 NOTICE: default segment by for hypertable "test2" is set to "device_id"
@@ -110,33 +110,33 @@ NOTICE: default order by for hypertable "test2" is set to "created_at DESC"
 ERROR: unrecognized parameter "compress_segmentby"
 \set ON_ERROR_STOP 1
 alter table test2
-set access method hyperstore,
+set access method hypercore,
 set (timescaledb.compress_segmentby = 'location_id');
 NOTICE: default order by for hypertable "test2" is set to "created_at DESC, device_id"
--- Test altering hypertable to hyperstore again. It should be allowed
+-- Test altering hypertable to hypercore again. It should be allowed
 -- and be a no-op.
-alter table test2 set access method hyperstore;
+alter table test2 set access method hypercore;
 \set ON_ERROR_STOP 0
 -- This shows an error but the error is weird, we should probably get
 -- a better one.
 alter table test2
-set access method hyperstore,
+set access method hypercore,
 set (compress_segmentby = 'location_id');
 ERROR: unrecognized parameter "compress_segmentby"
 \set ON_ERROR_STOP 1
--- Create view for hyperstore rels
+-- Create view for hypercore rels
 create view amrels as
 select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
 from pg_class cl
 inner join pg_am am on (cl.relam = am.oid)
 left join pg_inherits inh on (inh.inhrelid = cl.oid);
--- Show that test2 is a hyperstore
+-- Show that test2 is a hypercore
 select rel, amname
 from amrels
 where rel='test2'::regclass;
-rel | amname
--------+------------
-test2 | hyperstore
+rel | amname
+-------+-----------
+test2 | hypercore
 (1 row)

 -- This will create new chunks for the hypertable
@@ -145,20 +145,20 @@ select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
 from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
 -- Save the count for test2 for later comparison
 select count(*) as orig_test2_count from test2 \gset
--- All chunks should use the hyperstore access method
+-- All chunks should use the hypercore access method
 select * from amrels
 where relparent='test2'::regclass;
-rel | amname | relparent
------------------------------------------+------------+-----------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2
-_timescaledb_internal._hyper_1_3_chunk | hyperstore | test2
-_timescaledb_internal._hyper_1_5_chunk | hyperstore | test2
-_timescaledb_internal._hyper_1_7_chunk | hyperstore | test2
-_timescaledb_internal._hyper_1_9_chunk | hyperstore | test2
-_timescaledb_internal._hyper_1_11_chunk | hyperstore | test2
+rel | amname | relparent
+-----------------------------------------+-----------+-----------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2
+_timescaledb_internal._hyper_1_3_chunk | hypercore | test2
+_timescaledb_internal._hyper_1_5_chunk | hypercore | test2
+_timescaledb_internal._hyper_1_7_chunk | hypercore | test2
+_timescaledb_internal._hyper_1_9_chunk | hypercore | test2
+_timescaledb_internal._hyper_1_11_chunk | hypercore | test2
 (6 rows)

--- Show compression settings for hyperstore across catalog and views
+-- Show compression settings for hypercore across catalog and views
 select * from _timescaledb_catalog.compression_settings;
 relid | segmentby | orderby | orderby_desc | orderby_nullsfirst
 -------------------------------------------------+---------------+------------------------+--------------+--------------------
@@ -204,7 +204,7 @@ select create_hypertable('test3', 'time');
 insert into test3 values ('2022-06-01', 1, 1.0);
 -- save chunk as variable
 select ch as chunk from show_chunks('test3') ch limit 1 \gset
--- Check that chunk is NOT using hyperstore
+-- Check that chunk is NOT using hypercore
 select rel, amname
 from amrels
 where relparent='test3'::regclass;
@@ -214,43 +214,43 @@ where relparent='test3'::regclass;
 (1 row)

 \set ON_ERROR_STOP 0
--- Cannot create hyperstore if missing compression settings
-alter table :chunk set access method hyperstore;
+-- Cannot create hypercore if missing compression settings
+alter table :chunk set access method hypercore;
 ERROR: hypertable "test3" is missing compression settings
 \set ON_ERROR_STOP 1
 -- Add compression settings
 alter table test3 set (timescaledb.compress, timescaledb.compress_orderby='time desc', timescaledb.compress_segmentby='');
-alter table :chunk set access method hyperstore;
--- Check that chunk is using hyperstore
+alter table :chunk set access method hypercore;
+-- Check that chunk is using hypercore
 select * from amrels where rel=:'chunk'::regclass;
-rel | amname | relparent
------------------------------------------+------------+-----------
-_timescaledb_internal._hyper_4_13_chunk | hyperstore | test3
+rel | amname | relparent
+-----------------------------------------+-----------+-----------
+_timescaledb_internal._hyper_4_13_chunk | hypercore | test3
 (1 row)

 -- Try same thing with compress_chunk()
 alter table :chunk set access method heap;
-select compress_chunk(:'chunk', compress_using => 'hyperstore');
+select compress_chunk(:'chunk', compress_using => 'hypercore');
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_4_13_chunk
 (1 row)

--- Check that chunk is using hyperstore
+-- Check that chunk is using hypercore
 select relname, amname
 from show_chunks('test3') as chunk
 join pg_class on (pg_class.oid = chunk)
 join pg_am on (relam = pg_am.oid);
-relname | amname
--------------------+------------
-_hyper_4_13_chunk | hyperstore
+relname | amname
+-------------------+-----------
+_hyper_4_13_chunk | hypercore
 (1 row)

 -- Test setting same access method again
-alter table :chunk set access method hyperstore;
+alter table :chunk set access method hypercore;
 -- Test recompression after changing compression settings
 alter table test3 set (timescaledb.compress_segmentby='device');
-select compress_chunk(:'chunk', compress_using => 'hyperstore', recompress => true);
+select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_4_13_chunk
@@ -258,29 +258,29 @@ select compress_chunk(:'chunk', compress_using => 'hyperstore', recompress => tr

 -- Create a second chunk
 insert into test3 values ('2022-08-01', 1, 1.0);
--- The second chunk should not be a hyperstore chunk
+-- The second chunk should not be a hypercore chunk
 select * from amrels where relparent='test3'::regclass;
-rel | amname | relparent
------------------------------------------+------------+-----------
-_timescaledb_internal._hyper_4_13_chunk | hyperstore | test3
-_timescaledb_internal._hyper_4_17_chunk | heap | test3
+rel | amname | relparent
+-----------------------------------------+-----------+-----------
+_timescaledb_internal._hyper_4_13_chunk | hypercore | test3
+_timescaledb_internal._hyper_4_17_chunk | heap | test3
 (2 rows)

--- Set hyperstore on hypertable
-alter table test3 set access method hyperstore;
+-- Set hypercore on hypertable
+alter table test3 set access method hypercore;
 -- Create a third chunk
 insert into test3 values ('2022-10-01', 1, 1.0);
--- The third chunk should be a hyperstore chunk
+-- The third chunk should be a hypercore chunk
 select * from amrels where relparent='test3'::regclass;
-rel | amname | relparent
------------------------------------------+------------+-----------
-_timescaledb_internal._hyper_4_13_chunk | hyperstore | test3
-_timescaledb_internal._hyper_4_17_chunk | heap | test3
-_timescaledb_internal._hyper_4_18_chunk | hyperstore | test3
+rel | amname | relparent
+-----------------------------------------+-----------+-----------
+_timescaledb_internal._hyper_4_13_chunk | hypercore | test3
+_timescaledb_internal._hyper_4_17_chunk | heap | test3
+_timescaledb_internal._hyper_4_18_chunk | hypercore | test3
 (3 rows)

--- Test that DDL on a hypertable that is not a Hyperstore but
--- has one chunk that is a Hyperstore works.
+-- Test that DDL on a hypertable that is not a Hypercore but
+-- has one chunk that is a Hypercore works.
 create table test4 (time timestamptz not null, device int, temp float);
 select created from create_hypertable('test4', 'time');
 created
@@ -301,12 +301,12 @@ alter table test4 set (timescaledb.compress);
 WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
 NOTICE: default segment by for hypertable "test4" is set to ""
 NOTICE: default order by for hypertable "test4" is set to ""time" DESC"
-alter table :chunk set access method hyperstore;
+alter table :chunk set access method hypercore;
 select * from amrels where relparent='test4'::regclass;
-rel | amname | relparent
------------------------------------------+------------+-----------
-_timescaledb_internal._hyper_6_20_chunk | hyperstore | test4
-_timescaledb_internal._hyper_6_21_chunk | heap | test4
+rel | amname | relparent
+-----------------------------------------+-----------+-----------
+_timescaledb_internal._hyper_6_20_chunk | hypercore | test4
+_timescaledb_internal._hyper_6_21_chunk | heap | test4
 (2 rows)

 -- test that alter table on the hypertable works
@@ -325,7 +325,7 @@ Check constraints:
 "constraint_10" CHECK ("time" >= 'Wed May 25 17:00:00 2022 PDT'::timestamp with time zone AND "time" < 'Wed Jun 01 17:00:00 2022 PDT'::timestamp with time zone)
 Inherits: test4

--- Test that dropping a table with one chunk being a hyperstore works.
+-- Test that dropping a table with one chunk being a hypercore works.
 drop table test4;
 -- Create view to see compression stats. Left join chunks with stats
 -- to detect missing stats. Only show row counts because size stats
@@ -348,34 +348,34 @@ inner join pg_am am
 inner join pg_inherits inh
 on (inh.inhrelid = cl.oid)
 where c.compressed_chunk_id is not null;
--- There should be no hyperstore chunks that lack compression size stats
+-- There should be no hypercore chunks that lack compression size stats
 select count(*) as num_stats_missing from compressed_rel_size_stats
-where amname = 'hyperstore' and numrows_pre_compression is null;
+where amname = 'hypercore' and numrows_pre_compression is null;
 num_stats_missing
 -------------------
 0
 (1 row)

--- Show stats for hyperstore chunks. Note that many stats are 0 since
+-- Show stats for hypercore chunks. Note that many stats are 0 since
 -- chunks were created as a result of inserts and not really
 -- compressed
 select * from compressed_rel_size_stats order by rel;
-rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
------------------------------------------+------------+-----------+-------------------------+--------------------------+----------------------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_1_3_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_1_5_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_1_7_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_1_9_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_1_11_chunk | hyperstore | test2 | 0 | 0 | 0
-_timescaledb_internal._hyper_4_13_chunk | hyperstore | test3 | 1 | 1 | 1
-_timescaledb_internal._hyper_4_18_chunk | hyperstore | test3 | 0 | 0 | 0
+rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
+-----------------------------------------+-----------+-----------+-------------------------+--------------------------+----------------------------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_1_3_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_1_5_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_1_7_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_1_9_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_1_11_chunk | hypercore | test2 | 0 | 0 | 0
+_timescaledb_internal._hyper_4_13_chunk | hypercore | test3 | 1 | 1 | 1
+_timescaledb_internal._hyper_4_18_chunk | hypercore | test3 | 0 | 0 | 0
 (8 rows)

--- Decompress hyperstores to check that stats are removed
+-- Decompress hypercores to check that stats are removed
 select decompress_chunk(rel)
 from compressed_rel_size_stats
-where amname = 'hyperstore';
+where amname = 'hypercore';
 decompress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -396,9 +396,9 @@ from compressed_rel_size_stats;
 0
 (1 row)

--- Create hyperstores again and check that compression size stats are
+-- Create hypercores again and check that compression size stats are
 -- updated showing compressed data
-select compress_chunk(ch, compress_using => 'hyperstore')
+select compress_chunk(ch, compress_using => 'hypercore')
 from show_chunks('test2') ch;
 compress_chunk
 -----------------------------------------
@@ -410,7 +410,7 @@ from show_chunks('test2') ch;
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

-select compress_chunk(ch, compress_using => 'hyperstore')
+select compress_chunk(ch, compress_using => 'hypercore')
 from show_chunks('test3') ch;
 compress_chunk
 -----------------------------------------
@@ -430,17 +430,17 @@ select
 numrows_frozen_immediately
 from compressed_rel_size_stats;
 select * from compressed_rel_size_stats order by rel;
-rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
------------------------------------------+------------+-----------+-------------------------+--------------------------+----------------------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2 | 204 | 10 | 10
-_timescaledb_internal._hyper_1_3_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_5_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_7_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_9_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_11_chunk | hyperstore | test2 | 373 | 10 | 10
-_timescaledb_internal._hyper_4_13_chunk | hyperstore | test3 | 0 | 0 | 0
-_timescaledb_internal._hyper_4_17_chunk | hyperstore | test3 | 1 | 1 | 1
-_timescaledb_internal._hyper_4_18_chunk | hyperstore | test3 | 1 | 1 | 1
+rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
+-----------------------------------------+-----------+-----------+-------------------------+--------------------------+----------------------------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2 | 204 | 10 | 10
+_timescaledb_internal._hyper_1_3_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_5_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_7_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_9_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_11_chunk | hypercore | test2 | 373 | 10 | 10
+_timescaledb_internal._hyper_4_13_chunk | hypercore | test3 | 0 | 0 | 0
+_timescaledb_internal._hyper_4_17_chunk | hypercore | test3 | 1 | 1 | 1
+_timescaledb_internal._hyper_4_18_chunk | hypercore | test3 | 1 | 1 | 1
 (9 rows)

 -- Convert back to heap and compress the old way to compare
@@ -481,7 +481,7 @@ select * from compressed_rel_size_stats order by rel;
 _timescaledb_internal._hyper_4_18_chunk | heap | test3 | 1 | 1 | 1
 (9 rows)

--- Check that stats are the same for hyperstore and now with
+-- Check that stats are the same for hypercore and now with
 -- compression. Should return zero rows if they are the same.
 select
 rel,
@@ -496,7 +496,7 @@ select * from saved_stats;
 -----+-----------+-------------------------+--------------------------+----------------------------
 (0 rows)

--- Try migration to hyperstore directly from compressed heap. Run in a
+-- Try migration to hypercore directly from compressed heap. Run in a
 -- transaction block to make sure changes are visible to following
 -- commands.
 begin;
@@ -515,32 +515,32 @@ set client_min_messages=DEBUG1;
 with chunks as (
 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hyperstore') from chunks;
+select compress_chunk(ch, compress_using => 'hypercore') from chunks;
 LOG: statement: with chunks as (
 select ch from show_chunks('test2') ch offset 1
 )
-select compress_chunk(ch, compress_using => 'hyperstore') from chunks;
-DEBUG: migrating table "_hyper_1_3_chunk" to hyperstore
+select compress_chunk(ch, compress_using => 'hypercore') from chunks;
+DEBUG: migrating table "_hyper_1_3_chunk" to hypercore
 DEBUG: building index "_hyper_1_3_chunk_test2_device_id_created_at_idx" on table "_hyper_1_3_chunk" serially
 DEBUG: index "_hyper_1_3_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_3_chunk_test2_created_at_idx" on table "_hyper_1_3_chunk" serially
 DEBUG: index "_hyper_1_3_chunk_test2_created_at_idx" can safely use deduplication
-DEBUG: migrating table "_hyper_1_5_chunk" to hyperstore
+DEBUG: migrating table "_hyper_1_5_chunk" to hypercore
 DEBUG: building index "_hyper_1_5_chunk_test2_device_id_created_at_idx" on table "_hyper_1_5_chunk" serially
 DEBUG: index "_hyper_1_5_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_5_chunk_test2_created_at_idx" on table "_hyper_1_5_chunk" serially
 DEBUG: index "_hyper_1_5_chunk_test2_created_at_idx" can safely use deduplication
-DEBUG: migrating table "_hyper_1_7_chunk" to hyperstore
+DEBUG: migrating table "_hyper_1_7_chunk" to hypercore
 DEBUG: building index "_hyper_1_7_chunk_test2_device_id_created_at_idx" on table "_hyper_1_7_chunk" serially
 DEBUG: index "_hyper_1_7_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_7_chunk_test2_created_at_idx" on table "_hyper_1_7_chunk" serially
 DEBUG: index "_hyper_1_7_chunk_test2_created_at_idx" can safely use deduplication
-DEBUG: migrating table "_hyper_1_9_chunk" to hyperstore
+DEBUG: migrating table "_hyper_1_9_chunk" to hypercore
 DEBUG: building index "_hyper_1_9_chunk_test2_device_id_created_at_idx" on table "_hyper_1_9_chunk" serially
 DEBUG: index "_hyper_1_9_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_9_chunk_test2_created_at_idx" on table "_hyper_1_9_chunk" serially
 DEBUG: index "_hyper_1_9_chunk_test2_created_at_idx" can safely use deduplication
-DEBUG: migrating table "_hyper_1_11_chunk" to hyperstore
+DEBUG: migrating table "_hyper_1_11_chunk" to hypercore
 DEBUG: building index "_hyper_1_11_chunk_test2_device_id_created_at_idx" on table "_hyper_1_11_chunk" serially
 DEBUG: index "_hyper_1_11_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_11_chunk_test2_created_at_idx" on table "_hyper_1_11_chunk" serially
@@ -561,9 +561,9 @@ select ch as alter_chunk from show_chunks('test2') ch limit 1 \gset
 LOG: statement: select ch as alter_chunk from show_chunks('test2') ch limit 1
 insert into :alter_chunk values ('2022-06-01 10:00', 4, 4, 4.0, 4.0);
 LOG: statement: insert into _timescaledb_internal._hyper_1_1_chunk values ('2022-06-01 10:00', 4, 4, 4.0, 4.0);
-alter table :alter_chunk set access method hyperstore;
-LOG: statement: alter table _timescaledb_internal._hyper_1_1_chunk set access method hyperstore;
-DEBUG: migrating table "_hyper_1_1_chunk" to hyperstore
+alter table :alter_chunk set access method hypercore;
+LOG: statement: alter table _timescaledb_internal._hyper_1_1_chunk set access method hypercore;
+DEBUG: migrating table "_hyper_1_1_chunk" to hypercore
 DEBUG: building index "_hyper_1_1_chunk_test2_device_id_created_at_idx" on table "_hyper_1_1_chunk" serially
 DEBUG: index "_hyper_1_1_chunk_test2_device_id_created_at_idx" can safely use deduplication
 DEBUG: building index "_hyper_1_1_chunk_test2_created_at_idx" on table "_hyper_1_1_chunk" serially
@@ -576,27 +576,27 @@ select dep.objid::regclass, am.amname
 from show_chunks('test2') ch
 join pg_depend dep on (ch = dep.objid)
 join pg_am am on (dep.refobjid = am.oid);
-objid | amname
------------------------------------------+------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore
-_timescaledb_internal._hyper_1_3_chunk | hyperstore
-_timescaledb_internal._hyper_1_5_chunk | hyperstore
-_timescaledb_internal._hyper_1_7_chunk | hyperstore
-_timescaledb_internal._hyper_1_9_chunk | hyperstore
-_timescaledb_internal._hyper_1_11_chunk | hyperstore
+objid | amname
+-----------------------------------------+-----------
+_timescaledb_internal._hyper_1_1_chunk | hypercore
+_timescaledb_internal._hyper_1_3_chunk | hypercore
+_timescaledb_internal._hyper_1_5_chunk | hypercore
+_timescaledb_internal._hyper_1_7_chunk | hypercore
+_timescaledb_internal._hyper_1_9_chunk | hypercore
+_timescaledb_internal._hyper_1_11_chunk | hypercore
 (6 rows)

--- All chunks should use hyperstore and have rel_size_stats
+-- All chunks should use hypercore and have rel_size_stats
 select * from compressed_rel_size_stats
-where amname = 'hyperstore' order by rel;
-rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
------------------------------------------+------------+-----------+-------------------------+--------------------------+----------------------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2 | 204 | 10 | 10
-_timescaledb_internal._hyper_1_3_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_5_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_7_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_9_chunk | hyperstore | test2 | 2016 | 10 | 10
-_timescaledb_internal._hyper_1_11_chunk | hyperstore | test2 | 373 | 10 | 10
+where amname = 'hypercore' order by rel;
+rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
+-----------------------------------------+-----------+-----------+-------------------------+--------------------------+----------------------------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2 | 204 | 10 | 10
+_timescaledb_internal._hyper_1_3_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_5_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_7_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_9_chunk | hypercore | test2 | 2016 | 10 | 10
+_timescaledb_internal._hyper_1_11_chunk | hypercore | test2 | 373 | 10 | 10
 (6 rows)

 -- Check that query plan is now ColumnarScan and that all data, except
@@ -640,33 +640,33 @@ select count(*)=(:orig_test2_count + 1) as count_as_expected from test2;

 commit;
 \set ON_ERROR_STOP 0
--- Trying to convert a hyperstore to a hyperstore should be an error
--- if if_not_compressed is false and the hyperstore is fully
+-- Trying to convert a hypercore to a hypercore should be an error
+-- if if_not_compressed is false and the hypercore is fully
 -- compressed.
-select compress_chunk(ch, compress_using => 'hyperstore', if_not_compressed => false)
+select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
 from show_chunks('test2') ch;
 ERROR: chunk "_hyper_1_1_chunk" is already compressed
--- Compressing using something different than "hyperstore" or "heap"
+-- Compressing using something different than "hypercore" or "heap"
 -- should not be allowed
 select compress_chunk(ch, compress_using => 'non_existing_am')
 from show_chunks('test2') ch;
-ERROR: can only compress using "heap" or "hyperstore"
+ERROR: can only compress using "heap" or "hypercore"
 \set ON_ERROR_STOP 1
--- Compressing from hyperstore with compress_using=>heap should lead
--- to recompression of hyperstore with a notice.
+-- Compressing from hypercore with compress_using=>heap should lead
+-- to recompression of hypercore with a notice.
 select compress_chunk(ch, compress_using => 'heap')
 from show_chunks('test2') ch;
-NOTICE: cannot compress hyperstore "_hyper_1_1_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_1_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_1_chunk" is already compressed
-NOTICE: cannot compress hyperstore "_hyper_1_3_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_3_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_3_chunk" is already compressed
-NOTICE: cannot compress hyperstore "_hyper_1_5_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_5_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_5_chunk" is already compressed
-NOTICE: cannot compress hyperstore "_hyper_1_7_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_7_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_7_chunk" is already compressed
-NOTICE: cannot compress hyperstore "_hyper_1_9_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_9_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_9_chunk" is already compressed
-NOTICE: cannot compress hyperstore "_hyper_1_11_chunk" using heap, recompressing instead
+NOTICE: cannot compress hypercore "_hyper_1_11_chunk" using heap, recompressing instead
 NOTICE: chunk "_hyper_1_11_chunk" is already compressed
 compress_chunk
 -----------------------------------------
@@ -678,14 +678,14 @@ NOTICE: chunk "_hyper_1_11_chunk" is already compressed
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

--- Compressing a hyperstore without specifying compress_using should
--- lead to recompression. First check that :chunk is a hyperstore.
+-- Compressing a hypercore without specifying compress_using should
+-- lead to recompression. First check that :chunk is a hypercore.
 select ch as chunk from show_chunks('test2') ch limit 1 \gset
 select * from compressed_rel_size_stats
-where amname = 'hyperstore' and rel = :'chunk'::regclass;
-rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
-----------------------------------------+------------+-----------+-------------------------+--------------------------+----------------------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2 | 204 | 10 | 10
+where amname = 'hypercore' and rel = :'chunk'::regclass;
+rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
+----------------------------------------+-----------+-----------+-------------------------+--------------------------+----------------------------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2 | 204 | 10 | 10
 (1 row)

 insert into :chunk values ('2022-06-01 10:01', 6, 6, 6.0, 6.0);
@@ -707,7 +707,7 @@ select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
 (2147484675,14)
 (1 row)

--- Compressing a hyperstore with compress_using=>hyperstore should
+-- Compressing a hypercore with compress_using=>hypercore should
 -- also lead to recompression
 insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
 select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
@@ -716,7 +716,7 @@ select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 (0,3)
 (1 row)

-select compress_chunk(:'chunk', compress_using => 'hyperstore');
+select compress_chunk(:'chunk', compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -728,10 +728,10 @@ select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
 (2147484676,12)
 (1 row)

--- Convert all hyperstores back to heap
+-- Convert all hypercores back to heap
 select decompress_chunk(rel) ch
 from compressed_rel_size_stats
-where amname = 'hyperstore'
+where amname = 'hypercore'
 order by ch;
 ch
 -----------------------------------------
@@ -743,18 +743,18 @@ select decompress_chunk(rel) ch
 _timescaledb_internal._hyper_1_11_chunk
 (6 rows)

--- Test that it is possible to convert multiple hyperstores in the
+-- Test that it is possible to convert multiple hypercores in the
 -- same transaction. The goal is to check that all the state is
 -- cleaned up between two or more commands in same transaction.
 select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
 start transaction;
-select compress_chunk(:'chunk', compress_using => 'hyperstore');
+select compress_chunk(:'chunk', compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
 (1 row)

-select compress_chunk(:'chunk2', compress_using => 'hyperstore');
+select compress_chunk(:'chunk2', compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_3_chunk
@@ -762,12 +762,12 @@ select compress_chunk(:'chunk2', compress_using => 'hyperstore');

 commit;
 select * from compressed_rel_size_stats
-where amname = 'hyperstore' and relparent = 'test2'::regclass
+where amname = 'hypercore' and relparent = 'test2'::regclass
 order by rel;
-rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
-----------------------------------------+------------+-----------+-------------------------+--------------------------+----------------------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore | test2 | 207 | 10 | 10
-_timescaledb_internal._hyper_1_3_chunk | hyperstore | test2 | 2016 | 10 | 10
+rel | amname | relparent | numrows_pre_compression | numrows_post_compression | numrows_frozen_immediately
+----------------------------------------+-----------+-----------+-------------------------+--------------------------+----------------------------
+_timescaledb_internal._hyper_1_1_chunk | hypercore | test2 | 207 | 10 | 10
+_timescaledb_internal._hyper_1_3_chunk | hypercore | test2 | 2016 | 10 | 10
 (2 rows)

 -- Test that we can compress old way using compress_using=>heap
@@ -852,7 +852,7 @@ select * from :chunk3 where created_at = '2022-06-15 16:00' and device_id = 8;
 (1 row)

 -- Test a more complicated schema from the NYC Taxi data set. This is
--- to test that compression using hyperstore works, since there was an
+-- to test that compression using hypercore works, since there was an
 -- issue with setting up the tuple sort state during compression.
 create table rides (
 vendor_id text,
@@ -892,7 +892,7 @@ insert into rides values
 (6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
 (356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
 -- Check that it is possible to compress
-select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('rides') ch;
+select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_8_44_chunk
@@ -900,9 +900,9 @@ select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('rides'

 select rel, amname from compressed_rel_size_stats
 where relparent::regclass = 'rides'::regclass;
-rel | amname
------------------------------------------+------------
-_timescaledb_internal._hyper_8_44_chunk | hyperstore
+rel | amname
+-----------------------------------------+-----------
+_timescaledb_internal._hyper_8_44_chunk | hypercore
 (1 row)

 -- Query to check everything is OK
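To summarize the compress_chunk() semantics that the expected output above exercises (a sketch; the chunk name is illustrative):

-- An already fully compressed hypercore chunk: error when
-- if_not_compressed => false, otherwise only a notice.
select compress_chunk('_timescaledb_internal._hyper_1_1_chunk',
                      compress_using => 'hypercore', if_not_compressed => false);

-- Only 'heap' and 'hypercore' are accepted for compress_using.
select compress_chunk('_timescaledb_internal._hyper_1_1_chunk',
                      compress_using => 'non_existing_am');  -- rejected

-- compress_using => 'heap' on a hypercore chunk does not convert it back;
-- it recompresses in place and emits a notice.
select compress_chunk('_timescaledb_internal._hyper_1_1_chunk',
                      compress_using => 'heap');
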
@@ -1,12 +1,12 @@
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
-\ir include/setup_hyperstore.sql
+\ir include/setup_hypercore.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
 \set hypertable readings
-\ir hyperstore_helpers.sql
+\ir hypercore_helpers.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
@@ -107,7 +107,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
 limit 1 offset 1 \gset
 -- To generate plans consistently.
 set max_parallel_workers_per_gather to 0;
--- Create a function that uses a cursor to scan the Hyperstore
+-- Create a function that uses a cursor to scan the Hypercore
 -- table. This should work equivalent to a query on the same table.
 create function location_humidity_for(
 in p_owner integer,
@@ -139,7 +139,7 @@ begin
 end;
 $$
 language plpgsql;
-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
+select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -236,7 +236,7 @@ NOTICE: adding not-null constraint to column "time"

 alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
 insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
-select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('backward_cursor') ch;
+select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_3_13_chunk

@@ -31,7 +31,7 @@ alter table readings
 insert into readings (time, location, device, temp, humidity, jdata)
 select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
 from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
-select compress_chunk(show_chunks('readings'), compress_using => 'hyperstore');
+select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -45,12 +45,12 @@ insert into readings (time, location, device, temp, humidity, jdata)
 select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
 from generate_series('2022-06-01 00:01:00'::timestamptz, '2022-06-04'::timestamptz, '5m') t;
 select chunk, amname from chunk_info where hypertable = 'readings'::regclass;
-chunk | amname
-----------------------------------------+------------
-_timescaledb_internal._hyper_1_1_chunk | hyperstore
-_timescaledb_internal._hyper_1_2_chunk | hyperstore
-_timescaledb_internal._hyper_1_3_chunk | hyperstore
-_timescaledb_internal._hyper_1_4_chunk | hyperstore
+chunk | amname
+----------------------------------------+-----------
+_timescaledb_internal._hyper_1_1_chunk | hypercore
+_timescaledb_internal._hyper_1_2_chunk | hypercore
+_timescaledb_internal._hyper_1_3_chunk | hypercore
+_timescaledb_internal._hyper_1_4_chunk | hypercore
 (4 rows)

 -- Pick a chunk to truncate that is not the first chunk. This is

@@ -24,9 +24,9 @@ insert into metrics values ('2024-01-01', 1, 1, 1.0), ('2024-01-01', 2, 2, 2.0),
 alter table metrics add constraint device_fk foreign key (device) references devices (id) on delete cascade;
 alter table metrics set (timescaledb.compress_segmentby = 'device');
 NOTICE: default order by for hypertable "metrics" is set to ""time" DESC"
--- Make the one chunk a Hyperstore
+-- Make the one chunk a Hypercore
 select ch as chunk from show_chunks('metrics') ch limit 1 \gset
-alter table :chunk set access method hyperstore;
+alter table :chunk set access method hypercore;
 -- Show that all data is compressed
 select _timescaledb_debug.is_compressed_tid(ctid) as compressed, * from metrics order by time, device;
 compressed | time | device | location | temp

@@ -4,12 +4,12 @@
 \c :TEST_DBNAME :ROLE_SUPERUSER
 create extension pageinspect;
 set role :ROLE_DEFAULT_PERM_USER;
-\ir include/setup_hyperstore.sql
+\ir include/setup_hypercore.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
 \set hypertable readings
-\ir hyperstore_helpers.sql
+\ir hypercore_helpers.sql
 -- This file and its contents are licensed under the Timescale License.
 -- Please see the included NOTICE for copyright information and
 -- LICENSE-TIMESCALE for a copy of the license.
@@ -169,7 +169,7 @@ create index hypertable_location_id_include_humidity_idx on :hypertable (locatio
 create index hypertable_device_id_idx on :hypertable (device_id) include (humidity);
 create index hypertable_owner_idx on :hypertable (owner_id);
 create index hypertable_location_id_owner_id_idx on :hypertable (location_id, owner_id);
--- Save index size before switching to hyperstore so that we can
+-- Save index size before switching to hypercore so that we can
 -- compare sizes after. Don't show the actual sizes because it varies
 -- slightly on different platforms.
 create table index_sizes_before as
@@ -178,11 +178,11 @@ from chunk_indexes
 where chunk::regclass = :'chunk2'::regclass
 and (attname='location_id' or attname='device_id' or attname='owner_id');
 -- Drop some segmentby indexes and recreate them after converting to
--- hyperstore. This is to test having some created before conversion
+-- hypercore. This is to test having some created before conversion
 -- and some after.
 drop index hypertable_owner_idx;
 drop index hypertable_location_id_owner_id_idx;
-alter table :chunk2 set access method hyperstore;
+alter table :chunk2 set access method hypercore;
 -- count without indexes
 select owner_id, count(*) into owner_orig from :hypertable
 where owner_id in (3,4,5) group by owner_id;
@@ -234,7 +234,7 @@ select * from owner_orig join owner_comp using (owner_id) where owner_orig.count
 ----------+-------+-------
 (0 rows)

--- the indexes on segmentby columns should be smaller on hyperstore,
+-- the indexes on segmentby columns should be smaller on hypercore,
 -- except for the covering index on location_id (because it also
 -- includes the non-segmentby column humidity). The device_id index
 -- should also remain the same size since it is not on a segmentby
@@ -355,7 +355,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
 Wed Jun 01 17:00:00 2022 PDT | 1 | 2
 (1 row)

-select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
+select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
 compress_chunk
 ----------------------------------------
 _timescaledb_internal._hyper_1_1_chunk
@@ -575,12 +575,12 @@ $$, :'hypertable'));
 (17 rows)

 -- We just compare the counts here, not the full content.
-select heapam.count as heapam, hyperstore.count as hyperstore
+select heapam.count as heapam, hypercore.count as hypercore
 from (select count(location_id) from :hypertable where location_id between 5 and 10) heapam,
-(select count(location_id) from :hypertable where location_id between 5 and 10) hyperstore;
-heapam | hyperstore
---------+------------
-5126 | 5126
+(select count(location_id) from :hypertable where location_id between 5 and 10) hypercore;
+heapam | hypercore
+--------+-----------
+5126 | 5126
 (1 row)

 drop table saved_hypertable;
@@ -765,7 +765,7 @@ $$, :'chunk1'));
 -------------------------------------
 \set VERBOSITY default
 ---
--- Test that building a UNIQUE index won't work on a hyperstore table
+-- Test that building a UNIQUE index won't work on a hypercore table
 -- that contains non-unique values.
 ---
 create table non_unique_metrics (time timestamptz, temp float, device int);
@@ -780,10 +780,10 @@ DETAIL: Dimensions cannot have NULL values.
 insert into non_unique_metrics values ('2024-01-01', 1.0, 1), ('2024-01-01', 2.0, 1), ('2024-01-02', 3.0, 2);
 select ch as non_unique_chunk from show_chunks('non_unique_metrics') ch limit 1 \gset
 alter table non_unique_metrics set (timescaledb.compress_segmentby = 'device', timescaledb.compress_orderby = 'time');
-alter table :non_unique_chunk set access method hyperstore;
+alter table :non_unique_chunk set access method hypercore;
 \set ON_ERROR_STOP 0
 ---
--- UNIQUE index creation on compressed hyperstore should fail due to
+-- UNIQUE index creation on compressed hypercore should fail due to
 -- non-unique values
 ---
 create unique index on non_unique_metrics (time);
@@ -985,8 +985,8 @@ select * from only_nulls_null;
 4 | Thu Jan 04 00:01:00 2024 PST | 2 | 3 | 4 |
 (4 rows)

--- Convert all chunks to hyperstore and run same queries
-select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('nullvalues') ch;
+-- Convert all chunks to hypercore and run same queries
+select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;
 compress_chunk
 -----------------------------------------
 _timescaledb_internal._hyper_5_15_chunk
@@ -996,15 +996,15 @@ select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('nullva
 select c.relname, a.amname FROM pg_class c
 join pg_am a on (c.relam = a.oid)
 join show_chunks('nullvalues') ch on (ch = c.oid);
-relname | amname
--------------------+------------
-_hyper_5_15_chunk | hyperstore
-_hyper_5_16_chunk | hyperstore
+relname | amname
||||
relname | amname
|
||||
-------------------+-----------
|
||||
_hyper_5_15_chunk | hypercore
|
||||
_hyper_5_16_chunk | hypercore
|
||||
(2 rows)
|
||||
|
||||
-- The explains should be index scans and there should be no rows
|
||||
-- returned if the result is the same as before when the chunks where
|
||||
-- not hyperstores.
|
||||
-- not hypercores.
|
||||
explain (costs off) select * from nullvalues where location is not null;
|
||||
QUERY PLAN
|
||||
----------------------------------------------------------------------------------------------

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -172,7 +172,7 @@ $$, :'hypertable'));

select location_id, count(*) into orig from :hypertable
where location_id in (3,4,5) group by location_id;
alter table :chunk2 set access method hyperstore;
alter table :chunk2 set access method hypercore;
--
-- test that indexes work after updates
--
@ -222,7 +222,7 @@ select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2
Wed Jun 01 17:00:00 2022 PDT | 1 | 2
(1 row)

select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -107,7 +107,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
limit 1 offset 1 \gset
-- Compress the chunks and check that the counts are the same
select location_id, count(*) into orig from :hypertable GROUP BY location_id;
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -301,7 +301,7 @@ order by location_id;
(3 rows)

drop table :hypertable;
-- Check that we can write to a hyperstore table from another kind of
-- Check that we can write to a hypercore table from another kind of
-- slot even if we have dropped and added attributes.
create table test2 (itime integer, b bigint, t text);
select create_hypertable('test2', by_range('itime', 10));
@ -322,7 +322,7 @@ alter table test2 add column d int;
-- with a second set of attributes where one is dropped.
insert into test2 select t, 'second'::text, 120, 1 from generate_series(11, 15) t;
alter table test2
set access method hyperstore,
set access method hypercore,
set (timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'c, itime desc');
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
NOTICE: default segment by for hypertable "test2" is set to ""

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -109,37 +109,37 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
-- test for this.
set timescaledb.enable_columnarscan to false;
set enable_memoize to false;
-- Create a hyperstore with a few rows and use the big table to join
-- with it. This should put the hyperstore as the inner relation and
-- Create a hypercore with a few rows and use the big table to join
-- with it. This should put the hypercore as the inner relation and
-- trigger rescans.
create table the_hyperstore (
create table the_hypercore (
updated_at timestamptz not null unique,
device_id int,
height float
);
create index on the_hyperstore (device_id);
select from create_hypertable('the_hyperstore', 'updated_at');
create index on the_hypercore (device_id);
select from create_hypertable('the_hypercore', 'updated_at');
--
(1 row)

-- Fill the table with some data, but less than a single chunk, so
-- that we will get it as an inner relation in the nested loop join.
insert into the_hyperstore
insert into the_hypercore
select t, ceil(random()*5), random()*40
from generate_series('2022-06-01'::timestamptz, '2022-06-10', '1 hour') t;
-- Run joins before making it a hyperstore to have something to
-- Run joins before making it a hypercore to have something to
-- compare with.
select * into expected_inner from :chunk1 join the_hyperstore using (device_id);
select * into expected_inner from :chunk1 join the_hypercore using (device_id);
select created_at, updated_at, o.device_id, i.humidity, o.height
into expected_left
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;
alter table the_hyperstore set (
alter table the_hypercore set (
timescaledb.compress,
timescaledb.compress_segmentby = '',
timescaledb.compress_orderby = 'updated_at desc'
);
select compress_chunk(show_chunks('the_hyperstore'), compress_using => 'hyperstore');
select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_3_7_chunk
@ -147,14 +147,14 @@ select compress_chunk(show_chunks('the_hyperstore'), compress_using => 'hypersto
_timescaledb_internal._hyper_3_9_chunk
(3 rows)

vacuum analyze the_hyperstore;
vacuum analyze the_hypercore;
-- Test a merge join. We explicitly set what join methods to enable
-- and disable to avoid flaky tests.
set enable_mergejoin to true;
set enable_hashjoin to false;
set enable_nestloop to false;
\set jointype merge
\ir include/hyperstore_join_test.sql
\ir include/hypercore_join_test.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -162,17 +162,17 @@ set enable_nestloop to false;
\set outer :jointype _outer_join
-- Test inner join to make sure that it works.
select explain_analyze_anonymize(format($$
select * from %s join the_hyperstore using (device_id)
select * from %s join the_hypercore using (device_id)
$$, :'chunk1'));
explain_analyze_anonymize
------------------------------------------------------------------------------------------------------------------------
explain_analyze_anonymize
-----------------------------------------------------------------------------------------------------------------------
Merge Join (actual rows=N loops=N)
Merge Cond: (_hyper_I_N_chunk.device_id = _hyper_I_N_chunk.device_id)
-> Merge Append (actual rows=N loops=N)
Sort Key: _hyper_I_N_chunk.device_id
-> Index Scan using _hyper_I_N_chunk_the_hyperstore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_the_hyperstore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_the_hyperstore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_the_hypercore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_the_hypercore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_the_hypercore_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
-> Index Scan using _hyper_I_N_chunk_hypertable_device_id_idx on _hyper_I_N_chunk (actual rows=N loops=N)
Array Cache Hits: N
Array Cache Misses: N
@ -181,7 +181,7 @@ $$, :'chunk1'));
(12 rows)

-- Check that it generates the right result
select * into :inner from :chunk1 join the_hyperstore using (device_id);
select * into :inner from :chunk1 join the_hypercore using (device_id);
\x on
select * from :inner r full join expected_inner e on row(r) = row(e)
where r.device_id is null or e.device_id is null;
@ -191,12 +191,12 @@ where r.device_id is null or e.device_id is null;
-- Test outer join (left in this case) to make sure that it works.
select explain_analyze_anonymize(format($$
select created_at, updated_at, o.device_id, i.humidity, o.height
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;

select created_at, updated_at, o.device_id, i.humidity, o.height
into :outer
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;
$$, :'chunk1'));
psql:include/hyperstore_join_test.sql:31: ERROR: syntax error at or near ":" at character 152
psql:include/hypercore_join_test.sql:31: ERROR: syntax error at or near ":" at character 152

@ -2,12 +2,12 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER;
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -110,8 +110,8 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
set enable_mergejoin to false;
set enable_hashjoin to false;
-- There are already tests to merge into uncompressed tables, so just
-- compress all chunks using Hyperstore.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
-- compress all chunks using Hypercore.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -220,7 +220,7 @@ humidity | 1
\x off
-- Recompress all and try to insert the same rows again. This time
-- there should be no rows inserted.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
NOTICE: chunk "_hyper_1_2_chunk" is already compressed
NOTICE: chunk "_hyper_1_3_chunk" is already compressed
NOTICE: chunk "_hyper_1_4_chunk" is already compressed

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -111,7 +111,7 @@ set parallel_setup_cost to 0;
-- We need to drop the index to trigger parallel plans. Otherwise they
-- will use the index.
drop index hypertable_device_id_idx;
-- Show parallel plan and count on uncompressed (non-hyperstore)
-- Show parallel plan and count on uncompressed (non-hypercore)
-- hypertable
set max_parallel_workers_per_gather=2;
select explain_anonymize(format($$
@ -154,9 +154,9 @@ select device_id, count(*) into orig from :hypertable group by device_id;
-- Save counts over a single chunk
select device_id, count(*) into orig_chunk from :chunk1 group by device_id;
-----------------------
-- Enable hyperstore --
-- Enable hypercore --
-----------------------
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -382,7 +382,7 @@ select owner_id, count(*) from :hypertable where owner_id=1 group by owner_id;
1 | 1729
(1 row)

-- Parallel plan with hyperstore on single chunk
-- Parallel plan with hypercore on single chunk
select explain_anonymize(format($$
select device_id, count(*) from %s where device_id=1 group by device_id
$$, :'hypertable'));
@ -453,8 +453,8 @@ select owner_id, count(*) from :chunk1 where owner_id=1 group by owner_id;
1 | 44
(1 row)

-- Compare hyperstore per-device counts with original counts without
-- hyperstore
-- Compare hypercore per-device counts with original counts without
-- hypercore
select device_id, count(*) into comp from :hypertable group by device_id;
select * from orig join comp using (device_id) where orig.count != comp.count;
device_id | count | count

@ -32,7 +32,7 @@ set timezone to pst8pdt;
select add_compression_policy('readings',
compress_after => '1000 years'::interval,
compress_using => 'foo');
ERROR: can only compress using "heap" or "hyperstore"
ERROR: can only compress using "heap" or "hypercore"
\set ON_ERROR_STOP 1
-- Check that compress_using is not part of the policy if not set. Use
-- a large compress_after to ensure the policy doesn't do anything at
@ -80,23 +80,23 @@ order by chunk;
-- Check that compress_using is part of the policy config when non-NULL
select add_compression_policy('readings',
compress_after => '1 day'::interval,
compress_using => 'hyperstore')
compress_using => 'hypercore')
as compression_job \gset
select config from timescaledb_information.jobs where job_id = :compression_job;
config
-----------------------------------------------------------------------------------
{"hypertable_id": 1, "compress_after": "@ 1 day", "compress_using": "hyperstore"}
config
----------------------------------------------------------------------------------
{"hypertable_id": 1, "compress_after": "@ 1 day", "compress_using": "hypercore"}
(1 row)

-- Make sure the policy runs
call run_job(:'compression_job');
-- After policy run all the chunks should be hyperstores
-- After policy run all the chunks should be hypercores
select * from chunk_info
where hypertable = 'readings'
order by chunk;
hypertable | chunk | amname | is_compressed
------------+------------------+------------+---------------
readings | _hyper_1_1_chunk | hyperstore | t
hypertable | chunk | amname | is_compressed
------------+------------------+-----------+---------------
readings | _hyper_1_1_chunk | hypercore | t
(1 row)
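The policy hunks above cover the whole lifecycle of the renamed option. A minimal sketch, assuming a hypertable named readings with compression enabled: a policy can be pinned to the new method and inspected through its job config (per the error above, compress_using accepts only 'heap' or 'hypercore'):

-- Schedule compression that converts chunks to hypercore.
select add_compression_policy('readings',
       compress_after => '1 day'::interval,
       compress_using => 'hypercore') as compression_job \gset
-- The choice is recorded in the job config.
select config from timescaledb_information.jobs where job_id = :compression_job;
-- Run the job immediately instead of waiting for its schedule.
call run_job(:'compression_job');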

select remove_compression_policy('readings');
@ -116,8 +116,8 @@ where time = '2022-06-01 10:14' and device = 1;
f | Wed Jun 01 10:14:00 2022 PDT | 1 | 1
(1 row)

-- Add a new policy that doesn't specify hyperstore. It should still
-- recompress hyperstores.
-- Add a new policy that doesn't specify hypercore. It should still
-- recompress hypercores.
select add_compression_policy('readings',
compress_after => '1 day'::interval,
compress_using => 'heap')
@ -157,31 +157,31 @@ select remove_compression_policy('readings');
t
(1 row)

-- Insert one value into existing hyperstore, also create a new non-hyperstore chunk
-- Insert one value into existing hypercore, also create a new non-hypercore chunk
insert into readings values ('2022-06-01 10:14', 1, 1.0), ('2022-07-01 10:14', 2, 2.0);
-- The new chunk should be heap and not compressed
select * from chunk_info
where hypertable = 'readings'
order by chunk;
hypertable | chunk | amname | is_compressed
------------+------------------+------------+---------------
readings | _hyper_1_1_chunk | hyperstore | t
readings | _hyper_1_3_chunk | heap | f
hypertable | chunk | amname | is_compressed
------------+------------------+-----------+---------------
readings | _hyper_1_1_chunk | hypercore | t
readings | _hyper_1_3_chunk | heap | f
(2 rows)

select add_compression_policy('readings',
compress_after => '1 day'::interval)
as compression_job \gset
-- Run the policy job to recompress hyperstores and compress the new
-- chunk using non-hyperstore compression
-- Run the policy job to recompress hypercores and compress the new
-- chunk using non-hypercore compression
call run_job(:'compression_job');
select * from chunk_info
where hypertable = 'readings'
order by chunk;
hypertable | chunk | amname | is_compressed
------------+------------------+------------+---------------
readings | _hyper_1_1_chunk | hyperstore | t
readings | _hyper_1_3_chunk | heap | t
hypertable | chunk | amname | is_compressed
------------+------------------+-----------+---------------
readings | _hyper_1_1_chunk | hypercore | t
readings | _hyper_1_3_chunk | heap | t
(2 rows)

select remove_compression_policy('readings');
@ -203,7 +203,7 @@ select timescaledb_experimental.add_policies('daily',
refresh_start_offset => '8 days'::interval,
refresh_end_offset => '1 day'::interval,
compress_after => '9 days'::interval,
compress_using => 'hyperstore');
compress_using => 'hypercore');
add_policies
--------------
t
@ -226,8 +226,8 @@ call run_job(:'cagg_compression_job');
select * from chunk_info
where hypertable = :'mathyper'
order by chunk;
hypertable | chunk | amname | is_compressed
----------------------------+------------------+------------+---------------
_materialized_hypertable_3 | _hyper_3_5_chunk | hyperstore | t
hypertable | chunk | amname | is_compressed
----------------------------+------------------+-----------+---------------
_materialized_hypertable_3 | _hyper_3_5_chunk | hypercore | t
(1 row)

@ -36,7 +36,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk
from timescaledb_information.chunks
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = 'readings'::regclass
limit 1 \gset
alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;
--
-- Check that TID scan works for both compressed and non-compressed
-- rows.

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -173,13 +173,13 @@ select * from normaltable where location_id = 1;
Rows Removed by Filter: 185
(3 rows)

-- Changing to hyperstore will update relstats since it processes all
-- Changing to hypercore will update relstats since it processes all
-- the data
alter table :chunk1 set access method hyperstore;
alter table :chunk1 set access method hypercore;
-- Creating an index on normaltable will also update relstats
create index normaltable_location_id_idx on normaltable (location_id);
-- Relstats should be the same for both tables, except for pages since
-- a hyperstore is compressed. Column stats are not updated.
-- a hypercore is compressed. Column stats are not updated.
select * from relstats_compare;
relid | reltuples
----------------------------------------+-----------
@ -315,7 +315,7 @@ select * from attrstats_same;

-- ANALYZE also via hypertable root and show that it will recurse to
-- chunks. Make sure the chunk also has partially compressed data
alter table :chunk2 set access method hyperstore;
alter table :chunk2 set access method hypercore;
update :hypertable set device_id = 2 where device_id = 1;
select * from relstats where relid = :'chunk2'::regclass;
relid | reltuples

@ -1,7 +1,7 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/hyperstore_helpers.sql
\ir include/hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -65,7 +65,7 @@ select setseed(1);
\set the_generator ceil(random()*10)
\set the_aggregate sum(value)
\set the_clause value > 0.5
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -93,11 +93,11 @@ select setseed(1);
insert into test_float(created_at, value)
select t, ceil(random()*10)
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -106,7 +106,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -137,7 +137,7 @@ drop table test_float_saved;
\set the_generator ceil(random()*10)
\set the_aggregate sum(value)
\set the_clause value > 0.5
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -165,11 +165,11 @@ select setseed(1);
insert into test_numeric(created_at, value)
select t, ceil(random()*10)
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_3_7_chunk
@ -178,7 +178,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -208,7 +208,7 @@ drop table test_numeric_saved;
\set the_generator (random() > 0.5)
\set the_aggregate count(value)
\set the_clause value = true
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -236,11 +236,11 @@ select setseed(1);
insert into test_bool(created_at, value)
select t, (random()>0.5)
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_5_13_chunk
@ -249,7 +249,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -281,7 +281,7 @@ drop table test_bool_saved;
\set the_generator gen_random_uuid()::text
\set the_aggregate count(*)
\set the_clause value = :'my_uuid'
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -309,11 +309,11 @@ select setseed(1);
insert into test_text(created_at, value)
select t, gen_random_uuid()::text
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_7_19_chunk
@ -322,7 +322,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -354,7 +354,7 @@ drop table test_text_saved;
\set the_generator jsonb_build_object(:'a_name',round(random()*100))
\set the_aggregate sum((value->:'a_name')::int)
\set the_clause true
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -382,11 +382,11 @@ select setseed(1);
insert into test_jsonb(created_at, value)
select t, jsonb_build_object('temp',round(random()*100))
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
-----------------------------------------
_timescaledb_internal._hyper_9_25_chunk
@ -395,7 +395,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -428,7 +428,7 @@ drop table test_jsonb_saved;
\set the_generator jsonb_build_object(:'a_name',round(random()*100))
\set the_aggregate sum((value->:'a_name')::int)
\set the_clause ((value->:'a_name')::numeric >= 0.5) and ((value->:'a_name')::numeric <= 0.6)
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -456,11 +456,11 @@ select setseed(1);
insert into test_jsonb(created_at, value)
select t, jsonb_build_object('temp',round(random()*100))
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_11_31_chunk
@ -469,7 +469,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
@ -502,7 +502,7 @@ drop table test_jsonb_saved;
\set the_generator gen_random_uuid()::name
\set the_aggregate count(*)
\set the_clause value = :'my_uuid'
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -530,11 +530,11 @@ select setseed(1);
insert into test_name(created_at, value)
select t, gen_random_uuid()::name
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;
-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
compress_chunk
------------------------------------------
_timescaledb_internal._hyper_13_37_chunk
@ -543,7 +543,7 @@ select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore')
(3 rows)

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries

@ -2,12 +2,12 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\c :TEST_DBNAME :ROLE_SUPERUSER
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -108,7 +108,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
limit 1 offset 1 \gset
-- TODO(#1068) Parallel sequence scan does not work
set max_parallel_workers_per_gather to 0;
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -135,14 +135,14 @@ select relname, amname
from pg_class join pg_am on (relam = pg_am.oid)
where pg_class.oid in (select show_chunks(:'hypertable'))
order by relname;
relname | amname
------------------+------------
_hyper_1_1_chunk | hyperstore
_hyper_1_2_chunk | hyperstore
_hyper_1_3_chunk | hyperstore
_hyper_1_4_chunk | hyperstore
_hyper_1_5_chunk | hyperstore
_hyper_1_6_chunk | hyperstore
relname | amname
------------------+-----------
_hyper_1_1_chunk | hypercore
_hyper_1_2_chunk | hypercore
_hyper_1_3_chunk | hypercore
_hyper_1_4_chunk | hypercore
_hyper_1_5_chunk | hypercore
_hyper_1_6_chunk | hypercore
(6 rows)

-- Pick a random row to update
@ -291,7 +291,7 @@ select * from :hypertable where humidity = 200.0 order by metric_id;
commit;
-- Test update of a segment-by column. The selection is to make sure
-- that we have a mix of compressed and uncompressed tuples.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -343,7 +343,7 @@ order by metric_id;
(11 rows)

-- Compress all chunks again before testing RETURNING
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk
@ -395,7 +395,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;

-- Test update of a segment-by column directly on the chunk. This
-- should fail for compressed rows even for segment-by columns.
select compress_chunk(:'chunk1', compress_using => 'hyperstore');
select compress_chunk(:'chunk1', compress_using => 'hypercore');
compress_chunk
----------------------------------------
_timescaledb_internal._hyper_1_1_chunk

@ -21,7 +21,7 @@ select format('%I.%I', reg_chunk.schema_name, reg_chunk.table_name)::regclass as
from _timescaledb_catalog.chunk cpr_chunk
inner join reg_chunk on (cpr_chunk.id = reg_chunk.compressed_chunk_id);
-- Create two hypertables with same config and data, apart from one
-- having a hyperstore chunk (hystable). The regular table (regtable)
-- having a hypercore chunk (hystable). The regular table (regtable)
-- will be used as a reference.
create table hystable(time timestamptz, location bigint, device smallint, temp float4);
create table regtable(time timestamptz, location bigint, device smallint, temp float4);
@ -45,9 +45,9 @@ values ('2022-06-01 00:01', 1, 1, 1.0),
('2022-06-01 00:03', 1, 3, 3.0),
('2022-06-01 00:04', 2, 3, 4.0);
insert into hystable select * from regtable;
-- Make sure new chunks are hyperstore from the start, except
-- Make sure new chunks are hypercore from the start, except
-- obviously for the chunk that was already created.
alter table hystable set access method hyperstore, set (
alter table hystable set access method hypercore, set (
timescaledb.compress_orderby = 'time',
timescaledb.compress_segmentby = 'location'
);
@ -97,7 +97,7 @@ select indexrelid::regclass as hystable_location_chunk_idx
from pg_index i inner join pg_class c on (i.indexrelid=c.oid)
where indrelid = :'hystable_chunk'::regclass
and relname like '%hystable_location%' \gset
alter table :hystable_chunk set access method hyperstore;
alter table :hystable_chunk set access method hypercore;
-- Show new access method on chunk
select ch chunk, amname access_method
from show_chunks('hystable') ch
@ -105,7 +105,7 @@ inner join pg_class cl on (cl.oid = ch)
inner join pg_am am on (cl.relam = am.oid);
chunk | access_method
----------------------------------------+---------------
_timescaledb_internal._hyper_1_2_chunk | hyperstore
_timescaledb_internal._hyper_1_2_chunk | hypercore
(1 row)

-- Reset to superuser in order to run bt_page_items()
@ -273,30 +273,30 @@ insert into regtable (time, location, device, temp)
select t, ceil(random()*10), ceil(random()*30), random()*40
from generate_series('2022-06-01'::timestamptz, '2022-06-10', '60s') t;
insert into hystable select * from regtable;
-- All new chunks should be hyperstores since we configured hyperstore
-- All new chunks should be hypercores since we configured hypercore
-- as default hypertable AM
select ch, amname
from show_chunks('hystable') ch
inner join pg_class cl on (cl.oid = ch)
inner join pg_am am on (cl.relam = am.oid);
ch | amname
----------------------------------------+------------
_timescaledb_internal._hyper_1_2_chunk | hyperstore
_timescaledb_internal._hyper_1_6_chunk | hyperstore
_timescaledb_internal._hyper_1_8_chunk | hyperstore
ch | amname
----------------------------------------+-----------
_timescaledb_internal._hyper_1_2_chunk | hypercore
_timescaledb_internal._hyper_1_6_chunk | hypercore
_timescaledb_internal._hyper_1_8_chunk | hypercore
(3 rows)

-- All (new) compressed chunks should have a hsproxy index
-- All (new) compressed chunks should have a hypercore_proxy index
select indexrelid::regclass
from pg_index i inner join
compressed_rels crels on (i.indrelid = crels.compressed_relid);
indexrelid
---------------------------------------------------------------------------------------
_timescaledb_internal.compress_hyper_3_3_chunk_ts_hsproxy_idx
_timescaledb_internal.compress_hyper_3_3_chunk_ts_hypercore_proxy_idx
_timescaledb_internal.compress_hyper_3_3_chunk_location__ts_meta_min_1__ts_meta_m_idx
_timescaledb_internal.compress_hyper_3_7_chunk_ts_hsproxy_idx
_timescaledb_internal.compress_hyper_3_7_chunk_ts_hypercore_proxy_idx
_timescaledb_internal.compress_hyper_3_7_chunk_location__ts_meta_min_1__ts_meta_m_idx
_timescaledb_internal.compress_hyper_3_9_chunk_ts_hsproxy_idx
_timescaledb_internal.compress_hyper_3_9_chunk_ts_hypercore_proxy_idx
_timescaledb_internal.compress_hyper_3_9_chunk_location__ts_meta_min_1__ts_meta_m_idx
(6 rows)
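The hypercore_proxy entries listed above can also be found by access method instead of by name. A sketch using only standard PostgreSQL catalogs; the AM name comes from this commit, while the query itself is illustrative:

-- List all indexes whose index access method is hypercore_proxy.
select i.indexrelid::regclass as proxy_index,
       i.indrelid::regclass as compressed_rel
from pg_index i
join pg_class ic on ic.oid = i.indexrelid
join pg_am am on am.oid = ic.relam
where am.amname = 'hypercore_proxy';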

@ -388,7 +388,7 @@ NOTICE: adding not-null constraint to column "time"
(1 row)

-- This time create the table without a segmentby column
alter table hystable set access method hyperstore, set (
alter table hystable set access method hypercore, set (
timescaledb.compress_orderby = 'time'
);
WARNING: there was some uncertainty picking the default segment by for the hypertable: You do not have any indexes on columns that can be used for segment_by and thus we are not using segment_by for compression. Please make sure you are not missing any indexes
@ -398,27 +398,27 @@ NOTICE: default segment by for hypertable "hystable" is set to ""
-- vacuum on empty table
vacuum (index_cleanup on) hystable;
insert into hystable select * from regtable;
-- All chunks should be hyperstores
-- All chunks should be hypercores
select ch, amname
from show_chunks('hystable') ch
inner join pg_class cl on (cl.oid = ch)
inner join pg_am am on (cl.relam = am.oid);
ch | amname
-----------------------------------------+------------
_timescaledb_internal._hyper_4_10_chunk | hyperstore
_timescaledb_internal._hyper_4_11_chunk | hyperstore
_timescaledb_internal._hyper_4_12_chunk | hyperstore
ch | amname
-----------------------------------------+-----------
_timescaledb_internal._hyper_4_10_chunk | hypercore
_timescaledb_internal._hyper_4_11_chunk | hypercore
_timescaledb_internal._hyper_4_12_chunk | hypercore
(3 rows)

-- All compressed chunks should have a hsproxy index
-- All compressed chunks should have a hypercore_proxy index
select indexrelid::regclass
from pg_index i inner join
compressed_rels crels on (i.indrelid = crels.compressed_relid);
indexrelid
----------------------------------------------------------------
_timescaledb_internal.compress_hyper_5_13_chunk_ts_hsproxy_idx
_timescaledb_internal.compress_hyper_5_14_chunk_ts_hsproxy_idx
_timescaledb_internal.compress_hyper_5_15_chunk_ts_hsproxy_idx
indexrelid
------------------------------------------------------------------------
_timescaledb_internal.compress_hyper_5_13_chunk_ts_hypercore_proxy_idx
_timescaledb_internal.compress_hyper_5_14_chunk_ts_hypercore_proxy_idx
_timescaledb_internal.compress_hyper_5_15_chunk_ts_hypercore_proxy_idx
(3 rows)

-- delete some data to generate garbage
@ -441,7 +441,7 @@ insert into readings (time, location, device, temp, humidity)
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
alter table readings
set access method hyperstore,
set access method hypercore,
set (timescaledb.compress_orderby = 'time',
timescaledb.compress_segmentby = 'device');
vacuum analyze readings;

@ -1,12 +1,12 @@
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
\set hypertable readings
\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql
-- This file and its contents are licensed under the Timescale License.
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.
@ -105,7 +105,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk2
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = :'hypertable'::regclass
order by chunk2 asc
limit 1 offset 1 \gset
alter table :chunk1 set access method hyperstore;
alter table :chunk1 set access method hypercore;
-- check that all chunks are compressed
select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
chunk_name | compression_status
@ -143,7 +143,7 @@ update :hypertable set temp=1.0 where location_id=1;
cluster :chunk1;
ERROR: there is no previously clustered index for table "_hyper_1_1_chunk"
cluster :hypertable using hypertable_location_id_idx;
ERROR: cannot cluster a hyperstore table
ERROR: cannot cluster a hypercore table
\set ON_ERROR_STOP 1
-- some, but not all, data decompressed
select count(*) from :cchunk1;

@ -28,8 +28,8 @@ if(PG_VERSION VERSION_GREATER_EQUAL "14.0")
endif()

if(PG_VERSION VERSION_GREATER_EQUAL "15.0")
list(APPEND TEST_FILES hyperstore_tuple_lock.spec
hyperstore_concurrent_vacuum.spec)
list(APPEND TEST_FILES hypercore_tuple_lock.spec
hypercore_concurrent_vacuum.spec)
endif()

if(CMAKE_BUILD_TYPE MATCHES Debug)

@ -19,14 +19,14 @@ setup {

alter table metrics set (timescaledb.compress, timescaledb.compress_segmentby='device_id');

-- Convert to hyperstore and give chunks predictable names
-- Convert to hypercore and give chunks predictable names
do $$
declare
chunk regclass;
count int = 1;
begin
for chunk in select ch from show_chunks('metrics') ch loop
execute format('alter table %s set access method hyperstore', chunk);
execute format('alter table %s set access method hypercore', chunk);
execute format('alter table %s rename to test_chunk_%s', chunk, count);
count = count + 1;
end loop;

@ -24,7 +24,7 @@ setup {
chunk regclass;
begin
for chunk in select ch from show_chunks('metrics') ch loop
execute format('alter table %s set access method hyperstore', chunk);
execute format('alter table %s set access method hypercore', chunk);
end loop;
end;
$$;

@ -207,8 +207,8 @@ ORDER BY pronamespace::regnamespace::text COLLATE "C", p.oid::regprocedure::text
debug_waitpoint_enable(text)
debug_waitpoint_id(text)
debug_waitpoint_release(text)
ts_hsproxy_handler(internal)
ts_hyperstore_handler(internal)
ts_hypercore_handler(internal)
ts_hypercore_proxy_handler(internal)
ts_now_mock()
add_compression_policy(regclass,"any",boolean,interval,timestamp with time zone,text,interval,name)
add_continuous_aggregate_policy(regclass,"any","any",interval,boolean,timestamp with time zone,text)
|
||||
|
@ -115,29 +115,29 @@ if((${PG_VERSION_MAJOR} GREATER_EQUAL "15"))
merge_compress.sql
cagg_query_using_merge.sql
cagg_refresh_using_merge.sql
hyperstore.sql
hyperstore_columnar.sql
hyperstore_copy.sql
hyperstore_create.sql
hyperstore_cursor.sql
hyperstore_ddl.sql
hyperstore_delete.sql
hyperstore_index_btree.sql
hyperstore_index_hash.sql
hyperstore_insert.sql
hyperstore_join.sql
hyperstore_merge.sql
hyperstore_policy.sql
hyperstore_scans.sql
hyperstore_stats.sql
hyperstore_types.sql
hyperstore_update.sql
hyperstore_vacuum.sql
hyperstore_vacuum_full.sql)
hypercore.sql
hypercore_columnar.sql
hypercore_copy.sql
hypercore_create.sql
hypercore_cursor.sql
hypercore_ddl.sql
hypercore_delete.sql
hypercore_index_btree.sql
hypercore_index_hash.sql
hypercore_insert.sql
hypercore_join.sql
hypercore_merge.sql
hypercore_policy.sql
hypercore_scans.sql
hypercore_stats.sql
hypercore_types.sql
hypercore_update.sql
hypercore_vacuum.sql
hypercore_vacuum_full.sql)
endif()

if((${PG_VERSION_MAJOR} GREATER_EQUAL "16"))
list(APPEND TEST_FILES hyperstore_parallel.sql)
list(APPEND TEST_FILES hypercore_parallel.sql)
endif()

if((${PG_VERSION_MAJOR} GREATER_EQUAL "17"))

@ -3,7 +3,7 @@
-- LICENSE-TIMESCALE for a copy of the license.

\c :TEST_DBNAME :ROLE_SUPERUSER
show timescaledb.hyperstore_indexam_whitelist;
show timescaledb.hypercore_indexam_whitelist;
set role :ROLE_DEFAULT_PERM_USER;

SET timescaledb.arrow_cache_maxsize = 4;
@ -63,7 +63,7 @@ WHERE location = 1;

-- We should be able to set the table access method for a chunk, which
-- will automatically compress the chunk.
ALTER TABLE :chunk SET ACCESS METHOD hyperstore;
ALTER TABLE :chunk SET ACCESS METHOD hypercore;
SET timescaledb.enable_transparent_decompression TO false;

vacuum analyze readings;
@ -131,7 +131,7 @@ SET enable_indexscan = false;

-- Compare the output to transparent decompression. Heap output is
-- shown further down.
SET timescaledb.enable_transparent_decompression TO 'hyperstore';
SET timescaledb.enable_transparent_decompression TO 'hypercore';
EXPLAIN (costs off, timing off, summary off)
SELECT * FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5;
SELECT * FROM :chunk WHERE device < 4 ORDER BY time, device LIMIT 5;
@ -154,7 +154,7 @@ SET enable_seqscan = true;
SET timescaledb.enable_columnarscan = true;

-- With transparent decompression
SET timescaledb.enable_transparent_decompression TO 'hyperstore';
SET timescaledb.enable_transparent_decompression TO 'hypercore';
SELECT * FROM :chunk WHERE location < 4 ORDER BY time, device LIMIT 5;
SET timescaledb.enable_transparent_decompression TO false;

@ -165,7 +165,7 @@ SELECT * FROM :chunk ORDER BY location ASC LIMIT 5;
SELECT * FROM :chunk ORDER BY location ASC LIMIT 5;

-- Show with transparent decompression
SET timescaledb.enable_transparent_decompression TO 'hyperstore';
SET timescaledb.enable_transparent_decompression TO 'hypercore';
SELECT * FROM :chunk ORDER BY location ASC LIMIT 5;
SET timescaledb.enable_transparent_decompression TO false;

@ -182,7 +182,7 @@ FROM _timescaledb_catalog.chunk c1
INNER JOIN _timescaledb_catalog.chunk c2
ON (c1.compressed_chunk_id = c2.id);
ALTER TABLE :chunk SET ACCESS METHOD heap;
SET timescaledb.enable_transparent_decompression TO 'hyperstore';
SET timescaledb.enable_transparent_decompression TO 'hypercore';

-- The compressed chunk should no longer exist
SELECT format('%I.%I', c2.schema_name, c2.table_name)::regclass AS cchunk
@ -224,9 +224,9 @@ SELECT device, count(*) INTO decomp FROM readings GROUP BY device;
SELECT device, orig.count AS orig_count, decomp.count AS decomp_count, (decomp.count - orig.count) AS diff
FROM orig JOIN decomp USING (device) WHERE orig.count != decomp.count;

-- Convert back to hyperstore to check that metadata was cleaned up
-- from last time this table used hyperstore
ALTER TABLE :chunk SET ACCESS METHOD hyperstore;
-- Convert back to hypercore to check that metadata was cleaned up
-- from last time this table used hypercore
ALTER TABLE :chunk SET ACCESS METHOD hypercore;
SET timescaledb.enable_transparent_decompression TO false;

-- Get the chunk's corresponding compressed chunk

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/hyperstore_helpers.sql
\ir include/hypercore_helpers.sql

create table readings(
metric_id serial,
@ -34,7 +34,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = 'readings'::regclass
limit 1 \gset

alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;

-- Test that filtering is not removed on ColumnarScan when it includes
-- columns that cannot be scankeys.

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

create view amrels as
select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
@ -12,7 +12,7 @@ select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent

-- Compress the chunks and check that the counts are the same
select location_id, count(*) into orig from :hypertable GROUP BY location_id;
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
select location_id, count(*) into comp from :hypertable GROUP BY location_id;
select * from orig join comp using (location_id) where orig.count != comp.count;
drop table orig, comp;
@ -66,7 +66,7 @@ create table copy_test1(
humidity float
);
select create_hypertable('copy_test1', 'created_at');
alter table copy_test1 set access method hyperstore;
alter table copy_test1 set access method hypercore;
\copy copy_test1 from 'data/magic.csv' with csv header
select * from copy_test1 order by metric_id;

@ -88,7 +88,7 @@ select count(*) from test1;

select * from amrels where relparent = 'test1'::regclass;

alter table test1 set access method hyperstore;
alter table test1 set access method hypercore;

copy test1 from stdin delimiter ',';
2020-01-02 11:16:00-05,11,16,copy

@ -2,21 +2,21 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/hyperstore_helpers.sql
\ir include/hypercore_helpers.sql
select setseed(0.3);

-- Testing the basic API for creating a hyperstore
-- Testing the basic API for creating a hypercore

-- This should just fail because you cannot create a plain table with
-- hyperstore (yet).
-- hypercore (yet).
\set ON_ERROR_STOP 0
\set VERBOSITY default
create table test2(
created_at timestamp with time zone not null,
location_id int
) using hyperstore;
) using hypercore;

set default_table_access_method to 'hyperstore';
set default_table_access_method to 'hypercore';
create table test2(
created_at timestamp with time zone not null,
location_id int
@ -37,7 +37,7 @@ CREATE TABLE test2(
create index on test2(device_id, created_at);

\set ON_ERROR_STOP 0
alter table test2 set access method hyperstore;
alter table test2 set access method hypercore;
\set ON_ERROR_STOP 1

select create_hypertable('test2', 'created_at');
@ -45,34 +45,34 @@ select create_hypertable('test2', 'created_at');
\set ON_ERROR_STOP 0
-- Should show error since there is no namespace.
alter table test2
set access method hyperstore,
set access method hypercore,
set (compress_segmentby = 'location_id');
\set ON_ERROR_STOP 1

alter table test2
set access method hyperstore,
set access method hypercore,
set (timescaledb.compress_segmentby = 'location_id');

-- Test altering hypertable to hyperstore again. It should be allowed
-- Test altering hypertable to hypercore again. It should be allowed
-- and be a no-op.
alter table test2 set access method hyperstore;
alter table test2 set access method hypercore;

\set ON_ERROR_STOP 0
-- This shows an error, but the error is weird; we should probably get
-- a better one.
alter table test2
set access method hyperstore,
set access method hypercore,
set (compress_segmentby = 'location_id');
\set ON_ERROR_STOP 1

-- Create view for hyperstore rels
-- Create view for hypercore rels
create view amrels as
select cl.oid::regclass as rel, am.amname, inh.inhparent::regclass as relparent
from pg_class cl
inner join pg_am am on (cl.relam = am.oid)
left join pg_inherits inh on (inh.inhrelid = cl.oid);

-- Show that test2 is a hyperstore
-- Show that test2 is a hypercore
select rel, amname
from amrels
where rel='test2'::regclass;
@ -85,11 +85,11 @@ from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;
-- Save the count for test2 for later comparison
select count(*) as orig_test2_count from test2 \gset

-- All chunks should use the hyperstore access method
-- All chunks should use the hypercore access method
select * from amrels
where relparent='test2'::regclass;

-- Show compression settings for hyperstore across catalog and views
-- Show compression settings for hypercore across catalog and views
select * from _timescaledb_catalog.compression_settings;
select * from timescaledb_information.compression_settings;
select * from timescaledb_information.chunk_compression_settings;
@ -106,57 +106,57 @@ insert into test3 values ('2022-06-01', 1, 1.0);
-- save chunk as variable
select ch as chunk from show_chunks('test3') ch limit 1 \gset

-- Check that chunk is NOT using hyperstore
-- Check that chunk is NOT using hypercore
select rel, amname
from amrels
where relparent='test3'::regclass;

\set ON_ERROR_STOP 0
-- Cannot create hyperstore if missing compression settings
alter table :chunk set access method hyperstore;
-- Cannot create hypercore if missing compression settings
alter table :chunk set access method hypercore;
\set ON_ERROR_STOP 1

-- Add compression settings
alter table test3 set (timescaledb.compress, timescaledb.compress_orderby='time desc', timescaledb.compress_segmentby='');
alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;

-- Check that chunk is using hyperstore
-- Check that chunk is using hypercore
select * from amrels where rel=:'chunk'::regclass;

-- Try same thing with compress_chunk()
alter table :chunk set access method heap;
select compress_chunk(:'chunk', compress_using => 'hyperstore');
select compress_chunk(:'chunk', compress_using => 'hypercore');

-- Check that chunk is using hyperstore
-- Check that chunk is using hypercore
select relname, amname
from show_chunks('test3') as chunk
join pg_class on (pg_class.oid = chunk)
join pg_am on (relam = pg_am.oid);

-- Test setting same access method again
alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;

-- Test recompression after changing compression settings
alter table test3 set (timescaledb.compress_segmentby='device');
select compress_chunk(:'chunk', compress_using => 'hyperstore', recompress => true);
select compress_chunk(:'chunk', compress_using => 'hypercore', recompress => true);

-- Create a second chunk
insert into test3 values ('2022-08-01', 1, 1.0);

-- The second chunk should not be a hyperstore chunk
-- The second chunk should not be a hypercore chunk
select * from amrels where relparent='test3'::regclass;

-- Set hyperstore on hypertable
alter table test3 set access method hyperstore;
-- Set hypercore on hypertable
alter table test3 set access method hypercore;

-- Create a third chunk
insert into test3 values ('2022-10-01', 1, 1.0);

-- The third chunk should be a hyperstore chunk
-- The third chunk should be a hypercore chunk
select * from amrels where relparent='test3'::regclass;

-- Test that DDL on a hypertable that is not a Hyperstore but
-- has one chunk that is a Hyperstore works.
-- Test that DDL on a hypertable that is not a Hypercore but
-- has one chunk that is a Hypercore works.
create table test4 (time timestamptz not null, device int, temp float);
select created from create_hypertable('test4', 'time');

@ -166,7 +166,7 @@ select count(ch) from show_chunks('test4') ch;
select ch as chunk from show_chunks('test4') ch limit 1 \gset

alter table test4 set (timescaledb.compress);
alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;
select * from amrels where relparent='test4'::regclass;

-- test that alter table on the hypertable works
@ -174,7 +174,7 @@ alter table test4 add column magic int;

\d :chunk

-- Test that dropping a table with one chunk being a hyperstore works.
-- Test that dropping a table with one chunk being a hypercore works.
drop table test4;

-- Create view to see compression stats. Left join chunks with stats
@ -199,29 +199,29 @@ inner join pg_inherits inh
on (inh.inhrelid = cl.oid)
where c.compressed_chunk_id is not null;

-- There should be no hyperstore chunks that lack compression size stats
-- There should be no hypercore chunks that lack compression size stats
select count(*) as num_stats_missing from compressed_rel_size_stats
where amname = 'hyperstore' and numrows_pre_compression is null;
where amname = 'hypercore' and numrows_pre_compression is null;

-- Show stats for hyperstore chunks. Note that many stats are 0 since
-- Show stats for hypercore chunks. Note that many stats are 0 since
-- chunks were created as a result of inserts and not really
-- compressed
select * from compressed_rel_size_stats order by rel;

-- Decompress hyperstores to check that stats are removed
-- Decompress hypercores to check that stats are removed
select decompress_chunk(rel)
from compressed_rel_size_stats
where amname = 'hyperstore';
where amname = 'hypercore';

-- All stats should be removed
select count(*) as orphaned_stats
from compressed_rel_size_stats;

-- Create hyperstores again and check that compression size stats are
-- Create hypercores again and check that compression size stats are
-- updated showing compressed data
select compress_chunk(ch, compress_using => 'hyperstore')
select compress_chunk(ch, compress_using => 'hypercore')
from show_chunks('test2') ch;
select compress_chunk(ch, compress_using => 'hyperstore')
select compress_chunk(ch, compress_using => 'hypercore')
from show_chunks('test3') ch;

-- Save the stats for later comparison. Exclude the amname column
@ -247,7 +247,7 @@ from show_chunks('test3') ch;

select * from compressed_rel_size_stats order by rel;

-- Check that stats are the same for hyperstore and now with
-- Check that stats are the same for hypercore and now with
-- compression. Should return zero rows if they are the same.
select
rel,
@ -259,7 +259,7 @@ from compressed_rel_size_stats
except
select * from saved_stats;

-- Try migration to hyperstore directly from compressed heap. Run in a
-- Try migration to hypercore directly from compressed heap. Run in a
-- transaction block to make sure changes are visible to following
-- commands.
begin;
@ -276,14 +276,14 @@ set client_min_messages=DEBUG1;
with chunks as (
select ch from show_chunks('test2') ch offset 1
)
select compress_chunk(ch, compress_using => 'hyperstore') from chunks;
select compress_chunk(ch, compress_using => 'hypercore') from chunks;

-- Test direct migration of the remaining chunk via SET ACCESS
-- METHOD. Add some uncompressed data to test migration with partially
-- compressed chunks.
select ch as alter_chunk from show_chunks('test2') ch limit 1 \gset
insert into :alter_chunk values ('2022-06-01 10:00', 4, 4, 4.0, 4.0);
alter table :alter_chunk set access method hyperstore;
alter table :alter_chunk set access method hypercore;

reset client_min_messages;

@ -294,9 +294,9 @@ from show_chunks('test2') ch
join pg_depend dep on (ch = dep.objid)
join pg_am am on (dep.refobjid = am.oid);

-- All chunks should use hyperstore and have rel_size_stats
-- All chunks should use hypercore and have rel_size_stats
select * from compressed_rel_size_stats
where amname = 'hyperstore' order by rel;
where amname = 'hypercore' order by rel;

-- Check that query plan is now ColumnarScan and that all data, except
-- the one uncompressed row, is still compressed after migration
@ -313,57 +313,57 @@ select count(*)=(:orig_test2_count + 1) as count_as_expected from test2;
commit;

\set ON_ERROR_STOP 0
-- Trying to convert a hyperstore to a hyperstore should be an error
-- if if_not_compressed is false and the hyperstore is fully
-- Trying to convert a hypercore to a hypercore should be an error
-- if if_not_compressed is false and the hypercore is fully
-- compressed.
select compress_chunk(ch, compress_using => 'hyperstore', if_not_compressed => false)
select compress_chunk(ch, compress_using => 'hypercore', if_not_compressed => false)
from show_chunks('test2') ch;

-- Compressing using something different than "hyperstore" or "heap"
-- Compressing using something different than "hypercore" or "heap"
-- should not be allowed
select compress_chunk(ch, compress_using => 'non_existing_am')
from show_chunks('test2') ch;

\set ON_ERROR_STOP 1

-- Compressing from hyperstore with compress_using=>heap should lead
-- to recompression of hyperstore with a notice.
-- Compressing from hypercore with compress_using=>heap should lead
-- to recompression of hypercore with a notice.
select compress_chunk(ch, compress_using => 'heap')
from show_chunks('test2') ch;

-- Compressing a hyperstore without specifying compress_using should
-- lead to recompression. First check that :chunk is a hyperstore.
-- Compressing a hypercore without specifying compress_using should
-- lead to recompression. First check that :chunk is a hypercore.
select ch as chunk from show_chunks('test2') ch limit 1 \gset
select * from compressed_rel_size_stats
where amname = 'hyperstore' and rel = :'chunk'::regclass;
where amname = 'hypercore' and rel = :'chunk'::regclass;
insert into :chunk values ('2022-06-01 10:01', 6, 6, 6.0, 6.0);
select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
select compress_chunk(:'chunk');
select ctid from :chunk where created_at = '2022-06-01 10:01' and device_id = 6;
-- Compressing a hyperstore with compress_using=>hyperstore should
-- Compressing a hypercore with compress_using=>hypercore should
-- also lead to recompression
insert into :chunk values ('2022-06-01 11:02', 7, 7, 7.0, 7.0);
select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;
select compress_chunk(:'chunk', compress_using => 'hyperstore');
select compress_chunk(:'chunk', compress_using => 'hypercore');
select ctid from :chunk where created_at = '2022-06-01 11:02' and device_id = 7;

-- Convert all hyperstores back to heap
-- Convert all hypercores back to heap
select decompress_chunk(rel) ch
from compressed_rel_size_stats
where amname = 'hyperstore'
where amname = 'hypercore'
order by ch;

-- Test that it is possible to convert multiple hyperstores in the
-- Test that it is possible to convert multiple hypercores in the
-- same transaction. The goal is to check that all the state is
-- cleaned up between two or more commands in the same transaction.
select ch as chunk2 from show_chunks('test2') ch offset 1 limit 1 \gset
start transaction;
select compress_chunk(:'chunk', compress_using => 'hyperstore');
select compress_chunk(:'chunk2', compress_using => 'hyperstore');
select compress_chunk(:'chunk', compress_using => 'hypercore');
select compress_chunk(:'chunk2', compress_using => 'hypercore');
commit;

select * from compressed_rel_size_stats
where amname = 'hyperstore' and relparent = 'test2'::regclass
where amname = 'hypercore' and relparent = 'test2'::regclass
order by rel;

-- Test that we can compress the old way using compress_using=>heap
@ -403,7 +403,7 @@ select * from only :chunk3;
select * from :chunk3 where created_at = '2022-06-15 16:00' and device_id = 8;

-- Test a more complicated schema from the NYC Taxi data set. This is
-- to test that compression using hyperstore works, since there was an
-- to test that compression using hypercore works, since there was an
-- issue with setting up the tuple sort state during compression.
create table rides (
vendor_id text,
@ -439,7 +439,7 @@ insert into rides values
(6,'2016-01-01 00:00:02','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3),
(356,'2016-01-01 00:00:01','2016-01-01 00:11:55',1,1.20,-73.979423522949219,40.744613647460938,1,-73.992034912109375,40.753944396972656,2,9,0.5,0.5,0,0,0.3,10.3);
-- Check that it is possible to compress
select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('rides') ch;
select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('rides') ch;
select rel, amname from compressed_rel_size_stats
where relparent::regclass = 'rides'::regclass;

@ -2,12 +2,12 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- To generate plans consistently.
set max_parallel_workers_per_gather to 0;

-- Create a function that uses a cursor to scan the Hyperstore
-- Create a function that uses a cursor to scan the Hypercore
-- table. This should work equivalently to a query on the same table.
create function location_humidity_for(
in p_owner integer,
@ -40,7 +40,7 @@ end;
$$
language plpgsql;

select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

-- Compare executing the function with a cursor with a query fetching
-- the same data directly from the hypertable.
@ -107,7 +107,7 @@ create table backward_cursor (time timestamptz, location_id bigint, temp float8)
select create_hypertable('backward_cursor', 'time', create_default_indexes=>false);
alter table backward_cursor set (timescaledb.compress, timescaledb.compress_segmentby='location_id', timescaledb.compress_orderby='time asc');
insert into backward_cursor values ('2024-01-01 01:00', 1, 1.0), ('2024-01-01 02:00', 1, 2.0), ('2024-01-01 03:00', 2, 3.0), ('2024-01-01 04:00', 2, 4.0);
select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('backward_cursor') ch;
select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('backward_cursor') ch;
insert into backward_cursor values ('2024-01-01 05:00', 3, 5.0), ('2024-01-01 06:00', 3, 6.0);

begin;

@ -20,7 +20,7 @@ with
join pg_inherits inh on inh.inhrelid = cl.oid)
select hypertable, chunk, compressed_chunk, amname from chunks join parents using (chunk);

\ir include/hyperstore_helpers.sql
\ir include/hypercore_helpers.sql
\set ECHO all

-- Disable incremental sort to make tests stable
@ -46,7 +46,7 @@ insert into readings (time, location, device, temp, humidity, jdata)
select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100, '{"a":1,"b":2}'::jsonb
from generate_series('2022-06-01'::timestamptz, '2022-06-04'::timestamptz, '5m') t;

select compress_chunk(show_chunks('readings'), compress_using => 'hyperstore');
select compress_chunk(show_chunks('readings'), compress_using => 'hypercore');

-- Insert some extra data to get some non-compressed data as well.
insert into readings (time, location, device, temp, humidity, jdata)

@ -15,9 +15,9 @@ insert into metrics values ('2024-01-01', 1, 1, 1.0), ('2024-01-01', 2, 2, 2.0),
alter table metrics add constraint device_fk foreign key (device) references devices (id) on delete cascade;
alter table metrics set (timescaledb.compress_segmentby = 'device');

-- Make the one chunk a Hyperstore
-- Make the one chunk a Hypercore
select ch as chunk from show_chunks('metrics') ch limit 1 \gset
alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;

-- Show that all data is compressed
select _timescaledb_debug.is_compressed_tid(ctid) as compressed, * from metrics order by time, device;

@ -6,7 +6,7 @@
create extension pageinspect;
set role :ROLE_DEFAULT_PERM_USER;

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- Avoid parallel (index) scans to make test stable
set max_parallel_workers_per_gather to 0;
@ -43,7 +43,7 @@ create index hypertable_device_id_idx on :hypertable (device_id) include (humidi
create index hypertable_owner_idx on :hypertable (owner_id);
create index hypertable_location_id_owner_id_idx on :hypertable (location_id, owner_id);

-- Save index size before switching to hyperstore so that we can
-- Save index size before switching to hypercore so that we can
-- compare sizes after. Don't show the actual sizes because they vary
-- slightly on different platforms.
create table index_sizes_before as
@ -53,12 +53,12 @@ where chunk::regclass = :'chunk2'::regclass
and (attname='location_id' or attname='device_id' or attname='owner_id');

-- Drop some segmentby indexes and recreate them after converting to
-- hyperstore. This is to test having some created before conversion
-- hypercore. This is to test having some created before conversion
-- and some after.
drop index hypertable_owner_idx;
drop index hypertable_location_id_owner_id_idx;

alter table :chunk2 set access method hyperstore;
alter table :chunk2 set access method hypercore;

-- count without indexes
select owner_id, count(*) into owner_orig from :hypertable
@ -77,7 +77,7 @@ select owner_id, count(*) into owner_comp from :hypertable
where owner_id in (3,4,5) group by owner_id;
select * from owner_orig join owner_comp using (owner_id) where owner_orig.count != owner_comp.count;

-- the indexes on segmentby columns should be smaller on hyperstore,
-- the indexes on segmentby columns should be smaller on hypercore,
-- except for the covering index on location_id (because it also
-- includes the non-segmentby column humidity). The device_id index
-- should also remain the same size since it is not on a segmentby
@ -131,7 +131,7 @@ select explain_anonymize(format($$
$$, :'chunk2'));
select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;

select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

vacuum analyze :hypertable;

@ -191,9 +191,9 @@ select explain_analyze_anonymize(format($$
$$, :'hypertable'));

-- We just compare the counts here, not the full content.
select heapam.count as heapam, hyperstore.count as hyperstore
select heapam.count as heapam, hypercore.count as hypercore
from (select count(location_id) from :hypertable where location_id between 5 and 10) heapam,
(select count(location_id) from :hypertable where location_id between 5 and 10) hyperstore;
(select count(location_id) from :hypertable where location_id between 5 and 10) hypercore;

drop table saved_hypertable;

@ -239,7 +239,7 @@ $$, :'chunk1'));
\set VERBOSITY default

---
-- Test that building a UNIQUE index won't work on a hyperstore table
-- Test that building a UNIQUE index won't work on a hypercore table
-- that contains non-unique values.
---
create table non_unique_metrics (time timestamptz, temp float, device int);
@ -247,11 +247,11 @@ select create_hypertable('non_unique_metrics', 'time', create_default_indexes =>
insert into non_unique_metrics values ('2024-01-01', 1.0, 1), ('2024-01-01', 2.0, 1), ('2024-01-02', 3.0, 2);
select ch as non_unique_chunk from show_chunks('non_unique_metrics') ch limit 1 \gset
alter table non_unique_metrics set (timescaledb.compress_segmentby = 'device', timescaledb.compress_orderby = 'time');
alter table :non_unique_chunk set access method hyperstore;
alter table :non_unique_chunk set access method hypercore;

\set ON_ERROR_STOP 0
---
-- UNIQUE index creation on compressed hyperstore should fail due to
-- UNIQUE index creation on compressed hypercore should fail due to
-- non-unique values
---
create unique index on non_unique_metrics (time);
@ -369,8 +369,8 @@ create table only_nulls_null as
select * from nullvalues where only_nulls is null;
select * from only_nulls_null;

-- Convert all chunks to hyperstore and run same queries
select compress_chunk(ch, compress_using=>'hyperstore') from show_chunks('nullvalues') ch;
-- Convert all chunks to hypercore and run same queries
select compress_chunk(ch, compress_using=>'hypercore') from show_chunks('nullvalues') ch;

select c.relname, a.amname FROM pg_class c
join pg_am a on (c.relam = a.oid)
@ -378,7 +378,7 @@ join show_chunks('nullvalues') ch on (ch = c.oid);

-- The explains should be index scans and there should be no rows
-- returned if the result is the same as before when the chunks were
-- not hyperstores.
-- not hypercores.
explain (costs off) select * from nullvalues where location is not null;
select * from nullvalues where location is not null
except

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- Set the number of parallel workers to zero to disable parallel
-- plans. This differs between different PG versions.
@ -35,7 +35,7 @@ $$, :'hypertable'));
select location_id, count(*) into orig from :hypertable
where location_id in (3,4,5) group by location_id;

alter table :chunk2 set access method hyperstore;
alter table :chunk2 set access method hypercore;

--
-- test that indexes work after updates
@ -67,7 +67,7 @@ select explain_anonymize(format($$
$$, :'chunk2'));
select created_at, location_id, temp from :chunk2 where location_id=1 and temp=2.0;

select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

vacuum analyze :hypertable;

@ -2,11 +2,11 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- Compress the chunks and check that the counts are the same
select location_id, count(*) into orig from :hypertable GROUP BY location_id;
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');
select location_id, count(*) into comp from :hypertable GROUP BY location_id;
select * from orig join comp using (location_id) where orig.count != comp.count;
drop table orig, comp;
@ -174,7 +174,7 @@ order by location_id;

drop table :hypertable;

-- Check that we can write to a hyperstore table from another kind of
-- Check that we can write to a hypercore table from another kind of
-- slot even if we have dropped and added attributes.
create table test2 (itime integer, b bigint, t text);
select create_hypertable('test2', by_range('itime', 10));
@ -195,7 +195,7 @@ alter table test2 add column d int;
insert into test2 select t, 'second'::text, 120, 1 from generate_series(11, 15) t;

alter table test2
set access method hyperstore,
set access method hypercore,
set (timescaledb.compress_segmentby = '', timescaledb.compress_orderby = 'c, itime desc');

select compress_chunk(show_chunks('test2'));

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- We disable columnar scan for these tests since we have a dedicated
-- test for this.
@ -10,40 +10,40 @@ set timescaledb.enable_columnarscan to false;

set enable_memoize to false;

-- Create a hyperstore with a few rows and use the big table to join
-- with it. This should put the hyperstore as the inner relation and
-- Create a hypercore with a few rows and use the big table to join
-- with it. This should put the hypercore as the inner relation and
-- trigger rescans.
create table the_hyperstore (
create table the_hypercore (
updated_at timestamptz not null unique,
device_id int,
height float
);
create index on the_hyperstore (device_id);
select from create_hypertable('the_hyperstore', 'updated_at');
create index on the_hypercore (device_id);
select from create_hypertable('the_hypercore', 'updated_at');

-- Fill the table with some data, but less than a single chunk, so
-- that we will get it as an inner relation in the nested loop join.
insert into the_hyperstore
insert into the_hypercore
select t, ceil(random()*5), random()*40
from generate_series('2022-06-01'::timestamptz, '2022-06-10', '1 hour') t;

-- Run joins before making it a hyperstore to have something to
-- Run joins before making it a hypercore to have something to
-- compare with.
select * into expected_inner from :chunk1 join the_hyperstore using (device_id);
select * into expected_inner from :chunk1 join the_hypercore using (device_id);

select created_at, updated_at, o.device_id, i.humidity, o.height
into expected_left
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;

alter table the_hyperstore set (
alter table the_hypercore set (
timescaledb.compress,
timescaledb.compress_segmentby = '',
timescaledb.compress_orderby = 'updated_at desc'
);
select compress_chunk(show_chunks('the_hyperstore'), compress_using => 'hyperstore');
select compress_chunk(show_chunks('the_hypercore'), compress_using => 'hypercore');

vacuum analyze the_hyperstore;
vacuum analyze the_hypercore;

-- Test a merge join. We explicitly set what join methods to enable
-- and disable to avoid flaky tests.
@ -52,7 +52,7 @@ set enable_hashjoin to false;
set enable_nestloop to false;

\set jointype merge
\ir include/hyperstore_join_test.sql
\ir include/hypercore_join_test.sql

-- Test nested loop join.
set enable_mergejoin to false;
@ -60,7 +60,7 @@ set enable_hashjoin to false;
set enable_nestloop to true;

\set jointype nestloop
\ir include/hyperstore_join_test.sql
\ir include/hypercore_join_test.sql

-- Test a hash join.
set enable_mergejoin to false;
@ -68,6 +68,6 @@ set enable_hashjoin to true;
set enable_nestloop to false;

\set jointype hash
\ir include/hyperstore_join_test.sql
\ir include/hypercore_join_test.sql

drop table expected_inner, expected_left;

@ -4,15 +4,15 @@

\c :TEST_DBNAME :ROLE_DEFAULT_PERM_USER;

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- Disable merge and hash join to avoid flaky test.
set enable_mergejoin to false;
set enable_hashjoin to false;

-- There are already tests to merge into uncompressed tables, so just
-- compress all chunks using Hyperstore.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
-- compress all chunks using Hypercore.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

create table source_data (
created_at timestamptz not null,
@ -62,7 +62,7 @@ select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);

-- Recompress all and try to insert the same rows again. This time
-- there should be no rows inserted.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

\x on
select * from :hypertable where not _timescaledb_debug.is_compressed_tid(ctid);

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- Set parallel cost to zero to force parallel plans and avoid flaky test.
set parallel_tuple_cost to 0;
@ -12,7 +12,7 @@ set parallel_setup_cost to 0;
-- will use the index.
drop index hypertable_device_id_idx;

-- Show parallel plan and count on uncompressed (non-hyperstore)
-- Show parallel plan and count on uncompressed (non-hypercore)
-- hypertable
set max_parallel_workers_per_gather=2;

@ -27,9 +27,9 @@ select device_id, count(*) into orig from :hypertable group by device_id;
select device_id, count(*) into orig_chunk from :chunk1 group by device_id;

-----------------------
-- Enable hyperstore --
-- Enable hypercore --
-----------------------
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

-- Show count without parallel plan and without ColumnarScan
set timescaledb.enable_columnarscan=false;
@ -71,7 +71,7 @@ select explain_anonymize(format($$
$$, :'hypertable'));
select owner_id, count(*) from :hypertable where owner_id=1 group by owner_id;

-- Parallel plan with hyperstore on single chunk
-- Parallel plan with hypercore on single chunk
select explain_anonymize(format($$
select device_id, count(*) from %s where device_id=1 group by device_id
$$, :'hypertable'));
@ -82,8 +82,8 @@ select explain_anonymize(format($$
$$, :'hypertable'));
select owner_id, count(*) from :chunk1 where owner_id=1 group by owner_id;

-- Compare hyperstore per-location counts with original counts without
-- hyperstore
-- Compare hypercore per-location counts with original counts without
-- hypercore
select device_id, count(*) into comp from :hypertable group by device_id;
select * from orig join comp using (device_id) where orig.count != comp.count;

@ -54,7 +54,7 @@ order by chunk;
-- Check that compress_using is part of the policy config when non-NULL
select add_compression_policy('readings',
compress_after => '1 day'::interval,
compress_using => 'hyperstore')
compress_using => 'hypercore')
as compression_job \gset

select config from timescaledb_information.jobs where job_id = :compression_job;
@ -62,7 +62,7 @@ select config from timescaledb_information.jobs where job_id = :compression_job;
-- Make sure the policy runs
call run_job(:'compression_job');

-- After policy run all the chunks should be hyperstores
-- After policy run all the chunks should be hypercores
select * from chunk_info
where hypertable = 'readings'
order by chunk;
@ -77,8 +77,8 @@ select _timescaledb_debug.is_compressed_tid(ctid), *
from readings
where time = '2022-06-01 10:14' and device = 1;

-- Add a new policy that doesn't specify hyperstore. It should still
-- recompress hyperstores.
-- Add a new policy that doesn't specify hypercore. It should still
-- recompress hypercores.
select add_compression_policy('readings',
compress_after => '1 day'::interval,
compress_using => 'heap')
@ -100,7 +100,7 @@ select * from readings where time = '2022-06-01 10:14' and device = 1;
-- Test recompression again with a policy that doesn't specify
-- compress_using
select remove_compression_policy('readings');
-- Insert one value into existing hyperstore, also create a new non-hyperstore chunk
-- Insert one value into existing hypercore, also create a new non-hypercore chunk
insert into readings values ('2022-06-01 10:14', 1, 1.0), ('2022-07-01 10:14', 2, 2.0);

-- The new chunk should be heap and not compressed
@ -112,8 +112,8 @@ select add_compression_policy('readings',
compress_after => '1 day'::interval)
as compression_job \gset

-- Run the policy job to recompress hyperstores and compress the new
-- chunk using non-hyperstore compression
-- Run the policy job to recompress hypercores and compress the new
-- chunk using non-hypercore compression
call run_job(:'compression_job');

select * from chunk_info
@ -134,7 +134,7 @@ select timescaledb_experimental.add_policies('daily',
refresh_start_offset => '8 days'::interval,
refresh_end_offset => '1 day'::interval,
compress_after => '9 days'::interval,
compress_using => 'hyperstore');
compress_using => 'hypercore');

select job_id as cagg_compression_job, materialization_hypertable_name as mathyper
from timescaledb_information.jobs j

@ -32,7 +32,7 @@ select format('%I.%I', chunk_schema, chunk_name)::regclass as chunk
where format('%I.%I', hypertable_schema, hypertable_name)::regclass = 'readings'::regclass
limit 1 \gset

alter table :chunk set access method hyperstore;
alter table :chunk set access method hypercore;

--
-- Check that TID scan works for both compressed and non-compressed

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- To generate plans consistently.
set max_parallel_workers_per_gather to 0;
@ -59,14 +59,14 @@ select * from :chunk1 where location_id = 1;
explain (analyze, costs off, timing off, summary off)
select * from normaltable where location_id = 1;

-- Changing to hyperstore will update relstats since it processes all
-- Changing to hypercore will update relstats since it processes all
-- the data
alter table :chunk1 set access method hyperstore;
alter table :chunk1 set access method hypercore;
-- Creating an index on normaltable will also update relstats
create index normaltable_location_id_idx on normaltable (location_id);

-- Relstats should be the same for both tables, except for pages since
-- a hyperstore is compressed. Column stats are not updated.
-- a hypercore is compressed. Column stats are not updated.
select * from relstats_compare;
select * from attrstats_compare;

@ -125,7 +125,7 @@ select * from attrstats_same;

-- ANALYZE also via hypertable root and show that it will recurse to
-- chunks. Make sure the chunk also has partially compressed data
alter table :chunk2 set access method hyperstore;
alter table :chunk2 set access method hypercore;
update :hypertable set device_id = 2 where device_id = 1;
select * from relstats where relid = :'chunk2'::regclass;
select * from attrstats where relid = :'chunk2'::regclass;

@ -2,7 +2,7 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/hyperstore_helpers.sql
\ir include/hypercore_helpers.sql

select setseed(1);

@ -13,7 +13,7 @@ select setseed(1);
\set the_generator ceil(random()*10)
\set the_aggregate sum(value)
\set the_clause value > 0.5
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

-- Test that decompressing and scanning numerics works. These are not
-- batch decompressible.
@ -22,7 +22,7 @@ select setseed(1);
\set the_generator ceil(random()*10)
\set the_aggregate sum(value)
\set the_clause value > 0.5
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

-- Test that decompressing and scanning boolean columns works.
\set the_table test_bool
@ -30,7 +30,7 @@ select setseed(1);
\set the_generator (random() > 0.5)
\set the_aggregate count(value)
\set the_clause value = true
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

\set my_uuid e0317dfc-77cd-46da-a4e9-8626ce49ccad

@ -41,7 +41,7 @@ select setseed(1);
\set the_generator gen_random_uuid()::text
\set the_aggregate count(*)
\set the_clause value = :'my_uuid'
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

-- Test that we can decompress and scan JSON fields without
-- filters. This just tests that decompression works.
@ -51,7 +51,7 @@ select setseed(1);
\set the_generator jsonb_build_object(:'a_name',round(random()*100))
\set the_aggregate sum((value->:'a_name')::int)
\set the_clause true
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

-- Test that we can decompress and scan JSON fields with a filter
-- using JSON operators (these are function calls, so they do not have
@ -62,7 +62,7 @@ select setseed(1);
\set the_generator jsonb_build_object(:'a_name',round(random()*100))
\set the_aggregate sum((value->:'a_name')::int)
\set the_clause ((value->:'a_name')::numeric >= 0.5) and ((value->:'a_name')::numeric <= 0.6)
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

-- Test that we can use NAME type for a field and compare with a
-- constant value. This is a fixed-size type with a size > 8 and we
@ -73,4 +73,4 @@ select setseed(1);
\set the_generator gen_random_uuid()::name
\set the_aggregate count(*)
\set the_clause value = :'my_uuid'
\ir include/hyperstore_type_table.sql
\ir include/hypercore_type_table.sql

@ -4,12 +4,12 @@

\c :TEST_DBNAME :ROLE_SUPERUSER

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

-- TODO(#1068) Parallel sequence scan does not work
set max_parallel_workers_per_gather to 0;

select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

-- Check that all chunks are compressed
select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
@ -71,7 +71,7 @@ commit;

-- Test update of a segment-by column. The selection is to make sure
-- that we have a mix of compressed and uncompressed tuples.
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
from :hypertable
@ -87,7 +87,7 @@ where (created_at, metric_id) in (select created_at, metric_id from to_update)
order by metric_id;

-- Compress all chunks again before testing RETURNING
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hyperstore');
select compress_chunk(show_chunks(:'hypertable'), compress_using => 'hypercore');

select _timescaledb_debug.is_compressed_tid(ctid), metric_id, created_at
from :hypertable
@ -102,7 +102,7 @@ returning _timescaledb_debug.is_compressed_tid(ctid), *;

-- Test update of a segment-by column directly on the chunk. This
-- should fail for compressed rows even for segment-by columns.
select compress_chunk(:'chunk1', compress_using => 'hyperstore');
select compress_chunk(:'chunk1', compress_using => 'hypercore');

select metric_id from :chunk1 limit 1 \gset

@ -19,7 +19,7 @@ from _timescaledb_catalog.chunk cpr_chunk
|
||||
inner join reg_chunk on (cpr_chunk.id = reg_chunk.compressed_chunk_id);
|
||||
|
||||
-- Create two hypertables with same config and data, apart from one
|
||||
-- having a hyperstore chunk (hystable). The regular table (regtable)
|
||||
-- having a hypercore chunk (hystable). The regular table (regtable)
|
||||
-- will be used as a reference.
|
||||
create table hystable(time timestamptz, location bigint, device smallint, temp float4);
|
||||
create table regtable(time timestamptz, location bigint, device smallint, temp float4);
|
||||
@ -34,9 +34,9 @@ values ('2022-06-01 00:01', 1, 1, 1.0),
|
||||
|
||||
insert into hystable select * from regtable;
|
||||
|
||||
-- Make sure new chunks are hyperstore from the start, except
|
||||
-- Make sure new chunks are hypercore from the start, except
|
||||
-- obviously for the chunk that was already created.
|
||||
alter table hystable set access method hyperstore, set (
|
||||
alter table hystable set access method hypercore, set (
|
||||
timescaledb.compress_orderby = 'time',
|
||||
timescaledb.compress_segmentby = 'location'
|
||||
);
|
||||
@ -93,7 +93,7 @@ from pg_index i inner join pg_class c on (i.indexrelid=c.oid)
|
||||
where indrelid = :'hystable_chunk'::regclass
|
||||
and relname like '%hystable_location%' \gset
|
||||
|
||||
alter table :hystable_chunk set access method hyperstore;
|
||||
alter table :hystable_chunk set access method hypercore;
|
||||
|
||||
-- Show new access method on chunk
|
||||
select ch chunk, amname access_method
|
||||
@ -167,14 +167,14 @@ from generate_series('2022-06-01'::timestamptz, '2022-06-10', '60s') t;
|
||||
|
||||
insert into hystable select * from regtable;
|
||||
|
||||
-- All new chunks should be hyperstores since we configured hyperstore
|
||||
-- All new chunks should be hypercores since we configured hypercore
|
||||
-- as default hypertable AM
|
||||
select ch, amname
|
||||
from show_chunks('hystable') ch
|
||||
inner join pg_class cl on (cl.oid = ch)
|
||||
inner join pg_am am on (cl.relam = am.oid);
|
||||
|
||||
-- All (new) compressed chunks should have a hsproxy index
|
||||
-- All (new) compressed chunks should have a hypercore_proxy index
|
||||
select indexrelid::regclass
|
||||
from pg_index i inner join
|
||||
compressed_rels crels on (i.indrelid = crels.compressed_relid);
|
||||
@ -215,7 +215,7 @@ create table hystable(time timestamptz, location int, device int, temp float);
|
||||
select create_hypertable('hystable', 'time', create_default_indexes => false);
|
||||
|
||||
-- This time create the table without a segmentby column
|
||||
alter table hystable set access method hyperstore, set (
|
||||
alter table hystable set access method hypercore, set (
|
||||
timescaledb.compress_orderby = 'time'
|
||||
);
|
||||
|
||||
@ -224,13 +224,13 @@ vacuum (index_cleanup on) hystable;

insert into hystable select * from regtable;

-- All chunks should be hyperstores
-- All chunks should be hypercores
select ch, amname
from show_chunks('hystable') ch
inner join pg_class cl on (cl.oid = ch)
inner join pg_am am on (cl.relam = am.oid);

-- All compressed chunks should have a hsproxy index
-- All compressed chunks should have a hypercore_proxy index
select indexrelid::regclass
from pg_index i inner join
compressed_rels crels on (i.indrelid = crels.compressed_relid);
@ -254,7 +254,7 @@ select t, ceil(random()*10), ceil(random()*30), random()*40, random()*100
from generate_series('2022-06-01'::timestamptz, '2022-07-01', '5m') t;

alter table readings
set access method hyperstore,
set access method hypercore,
set (timescaledb.compress_orderby = 'time',
timescaledb.compress_segmentby = 'device');

|
@ -2,9 +2,9 @@
-- Please see the included NOTICE for copyright information and
-- LICENSE-TIMESCALE for a copy of the license.

\ir include/setup_hyperstore.sql
\ir include/setup_hypercore.sql

alter table :chunk1 set access method hyperstore;
alter table :chunk1 set access method hypercore;

-- check that all chunks are compressed
select chunk_name, compression_status from chunk_compression_stats(:'hypertable');
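Note the check that immediately follows: judging by this test, switching a
chunk's access method to hypercore leaves its rows compressed, much like the
explicit route used at the top of this diff:

    -- Apparently equivalent alternative for a single chunk:
    select compress_chunk(:'chunk1', compress_using => 'hypercore');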
|
@ -7,11 +7,11 @@

-- Test inner join to make sure that it works.
select explain_analyze_anonymize(format($$
select * from %s join the_hyperstore using (device_id)
select * from %s join the_hypercore using (device_id)
$$, :'chunk1'));
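explain_analyze_anonymize is a helper brought in by the helpers file renamed
elsewhere in this commit; its definition is not part of this diff. A rough
sketch of the kind of function this could be (an assumption, not the actual
implementation): run EXPLAIN ANALYZE with stable options and scrub
run-to-run noise so the test output stays deterministic.

    create or replace function explain_analyze_anonymize(query text)
    returns setof text language plpgsql as $fn$
    declare
        ln text;
    begin
        for ln in execute format(
            'explain (analyze, costs off, timing off, summary off) %s', query)
        loop
            -- Replace volatile counters with a placeholder (illustrative):
            return next regexp_replace(ln, '\d+', 'N', 'g');
        end loop;
    end;
    $fn$;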

-- Check that it generates the right result
select * into :inner from :chunk1 join the_hyperstore using (device_id);
select * into :inner from :chunk1 join the_hypercore using (device_id);

\x on
select * from :inner r full join expected_inner e on row(r) = row(e)
@ -21,12 +21,12 @@ where r.device_id is null or e.device_id is null;
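The full join on row(r) = row(e) just above is a symmetric-difference check:
a row that exists in only one of the two relations has no match, so it
survives the null filter, and an empty result means the tables agree. A tiny
self-contained illustration of the pattern:

    -- Returns (1, null) and (null, 3): the rows the two sides disagree on.
    select * from (values (1), (2)) a(x)
    full join (values (2), (3)) b(x) on row(a) = row(b)
    where a.x is null or b.x is null;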
-- Test outer join (left in this case) to make sure that it works.
select explain_analyze_anonymize(format($$
select created_at, updated_at, o.device_id, i.humidity, o.height
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;

select created_at, updated_at, o.device_id, i.humidity, o.height
into :outer
from :chunk1 i left join the_hyperstore o
from :chunk1 i left join the_hypercore o
on i.created_at = o.updated_at and i.device_id = o.device_id;
$$, :'chunk1'));

|
@ -25,15 +25,15 @@ select t, :the_generator
from generate_series('2022-06-01'::timestamp, '2022-06-10', '1 minute') t;
\set ECHO all

-- Save away the table so that we can make sure that a hyperstore
-- Save away the table so that we can make sure that a hypercore
-- table and a heap table produce the same result.
create table :saved_table as select * from :the_table;

-- Compress the rows in the hyperstore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hyperstore');
-- Compress the rows in the hypercore.
select compress_chunk(show_chunks(:'the_table'), compress_using => 'hypercore');
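Since show_chunks returns a set, this single call compresses every chunk of
the table. An equivalent, more explicit spelling (illustrative only):

    select compress_chunk(ch, compress_using => 'hypercore')
    from show_chunks(:'the_table') ch;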

-- This part of the include file will run a query with the aggregate
-- provided by the including file and test that using a hyperstore
-- provided by the including file and test that using a hypercore
-- with compressed rows and a normal table produces the same result
-- for the query with the given aggregate.
\set ECHO queries
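For readers unfamiliar with the include-file pattern: the including test is
expected to set psql variables before \ir'ing this file. A hedged sketch of
such a driver (the_table, saved_table and the_generator appear in this diff;
the aggregate variable name and the include path are guesses):

    \set the_table agg_test
    \set saved_table agg_test_saved
    \set the_generator 'ceil(random()*10)'
    \set the_aggregate 'sum(value)'
    \ir include/hypercore_agg.sql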
|
@ -4,7 +4,7 @@

\set hypertable readings

\ir hyperstore_helpers.sql
\ir hypercore_helpers.sql

create table :hypertable(
metric_id serial,