Mirror of https://github.com/timescale/timescaledb.git, synced 2025-05-16 10:33:27 +08:00
Add WARNING when doing min-max heap scan for adaptive chunking
Adaptive chunking uses the min and max values of previous chunks to estimate their "fill factor". Ideally, min and max should be retrieved using an index, but if no index exists we fall back to a heap scan. A heap scan can be very expensive, so we now raise a WARNING if no index exists. This change also renames set_adaptive_chunk_sizing() to simply set_adaptive_chunking().
This commit is contained in:
parent
6b452a8b9e
commit
2e7b32cd91
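For reference, a usage sketch of the renamed API. The hypertable name `conditions` and the '100MB' size are only illustrative; the function name, argument names, and the 'off' setting come from the DDL and tests changed below:

    -- Enable adaptive chunking with a target chunk size, using the default sizing function.
    SELECT * FROM set_adaptive_chunking('conditions', '100MB');

    -- Disable adaptive chunking again.
    SELECT * FROM set_adaptive_chunking('conditions', chunk_target_size => 'off');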
@@ -32,12 +32,12 @@ CREATE OR REPLACE FUNCTION create_hypertable(
 ) RETURNS VOID AS '@MODULE_PATHNAME@', 'hypertable_create' LANGUAGE C VOLATILE;
 
 -- Set adaptive chunking. To disable, set chunk_target_size => 'off'.
-CREATE OR REPLACE FUNCTION set_adaptive_chunk_sizing(
+CREATE OR REPLACE FUNCTION set_adaptive_chunking(
     hypertable REGCLASS,
     chunk_target_size TEXT,
     INOUT chunk_sizing_func REGPROC = '_timescaledb_internal.calculate_chunk_interval'::regproc,
     OUT chunk_target_size BIGINT
-) RETURNS RECORD AS '@MODULE_PATHNAME@', 'chunk_adaptive_set_chunk_sizing' LANGUAGE C VOLATILE;
+) RETURNS RECORD AS '@MODULE_PATHNAME@', 'chunk_adaptive_set' LANGUAGE C VOLATILE;
 
 -- Update chunk_time_interval for a hypertable.
 --
src/chunk.c
@@ -1055,15 +1055,33 @@ chunk_scan_internal(int indexid,
     return scanner_scan(&ctx);
 }
 
-#define DEFAULT_CHUNKS_PER_INTERVAL 4
-
 /*
  * Get a window of chunks that "preceed" the given dimensional point.
  *
  * For instance, if the dimension is "time", then given a point in time the
  * function returns the recent chunks that come before the chunk that includes
  * that point. The count parameter determines the number or slices the window
- * should include in the given dimension.
+ * should include in the given dimension. Note, that with multi-dimensional
+ * partitioning, there might be multiple chunks in each dimensional slice that
+ * all preceed the given point. For instance, the example below shows two
+ * different situations that each go "back" two slices (count = 2) in the
+ * x-dimension, but returns two vs. eight chunks due to different
+ * partitioning.
+ *
+ * '_____________
+ * '|   |   | * |
+ * '|___|___|___|
+ * '
+ * '
+ * '____ ________
+ * '|   |   | * |
+ * '|___|___|___|
+ * '|   |   |   |
+ * '|___|___|___|
+ * '|   |   |   |
+ * '|___|___|___|
+ * '|   |   |   |
+ * '|___|___|___|
  *
  * Note that the returned chunks will be allocated on the given memory
  * context, inlcuding the list itself. So, beware of not leaking the list if
@@ -1073,17 +1091,30 @@ List *
 chunk_get_window(int32 dimension_id, int64 point, int count, MemoryContext mctx)
 {
     List *chunks = NIL;
-    DimensionVec *dimvec = dimension_slice_scan_by_dimension_before_point(dimension_id, point, count, BackwardScanDirection, mctx);
+    DimensionVec *dimvec;
     int i;
 
+    /* Scan for "count" slices that preceeds the point in the given dimension */
+    dimvec = dimension_slice_scan_by_dimension_before_point(dimension_id,
+                                                            point,
+                                                            count,
+                                                            BackwardScanDirection,
+                                                            mctx);
+
+    /*
+     * For each slice, join with any constraints that reference the slice.
+     * There might be multiple constraints for each slice in case of
+     * multi-dimensional partitioning.
+     */
     for (i = 0; i < dimvec->num_slices; i++)
     {
         DimensionSlice *slice = dimvec->slices[i];
-        ChunkConstraints *ccs = chunk_constraints_alloc(DEFAULT_CHUNKS_PER_INTERVAL, mctx);
+        ChunkConstraints *ccs = chunk_constraints_alloc(1, mctx);
         int j;
 
         chunk_constraint_scan_by_dimension_slice_id(slice->fd.id, ccs, mctx);
 
+        /* For each constraint, find the corresponding chunk */
         for (j = 0; j < ccs->num_constraints; j++)
         {
             ChunkConstraint *cc = &ccs->constraints[j];
@@ -105,7 +105,9 @@ set_effective_memory_cache_size(PG_FUNCTION_ARGS)
  * of the system, while a common recommended setting for shared_buffers is 1/4
  * of system memory. In case shared_buffers is set higher than
  * effective_cache_size, we use the max of the two (a larger shared_buffers is a
- * strange setting though). Ultimately we are limited by system memory.
+ * strange setting though). Ultimately we are limited by system memory. Thus,
+ * this functions returns a value effective_memory_cache which is:
+ * shared_buffers >= effective_memory_cache <= system_mem / 2.
  *
  * Note that this relies on the user setting a good value for
  * effective_cache_size, or otherwise our estimate will be off. Alternatively,
@@ -160,9 +162,9 @@ estimate_effective_memory_cache_size(void)
     return memory_bytes;
 }
 
-/* The default concurrency factor, i.e., the number of chunks we expect to fit
- * in memory at the same time */
-#define DEFAULT_CONCURRENT_CHUNK_USAGE 4
+/* The default the number of chunks we expect to be able to have in cache
+ * memory at the same time */
+#define DEFAULT_NUM_CHUNKS_TO_FIT_IN_CACHE_MEM 4
 
 static inline int64
 calculate_initial_chunk_target_size(void)
@@ -175,23 +177,28 @@ calculate_initial_chunk_target_size(void)
      * hypertables in all schemas and databases, and might not be a good
      * estimate in case of many "old" (unused) hypertables.
      */
-    return estimate_effective_memory_cache_size() / DEFAULT_CONCURRENT_CHUNK_USAGE;
+    return estimate_effective_memory_cache_size() / DEFAULT_NUM_CHUNKS_TO_FIT_IN_CACHE_MEM;
 }
 
+typedef enum MinMaxResult
+{
+    MINMAX_NO_INDEX,
+    MINMAX_NO_TUPLES,
+    MINMAX_FOUND,
+} MinMaxResult;
+
 /*
  * Use a heap scan to find the min and max of a given column of a chunk. This
  * could be a rather costly operation. Should figure out how to keep min-max
  * stats cached.
- *
- * Returns true iff min and max is found.
  */
-static bool
+static MinMaxResult
 minmax_heapscan(Relation rel, Oid atttype, AttrNumber attnum, Datum minmax[2])
 {
     HeapScanDesc scan;
     HeapTuple tuple;
     TypeCacheEntry *tce;
-    bool minmaxnull[2] = {true};
+    bool nulls[2] = {true};
 
     /* Lookup the tuple comparison function from the type cache */
     tce = lookup_type_cache(atttype, TYPECACHE_CMP_PROC | TYPECACHE_CMP_PROC_FINFO);
@@ -206,53 +213,106 @@ minmax_heapscan(Relation rel, Oid atttype, AttrNumber attnum, Datum minmax[2])
         bool isnull;
         Datum value = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
 
+        if (isnull)
+            continue;
+
         /* Check for new min */
-        if (minmaxnull[0] || DatumGetInt32(FunctionCall2(&tce->cmp_proc_finfo, value, minmax[0])) < 0)
+        if (nulls[0] || DatumGetInt32(FunctionCall2(&tce->cmp_proc_finfo, value, minmax[0])) < 0)
         {
-            minmaxnull[0] = false;
+            nulls[0] = false;
             minmax[0] = value;
         }
 
         /* Check for new max */
-        if (minmaxnull[1] || DatumGetInt32(FunctionCall2(&tce->cmp_proc_finfo, value, minmax[1])) > 0)
+        if (nulls[1] || DatumGetInt32(FunctionCall2(&tce->cmp_proc_finfo, value, minmax[1])) > 0)
         {
-            minmaxnull[1] = false;
+            nulls[1] = false;
             minmax[1] = value;
         }
     }
 
     heap_endscan(scan);
 
-    return !minmaxnull[0] && !minmaxnull[1];
+    return (nulls[0] || nulls[1]) ? MINMAX_NO_TUPLES : MINMAX_FOUND;
 }
 
 /*
  * Use an index scan to find the min and max of a given column of a chunk.
- *
- * Returns true iff min and max is found.
  */
-static bool
+static MinMaxResult
 minmax_indexscan(Relation rel, Relation idxrel, AttrNumber attnum, Datum minmax[2])
 {
     IndexScanDesc scan = index_beginscan(rel, idxrel, GetTransactionSnapshot(), 0, 0);
     HeapTuple tuple;
     bool isnull;
+    bool nulls[2] = {true};
     int n = 0;
 
+    nulls[0] = nulls[1] = true;
+
     tuple = index_getnext(scan, BackwardScanDirection);
 
     if (HeapTupleIsValid(tuple))
-        minmax[n++] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
+    {
+        minmax[n] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
+        nulls[n++] = false;
+    }
 
     index_rescan(scan, NULL, 0, NULL, 0);
     tuple = index_getnext(scan, ForwardScanDirection);
 
     if (HeapTupleIsValid(tuple))
-        minmax[n++] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
+    {
+        minmax[n] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
+        nulls[n++] = false;
+    }
 
     index_endscan(scan);
 
-    return n == 2;
+    return (nulls[0] || nulls[1]) ? MINMAX_NO_TUPLES : MINMAX_FOUND;
+}
+
+/*
+ * Do a scan for min and max using and index on the given column.
+ */
+static MinMaxResult
+relation_minmax_indexscan(Relation rel,
+                          Oid atttype,
+                          AttrNumber attnum,
+                          Datum minmax[2])
+{
+    List *indexlist = RelationGetIndexList(rel);
+    ListCell *lc;
+    MinMaxResult res = MINMAX_NO_INDEX;
+
+    foreach(lc, indexlist)
+    {
+        Relation idxrel;
+
+        idxrel = index_open(lfirst_oid(lc), AccessShareLock);
+
+        if (idxrel->rd_att->attrs[0]->attnum == attnum)
+            res = minmax_indexscan(rel, idxrel, attnum, minmax);
+
+        index_close(idxrel, AccessShareLock);
+
+        if (res == MINMAX_FOUND)
+            break;
+    }
+
+    return res;
+}
+
+static bool
+table_has_minmax_index(Oid relid, Oid atttype, AttrNumber attnum)
+{
+    Datum minmax[2];
+    Relation rel = heap_open(relid, AccessShareLock);
+    MinMaxResult res = relation_minmax_indexscan(rel, atttype, attnum, minmax);
+
+    heap_close(rel, AccessShareLock);
+
+    return res != MINMAX_NO_INDEX;
 }
 
 /*
@@ -264,31 +324,29 @@ static bool
 chunk_get_minmax(Oid relid, Oid atttype, AttrNumber attnum, Datum minmax[2])
 {
     Relation rel = heap_open(relid, AccessShareLock);
-    List *indexlist = RelationGetIndexList(rel);
-    ListCell *lc;
-    bool found = false;
+    MinMaxResult res = relation_minmax_indexscan(rel, atttype, attnum, minmax);
 
-    foreach(lc, indexlist)
+    if (res == MINMAX_NO_INDEX)
     {
-        Relation idxrel;
-
-        idxrel = index_open(lfirst_oid(lc), AccessShareLock);
-
-        if (idxrel->rd_att->attrs[0]->attnum == attnum)
-            found = minmax_indexscan(rel, idxrel, attnum, minmax);
-
-        index_close(idxrel, AccessShareLock);
-
-        if (found)
-            break;
-    }
+        ereport(WARNING,
+                (errmsg("no index on \"%s\" found for adaptive chunking on chunk \"%s\"",
+                        get_attname(relid, attnum), get_rel_name(relid)),
+                 errdetail("Adaptive chunking works best with an index on the dimension being adapted.")));
 
-    if (!found)
-        found = minmax_heapscan(rel, atttype, attnum, minmax);
+        res = minmax_heapscan(rel, atttype, attnum, minmax);
+    }
 
     heap_close(rel, AccessShareLock);
 
-    return found;
+    return res == MINMAX_FOUND;
+}
+
+static AttrNumber
+chunk_get_attno(Oid hypertable_relid, Oid chunk_relid, AttrNumber hypertable_attnum)
+{
+    const char *attname = get_attname(hypertable_relid, hypertable_attnum);
+
+    return get_attnum(chunk_relid, attname);
 }
 
 #define CHUNK_SIZING_FUNC_NARGS 3
@@ -427,6 +485,7 @@ calculate_chunk_interval(PG_FUNCTION_ARGS)
     int64 chunk_size,
           slice_interval;
     Datum minmax[2];
+    AttrNumber attno = chunk_get_attno(ht->main_table_relid, chunk->table_id, dim->column_attno);
 
     Assert(NULL != slice);
 
@@ -435,7 +494,8 @@ calculate_chunk_interval(PG_FUNCTION_ARGS)
 
     slice_interval = slice->fd.range_end - slice->fd.range_start;
 
-    if (chunk_get_minmax(chunk->table_id, dim->fd.column_type, dim->column_attno, minmax))
+    if (chunk_get_minmax(chunk->table_id, dim->fd.column_type, attno, minmax))
     {
         int64 min = time_value_to_internal(minmax[0], dim->fd.column_type, false);
         int64 max = time_value_to_internal(minmax[1], dim->fd.column_type, false);
@@ -559,7 +619,7 @@ chunk_sizing_func_validate(regproc func, ChunkSizingInfo *info)
     ReleaseSysCache(tuple);
     ereport(ERROR,
             (errcode(ERRCODE_INVALID_FUNCTION_DEFINITION),
-             errmsg("invalid number of function arguments"),
+             errmsg("invalid function signature"),
              errhint("A chunk sizing function's signature should be (int, bigint, bigint) -> bigint")));
 }
 
@@ -583,37 +643,85 @@ chunk_target_size_in_bytes(const text *target_size_text)
         pg_strcasecmp(target_size, "disable") == 0)
         return 0;
 
-    if (pg_strcasecmp(target_size, "estimate") != 0)
+    if (pg_strcasecmp(target_size, "estimate") == 0)
+        target_size_bytes = calculate_initial_chunk_target_size();
+    else
         target_size_bytes = convert_text_memory_amount_to_bytes(target_size);
 
+    /* Disable if target size is zero or less */
     if (target_size_bytes <= 0)
-        target_size_bytes = calculate_initial_chunk_target_size();
+        target_size_bytes = 0;
 
     return target_size_bytes;
 }
 
+#define MB (1024*1024)
+
 void
-chunk_adaptive_validate_sizing_info(ChunkSizingInfo *info)
+chunk_adaptive_sizing_info_validate(ChunkSizingInfo *info)
 {
+    AttrNumber attnum;
+    Oid atttype;
+
+    if (!OidIsValid(info->table_relid))
+        ereport(ERROR,
+                (errcode(ERRCODE_UNDEFINED_TABLE),
+                 errmsg("table does not exist")));
+
+    if (NULL == info->colname)
+        ereport(ERROR,
+                (errcode(ERRCODE_IO_DIMENSION_NOT_EXIST),
+                 errmsg("no open dimension found for adaptive chunking")));
+
+    attnum = get_attnum(info->table_relid, info->colname);
+    atttype = get_atttype(info->table_relid, attnum);
+
+    if (!OidIsValid(atttype))
+        ereport(ERROR,
+                (errcode(ERRCODE_IO_DIMENSION_NOT_EXIST),
+                 errmsg("no open dimension found for adaptive chunking")));
+
     chunk_sizing_func_validate(info->func, info);
 
-    if (NULL != info->target_size)
-        info->target_size_bytes = chunk_target_size_in_bytes(info->target_size);
-    else
+    if (NULL == info->target_size)
         info->target_size_bytes = 0;
+    else
+        info->target_size_bytes = chunk_target_size_in_bytes(info->target_size);
+
+    /* Don't validate further if disabled */
+    if (info->target_size_bytes <= 0 || !OidIsValid(info->func))
+        return;
+
+    /* Warn of small target sizes */
+    if (info->target_size_bytes > 0 &&
+        info->target_size_bytes < (10 * MB))
+        elog(WARNING, "target chunk size for adaptive chunking is less than 10 MB");
+
+    if (info->check_for_index &&
+        !table_has_minmax_index(info->table_relid, atttype, attnum))
+        ereport(WARNING,
+                (errmsg("no index on \"%s\" found for adaptive chunking on hypertable \"%s\"",
+                        info->colname, get_rel_name(info->table_relid)),
+                 errdetail("Adaptive chunking works best with an index on the dimension being adapted.")));
 }
 
-TS_FUNCTION_INFO_V1(chunk_adaptive_set_chunk_sizing);
+TS_FUNCTION_INFO_V1(chunk_adaptive_set);
 
+/*
+ * Change the settings for adaptive chunking.
+ */
 Datum
-chunk_adaptive_set_chunk_sizing(PG_FUNCTION_ARGS)
+chunk_adaptive_set(PG_FUNCTION_ARGS)
 {
-    Oid relid = PG_GETARG_OID(0);
     ChunkSizingInfo info = {
-        .func = PG_ARGISNULL(2) ? InvalidOid : PG_GETARG_OID(2),
+        .table_relid = PG_GETARG_OID(0),
         .target_size = PG_ARGISNULL(1) ? NULL : PG_GETARG_TEXT_P(1),
+        .func = PG_ARGISNULL(2) ? InvalidOid : PG_GETARG_OID(2),
+        .colname = NULL,
+        .check_for_index = true,
     };
     Hypertable *ht;
+    Dimension *dim;
     Cache *hcache;
     HeapTuple tuple;
     TupleDesc tupdesc;
@@ -621,24 +729,31 @@ chunk_adaptive_set_chunk_sizing(PG_FUNCTION_ARGS)
     Datum values[2];
     bool nulls[2] = {false, false};
 
-    if (!OidIsValid(relid))
+    if (!OidIsValid(info.table_relid))
         ereport(ERROR,
                 (errcode(ERRCODE_UNDEFINED_TABLE),
                  errmsg("table does not exist")));
 
     hcache = hypertable_cache_pin();
-    ht = hypertable_cache_get_entry(hcache, relid);
+    ht = hypertable_cache_get_entry(hcache, info.table_relid);
 
     if (NULL == ht)
         ereport(ERROR,
                 (errcode(ERRCODE_IO_HYPERTABLE_NOT_EXIST),
                  errmsg("table \"%s\" is not a hypertable",
-                        get_rel_name(relid))));
+                        get_rel_name(info.table_relid))));
 
-    chunk_adaptive_validate_sizing_info(&info);
+    /* Get the first open dimension that we will adapt on */
+    dim = hyperspace_get_dimension(ht->space, DIMENSION_TYPE_OPEN, 0);
 
-    if (NULL != info.target_size)
-        info.target_size_bytes = chunk_target_size_in_bytes(info.target_size);
+    if (NULL == dim)
+        ereport(ERROR,
+                (errcode(ERRCODE_IO_DIMENSION_NOT_EXIST),
+                 errmsg("no open dimension found for adaptive chunking")));
+
+    info.colname = NameStr(dim->fd.column_name);
+
+    chunk_adaptive_sizing_info_validate(&info);
 
     if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
         elog(ERROR, "function returning record called in context that cannot accept type record");
@@ -5,9 +5,14 @@
 
 typedef struct ChunkSizingInfo
 {
+    Oid table_relid;
     /* Set manually */
     Oid func;
     text *target_size;
+    const char *colname;        /* The column of the dimension we are adapting
+                                 * on */
+    bool check_for_index;       /* Set if we should check for an index on
+                                 * the dimension we are adapting on */
 
     /* Validated info */
     NameData func_name;
@@ -15,6 +20,6 @@ typedef struct ChunkSizingInfo
     int64 target_size_bytes;
 } ChunkSizingInfo;
 
-void chunk_adaptive_validate_sizing_info(ChunkSizingInfo *info);
+void chunk_adaptive_sizing_info_validate(ChunkSizingInfo *info);
 
 #endif /* TIMESCALEDB_CHUNK_ADAPTIVE_H */
@@ -224,11 +224,14 @@ hypertable_tuple_update(TupleInfo *ti, void *data)
 
     if (OidIsValid(ht->chunk_sizing_func))
     {
+        Dimension *dim = hyperspace_get_dimension(ht->space, DIMENSION_TYPE_OPEN, 0);
         ChunkSizingInfo info = {
+            .table_relid = ht->main_table_relid,
+            .colname = dim == NULL ? NULL : NameStr(dim->fd.column_name),
             .func = ht->chunk_sizing_func,
         };
 
-        chunk_adaptive_validate_sizing_info(&info);
+        chunk_adaptive_sizing_info_validate(&info);
 
         namestrcpy(&ht->fd.chunk_sizing_func_schema, NameStr(info.func_schema));
         namestrcpy(&ht->fd.chunk_sizing_func_name, NameStr(info.func_name));
@@ -1210,8 +1213,11 @@ hypertable_create(PG_FUNCTION_ARGS)
         .partitioning_func = PG_ARGISNULL(9) ? InvalidOid : PG_GETARG_OID(9),
     };
     ChunkSizingInfo chunk_sizing_info = {
+        .table_relid = table_relid,
         .target_size = PG_ARGISNULL(11) ? NULL : PG_GETARG_TEXT_P(11),
         .func = PG_ARGISNULL(12) ? InvalidOid : PG_GETARG_OID(12),
+        .colname = PG_ARGISNULL(1) ? NULL : PG_GETARG_CSTRING(1),
+        .check_for_index = !create_default_indexes,
     };
     Cache *hcache;
     Hypertable *ht;
@@ -1370,7 +1376,7 @@ hypertable_create(PG_FUNCTION_ARGS)
 
     /* Validate and set chunk sizing information */
     if (OidIsValid(chunk_sizing_info.func))
-        chunk_adaptive_validate_sizing_info(&chunk_sizing_info);
+        chunk_adaptive_sizing_info_validate(&chunk_sizing_info);
 
     hypertable_insert(&schema_name,
                       &table_name,
@@ -36,12 +36,13 @@ CREATE TABLE test_adaptive(time timestamptz, temp float, location int);
 SELECT create_hypertable('test_adaptive', 'time',
                          chunk_target_size => '1MB',
                          chunk_sizing_func => 'bad_calculate_chunk_interval');
-ERROR: invalid number of function arguments
+ERROR: invalid function signature
 \set ON_ERROR_STOP 1
 -- Setting sizing func with correct signature should work
 SELECT create_hypertable('test_adaptive', 'time',
                          chunk_target_size => '1MB',
                          chunk_sizing_func => 'calculate_chunk_interval');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
 NOTICE: adding not-null constraint to column "time"
  create_hypertable
 -------------------
@@ -54,6 +55,7 @@ CREATE TABLE test_adaptive(time timestamptz, temp float, location int);
 SELECT create_hypertable('test_adaptive', 'time',
                          chunk_target_size => '1MB',
                          create_default_indexes => true);
+WARNING: target chunk size for adaptive chunking is less than 10 MB
 NOTICE: adding not-null constraint to column "time"
  create_hypertable
 -------------------
@@ -68,7 +70,8 @@ FROM _timescaledb_catalog.hypertable;
 (1 row)
 
 -- Change the target size
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '2MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '2MB');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 2097152
@@ -83,11 +86,11 @@ FROM _timescaledb_catalog.hypertable;
 
 \set ON_ERROR_STOP 0
 -- Setting NULL func should fail
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB', NULL);
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB', NULL);
 ERROR: invalid chunk sizing function
 \set ON_ERROR_STOP 1
 -- Setting NULL size disables adaptive chunking
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', NULL);
+SELECT * FROM set_adaptive_chunking('test_adaptive', NULL);
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 0
@@ -100,14 +103,15 @@ FROM _timescaledb_catalog.hypertable;
  test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
 (1 row)
 
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 1048576
 (1 row)
 
 -- Setting size to 'off' should also disable
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'off');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'off');
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 0
@@ -120,28 +124,36 @@ FROM _timescaledb_catalog.hypertable;
  test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
 (1 row)
 
--- Setting 0 size should do an estimate.
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '0MB');
+-- Setting 0 size should also disable
+SELECT * FROM set_adaptive_chunking('test_adaptive', '0MB');
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
- _timescaledb_internal.calculate_chunk_interval | 536870912
+ _timescaledb_internal.calculate_chunk_interval | 0
 (1 row)
 
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
  table_name | chunk_sizing_func_schema | chunk_sizing_func_name | chunk_target_size
 ---------------+--------------------------+--------------------------+-------------------
- test_adaptive | _timescaledb_internal | calculate_chunk_interval | 536870912
+ test_adaptive | _timescaledb_internal | calculate_chunk_interval | 0
 (1 row)
 
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 1048576
 (1 row)
 
+-- No warning about small target size if > 10MB
+SELECT * FROM set_adaptive_chunking('test_adaptive', '11MB');
+ chunk_sizing_func | chunk_target_size
+------------------------------------------------+-------------------
+ _timescaledb_internal.calculate_chunk_interval | 11534336
+(1 row)
+
 -- Setting size to 'estimate' should also estimate size
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'estimate');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 536870912
@@ -161,7 +173,7 @@ SELECT * FROM test.set_effective_memory_cache_size('512MB');
  536870912
 (1 row)
 
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'estimate');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 134217728
@@ -182,7 +194,8 @@ SELECT * FROM test.set_effective_memory_cache_size('2GB');
 (1 row)
 
 -- Set a reasonable test value
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
  chunk_sizing_func | chunk_target_size
 ------------------------------------------------+-------------------
  _timescaledb_internal.calculate_chunk_interval | 1048576
@@ -223,9 +236,12 @@ SELECT * FROM chunk_relation_size('test_adaptive');
 -- both the calculation of fill-factor of the chunk and its size
 CREATE TABLE test_adaptive_no_index(time timestamptz, temp float, location int);
 -- Size but no explicit func should use default func
+-- No default indexes should warn and use heap scan for min and max
 SELECT create_hypertable('test_adaptive_no_index', 'time',
                          chunk_target_size => '1MB',
                          create_default_indexes => false);
+WARNING: target chunk size for adaptive chunking is less than 10 MB
+WARNING: no index on "time" found for adaptive chunking on hypertable "test_adaptive_no_index"
 NOTICE: adding not-null constraint to column "time"
  create_hypertable
 -------------------
@@ -244,6 +260,21 @@ SELECT time, random() * 35, _timescaledb_internal.get_partition_hash(time) FROM
 generate_series('2017-03-07T18:18:03+00'::timestamptz - interval '175 days',
                 '2017-03-07T18:18:03+00'::timestamptz,
                 '2 minutes') as time;
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_12_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_12_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_13_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_12_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_13_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_14_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_13_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_14_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_15_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_14_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_15_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_16_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_15_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_16_chunk"
+WARNING: no index on "time" found for adaptive chunking on chunk "_hyper_3_17_chunk"
 SELECT * FROM chunk_relation_size('test_adaptive_no_index');
  chunk_id | chunk_table | partitioning_columns | partitioning_column_types | partitioning_hash_functions | ranges | table_bytes | index_bytes | toast_bytes | total_bytes
 ----------+-----------------------------------------+----------------------+------------------------------+-----------------------------+-----------------------------------------+-------------+-------------+-------------+-------------
@@ -263,6 +294,7 @@ CREATE TABLE test_adaptive_space(time timestamptz, temp float, location int);
 SELECT create_hypertable('test_adaptive_space', 'time', 'location', 2,
                          chunk_target_size => '1MB',
                          create_default_indexes => true);
+WARNING: target chunk size for adaptive chunking is less than 10 MB
 NOTICE: adding not-null constraint to column "time"
  create_hypertable
 -------------------
@@ -27,7 +27,7 @@ ORDER BY proname;
  indexes_relation_size
  indexes_relation_size_pretty
  last
- set_adaptive_chunk_sizing
+ set_adaptive_chunking
  set_chunk_time_interval
  set_number_partitions
  show_tablespaces
@@ -58,7 +58,8 @@ BEGIN
     RETURN -1;
 END
 $BODY$;
-SELECT * FROM set_adaptive_chunk_sizing('"test_schema"."two_Partitions"', '1 MB', 'custom_calculate_chunk_interval');
+SELECT * FROM set_adaptive_chunking('"test_schema"."two_Partitions"', '1 MB', 'custom_calculate_chunk_interval');
+WARNING: target chunk size for adaptive chunking is less than 10 MB
  chunk_sizing_func | chunk_target_size
 ---------------------------------+-------------------
  custom_calculate_chunk_interval | 1048576
@@ -54,42 +54,45 @@ SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_targe
 FROM _timescaledb_catalog.hypertable;
 
 -- Change the target size
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '2MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '2MB');
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
 \set ON_ERROR_STOP 0
 -- Setting NULL func should fail
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB', NULL);
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB', NULL);
 \set ON_ERROR_STOP 1
 
 -- Setting NULL size disables adaptive chunking
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', NULL);
+SELECT * FROM set_adaptive_chunking('test_adaptive', NULL);
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
 
 -- Setting size to 'off' should also disable
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'off');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'off');
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
--- Setting 0 size should do an estimate.
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '0MB');
+-- Setting 0 size should also disable
+SELECT * FROM set_adaptive_chunking('test_adaptive', '0MB');
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
+
+-- No warning about small target size if > 10MB
+SELECT * FROM set_adaptive_chunking('test_adaptive', '11MB');
 
 -- Setting size to 'estimate' should also estimate size
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'estimate');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
 -- Use a lower memory setting to test that the calculated chunk_target_size is reduced
 SELECT * FROM test.set_effective_memory_cache_size('512MB');
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', 'estimate');
+SELECT * FROM set_adaptive_chunking('test_adaptive', 'estimate');
 SELECT table_name, chunk_sizing_func_schema, chunk_sizing_func_name, chunk_target_size
 FROM _timescaledb_catalog.hypertable;
 
@@ -97,7 +100,7 @@ FROM _timescaledb_catalog.hypertable;
 SELECT * FROM test.set_effective_memory_cache_size('2GB');
 
 -- Set a reasonable test value
-SELECT * FROM set_adaptive_chunk_sizing('test_adaptive', '1MB');
+SELECT * FROM set_adaptive_chunking('test_adaptive', '1MB');
 
 -- Show the interval length before and after adaptation
 SELECT id, hypertable_id, interval_length FROM _timescaledb_catalog.dimension;
@@ -118,6 +121,7 @@ SELECT * FROM chunk_relation_size('test_adaptive');
 CREATE TABLE test_adaptive_no_index(time timestamptz, temp float, location int);
 
 -- Size but no explicit func should use default func
+-- No default indexes should warn and use heap scan for min and max
 SELECT create_hypertable('test_adaptive_no_index', 'time',
                          chunk_target_size => '1MB',
                          create_default_indexes => false);
@@ -34,7 +34,7 @@ BEGIN
 END
 $BODY$;
 
-SELECT * FROM set_adaptive_chunk_sizing('"test_schema"."two_Partitions"', '1 MB', 'custom_calculate_chunk_interval');
+SELECT * FROM set_adaptive_chunking('"test_schema"."two_Partitions"', '1 MB', 'custom_calculate_chunk_interval');
 
 -- Chunk sizing func set
 SELECT * FROM _timescaledb_catalog.hypertable;