mirror of
https://github.com/timescale/timescaledb.git
synced 2025-05-16 02:23:49 +08:00
Refactor chunk creation to handle chunk collisions and alignment
When new chunks are created, the calculated chunk hypercube might collide or not align with existing chunks when partitioning has changed in one or more dimensions. In such cases, the chunk should be cut to fit the alignment criteria and any collisions should be resolved. Unfortunately, alignment and collision detection wasn't properly handled. This refactoring adds proper axis-aligned bounding box collision detection generalized to N dimensions. It also correctly handles dimension alignment.
This commit is contained in:
parent
bb8ea37965
commit
c2f686dbba
2
Makefile
2
Makefile
@ -47,6 +47,8 @@ SRCS = \
|
||||
src/hypertable.c \
|
||||
src/dimension.c \
|
||||
src/dimension_slice.c \
|
||||
src/dimension_vector.c \
|
||||
src/hypercube.c \
|
||||
src/ddl_utils.c \
|
||||
src/chunk_constraint.c \
|
||||
src/partitioning.c \
|
||||
|
351
sql/chunk.sql
351
sql/chunk.sql
@ -1,349 +1,16 @@
|
||||
-- Build (as text) a query that returns the IDs of all chunks constrained to
-- the dimension slice enclosing the given value in the given dimension.
-- Pure string formatting (no catalog access at call time), hence IMMUTABLE.
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_get_dimension_constraint_sql(
    dimension_id    INTEGER,
    dimension_value BIGINT
)
    RETURNS TEXT LANGUAGE SQL IMMUTABLE AS
$BODY$
SELECT format($$
    SELECT cc.chunk_id
    FROM _timescaledb_catalog.dimension_slice ds
    INNER JOIN _timescaledb_catalog.chunk_constraint cc ON (ds.id = cc.dimension_slice_id)
    WHERE ds.dimension_id = %1$L and ds.range_start <= %2$L and ds.range_end > %2$L
$$,
dimension_id, dimension_value);
$BODY$;
|
||||
|
||||
-- Build (as text) a query that returns the IDs of chunks whose dimension
-- slices enclose ALL of the given (dimension_id, dimension_value) pairs, by
-- INTERSECTing one per-dimension constraint query per pair.
-- (The previous comment here, "get a chunk if it exists", described
-- _timescaledb_internal.chunk_get, not this SQL-string builder.)
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_get_dimensions_constraint_sql(
    dimension_ids    INTEGER[],
    dimension_values BIGINT[]
)
    RETURNS TEXT LANGUAGE SQL STABLE AS
$BODY$
SELECT string_agg(_timescaledb_internal.chunk_get_dimension_constraint_sql(dimension_id,
                                                                           dimension_value),
                  ' INTERSECT ')
FROM (SELECT unnest(dimension_ids)    AS dimension_id,
             unnest(dimension_values) AS dimension_value
     ) AS sub;
$BODY$;
|
||||
|
||||
-- Return the IDs of all chunks whose dimension slices enclose every given
-- (dimension_id, dimension_value) pair. Returns no rows when the input
-- arrays are empty.
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_id_get_by_dimensions(
    dimension_ids    INTEGER[],
    dimension_values BIGINT[]
)
    RETURNS SETOF INTEGER LANGUAGE PLPGSQL STABLE AS
$BODY$
BEGIN
    IF array_length(dimension_ids, 1) > 0 THEN
        -- Execute the dynamically built INTERSECT query.
        RETURN QUERY EXECUTE _timescaledb_internal.chunk_get_dimensions_constraint_sql(dimension_ids,
                                                                                       dimension_values);
    END IF;
END
$BODY$;
|
||||
|
||||
-- Get a chunk if it exists; returns a NULL row otherwise.
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_get(
    dimension_ids    INTEGER[],
    dimension_values BIGINT[]
)
    RETURNS _timescaledb_catalog.chunk LANGUAGE PLPGSQL STABLE AS
$BODY$
DECLARE
    chunk_row _timescaledb_catalog.chunk;
BEGIN
    -- NOTE(review): the scalar subquery errors out if the SETOF function
    -- yields more than one chunk id; this relies on the dimension values
    -- identifying at most one chunk -- confirm.
    SELECT *
    INTO chunk_row
    FROM _timescaledb_catalog.chunk
    WHERE
    id = (SELECT _timescaledb_internal.chunk_id_get_by_dimensions(dimension_ids,
                                                                  dimension_values));
    RETURN chunk_row;
EXCEPTION
    WHEN NO_DATA_FOUND THEN
        -- NOTE(review): SELECT INTO without STRICT does not raise
        -- NO_DATA_FOUND, so this handler appears unreachable -- confirm.
        RETURN NULL;
END
$BODY$;
|
||||
-- C implementation of the default range calculation for open dimensions.
-- NOTE(review): a PL/pgSQL definition with the same signature appears later
-- in this file and replaces this one -- confirm which is intended to win.
CREATE OR REPLACE FUNCTION _timescaledb_internal.dimension_calculate_default_range_open(
        dimension_value BIGINT,
        interval_length BIGINT,
    OUT range_start BIGINT,
    OUT range_end BIGINT)
AS '$libdir/timescaledb', 'dimension_calculate_open_range_default' LANGUAGE C STABLE;
|
||||
|
||||
-- Compute the default [range_start, range_end) slice for a value in a
-- closed ("space-like") dimension whose keyspace [0, range_max] is split
-- into num_slices equal parts.
CREATE OR REPLACE FUNCTION _timescaledb_internal.dimension_calculate_default_range_closed(
        dimension_value BIGINT,
        num_slices SMALLINT,
        range_max BIGINT = 2147483647,
    OUT range_start BIGINT,
    OUT range_end BIGINT)
LANGUAGE PLPGSQL STABLE AS
$BODY$
DECLARE
    inter BIGINT;
BEGIN
    IF dimension_value < 0 THEN
        RAISE 'Dimension values for closed dimensions should be positive. Got: %', dimension_value;
    END IF;

    inter := (range_max / num_slices);

    IF dimension_value >= inter * (num_slices - 1) THEN
        -- Put overflow from integer-division errors in the last range.
        range_start = inter * (num_slices - 1);
        range_end = range_max;
    ELSE
        range_start = (dimension_value / inter) * inter;
        range_end := range_start + inter;
    END IF;
END
$BODY$;
|
||||
|
||||
-- Compute the default [range_start, range_end) interval enclosing
-- dimension_value for an open ("time-like") dimension tiled by
-- interval_length. range_start is inclusive, range_end exclusive.
CREATE OR REPLACE FUNCTION _timescaledb_internal.dimension_calculate_default_range_open(
        dimension_value BIGINT,
        interval_length BIGINT,
    OUT range_start BIGINT,
    OUT range_end BIGINT)
LANGUAGE PLPGSQL STABLE AS
$BODY$
BEGIN
    -- For positive values, integer division finds a lower bound which is
    -- BEFORE the value we want. For negative values, integer division finds
    -- an upper bound which is AFTER the value we want. Therefore for
    -- positive numbers we find the range_start via integer division, while
    -- for negative we find the range_end.
    IF dimension_value >= 0 THEN
        range_start := (dimension_value / interval_length) * interval_length;
        range_end := range_start + interval_length;
    ELSE
        -- The +1 in (dimension_value + 1) makes this work with inclusive
        -- range_start and exclusive range_end.
        range_end := ((dimension_value + 1) / interval_length) * interval_length;
        range_start := range_end - interval_length;
    END IF;
END
$BODY$;
|
||||
|
||||
|
||||
|
||||
-- TODO: unit test
-- Compute the default range for a value in the given dimension, dispatching
-- on whether the dimension is open (interval_length set) or closed.
CREATE OR REPLACE FUNCTION _timescaledb_internal.dimension_calculate_default_range(
        dimension_id INTEGER,
        dimension_value BIGINT,
    OUT range_start BIGINT,
    OUT range_end BIGINT)
LANGUAGE PLPGSQL STABLE AS
$BODY$
DECLARE
    dimension_row _timescaledb_catalog.dimension;
BEGIN
    SELECT *
    FROM _timescaledb_catalog.dimension
    INTO STRICT dimension_row
    WHERE id = dimension_id;

    IF dimension_row.interval_length IS NOT NULL THEN
        -- Open dimension: tile by fixed interval length.
        SELECT * INTO STRICT range_start, range_end
        FROM _timescaledb_internal.dimension_calculate_default_range_open(dimension_value, dimension_row.interval_length);
    ELSE
        -- Closed dimension: split the keyspace into num_slices parts.
        SELECT * INTO STRICT range_start, range_end
        FROM _timescaledb_internal.dimension_calculate_default_range_closed(dimension_value,
                                                                            dimension_row.num_slices);
    END IF;
END
$BODY$;
|
||||
|
||||
-- Calculate the range for a free dimension.
-- Assumes one other fixed dimension.
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_calculate_new_ranges(
        free_dimension_id      INTEGER,
        free_dimension_value   BIGINT,
        fixed_dimension_ids    INTEGER[],
        fixed_dimension_values BIGINT[],
        align                  BOOLEAN,
    OUT new_range_start        BIGINT,
    OUT new_range_end          BIGINT
)
LANGUAGE PLPGSQL STABLE AS
$BODY$
DECLARE
    overlap_value   BIGINT;
    alignment_found BOOLEAN := FALSE;
BEGIN
    new_range_start := NULL;
    new_range_end := NULL;

    IF align THEN
        -- When aligning, first see if another chunk already has a slice in
        -- the free dimension that covers the value; if so, reuse its range.
        SELECT free_slice.range_start, free_slice.range_end
        INTO new_range_start, new_range_end
        FROM _timescaledb_catalog.chunk c
        INNER JOIN _timescaledb_catalog.chunk_constraint cc ON (cc.chunk_id = c.id)
        INNER JOIN _timescaledb_catalog.dimension_slice free_slice ON (free_slice.id = cc.dimension_slice_id AND free_slice.dimension_id = free_dimension_id)
        WHERE
        free_slice.range_end > free_dimension_value and free_slice.range_start <= free_dimension_value
        LIMIT 1;

        alignment_found := new_range_start IS NOT NULL;
    END IF;

    IF NOT alignment_found THEN
        -- Either not an aligned dimension, or no existing slice matched:
        -- fall back to the default range calculation.
        SELECT *
        INTO new_range_start, new_range_end
        FROM _timescaledb_internal.dimension_calculate_default_range(free_dimension_id, free_dimension_value);
    END IF;

    -- Check whether the new chunk interval overlaps with existing chunks.
    -- First: does some existing chunk contain new_range_start?
    SELECT free_slice.range_end
    INTO overlap_value
    FROM _timescaledb_catalog.chunk c
    INNER JOIN _timescaledb_catalog.chunk_constraint cc ON (cc.chunk_id = c.id)
    INNER JOIN _timescaledb_catalog.dimension_slice free_slice ON (free_slice.id = cc.dimension_slice_id AND free_slice.dimension_id = free_dimension_id)
    WHERE
    c.id = (
        SELECT _timescaledb_internal.chunk_id_get_by_dimensions(free_dimension_id || fixed_dimension_ids,
                                                                new_range_start || fixed_dimension_values)
    )
    ORDER BY free_slice.range_end DESC
    LIMIT 1;

    IF FOUND THEN
        -- A chunk overlaps with new_range_start: cut new_range_start to
        -- begin where that chunk ends.
        IF alignment_found THEN
            RAISE EXCEPTION 'Should never happen: needed to cut an aligned dimension.
            Free_dimension %. Existing(end): %, New(start):%',
            free_dimension_id, overlap_value, new_range_start
            USING ERRCODE = 'IO501';
        END IF;
        new_range_start := overlap_value;
    END IF;

    -- Second: does some existing chunk contain new_range_end?
    SELECT free_slice.range_start
    INTO overlap_value
    FROM _timescaledb_catalog.chunk c
    INNER JOIN _timescaledb_catalog.chunk_constraint cc
    ON (cc.chunk_id = c.id)
    INNER JOIN _timescaledb_catalog.dimension_slice free_slice
    ON (free_slice.id = cc.dimension_slice_id AND free_slice.dimension_id = free_dimension_id)
    WHERE
    c.id = (
        SELECT _timescaledb_internal.chunk_id_get_by_dimensions(free_dimension_id || fixed_dimension_ids,
                                                                new_range_end - 1 || fixed_dimension_values)
    )
    ORDER BY free_slice.range_start ASC
    LIMIT 1;

    IF FOUND THEN
        -- At least one chunk starts inside the new range: cut the end to match.
        IF alignment_found THEN
            RAISE EXCEPTION 'Should never happen: needed to cut an aligned dimension.
            Free_dimension %. Existing(start): %, New(end):%',
            free_dimension_id, overlap_value, new_range_end
            USING ERRCODE = 'IO501';
        END IF;
        new_range_end := overlap_value;
    END IF;
END
$BODY$;
|
||||
|
||||
-- Creates the row in the chunk table. Prerequisite: appropriate lock.
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_create_after_lock(
    dimension_ids    INTEGER[],
    dimension_values BIGINT[]
)
    RETURNS VOID LANGUAGE PLPGSQL VOLATILE AS
$BODY$
DECLARE
    dimension_row       _timescaledb_catalog.dimension;
    hypertable_id       INTEGER;
    free_index          INTEGER;
    fixed_dimension_ids INTEGER[];
    fixed_values        BIGINT[];
    free_range_start    BIGINT;
    free_range_end      BIGINT;
    slice_ids           INTEGER[];
    slice_id            INTEGER;
BEGIN
    -- All dimensions belong to the same hypertable; use the first.
    SELECT d.hypertable_id
    INTO STRICT hypertable_id
    FROM _timescaledb_catalog.dimension d
    WHERE d.id = dimension_ids[1];

    slice_ids = NULL;
    FOR free_index IN 1 .. array_upper(dimension_ids, 1)
    LOOP
        -- Keep one dimension free and the rest fixed.
        fixed_dimension_ids = dimension_ids[:free_index-1]
                              || dimension_ids[free_index+1:];
        fixed_values = dimension_values[:free_index-1]
                       || dimension_values[free_index+1:];

        SELECT *
        INTO STRICT dimension_row
        FROM _timescaledb_catalog.dimension
        WHERE id = dimension_ids[free_index];

        SELECT *
        INTO free_range_start, free_range_end
        FROM _timescaledb_internal.chunk_calculate_new_ranges(
            dimension_ids[free_index], dimension_values[free_index],
            fixed_dimension_ids, fixed_values, dimension_row.aligned);

        -- Do not use RETURNING here (ON CONFLICT DO NOTHING).
        INSERT INTO _timescaledb_catalog.dimension_slice
        (dimension_id, range_start, range_end)
        VALUES(dimension_ids[free_index], free_range_start, free_range_end)
        ON CONFLICT DO NOTHING;

        -- Fetch the slice id whether it was just inserted or already existed.
        SELECT id INTO STRICT slice_id
        FROM _timescaledb_catalog.dimension_slice ds
        WHERE ds.dimension_id = dimension_ids[free_index] AND
              ds.range_start = free_range_start AND ds.range_end = free_range_end;

        slice_ids = slice_ids || slice_id;
    END LOOP;

    -- Create the chunk row and one constraint row per dimension slice.
    WITH chunk AS (
        INSERT INTO _timescaledb_catalog.chunk (id, hypertable_id, schema_name, table_name)
        SELECT seq_id, h.id, h.associated_schema_name,
               format('%s_%s_chunk', h.associated_table_prefix, seq_id)
        FROM
        nextval(pg_get_serial_sequence('_timescaledb_catalog.chunk','id')) seq_id,
        _timescaledb_catalog.hypertable h
        WHERE h.id = hypertable_id
        RETURNING *
    )
    INSERT INTO _timescaledb_catalog.chunk_constraint (dimension_slice_id, chunk_id)
    SELECT slice_id_to_insert, chunk.id FROM chunk, unnest(slice_ids) AS slice_id_to_insert;
END
$BODY$;
|
||||
|
||||
-- Creates and returns a new chunk, taking a lock on the chunk table.
-- static
CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_create(
    dimension_ids    INTEGER[],
    dimension_values BIGINT[]
)
    RETURNS _timescaledb_catalog.chunk LANGUAGE PLPGSQL VOLATILE
    SECURITY DEFINER SET search_path = ''
    AS
$BODY$
DECLARE
    chunk_row _timescaledb_catalog.chunk;
BEGIN
    LOCK TABLE _timescaledb_catalog.chunk IN EXCLUSIVE MODE;

    -- Recheck under the lock: another session may have created the chunk
    -- between the caller's check and acquiring the lock.
    chunk_row := _timescaledb_internal.chunk_get(dimension_ids, dimension_values);

    IF chunk_row IS NULL THEN
        PERFORM _timescaledb_internal.chunk_create_after_lock(dimension_ids, dimension_values);
        chunk_row := _timescaledb_internal.chunk_get(dimension_ids, dimension_values);
    END IF;

    IF chunk_row IS NULL THEN -- recheck
        RAISE EXCEPTION 'Should never happen: chunk not found after creation'
        USING ERRCODE = 'IO501';
    END IF;

    RETURN chunk_row;
END
$BODY$;
|
||||
OUT range_start BIGINT,
|
||||
OUT range_end BIGINT)
|
||||
AS '$libdir/timescaledb', 'dimension_calculate_closed_range_default' LANGUAGE C STABLE;
|
||||
|
||||
-- Trigger for when chunk rows are changed.
|
||||
-- On Insert: create chunk table, add indexes, add triggers.
|
||||
|
@ -0,0 +1,19 @@
|
||||
-- Update script: detach the chunk-creation helper functions from the
-- extension, then drop them (their logic moved into C). Statement order is
-- preserved deliberately: functions must leave the extension before DROP,
-- and CASCADE handles remaining dependents.
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_create(INTEGER[], BIGINT[]);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_get(INTEGER[], BIGINT[]);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_create_after_lock(INTEGER[], BIGINT[]);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.dimension_calculate_default_range_closed(BIGINT, SMALLINT, BIGINT, OUT BIGINT, OUT BIGINT);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.dimension_calculate_default_range(INTEGER, BIGINT, OUT BIGINT, OUT BIGINT);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_calculate_new_ranges(INTEGER, BIGINT, INTEGER[], BIGINT[], BOOLEAN, OUT BIGINT, OUT BIGINT);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_id_get_by_dimensions(INTEGER[], BIGINT[]);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_get_dimensions_constraint_sql(INTEGER[], BIGINT[]);
ALTER EXTENSION timescaledb DROP FUNCTION _timescaledb_internal.chunk_get_dimension_constraint_sql(INTEGER, BIGINT);

DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_create(INTEGER[], BIGINT[]) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_get(INTEGER[], BIGINT[]) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_create_after_lock(INTEGER[], BIGINT[]) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.dimension_calculate_default_range_closed(BIGINT, SMALLINT, BIGINT, OUT BIGINT, OUT BIGINT) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.dimension_calculate_default_range(INTEGER, BIGINT, OUT BIGINT, OUT BIGINT) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_calculate_new_ranges(INTEGER, BIGINT, INTEGER[], BIGINT[], BOOLEAN, OUT BIGINT, OUT BIGINT) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_id_get_by_dimensions(INTEGER[], BIGINT[]) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_get_dimensions_constraint_sql(INTEGER[], BIGINT[]) CASCADE;
DROP FUNCTION IF EXISTS _timescaledb_internal.chunk_get_dimension_constraint_sql(INTEGER, BIGINT) CASCADE;
|
128
src/catalog.c
128
src/catalog.c
@ -1,8 +1,15 @@
|
||||
#include <postgres.h>
|
||||
#include <catalog/pg_namespace.h>
|
||||
#include <catalog/namespace.h>
|
||||
#include <catalog/indexing.h>
|
||||
#include <utils/lsyscache.h>
|
||||
#include <utils/builtins.h>
|
||||
#include <utils/syscache.h>
|
||||
#include <access/xact.h>
|
||||
#include <access/htup_details.h>
|
||||
#include <miscadmin.h>
|
||||
#include <commands/dbcommands.h>
|
||||
#include <commands/sequence.h>
|
||||
|
||||
#include "catalog.h"
|
||||
#include "extension.h"
|
||||
@ -21,7 +28,7 @@ typedef struct TableIndexDef
|
||||
char **names;
|
||||
} TableIndexDef;
|
||||
|
||||
const static TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES] = {
|
||||
static const TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES] = {
|
||||
[HYPERTABLE] = {
|
||||
.length = _MAX_HYPERTABLE_INDEX,
|
||||
.names = (char *[]) {
|
||||
@ -58,6 +65,14 @@ const static TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES]
|
||||
}
|
||||
};
|
||||
|
||||
static const char *catalog_table_serial_id_names[_MAX_CATALOG_TABLES] = {
|
||||
[HYPERTABLE] = CATALOG_SCHEMA_NAME ".hypertable_id_seq",
|
||||
[DIMENSION] = CATALOG_SCHEMA_NAME ".dimension_id_seq",
|
||||
[DIMENSION_SLICE] = CATALOG_SCHEMA_NAME ".dimension_slice_id_seq",
|
||||
[CHUNK] = CATALOG_SCHEMA_NAME ".chunk_id_seq",
|
||||
[CHUNK_CONSTRAINT] = NULL,
|
||||
};
|
||||
|
||||
/* Names for proxy tables used for cache invalidation. Must match names in
|
||||
* sql/cache.sql */
|
||||
static const char *cache_proxy_table_names[_MAX_CACHE_TYPES] = {
|
||||
@ -76,6 +91,30 @@ catalog_is_valid(Catalog *catalog)
|
||||
return catalog != NULL && OidIsValid(catalog->database_id);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the user ID of the catalog owner.
|
||||
*/
|
||||
static Oid
|
||||
catalog_owner(void)
|
||||
{
|
||||
HeapTuple tuple;
|
||||
Oid owner_oid;
|
||||
Oid nsp_oid = get_namespace_oid(CATALOG_SCHEMA_NAME, false);
|
||||
|
||||
tuple = SearchSysCache1(NAMESPACEOID, ObjectIdGetDatum(nsp_oid));
|
||||
|
||||
if (!HeapTupleIsValid(tuple))
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_UNDEFINED_SCHEMA),
|
||||
errmsg("schema with OID %u does not exist", nsp_oid)));
|
||||
|
||||
owner_oid = ((Form_pg_namespace) GETSTRUCT(tuple))->nspowner;
|
||||
|
||||
ReleaseSysCache(tuple);
|
||||
|
||||
return owner_oid;
|
||||
}
|
||||
|
||||
Catalog *
|
||||
catalog_get(void)
|
||||
{
|
||||
@ -94,6 +133,7 @@ catalog_get(void)
|
||||
catalog.database_id = MyDatabaseId;
|
||||
strncpy(catalog.database_name, get_database_name(MyDatabaseId), NAMEDATALEN);
|
||||
catalog.schema_id = get_namespace_oid(CATALOG_SCHEMA_NAME, false);
|
||||
catalog.owner_uid = catalog_owner();
|
||||
|
||||
if (catalog.schema_id == InvalidOid)
|
||||
elog(ERROR, "Oid lookup failed for schema %s", CATALOG_SCHEMA_NAME);
|
||||
@ -101,6 +141,7 @@ catalog_get(void)
|
||||
for (i = 0; i < _MAX_CATALOG_TABLES; i++)
|
||||
{
|
||||
Oid id;
|
||||
const char *sequence_name;
|
||||
int number_indexes,
|
||||
j;
|
||||
|
||||
@ -127,6 +168,17 @@ catalog_get(void)
|
||||
}
|
||||
|
||||
catalog.tables[i].name = catalog_table_names[i];
|
||||
sequence_name = catalog_table_serial_id_names[i];
|
||||
|
||||
if (NULL != sequence_name)
|
||||
{
|
||||
RangeVar *sequence;
|
||||
|
||||
sequence = makeRangeVarFromNameList(stringToQualifiedNameList(sequence_name));
|
||||
catalog.tables[i].serial_relid = RangeVarGetRelid(sequence, NoLock, false);
|
||||
}
|
||||
else
|
||||
catalog.tables[i].serial_relid = InvalidOid;
|
||||
}
|
||||
|
||||
catalog.cache_schema_id = get_namespace_oid(CACHE_SCHEMA_NAME, false);
|
||||
@ -175,3 +227,77 @@ catalog_get_cache_proxy_id_by_name(Catalog *catalog, const char *relname)
|
||||
|
||||
return catalog->caches[i].inval_proxy_id;
|
||||
}
|
||||
|
||||
/*
|
||||
* Become the user that owns the catalog schema.
|
||||
*
|
||||
* This might be necessary for users that do operations that require changes to
|
||||
* the catalog.
|
||||
*
|
||||
* The caller should pass a CatalogSecurityContext where the current security
|
||||
* context will be saved. The original security context can later be restored
|
||||
* with catalog_restore_user().
|
||||
*/
|
||||
bool
|
||||
catalog_become_owner(Catalog *catalog, CatalogSecurityContext *sec_ctx)
|
||||
{
|
||||
GetUserIdAndSecContext(&sec_ctx->saved_uid, &sec_ctx->saved_security_context);
|
||||
|
||||
if (sec_ctx->saved_uid != catalog->owner_uid)
|
||||
{
|
||||
SetUserIdAndSecContext(catalog->owner_uid,
|
||||
sec_ctx->saved_security_context | SECURITY_LOCAL_USERID_CHANGE);
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Restore the security context of the original user after becoming the catalog
|
||||
* owner. The user should pass the original CatalogSecurityContext that was used
|
||||
* with catalog_become_owner().
|
||||
*/
|
||||
void
|
||||
catalog_restore_user(CatalogSecurityContext *sec_ctx)
|
||||
{
|
||||
SetUserIdAndSecContext(sec_ctx->saved_uid, sec_ctx->saved_security_context);
|
||||
}
|
||||
|
||||
/*
|
||||
* Get the next serial ID for a catalog table, if one exists for the given table.
|
||||
*/
|
||||
int64
|
||||
catalog_table_next_seq_id(Catalog *catalog, enum CatalogTable table)
|
||||
{
|
||||
Oid relid = catalog->tables[table].serial_relid;
|
||||
|
||||
if (!OidIsValid(relid))
|
||||
elog(ERROR, "No serial id column for table %s", catalog_table_names[table]);
|
||||
|
||||
return DatumGetInt64(DirectFunctionCall1(nextval_oid, ObjectIdGetDatum(relid)));
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a new row into a catalog table.
|
||||
*/
|
||||
void
|
||||
catalog_insert(Relation rel, HeapTuple tuple)
|
||||
{
|
||||
simple_heap_insert(rel, tuple);
|
||||
CatalogUpdateIndexes(rel, tuple);
|
||||
/* Make changes visible */
|
||||
CommandCounterIncrement();
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert a new row into a catalog table.
|
||||
*/
|
||||
void
|
||||
catalog_insert_values(Relation rel, TupleDesc tupdesc, Datum *values, bool *nulls)
|
||||
{
|
||||
HeapTuple tuple = heap_form_tuple(tupdesc, values, nulls);
|
||||
|
||||
catalog_insert(rel, tuple);
|
||||
heap_freetuple(tuple);
|
||||
}
|
||||
|
@ -3,6 +3,8 @@
|
||||
|
||||
#include <postgres.h>
|
||||
|
||||
#include <utils/rel.h>
|
||||
#include <access/heapam.h>
|
||||
/*
|
||||
* TimescaleDB catalog.
|
||||
*
|
||||
@ -312,6 +314,7 @@ typedef struct Catalog
|
||||
const char *name;
|
||||
Oid id;
|
||||
Oid index_ids[_MAX_TABLE_INDEXES];
|
||||
Oid serial_relid;
|
||||
} tables[_MAX_CATALOG_TABLES];
|
||||
|
||||
Oid cache_schema_id;
|
||||
@ -319,8 +322,16 @@ typedef struct Catalog
|
||||
{
|
||||
Oid inval_proxy_id;
|
||||
} caches[_MAX_CACHE_TYPES];
|
||||
Oid owner_uid;
|
||||
} Catalog;
|
||||
|
||||
|
||||
typedef struct CatalogSecurityContext
|
||||
{
|
||||
Oid saved_uid;
|
||||
int saved_security_context;
|
||||
} CatalogSecurityContext;
|
||||
|
||||
bool catalog_is_valid(Catalog *catalog);
|
||||
Catalog *catalog_get(void);
|
||||
void catalog_reset(void);
|
||||
@ -330,4 +341,12 @@ Oid catalog_get_cache_proxy_id_by_name(Catalog *catalog, const char *relname);
|
||||
|
||||
const char *catalog_get_cache_proxy_name(CacheType type);
|
||||
|
||||
bool catalog_become_owner(Catalog *catalog, CatalogSecurityContext *sec_ctx);
|
||||
void catalog_restore_user(CatalogSecurityContext *sec_ctx);
|
||||
|
||||
int64 catalog_table_next_seq_id(Catalog *catalog, enum CatalogTable table);
|
||||
|
||||
void catalog_insert(Relation rel, HeapTuple tuple);
|
||||
void catalog_insert_values(Relation rel, TupleDesc tupdesc, Datum *values, bool *nulls);
|
||||
|
||||
#endif /* TIMESCALEDB_CATALOG_H */
|
||||
|
492
src/chunk.c
492
src/chunk.c
@ -1,20 +1,26 @@
|
||||
#include <postgres.h>
|
||||
#include <catalog/namespace.h>
|
||||
#include <fmgr.h>
|
||||
#include <utils/builtins.h>
|
||||
#include <utils/lsyscache.h>
|
||||
#include <utils/hsearch.h>
|
||||
#include <utils/memutils.h>
|
||||
#include <access/htup_details.h>
|
||||
|
||||
#include "chunk.h"
|
||||
#include "catalog.h"
|
||||
#include "dimension.h"
|
||||
#include "dimension_slice.h"
|
||||
#include "dimension_vector.h"
|
||||
#include "partitioning.h"
|
||||
#include "hypertable.h"
|
||||
#include "hypercube.h"
|
||||
#include "metadata_queries.h"
|
||||
#include "scanner.h"
|
||||
|
||||
typedef bool (*on_chunk_func) (ChunkScanCtx *ctx, Chunk *chunk);
|
||||
|
||||
static void chunk_scan_ctx_init(ChunkScanCtx *ctx, Hyperspace *hs, Point *p);
|
||||
static void chunk_scan_ctx_destroy(ChunkScanCtx *ctx);
|
||||
static void chunk_collision_scan(ChunkScanCtx *scanctx, Hypercube *cube);
|
||||
static int chunk_scan_ctx_foreach_chunk(ChunkScanCtx *ctx, on_chunk_func on_chunk, uint16 limit);
|
||||
|
||||
static void
|
||||
chunk_fill(Chunk *chunk, HeapTuple tuple)
|
||||
@ -41,12 +47,288 @@ chunk_create_from_tuple(HeapTuple tuple, int16 num_constraints)
|
||||
return chunk;
|
||||
}
|
||||
|
||||
Chunk *
|
||||
chunk_create(Hyperspace *hs, Point *p)
|
||||
/*-
|
||||
* Align a chunk's hypercube in 'aligned' dimensions.
|
||||
*
|
||||
* Alignment ensures that chunks line up in a particular dimension, i.e., their
|
||||
* ranges should either be identical or not overlap at all.
|
||||
*
|
||||
* Non-aligned:
|
||||
*
|
||||
* ' [---------] <- existing slice
|
||||
* ' [---------] <- calculated (new) slice
|
||||
*
|
||||
* To align the slices above there are two cases depending on where the
|
||||
* insertion point happens:
|
||||
*
|
||||
* Case 1 (reuse slice):
|
||||
*
|
||||
* ' [---------]
|
||||
* ' [--x------]
|
||||
*
|
||||
* The insertion point x falls within the range of the existing slice. We should
|
||||
* reuse the existing slice rather than creating a new one.
|
||||
*
|
||||
* Case 2 (cut to align):
|
||||
*
|
||||
* ' [---------]
|
||||
* ' [-------x-]
|
||||
*
|
||||
* The insertion point falls outside the range of the existing slice and we need
|
||||
* to cut the new slice to line up.
|
||||
*
|
||||
* ' [---------]
|
||||
* ' cut [---]
|
||||
* '
|
||||
*
|
||||
* Note that slice reuse (case 1) happens already when calculating the tentative
|
||||
* hypercube for the chunk, and is thus already performed once reaching this
|
||||
* function. Thus, we deal only with case 2 here. Also note that a new slice
|
||||
* might overlap in complicated ways, requiring multiple cuts. For instance,
|
||||
* consider the following situation:
|
||||
*
|
||||
* ' [------] [-] [---]
|
||||
* ' [---x-------] <- calculated slice
|
||||
*
|
||||
* This should be cut-to-align as follows:
|
||||
*
|
||||
* ' [------] [-] [---]
|
||||
* ' [x]
|
||||
*
|
||||
* After a chunk collision scan, this function is called for each chunk in the
|
||||
* chunk scan context. Chunks in the scan context may have only a partial set of
|
||||
* slices if they only overlap in some, but not all, dimensions (see
|
||||
* illustrations below). Still, partial chunks may still be of interest for
|
||||
* alignment in a particular dimension. Thus, if a chunk has an overlapping
|
||||
* slice in an aligned dimension, we cut to not overlap with that slice.
|
||||
*/
|
||||
static bool
|
||||
do_dimension_alignment(ChunkScanCtx *scanctx, Chunk *chunk)
|
||||
{
|
||||
Chunk *chunk;
|
||||
Hypercube *cube = scanctx->data;
|
||||
Hyperspace *space = scanctx->space;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < space->num_dimensions; i++)
|
||||
{
|
||||
Dimension *dim = &space->dimensions[i];
|
||||
DimensionSlice *chunk_slice,
|
||||
*cube_slice;
|
||||
int64 coord = scanctx->point->coordinates[i];
|
||||
|
||||
if (!dim->fd.aligned)
|
||||
continue;
|
||||
|
||||
/*
|
||||
* The chunk might not have a slice for each dimension, so we cannot
|
||||
* use array indexing. Fetch slice by dimension ID instead.
|
||||
*/
|
||||
chunk_slice = hypercube_get_slice_by_dimension_id(chunk->cube, dim->fd.id);
|
||||
|
||||
if (NULL == chunk_slice)
|
||||
continue;
|
||||
|
||||
cube_slice = cube->slices[i];
|
||||
|
||||
/*
|
||||
* Only cut-to-align if the slices collide and are not identical
|
||||
* (i.e., if we are reusing an existing slice we should not cut it)
|
||||
*/
|
||||
if (!dimension_slices_equal(cube_slice, chunk_slice) &&
|
||||
dimension_slices_collide(cube_slice, chunk_slice))
|
||||
dimension_slice_cut(cube_slice, chunk_slice, coord);
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Resolve chunk collisions.
|
||||
*
|
||||
* After a chunk collision scan, this function is called for each chunk in the
|
||||
* chunk scan context. We only care about chunks that have a full set of
|
||||
* slices/constraints that overlap with our tentative hypercube, i.e., they
|
||||
* fully collide. We resolve those collisions by cutting the hypercube.
|
||||
*/
|
||||
static bool
|
||||
do_collision_resolution(ChunkScanCtx *scanctx, Chunk *chunk)
|
||||
{
|
||||
Hypercube *cube = scanctx->data;
|
||||
Hyperspace *space = scanctx->space;
|
||||
int i;
|
||||
|
||||
if (chunk->cube->num_slices != space->num_dimensions ||
|
||||
!hypercubes_collide(cube, chunk->cube))
|
||||
return true;
|
||||
|
||||
for (i = 0; i < space->num_dimensions; i++)
|
||||
{
|
||||
DimensionSlice *cube_slice = cube->slices[i];
|
||||
DimensionSlice *chunk_slice = chunk->cube->slices[i];
|
||||
int64 coord = scanctx->point->coordinates[i];
|
||||
|
||||
/*
|
||||
* Only cut if we aren't reusing an existing slice and there is a
|
||||
* collision
|
||||
*/
|
||||
if (!dimension_slices_equal(cube_slice, chunk_slice) &&
|
||||
dimension_slices_collide(cube_slice, chunk_slice))
|
||||
{
|
||||
dimension_slice_cut(cube_slice, chunk_slice, coord);
|
||||
|
||||
/*
|
||||
* Redo the collision check after each cut since cutting in one
|
||||
* dimension might have resolved the collision in another
|
||||
*/
|
||||
if (!hypercubes_collide(cube, chunk->cube))
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
Assert(!hypercubes_collide(cube, chunk->cube));
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*-
|
||||
* Resolve collisions and perform alignmment.
|
||||
*
|
||||
* Chunks collide only if their hypercubes overlap in all dimensions. For
|
||||
* instance, the 2D chunks below collide because they overlap in both the X and
|
||||
* Y dimensions:
|
||||
*
|
||||
* ' _____
|
||||
* ' | |
|
||||
* ' | ___|__
|
||||
* ' |_|__| |
|
||||
* ' | |
|
||||
* ' |_____|
|
||||
*
|
||||
* While the following chunks do not collide, although they still overlap in the
|
||||
* X dimension:
|
||||
*
|
||||
* ' _____
|
||||
* ' | |
|
||||
* ' | |
|
||||
* ' |____|
|
||||
* ' ______
|
||||
* ' | |
|
||||
* ' | *|
|
||||
* ' |_____|
|
||||
*
|
||||
* For the collision case above we obviously want to cut our hypercube to no
|
||||
* longer collide with existing chunks. However, the second case might still be
|
||||
* of interest for alignment in case X is an 'aligned' dimension. If '*' is the
|
||||
* insertion point, then we still want to cut the hypercube to ensure that the
|
||||
* dimension remains aligned, like so:
|
||||
*
|
||||
* ' _____
|
||||
* ' | |
|
||||
* ' | |
|
||||
* ' |____|
|
||||
* ' ___
|
||||
* ' | |
|
||||
* ' |*|
|
||||
* ' |_|
|
||||
*
|
||||
*
|
||||
* We perform alignment first as that might actually resolve chunk
|
||||
* collisions. After alignment we check for any remaining collisions.
|
||||
*/
|
||||
static void
|
||||
chunk_collision_resolve(Hyperspace *hs, Hypercube *cube, Point *p)
|
||||
{
|
||||
ChunkScanCtx scanctx;
|
||||
|
||||
chunk_scan_ctx_init(&scanctx, hs, p);
|
||||
|
||||
/* Scan for all chunks that collide with the hypercube of the new chunk */
|
||||
chunk_collision_scan(&scanctx, cube);
|
||||
scanctx.data = cube;
|
||||
|
||||
/* Cut the hypercube in any aligned dimensions */
|
||||
chunk_scan_ctx_foreach_chunk(&scanctx, do_dimension_alignment, 0);
|
||||
|
||||
/*
|
||||
* If there are any remaining collisions with chunks, then cut-to-fit to
|
||||
* resolve those collisions
|
||||
*/
|
||||
chunk_scan_ctx_foreach_chunk(&scanctx, do_collision_resolution, 0);
|
||||
|
||||
chunk_scan_ctx_destroy(&scanctx);
|
||||
}
|
||||
|
||||
static Chunk *
|
||||
chunk_create_after_lock(Hyperspace *hs, Point *p, const char *schema, const char *prefix)
|
||||
{
|
||||
Oid schema_oid = get_namespace_oid(schema, false);
|
||||
Catalog *catalog = catalog_get();
|
||||
CatalogSecurityContext sec_ctx;
|
||||
Hypercube *cube;
|
||||
Chunk *chunk;
|
||||
int i;
|
||||
|
||||
catalog_become_owner(catalog, &sec_ctx);
|
||||
|
||||
/* Calculate the hypercube for a new chunk that covers the tuple's point */
|
||||
cube = hypercube_calculate_from_point(hs, p);
|
||||
|
||||
/* Resolve collisions with other chunks by cutting the new hypercube */
|
||||
chunk_collision_resolve(hs, cube, p);
|
||||
|
||||
/* Create a new chunk based on the hypercube */
|
||||
chunk = chunk_create_stub(catalog_table_next_seq_id(catalog, CHUNK), hs->num_dimensions);
|
||||
chunk->fd.hypertable_id = hs->hypertable_id;
|
||||
chunk->cube = cube;
|
||||
chunk->num_constraints = chunk->capacity;
|
||||
namestrcpy(&chunk->fd.schema_name, schema);
|
||||
|
||||
snprintf(chunk->fd.table_name.data, NAMEDATALEN,
|
||||
"%s_%d_chunk", prefix, chunk->fd.id);
|
||||
|
||||
/* Insert any new dimension slices */
|
||||
dimension_slice_insert_multi(cube->slices, cube->num_slices);
|
||||
|
||||
/* All slices now have assigned ID's so update the chunk's constraints */
|
||||
for (i = 0; i < hs->num_dimensions; i++)
|
||||
{
|
||||
chunk->constraints[i].fd.chunk_id = chunk->fd.id;
|
||||
chunk->constraints[i].fd.dimension_slice_id = cube->slices[i]->fd.id;
|
||||
}
|
||||
|
||||
/* Insert the new chunk constraints */
|
||||
chunk_constraint_insert_multi(chunk->constraints, chunk->num_constraints);
|
||||
|
||||
/*
|
||||
* Create the chunk table entry. This will also create the chunk table as
|
||||
* a side-effect
|
||||
*/
|
||||
spi_chunk_insert(chunk->fd.id, hs->hypertable_id, schema, NameStr(chunk->fd.table_name));
|
||||
|
||||
chunk->table_id = get_relname_relid(NameStr(chunk->fd.table_name), schema_oid);
|
||||
|
||||
catalog_restore_user(&sec_ctx);
|
||||
|
||||
return chunk;
|
||||
}
|
||||
|
||||
Chunk *
|
||||
chunk_create(Hyperspace *hs, Point *p, const char *schema, const char *prefix)
|
||||
{
|
||||
Catalog *catalog = catalog_get();
|
||||
Chunk *chunk;
|
||||
Relation rel;
|
||||
|
||||
rel = heap_open(catalog->tables[CHUNK].id, ExclusiveLock);
|
||||
|
||||
/* Recheck if someone else created the chunk before we got the table lock */
|
||||
chunk = chunk_find(hs, p);
|
||||
|
||||
if (NULL == chunk)
|
||||
chunk = chunk_create_after_lock(hs, p, schema, prefix);
|
||||
|
||||
heap_close(rel, ExclusiveLock);
|
||||
|
||||
chunk = spi_chunk_create(hs, p);
|
||||
Assert(chunk != NULL);
|
||||
|
||||
return chunk;
|
||||
@ -60,8 +342,8 @@ chunk_create_stub(int32 id, int16 num_constraints)
|
||||
chunk = palloc0(CHUNK_SIZE(num_constraints));
|
||||
chunk->capacity = num_constraints;
|
||||
chunk->num_constraints = 0;
|
||||
|
||||
chunk->fd.id = id;
|
||||
|
||||
return chunk;
|
||||
}
|
||||
|
||||
@ -109,7 +391,16 @@ chunk_fill_stub(Chunk *chunk_stub, bool tuplock)
|
||||
if (num_found != 1)
|
||||
elog(ERROR, "No chunk found with ID %d", chunk_stub->fd.id);
|
||||
|
||||
chunk_stub->cube = hypercube_from_constraints(chunk_stub->constraints, chunk_stub->num_constraints);
|
||||
if (NULL == chunk_stub->cube)
|
||||
chunk_stub->cube = hypercube_from_constraints(chunk_stub->constraints,
|
||||
chunk_stub->num_constraints);
|
||||
else
|
||||
|
||||
/*
|
||||
* The hypercube slices were filled in during the scan. Now we need to
|
||||
* sort them in dimension order.
|
||||
*/
|
||||
hypercube_slice_sort(chunk_stub->cube);
|
||||
|
||||
return chunk_stub;
|
||||
}
|
||||
@ -135,8 +426,14 @@ chunk_add_constraint_from_tuple(Chunk *chunk, HeapTuple constraint_tuple)
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Initialize a chunk scan context.
|
||||
*
|
||||
* A chunk scan context is used to join chunk-related information from metadata
|
||||
* tables during scans.
|
||||
*/
|
||||
static void
|
||||
chunk_scan_ctx_init(ChunkScanCtx *ctx, int16 num_dimensions)
|
||||
chunk_scan_ctx_init(ChunkScanCtx *ctx, Hyperspace *hs, Point *p)
|
||||
{
|
||||
struct HASHCTL hctl = {
|
||||
.keysize = sizeof(int32),
|
||||
@ -145,20 +442,120 @@ chunk_scan_ctx_init(ChunkScanCtx *ctx, int16 num_dimensions)
|
||||
};
|
||||
|
||||
ctx->htab = hash_create("chunk-scan-context", 20, &hctl, HASH_ELEM | HASH_CONTEXT | HASH_BLOBS);
|
||||
ctx->num_dimensions = num_dimensions;
|
||||
ctx->space = hs;
|
||||
ctx->point = p;
|
||||
ctx->early_abort = false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Destroy the chunk scan context.
|
||||
*
|
||||
* This will free the hash table in the context, but not the chunks within since
|
||||
* they are not allocated on the hash tables memory context.
|
||||
*/
|
||||
static void
|
||||
chunk_scan_ctx_destroy(ChunkScanCtx *ctx)
|
||||
{
|
||||
hash_destroy(ctx->htab);
|
||||
}
|
||||
|
||||
static Chunk *
|
||||
chunk_scan_ctx_find_chunk(ChunkScanCtx *ctx)
|
||||
static inline void
|
||||
dimension_slice_and_chunk_constraint_join(ChunkScanCtx *scanctx, DimensionVec *vec)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vec->num_slices; i++)
|
||||
{
|
||||
/*
|
||||
* For each dimension slice, find matching constraints. These will be
|
||||
* saved in the scan context
|
||||
*/
|
||||
chunk_constraint_scan_by_dimension_slice_id(vec->slices[i], scanctx);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for the chunk that encloses the given point.
|
||||
*
|
||||
* In each dimension there can be one or more slices that match the point's
|
||||
* coordinate in that dimension. Slices are collected in the scan context's hash
|
||||
* table according to the chunk IDs they are associated with. A slice might
|
||||
* represent the dimensional bound of multiple chunks, and thus is added to all
|
||||
* the hash table slots of those chunks. At the end of the scan there will be at
|
||||
* most one chunk that has a complete set of slices, since a point cannot belong
|
||||
* to two chunks.
|
||||
*/
|
||||
static void
|
||||
chunk_point_scan(ChunkScanCtx *scanctx, Point *p)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Scan all dimensions for slices enclosing the point */
|
||||
for (i = 0; i < scanctx->space->num_dimensions; i++)
|
||||
{
|
||||
DimensionVec *vec;
|
||||
|
||||
vec = dimension_slice_scan(scanctx->space->dimensions[i].fd.id,
|
||||
p->coordinates[i]);
|
||||
|
||||
dimension_slice_and_chunk_constraint_join(scanctx, vec);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for chunks that collide with the given hypercube.
|
||||
*
|
||||
* Collisions are determined using axis-aligned bounding box collision detection
|
||||
* generalized to N dimensions. Slices are collected in the scan context's hash
|
||||
* table according to the chunk IDs they are associated with. A slice might
|
||||
* represent the dimensional bound of multiple chunks, and thus is added to all
|
||||
* the hash table slots of those chunks. At the end of the scan, those chunks
|
||||
* that have a full set of slices are the ones that actually collide with the
|
||||
* given hypercube.
|
||||
*
|
||||
* Chunks in the scan context that do not collide (do not have a full set of
|
||||
* slices), might still be important for ensuring alignment in those dimensions
|
||||
* that require alignment.
|
||||
*/
|
||||
static void
|
||||
chunk_collision_scan(ChunkScanCtx *scanctx, Hypercube *cube)
|
||||
{
|
||||
int i;
|
||||
|
||||
/* Scan all dimensions for colliding slices */
|
||||
for (i = 0; i < scanctx->space->num_dimensions; i++)
|
||||
{
|
||||
DimensionVec *vec;
|
||||
DimensionSlice *slice = cube->slices[i];
|
||||
|
||||
vec = dimension_slice_collision_scan(slice->fd.dimension_id,
|
||||
slice->fd.range_start,
|
||||
slice->fd.range_end);
|
||||
|
||||
/* Add the slices to all the chunks they are associated with */
|
||||
dimension_slice_and_chunk_constraint_join(scanctx, vec);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/*
|
||||
* Apply a function to each chunk in the scan context's hash table. If the limit
|
||||
* is greater than zero only a limited number of chunks will be processed.
|
||||
*
|
||||
* The chunk handler function (on_chunk_func) should return true if the chunk
|
||||
* should be considered processed and count towards the given limit, otherwise
|
||||
* false.
|
||||
*
|
||||
* Returns the number of processed chunks.
|
||||
*/
|
||||
static int
|
||||
chunk_scan_ctx_foreach_chunk(ChunkScanCtx *ctx,
|
||||
on_chunk_func on_chunk,
|
||||
uint16 limit)
|
||||
{
|
||||
HASH_SEQ_STATUS status;
|
||||
ChunkScanEntry *entry;
|
||||
uint16 num_found = 0;
|
||||
|
||||
hash_seq_init(&status, ctx->htab);
|
||||
|
||||
@ -166,16 +563,45 @@ chunk_scan_ctx_find_chunk(ChunkScanCtx *ctx)
|
||||
entry != NULL;
|
||||
entry = hash_seq_search(&status))
|
||||
{
|
||||
Chunk *chunk = entry->chunk;
|
||||
|
||||
if (chunk->num_constraints == ctx->num_dimensions)
|
||||
if (on_chunk(ctx, entry->chunk))
|
||||
{
|
||||
hash_seq_term(&status);
|
||||
return chunk;
|
||||
num_found++;
|
||||
|
||||
if (limit > 0 && num_found == limit)
|
||||
{
|
||||
hash_seq_term(&status);
|
||||
return num_found;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
return num_found;
|
||||
}
|
||||
|
||||
/* Returns true if the chunk has a full set of constraints, otherwise
|
||||
* false. Used to find a chunk matching a point in an N-dimensional
|
||||
* hyperspace. */
|
||||
static bool
|
||||
chunk_is_complete(ChunkScanCtx *scanctx, Chunk *chunk)
|
||||
{
|
||||
if (scanctx->space->num_dimensions != chunk->num_constraints)
|
||||
return false;
|
||||
|
||||
scanctx->data = chunk;
|
||||
return true;
|
||||
}
|
||||
|
||||
/* Finds the first chunk that has a complete set of constraints. There should be
|
||||
* only one such chunk in the scan context when scanning for the chunk that
|
||||
* holds a particular tuple/point. */
|
||||
static Chunk *
|
||||
chunk_scan_ctx_get_chunk(ChunkScanCtx *ctx)
|
||||
{
|
||||
ctx->data = NULL;
|
||||
|
||||
chunk_scan_ctx_foreach_chunk(ctx, chunk_is_complete, 1);
|
||||
|
||||
return ctx->data;
|
||||
}
|
||||
|
||||
/*
|
||||
@ -204,38 +630,24 @@ chunk_find(Hyperspace *hs, Point *p)
|
||||
{
|
||||
Chunk *chunk;
|
||||
ChunkScanCtx ctx;
|
||||
int16 num_dimensions = HYPERSPACE_NUM_DIMENSIONS(hs);
|
||||
int i,
|
||||
j;
|
||||
|
||||
/* The scan context will keep the state accumulated during the scan */
|
||||
chunk_scan_ctx_init(&ctx, num_dimensions);
|
||||
chunk_scan_ctx_init(&ctx, hs, p);
|
||||
|
||||
/* First, scan all dimensions for matching slices */
|
||||
for (i = 0; i < HYPERSPACE_NUM_DIMENSIONS(hs); i++)
|
||||
{
|
||||
DimensionVec *vec;
|
||||
/* Abort the scan when the chunk is found */
|
||||
ctx.early_abort = true;
|
||||
|
||||
vec = dimension_slice_scan(hs->dimensions[i].fd.id, p->coordinates[i]);
|
||||
|
||||
for (j = 0; j < vec->num_slices; j++)
|
||||
|
||||
/*
|
||||
* For each dimension slice, find matching constraints. These will
|
||||
* be saved in the scan context
|
||||
*/
|
||||
chunk_constraint_scan_by_dimension_slice_id(vec->slices[j], &ctx);
|
||||
}
|
||||
/* Scan for the chunk matching the point */
|
||||
chunk_point_scan(&ctx, p);
|
||||
|
||||
/* Find the chunk that has N matching constraints */
|
||||
chunk = chunk_scan_ctx_find_chunk(&ctx);
|
||||
chunk = chunk_scan_ctx_get_chunk(&ctx);
|
||||
|
||||
chunk_scan_ctx_destroy(&ctx);
|
||||
|
||||
if (NULL != chunk)
|
||||
{
|
||||
/* Fill in the rest of the chunk's data from the chunk table */
|
||||
chunk_fill_stub(chunk, false);
|
||||
}
|
||||
|
||||
return chunk;
|
||||
}
|
||||
|
16
src/chunk.h
16
src/chunk.h
@ -4,6 +4,7 @@
|
||||
#include <postgres.h>
|
||||
#include <access/htup.h>
|
||||
#include <access/tupdesc.h>
|
||||
#include <utils/hsearch.h>
|
||||
|
||||
#include "catalog.h"
|
||||
#include "chunk_constraint.h"
|
||||
@ -40,18 +41,19 @@ typedef struct Chunk
|
||||
(sizeof(Chunk) + sizeof(ChunkConstraint) * (num_constraints))
|
||||
|
||||
/*
|
||||
* ChunkScanCtx is used to scan for a chunk matching a specific point in a
|
||||
* hypertable's N-dimensional hyperspace.
|
||||
* ChunkScanCtx is used to scan for chunks in a hypertable's N-dimensional
|
||||
* hyperspace.
|
||||
*
|
||||
* For every matching constraint, a corresponding chunk will be created in the
|
||||
* context's hash table, keyed on the chunk ID. At the end of the scan, there
|
||||
* will be only one chunk in the hash table that has N number of matching
|
||||
* constraints, and this is the chunk that encloses the point.
|
||||
* context's hash table, keyed on the chunk ID.
|
||||
*/
|
||||
typedef struct ChunkScanCtx
|
||||
{
|
||||
HTAB *htab;
|
||||
int16 num_dimensions;
|
||||
Hyperspace *space;
|
||||
Point *point;
|
||||
bool early_abort;
|
||||
void *data;
|
||||
} ChunkScanCtx;
|
||||
|
||||
/* The hash table entry for the ChunkScanCtx */
|
||||
@ -62,7 +64,7 @@ typedef struct ChunkScanEntry
|
||||
} ChunkScanEntry;
|
||||
|
||||
extern Chunk *chunk_create_from_tuple(HeapTuple tuple, int16 num_constraints);
|
||||
extern Chunk *chunk_create(Hyperspace *hs, Point *p);
|
||||
extern Chunk *chunk_create(Hyperspace *hs, Point *p, const char *schema, const char *prefix);
|
||||
extern Chunk *chunk_create_stub(int32 id, int16 num_constraints);
|
||||
extern bool chunk_add_constraint(Chunk *chunk, ChunkConstraint *constraint);
|
||||
extern bool chunk_add_constraint_from_tuple(Chunk *chunk, HeapTuple constraint_tuple);
|
||||
|
@ -1,9 +1,14 @@
|
||||
#include <postgres.h>
|
||||
#include <utils/hsearch.h>
|
||||
#include <utils/rel.h>
|
||||
#include <access/heapam.h>
|
||||
#include <access/xact.h>
|
||||
#include <catalog/indexing.h>
|
||||
|
||||
#include "scanner.h"
|
||||
#include "chunk_constraint.h"
|
||||
#include "dimension_slice.h"
|
||||
#include "hypercube.h"
|
||||
#include "chunk.h"
|
||||
|
||||
static inline ChunkConstraint *
|
||||
@ -63,10 +68,18 @@ chunk_constraint_scan_by_chunk_id(Chunk *chunk)
|
||||
return chunk;
|
||||
}
|
||||
|
||||
typedef struct ChunkConstraintScanData
|
||||
{
|
||||
ChunkScanCtx *scanctx;
|
||||
DimensionSlice *slice;
|
||||
} ChunkConstraintScanData;
|
||||
|
||||
static bool
|
||||
chunk_constraint_dimension_id_tuple_found(TupleInfo *ti, void *data)
|
||||
{
|
||||
ChunkScanCtx *ctx = data;
|
||||
ChunkConstraintScanData *ccsd = data;
|
||||
ChunkScanCtx *scanctx = ccsd->scanctx;
|
||||
Hyperspace *hs = scanctx->space;
|
||||
ChunkConstraint constraint;
|
||||
Chunk *chunk;
|
||||
ChunkScanEntry *entry;
|
||||
@ -74,43 +87,51 @@ chunk_constraint_dimension_id_tuple_found(TupleInfo *ti, void *data)
|
||||
|
||||
chunk_constraint_fill(&constraint, ti->tuple);
|
||||
|
||||
entry = hash_search(ctx->htab, &constraint.fd.chunk_id, HASH_ENTER, &found);
|
||||
entry = hash_search(scanctx->htab, &constraint.fd.chunk_id, HASH_ENTER, &found);
|
||||
|
||||
if (!found)
|
||||
{
|
||||
chunk = chunk_create_stub(constraint.fd.chunk_id, ctx->num_dimensions);
|
||||
chunk = chunk_create_stub(constraint.fd.chunk_id, hs->num_dimensions);
|
||||
chunk->cube = hypercube_alloc(hs->num_dimensions);
|
||||
entry->chunk = chunk;
|
||||
}
|
||||
else
|
||||
{
|
||||
chunk = entry->chunk;
|
||||
}
|
||||
|
||||
chunk_add_constraint(chunk, &constraint);
|
||||
hypercube_add_slice(chunk->cube, ccsd->slice);
|
||||
|
||||
/*
|
||||
* If the chunk has N constraints, it is the chunk we are looking for and
|
||||
* the scan can be aborted.
|
||||
*/
|
||||
if (chunk->num_constraints == ctx->num_dimensions)
|
||||
if (scanctx->early_abort && chunk->num_constraints == hs->num_dimensions)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for all chunk constraints that match the given slice ID. The chunk
|
||||
* constraints are saved in the chunk scan context.
|
||||
*/
|
||||
int
|
||||
chunk_constraint_scan_by_dimension_slice_id(DimensionSlice *slice, ChunkScanCtx *ctx)
|
||||
{
|
||||
Catalog *catalog = catalog_get();
|
||||
ScanKeyData scankey[1];
|
||||
int num_found;
|
||||
ChunkConstraintScanData data = {
|
||||
.scanctx = ctx,
|
||||
.slice = slice,
|
||||
};
|
||||
ScannerCtx scanCtx = {
|
||||
.table = catalog->tables[CHUNK_CONSTRAINT].id,
|
||||
.index = catalog->tables[CHUNK_CONSTRAINT].index_ids[CHUNK_CONSTRAINT_CHUNK_ID_DIMENSION_SLICE_ID_IDX],
|
||||
.scantype = ScannerTypeIndex,
|
||||
.nkeys = 1,
|
||||
.scankey = scankey,
|
||||
.data = ctx,
|
||||
.data = &data,
|
||||
.tuple_found = chunk_constraint_dimension_id_tuple_found,
|
||||
.lockmode = AccessShareLock,
|
||||
.scandirection = ForwardScanDirection,
|
||||
@ -123,3 +144,35 @@ chunk_constraint_scan_by_dimension_slice_id(DimensionSlice *slice, ChunkScanCtx
|
||||
|
||||
return num_found;
|
||||
}
|
||||
|
||||
static inline void
|
||||
chunk_constraint_insert_relation(Relation rel, ChunkConstraint *constraint)
|
||||
{
|
||||
TupleDesc desc = RelationGetDescr(rel);
|
||||
Datum values[Natts_chunk_constraint];
|
||||
bool nulls[Natts_chunk_constraint] = {false};
|
||||
|
||||
memset(values, 0, sizeof(values));
|
||||
values[Anum_chunk_constraint_chunk_id - 1] = constraint->fd.chunk_id;
|
||||
values[Anum_chunk_constraint_dimension_slice_id - 1] = constraint->fd.dimension_slice_id;
|
||||
|
||||
catalog_insert_values(rel, desc, values, nulls);
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert chunk constraints into the catalog.
|
||||
*/
|
||||
void
|
||||
chunk_constraint_insert_multi(ChunkConstraint *constraints, Size num_constraints)
|
||||
{
|
||||
Catalog *catalog = catalog_get();
|
||||
Relation rel;
|
||||
Size i;
|
||||
|
||||
rel = heap_open(catalog->tables[CHUNK_CONSTRAINT].id, RowExclusiveLock);
|
||||
|
||||
for (i = 0; i < num_constraints; i++)
|
||||
chunk_constraint_insert_relation(rel, &constraints[i]);
|
||||
|
||||
heap_close(rel, RowExclusiveLock);
|
||||
}
|
||||
|
@ -25,5 +25,6 @@ typedef struct ChunkScanCtx ChunkScanCtx;
|
||||
|
||||
extern Chunk *chunk_constraint_scan_by_chunk_id(Chunk *chunk);
|
||||
extern int chunk_constraint_scan_by_dimension_slice_id(DimensionSlice *slice, ChunkScanCtx *ctx);
|
||||
extern void chunk_constraint_insert_multi(ChunkConstraint *constraints, Size num_constraints);
|
||||
|
||||
#endif /* TIMESCALEDB_CHUNK_CONSTRAINT_H */
|
||||
|
@ -19,7 +19,7 @@ chunk_dispatch_create(Hypertable *ht, EState *estate, Query *parse)
|
||||
cp->estate = estate;
|
||||
cp->hypertable_result_rel_info = NULL;
|
||||
cp->parse = parse;
|
||||
cp->cache = subspace_store_init(HYPERSPACE_NUM_DIMENSIONS(ht->space), estate->es_query_cxt);
|
||||
cp->cache = subspace_store_init(ht->space->num_dimensions, estate->es_query_cxt);
|
||||
return cp;
|
||||
}
|
||||
|
||||
|
219
src/dimension.c
219
src/dimension.c
@ -1,6 +1,7 @@
|
||||
#include <postgres.h>
|
||||
#include <access/relscan.h>
|
||||
#include <utils/lsyscache.h>
|
||||
#include <funcapi.h>
|
||||
|
||||
#include "catalog.h"
|
||||
#include "dimension.h"
|
||||
@ -8,7 +9,51 @@
|
||||
#include "scanner.h"
|
||||
#include "partitioning.h"
|
||||
#include "utils.h"
|
||||
#include "dimension_slice.h"
|
||||
|
||||
static int
|
||||
cmp_dimension_id(const void *left, const void *right)
|
||||
{
|
||||
const Dimension *diml = (Dimension *) left;
|
||||
const Dimension *dimr = (Dimension *) right;
|
||||
|
||||
if (diml->fd.id < dimr->fd.id)
|
||||
return -1;
|
||||
|
||||
if (diml->fd.id > dimr->fd.id)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
Dimension *
|
||||
hyperspace_get_dimension_by_id(Hyperspace *hs, int32 id)
|
||||
{
|
||||
Dimension dim = {
|
||||
.fd.id = id,
|
||||
};
|
||||
|
||||
return bsearch(&dim, hs->dimensions, hs->num_dimensions,
|
||||
sizeof(Dimension), cmp_dimension_id);
|
||||
}
|
||||
|
||||
Dimension *
|
||||
hyperspace_get_dimension(Hyperspace *hs, DimensionType type, Index n)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < hs->num_dimensions; i++)
|
||||
{
|
||||
if (hs->dimensions[i].type == type)
|
||||
{
|
||||
if (n == 0)
|
||||
return &hs->dimensions[i];
|
||||
n--;
|
||||
}
|
||||
}
|
||||
|
||||
return NULL;
|
||||
}
|
||||
|
||||
static inline DimensionType
|
||||
dimension_type(HeapTuple tuple)
|
||||
@ -20,20 +65,162 @@ dimension_type(HeapTuple tuple)
|
||||
}
|
||||
|
||||
static void
|
||||
dimension_fill_in_from_tuple(Dimension *d, HeapTuple tuple, Oid main_table_relid)
|
||||
dimension_fill_in_from_tuple(Dimension *d, TupleInfo *ti, Oid main_table_relid)
|
||||
{
|
||||
memcpy(&d->fd, GETSTRUCT(tuple), sizeof(FormData_dimension));
|
||||
d->type = dimension_type(tuple);
|
||||
Datum values[Natts_dimension];
|
||||
bool isnull[Natts_dimension];
|
||||
|
||||
/*
|
||||
* With need to use heap_deform_tuple() rather than GETSTRUCT(), since
|
||||
* optional values may be omitted from the tuple.
|
||||
*/
|
||||
heap_deform_tuple(ti->tuple, ti->desc, values, isnull);
|
||||
|
||||
d->type = dimension_type(ti->tuple);
|
||||
d->fd.id = DatumGetInt32(values[Anum_dimension_id - 1]);
|
||||
d->fd.hypertable_id = DatumGetInt32(values[Anum_dimension_hypertable_id - 1]);
|
||||
d->fd.aligned = DatumGetBool(values[Anum_dimension_aligned - 1]);
|
||||
d->fd.column_type = DatumGetObjectId(values[Anum_dimension_column_type - 1]);
|
||||
memcpy(&d->fd.column_name,
|
||||
DatumGetName(values[Anum_dimension_column_name - 1]),
|
||||
NAMEDATALEN);
|
||||
|
||||
if (d->type == DIMENSION_TYPE_CLOSED)
|
||||
{
|
||||
d->fd.num_slices = DatumGetInt16(values[Anum_dimension_num_slices - 1]);
|
||||
memcpy(&d->fd.partitioning_func_schema,
|
||||
DatumGetName(values[Anum_dimension_partitioning_func_schema - 1]),
|
||||
NAMEDATALEN);
|
||||
memcpy(&d->fd.partitioning_func,
|
||||
DatumGetName(values[Anum_dimension_partitioning_func - 1]),
|
||||
NAMEDATALEN);
|
||||
|
||||
d->partitioning = partitioning_info_create(d->fd.num_slices,
|
||||
NameStr(d->fd.partitioning_func_schema),
|
||||
NameStr(d->fd.partitioning_func),
|
||||
NameStr(d->fd.column_name),
|
||||
main_table_relid);
|
||||
}
|
||||
else
|
||||
d->fd.interval_length = DatumGetInt64(values[Anum_dimension_interval_length - 1]);
|
||||
|
||||
d->column_attno = get_attnum(main_table_relid, NameStr(d->fd.column_name));
|
||||
}
|
||||
|
||||
static Datum
|
||||
create_range_datum(FunctionCallInfo fcinfo, DimensionSlice *slice)
|
||||
{
|
||||
TupleDesc tupdesc;
|
||||
Datum values[2];
|
||||
bool nulls[2] = {false};
|
||||
HeapTuple tuple;
|
||||
|
||||
if (get_call_result_type(fcinfo, NULL, &tupdesc) != TYPEFUNC_COMPOSITE)
|
||||
elog(ERROR, "Function returning record called in context that cannot accept type record");
|
||||
|
||||
tupdesc = BlessTupleDesc(tupdesc);
|
||||
|
||||
values[0] = Int64GetDatum(slice->fd.range_start);
|
||||
values[1] = Int64GetDatum(slice->fd.range_end);
|
||||
tuple = heap_form_tuple(tupdesc, values, nulls);
|
||||
|
||||
return HeapTupleGetDatum(tuple);
|
||||
}
|
||||
|
||||
#define RANGE_VALUE_MAX PG_INT32_MAX
|
||||
|
||||
static DimensionSlice *
|
||||
calculate_open_range_default(Dimension *dim, int64 value)
|
||||
{
|
||||
int64 range_start,
|
||||
range_end;
|
||||
|
||||
if (value < 0)
|
||||
{
|
||||
range_end = ((value + 1) / dim->fd.interval_length) * dim->fd.interval_length;
|
||||
range_start = range_end - dim->fd.interval_length;
|
||||
}
|
||||
else
|
||||
{
|
||||
range_start = (value / dim->fd.interval_length) * dim->fd.interval_length;
|
||||
range_end = range_start + dim->fd.interval_length;
|
||||
}
|
||||
|
||||
return dimension_slice_create(dim->fd.id, range_start, range_end);
|
||||
}
|
||||
|
||||
PG_FUNCTION_INFO_V1(dimension_calculate_open_range_default);
|
||||
|
||||
/*
|
||||
* Expose open dimension range calculation for testing purposes.
|
||||
*/
|
||||
Datum
|
||||
dimension_calculate_open_range_default(PG_FUNCTION_ARGS)
|
||||
{
|
||||
int64 value = PG_GETARG_INT64(0);
|
||||
Dimension dim = {
|
||||
.fd.id = 0,
|
||||
.fd.interval_length = PG_GETARG_INT64(1),
|
||||
};
|
||||
DimensionSlice *slice = calculate_open_range_default(&dim, value);
|
||||
|
||||
PG_RETURN_DATUM(create_range_datum(fcinfo, slice));
|
||||
}
|
||||
|
||||
static DimensionSlice *
|
||||
calculate_closed_range_default(Dimension *dim, int64 value)
|
||||
{
|
||||
int64 range_start,
|
||||
range_end;
|
||||
|
||||
/* The interval that divides the dimension into N equal sized slices */
|
||||
int32 interval = RANGE_VALUE_MAX / dim->fd.num_slices;
|
||||
|
||||
if (value < 0)
|
||||
elog(ERROR, "Invalid value " INT64_FORMAT " for closed dimension", value);
|
||||
|
||||
if (value >= (interval * (dim->fd.num_slices - 1)))
|
||||
{
|
||||
/* put overflow from integer-division errors in last range */
|
||||
range_start = interval * (dim->fd.num_slices - 1);
|
||||
range_end = RANGE_VALUE_MAX;
|
||||
}
|
||||
else
|
||||
{
|
||||
range_start = (value / interval) * interval;
|
||||
range_end = range_start + interval;
|
||||
}
|
||||
|
||||
return dimension_slice_create(dim->fd.id, range_start, range_end);
|
||||
}
|
||||
|
||||
PG_FUNCTION_INFO_V1(dimension_calculate_closed_range_default);
|
||||
|
||||
/*
|
||||
* Exposed closed dimension range calculation for testing purposes.
|
||||
*/
|
||||
Datum
|
||||
dimension_calculate_closed_range_default(PG_FUNCTION_ARGS)
|
||||
{
|
||||
int64 value = PG_GETARG_INT64(0);
|
||||
Dimension dim = {
|
||||
.fd.id = 0,
|
||||
.fd.num_slices = PG_GETARG_INT16(1),
|
||||
};
|
||||
DimensionSlice *slice = calculate_closed_range_default(&dim, value);
|
||||
|
||||
PG_RETURN_DATUM(create_range_datum(fcinfo, slice));
|
||||
}
|
||||
|
||||
DimensionSlice *
|
||||
dimension_calculate_default_slice(Dimension *dim, int64 value)
|
||||
{
|
||||
if (IS_OPEN_DIMENSION(dim))
|
||||
return calculate_open_range_default(dim, value);
|
||||
|
||||
return calculate_closed_range_default(dim, value);
|
||||
}
|
||||
|
||||
static Hyperspace *
|
||||
hyperspace_create(int32 hypertable_id, Oid main_table_relid, uint16 num_dimensions)
|
||||
{
|
||||
@ -42,7 +229,7 @@ hyperspace_create(int32 hypertable_id, Oid main_table_relid, uint16 num_dimensio
|
||||
hs->hypertable_id = hypertable_id;
|
||||
hs->main_table_relid = main_table_relid;
|
||||
hs->capacity = num_dimensions;
|
||||
hs->num_closed_dimensions = hs->num_open_dimensions = 0;
|
||||
hs->num_dimensions = 0;
|
||||
return hs;
|
||||
}
|
||||
|
||||
@ -50,15 +237,9 @@ static bool
|
||||
dimension_tuple_found(TupleInfo *ti, void *data)
|
||||
{
|
||||
Hyperspace *hs = data;
|
||||
DimensionType type = dimension_type(ti->tuple);
|
||||
Dimension *d;
|
||||
Dimension *d = &hs->dimensions[hs->num_dimensions++];
|
||||
|
||||
if (type == DIMENSION_TYPE_OPEN)
|
||||
d = &hs->dimensions[hs->num_open_dimensions++];
|
||||
else
|
||||
d = &hs->dimensions[hs->capacity - 1 - hs->num_closed_dimensions++];
|
||||
|
||||
dimension_fill_in_from_tuple(d, ti->tuple, hs->main_table_relid);
|
||||
dimension_fill_in_from_tuple(d, ti, hs->main_table_relid);
|
||||
|
||||
return true;
|
||||
}
|
||||
@ -87,6 +268,9 @@ dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimensions)
|
||||
|
||||
scanner_scan(&scanCtx);
|
||||
|
||||
/* Sort dimensions in ascending order to allow binary search lookups */
|
||||
qsort(space->dimensions, space->num_dimensions, sizeof(Dimension), cmp_dimension_id);
|
||||
|
||||
return space;
|
||||
}
|
||||
|
||||
@ -96,17 +280,18 @@ point_create(int16 num_dimensions)
|
||||
Point *p = palloc0(POINT_SIZE(num_dimensions));
|
||||
|
||||
p->cardinality = num_dimensions;
|
||||
p->num_closed = p->num_open = 0;
|
||||
p->num_coords = 0;
|
||||
|
||||
return p;
|
||||
}
|
||||
|
||||
Point *
|
||||
hyperspace_calculate_point(Hyperspace *hs, HeapTuple tuple, TupleDesc tupdesc)
|
||||
{
|
||||
Point *p = point_create(HYPERSPACE_NUM_DIMENSIONS(hs));
|
||||
Point *p = point_create(hs->num_dimensions);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < HYPERSPACE_NUM_DIMENSIONS(hs); i++)
|
||||
for (i = 0; i < hs->num_dimensions; i++)
|
||||
{
|
||||
Dimension *d = &hs->dimensions[i];
|
||||
|
||||
@ -120,11 +305,11 @@ hyperspace_calculate_point(Hyperspace *hs, HeapTuple tuple, TupleDesc tupdesc)
|
||||
if (isnull)
|
||||
elog(ERROR, "Time attribute not found in tuple");
|
||||
|
||||
p->coordinates[p->num_open++] = time_value_to_internal(datum, d->fd.column_type);
|
||||
p->coordinates[p->num_coords++] = time_value_to_internal(datum, d->fd.column_type);
|
||||
}
|
||||
else
|
||||
{
|
||||
p->coordinates[p->num_open + p->num_closed++] =
|
||||
p->coordinates[p->num_coords++] =
|
||||
partitioning_func_apply_tuple(d->partitioning, tuple, tupdesc);
|
||||
}
|
||||
}
|
||||
|
@ -8,6 +8,7 @@
|
||||
#include "catalog.h"
|
||||
|
||||
typedef struct PartitioningInfo PartitioningInfo;
|
||||
typedef struct DimensionSlice DimensionSlice;
|
||||
|
||||
typedef enum DimensionType
|
||||
{
|
||||
@ -39,30 +40,21 @@ typedef struct Hyperspace
|
||||
int32 hypertable_id;
|
||||
Oid main_table_relid;
|
||||
uint16 capacity;
|
||||
int16 num_open_dimensions;
|
||||
int16 num_closed_dimensions;
|
||||
uint16 num_dimensions;
|
||||
/* Open dimensions should be stored before closed dimensions */
|
||||
Dimension dimensions[0];
|
||||
} Hyperspace;
|
||||
|
||||
#define HYPERSPACE_NUM_DIMENSIONS(hs) \
|
||||
((hs)->num_open_dimensions + \
|
||||
(hs)->num_closed_dimensions)
|
||||
|
||||
#define HYPERSPACE_SIZE(num_dimensions) \
|
||||
(sizeof(Hyperspace) + (sizeof(Dimension) * (num_dimensions)))
|
||||
|
||||
#define hyperspace_get_closed_dimension(hs, i) \
|
||||
(&(hs)->dimensions[(hs)->num_open_dimensions + i])
|
||||
|
||||
/*
|
||||
* A point in an N-dimensional hyperspace.
|
||||
*/
|
||||
typedef struct Point
|
||||
{
|
||||
int16 cardinality;
|
||||
uint8 num_open;
|
||||
uint8 num_closed;
|
||||
uint8 num_coords;
|
||||
/* Open dimension coordinates are stored before the closed coordinates */
|
||||
int64 coordinates[0];
|
||||
} Point;
|
||||
@ -71,6 +63,14 @@ typedef struct Point
|
||||
(sizeof(Point) + (sizeof(int64) * (cardinality)))
|
||||
|
||||
extern Hyperspace *dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension);
|
||||
extern DimensionSlice *dimension_calculate_default_slice(Dimension *dim, int64 value);
|
||||
extern Point *hyperspace_calculate_point(Hyperspace *h, HeapTuple tuple, TupleDesc tupdesc);
|
||||
extern Dimension *hyperspace_get_dimension_by_id(Hyperspace *hs, int32 id);
|
||||
extern Dimension *hyperspace_get_dimension(Hyperspace *hs, DimensionType type, Index n);
|
||||
|
||||
#define hyperspace_get_open_dimension(space, i) \
|
||||
hyperspace_get_dimension(space, DIMENSION_TYPE_OPEN, i)
|
||||
#define hyperspace_get_closed_dimension(space, i ) \
|
||||
hyperspace_get_dimension(space, DIMENSION_TYPE_CLOSED, i)
|
||||
|
||||
#endif /* TIMESCALEDB_DIMENSION_H */
|
||||
|
@ -1,6 +1,10 @@
|
||||
#include <stdlib.h>
|
||||
#include <postgres.h>
|
||||
#include <access/relscan.h>
|
||||
#include <access/xact.h>
|
||||
#include <access/heapam.h>
|
||||
#include <utils/rel.h>
|
||||
#include <catalog/indexing.h>
|
||||
|
||||
#include "catalog.h"
|
||||
#include "dimension_slice.h"
|
||||
@ -8,20 +12,24 @@
|
||||
#include "scanner.h"
|
||||
#include "dimension.h"
|
||||
#include "chunk_constraint.h"
|
||||
#include "dimension_vector.h"
|
||||
|
||||
static DimensionVec *dimension_vec_expand(DimensionVec *vec, int32 new_size);
|
||||
static DimensionVec *dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice);
|
||||
static inline DimensionSlice *
|
||||
dimension_slice_alloc(void)
|
||||
{
|
||||
return palloc0(sizeof(DimensionSlice));
|
||||
}
|
||||
|
||||
static inline DimensionSlice *
|
||||
dimension_slice_from_form_data(Form_dimension_slice fd)
|
||||
{
|
||||
DimensionSlice *ds;
|
||||
DimensionSlice *slice = dimension_slice_alloc();
|
||||
|
||||
ds = palloc0(sizeof(DimensionSlice));
|
||||
memcpy(&ds->fd, fd, sizeof(FormData_dimension_slice));
|
||||
ds->storage_free = NULL;
|
||||
ds->storage = NULL;
|
||||
return ds;
|
||||
slice = palloc0(sizeof(DimensionSlice));
|
||||
memcpy(&slice->fd, fd, sizeof(FormData_dimension_slice));
|
||||
slice->storage_free = NULL;
|
||||
slice->storage = NULL;
|
||||
return slice;
|
||||
}
|
||||
|
||||
static inline DimensionSlice *
|
||||
@ -30,62 +38,77 @@ dimension_slice_from_tuple(HeapTuple tuple)
|
||||
return dimension_slice_from_form_data((Form_dimension_slice) GETSTRUCT(tuple));
|
||||
}
|
||||
|
||||
static inline Hypercube *
|
||||
hypercube_alloc(int16 num_dimensions)
|
||||
DimensionSlice *
|
||||
dimension_slice_create(int dimension_id, int64 range_start, int64 range_end)
|
||||
{
|
||||
Hypercube *hc = palloc0(HYPERCUBE_SIZE(num_dimensions));
|
||||
DimensionSlice *slice = dimension_slice_alloc();
|
||||
|
||||
hc->capacity = num_dimensions;
|
||||
return hc;
|
||||
slice->fd.dimension_id = dimension_id;
|
||||
slice->fd.range_start = range_start;
|
||||
slice->fd.range_end = range_end;
|
||||
|
||||
return slice;
|
||||
}
|
||||
|
||||
Hypercube *
|
||||
hypercube_copy(Hypercube *hc)
|
||||
typedef struct DimensionSliceScanData
|
||||
{
|
||||
Hypercube *copy;
|
||||
size_t nbytes = HYPERCUBE_SIZE(hc->capacity);
|
||||
int i;
|
||||
|
||||
copy = palloc(nbytes);
|
||||
memcpy(copy, hc, nbytes);
|
||||
|
||||
for (i = 0; i < hc->num_slices; i++)
|
||||
copy->slices[i] = dimension_slice_copy(hc->slices[i]);
|
||||
|
||||
return copy;
|
||||
}
|
||||
DimensionVec *slices;
|
||||
int limit;
|
||||
} DimensionSliceScanData;
|
||||
|
||||
static bool
|
||||
dimension_vec_tuple_found(TupleInfo *ti, void *data)
|
||||
{
|
||||
DimensionVec **vecptr = data;
|
||||
DimensionSliceScanData *scandata = data;
|
||||
DimensionSlice *slice = dimension_slice_from_tuple(ti->tuple);
|
||||
|
||||
dimension_vec_add_slice(vecptr, slice);
|
||||
dimension_vec_add_slice(&scandata->slices, slice);
|
||||
|
||||
if (scandata->limit == ti->count)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_slice_scan(int32 dimension_id, int64 coordinate)
|
||||
static int
|
||||
dimension_slice_scan_limit_internal(ScanKeyData *scankey,
|
||||
Size num_scankeys,
|
||||
tuple_found_func on_tuple_found,
|
||||
void *scandata)
|
||||
{
|
||||
Catalog *catalog = catalog_get();
|
||||
DimensionVec *vec = dimension_vec_create(DIMENSION_VEC_DEFAULT_SIZE);
|
||||
ScanKeyData scankey[3];
|
||||
ScannerCtx scanCtx = {
|
||||
.table = catalog->tables[DIMENSION_SLICE].id,
|
||||
.index = catalog->tables[DIMENSION_SLICE].index_ids[DIMENSION_SLICE_DIMENSION_ID_RANGE_START_RANGE_END_IDX],
|
||||
.scantype = ScannerTypeIndex,
|
||||
.nkeys = 3,
|
||||
.nkeys = num_scankeys,
|
||||
.scankey = scankey,
|
||||
.data = &vec,
|
||||
.tuple_found = dimension_vec_tuple_found,
|
||||
.data = scandata,
|
||||
.tuple_found = on_tuple_found,
|
||||
.lockmode = AccessShareLock,
|
||||
.scandirection = ForwardScanDirection,
|
||||
};
|
||||
|
||||
return scanner_scan(&scanCtx);
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for slices that enclose the coordinate in the given dimension.
|
||||
*
|
||||
* Returns a dimension vector of slices that enclose the coordinate.
|
||||
*/
|
||||
DimensionVec *
|
||||
dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit)
|
||||
{
|
||||
ScanKeyData scankey[3];
|
||||
DimensionSliceScanData data = {
|
||||
.slices = dimension_vec_create(limit > 0 ? limit : DIMENSION_VEC_DEFAULT_SIZE),
|
||||
.limit = limit,
|
||||
};
|
||||
|
||||
/*
|
||||
* Perform an index scan for slices matching the dimension's ID and which
|
||||
* encloses the coordinate.
|
||||
* enclose the coordinate.
|
||||
*/
|
||||
ScanKeyInit(&scankey[0], Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id,
|
||||
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(dimension_id));
|
||||
@ -94,9 +117,66 @@ dimension_slice_scan(int32 dimension_id, int64 coordinate)
|
||||
ScanKeyInit(&scankey[2], Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_end,
|
||||
BTGreaterStrategyNumber, F_INT8GT, Int64GetDatum(coordinate));
|
||||
|
||||
scanner_scan(&scanCtx);
|
||||
dimension_slice_scan_limit_internal(scankey, 3, dimension_vec_tuple_found, &data);
|
||||
|
||||
return vec;
|
||||
return dimension_vec_sort(&data.slices);
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for slices that collide/overlap with the given range.
|
||||
*
|
||||
* Returns a dimension vector of colliding slices.
|
||||
*/
|
||||
DimensionVec *
|
||||
dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, int64 range_end, int limit)
|
||||
{
|
||||
ScanKeyData scankey[3];
|
||||
DimensionSliceScanData data = {
|
||||
.slices = dimension_vec_create(limit > 0 ? limit : DIMENSION_VEC_DEFAULT_SIZE),
|
||||
.limit = limit,
|
||||
};
|
||||
|
||||
ScanKeyInit(&scankey[0], Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id,
|
||||
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(dimension_id));
|
||||
ScanKeyInit(&scankey[1], Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_start,
|
||||
BTLessStrategyNumber, F_INT8LT, Int64GetDatum(range_end));
|
||||
ScanKeyInit(&scankey[2], Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_end,
|
||||
BTGreaterStrategyNumber, F_INT8GT, Int64GetDatum(range_start));
|
||||
|
||||
dimension_slice_scan_limit_internal(scankey, 3, dimension_vec_tuple_found, &data);
|
||||
|
||||
return dimension_vec_sort(&data.slices);
|
||||
}
|
||||
|
||||
|
||||
static bool
|
||||
dimension_slice_fill(TupleInfo *ti, void *data)
|
||||
{
|
||||
DimensionSlice **slice = data;
|
||||
|
||||
memcpy(&(*slice)->fd, GETSTRUCT(ti->tuple), sizeof(FormData_dimension_slice));
|
||||
return false;
|
||||
}
|
||||
|
||||
/*
|
||||
* Scan for an existing slice that exactly matches the given slice's dimension
|
||||
* and range. If a match is found, the given slice is updated with slice ID.
|
||||
*/
|
||||
DimensionSlice *
|
||||
dimension_slice_scan_for_existing(DimensionSlice *slice)
|
||||
{
|
||||
ScanKeyData scankey[3];
|
||||
|
||||
ScanKeyInit(&scankey[0], Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id,
|
||||
BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(slice->fd.dimension_id));
|
||||
ScanKeyInit(&scankey[1], Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_start,
|
||||
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(slice->fd.range_start));
|
||||
ScanKeyInit(&scankey[2], Anum_dimension_slice_dimension_id_range_start_range_end_idx_range_end,
|
||||
BTEqualStrategyNumber, F_INT8EQ, Int64GetDatum(slice->fd.range_end));
|
||||
|
||||
dimension_slice_scan_limit_internal(scankey, 3, dimension_slice_fill, &slice);
|
||||
|
||||
return slice;
|
||||
}
|
||||
|
||||
static bool
|
||||
@ -108,7 +188,7 @@ dimension_slice_tuple_found(TupleInfo *ti, void *data)
|
||||
return false;
|
||||
}
|
||||
|
||||
static DimensionSlice *
|
||||
DimensionSlice *
|
||||
dimension_slice_scan_by_id(int32 dimension_slice_id)
|
||||
{
|
||||
Catalog *catalog = catalog_get();
|
||||
@ -142,42 +222,75 @@ dimension_slice_copy(const DimensionSlice *original)
|
||||
return new;
|
||||
}
|
||||
|
||||
static int
|
||||
cmp_slices_by_dimension_id(const void *left, const void *right)
|
||||
/*
|
||||
* Check if two dimensions slices overlap by doing collision detection in one
|
||||
* dimension.
|
||||
*
|
||||
* Returns true if the slices collide, otherwise false.
|
||||
*/
|
||||
bool
|
||||
dimension_slices_collide(DimensionSlice *slice1, DimensionSlice *slice2)
|
||||
{
|
||||
const DimensionSlice *left_slice = *((DimensionSlice **) left);
|
||||
const DimensionSlice *right_slice = *((DimensionSlice **) right);
|
||||
Assert(slice1->fd.dimension_id == slice2->fd.dimension_id);
|
||||
|
||||
if (left_slice->fd.dimension_id == right_slice->fd.dimension_id)
|
||||
return 0;
|
||||
if (left_slice->fd.dimension_id < right_slice->fd.dimension_id)
|
||||
return -1;
|
||||
return 1;
|
||||
return (slice1->fd.range_start < slice2->fd.range_end &&
|
||||
slice1->fd.range_end > slice2->fd.range_start);
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
hypercube_slice_sort(Hypercube *hc)
|
||||
/*
|
||||
* Check whether two slices are identical.
|
||||
*
|
||||
* We require by assertion that the slices are in the same dimension and we only
|
||||
* compare the ranges (i.e., the slice ID is not important for equality).
|
||||
*
|
||||
* Returns true if the slices have identical ranges, otherwise false.
|
||||
*/
|
||||
bool
|
||||
dimension_slices_equal(DimensionSlice *slice1, DimensionSlice *slice2)
|
||||
{
|
||||
qsort(hc->slices, hc->num_slices, sizeof(DimensionSlice *), cmp_slices_by_dimension_id);
|
||||
Assert(slice1->fd.dimension_id == slice2->fd.dimension_id);
|
||||
|
||||
return slice1->fd.range_start == slice2->fd.range_start &&
|
||||
slice1->fd.range_end == slice2->fd.range_end;
|
||||
}
|
||||
|
||||
Hypercube *
|
||||
hypercube_from_constraints(ChunkConstraint constraints[], int16 num_constraints)
|
||||
/*-
|
||||
* Cut a slice that collides with another slice. The coordinate is the point of
|
||||
* insertion, and determines which end of the slice to cut.
|
||||
*
|
||||
* Case where we cut "after" the coordinate:
|
||||
*
|
||||
* ' [-x--------]
|
||||
* ' [--------]
|
||||
*
|
||||
* Case where we cut "before" the coordinate:
|
||||
*
|
||||
* ' [------x--]
|
||||
* ' [--------]
|
||||
*
|
||||
* Returns true if the slice was cut, otherwise false.
|
||||
*/
|
||||
bool
|
||||
dimension_slice_cut(DimensionSlice *to_cut, DimensionSlice *other, int64 coord)
|
||||
{
|
||||
Hypercube *hc = hypercube_alloc(num_constraints);
|
||||
int i;
|
||||
Assert(to_cut->fd.dimension_id == other->fd.dimension_id);
|
||||
|
||||
for (i = 0; i < num_constraints; i++)
|
||||
if (other->fd.range_end <= coord &&
|
||||
other->fd.range_end > to_cut->fd.range_start)
|
||||
{
|
||||
DimensionSlice *slice = dimension_slice_scan_by_id(constraints[i].fd.dimension_slice_id);
|
||||
|
||||
Assert(slice != NULL);
|
||||
hc->slices[hc->num_slices++] = slice;
|
||||
/* Cut "before" the coordinate */
|
||||
to_cut->fd.range_start = other->fd.range_end;
|
||||
return true;
|
||||
}
|
||||
else if (other->fd.range_start > coord &&
|
||||
other->fd.range_start < to_cut->fd.range_end)
|
||||
{
|
||||
/* Cut "after" the coordinate */
|
||||
to_cut->fd.range_end = other->fd.range_start;
|
||||
return true;
|
||||
}
|
||||
|
||||
hypercube_slice_sort(hc);
|
||||
return hc;
|
||||
return false;
|
||||
}
|
||||
|
||||
void
|
||||
@ -188,124 +301,43 @@ dimension_slice_free(DimensionSlice *slice)
|
||||
pfree(slice);
|
||||
}
|
||||
|
||||
static int
|
||||
cmp_slices(const void *left, const void *right)
|
||||
static bool
|
||||
dimension_slice_insert_relation(Relation rel, DimensionSlice *slice)
|
||||
{
|
||||
const DimensionSlice *left_slice = *((DimensionSlice **) left);
|
||||
const DimensionSlice *right_slice = *((DimensionSlice **) right);
|
||||
TupleDesc desc = RelationGetDescr(rel);
|
||||
Datum values[Natts_dimension_slice];
|
||||
bool nulls[Natts_dimension_slice] = {false};
|
||||
|
||||
if (left_slice->fd.range_start == right_slice->fd.range_start)
|
||||
{
|
||||
if (left_slice->fd.range_end == right_slice->fd.range_end)
|
||||
return 0;
|
||||
if (slice->fd.id > 0)
|
||||
/* Slice already exists in table */
|
||||
return false;
|
||||
|
||||
if (left_slice->fd.range_end > right_slice->fd.range_end)
|
||||
return 1;
|
||||
memset(values, 0, sizeof(values));
|
||||
slice->fd.id = catalog_table_next_seq_id(catalog_get(), DIMENSION_SLICE);
|
||||
values[Anum_dimension_slice_id - 1] = slice->fd.id;
|
||||
values[Anum_dimension_slice_dimension_id - 1] = slice->fd.dimension_id;
|
||||
values[Anum_dimension_slice_range_start - 1] = slice->fd.range_start;
|
||||
values[Anum_dimension_slice_range_end - 1] = slice->fd.range_end;
|
||||
|
||||
return -1;
|
||||
}
|
||||
catalog_insert_values(rel, desc, values, nulls);
|
||||
|
||||
if (left_slice->fd.range_start > right_slice->fd.range_start)
|
||||
return 1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int
|
||||
cmp_coordinate_and_slice(const void *left, const void *right)
|
||||
{
|
||||
int64 coord = *((int64 *) left);
|
||||
const DimensionSlice *slice = *((DimensionSlice **) right);
|
||||
|
||||
if (coord < slice->fd.range_start)
|
||||
return -1;
|
||||
|
||||
if (coord >= slice->fd.range_end)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DimensionVec *
|
||||
dimension_vec_expand(DimensionVec *vec, int32 new_capacity)
|
||||
{
|
||||
if (vec != NULL && vec->capacity >= new_capacity)
|
||||
return vec;
|
||||
|
||||
if (NULL == vec)
|
||||
vec = palloc(DIMENSION_VEC_SIZE(new_capacity));
|
||||
else
|
||||
vec = repalloc(vec, DIMENSION_VEC_SIZE(new_capacity));
|
||||
|
||||
vec->capacity = new_capacity;
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_create(int32 initial_num_slices)
|
||||
{
|
||||
DimensionVec *vec = dimension_vec_expand(NULL, initial_num_slices);
|
||||
|
||||
vec->capacity = initial_num_slices;
|
||||
vec->num_slices = 0;
|
||||
return vec;
|
||||
}
|
||||
|
||||
static DimensionVec *
|
||||
dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
if (vec->num_slices + 1 > vec->capacity)
|
||||
*vecptr = vec = dimension_vec_expand(vec, vec->capacity + 10);
|
||||
|
||||
vec->slices[vec->num_slices++] = slice;
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_add_slice_sort(DimensionVec **vecptr, DimensionSlice *slice)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
*vecptr = vec = dimension_vec_add_slice(vecptr, slice);
|
||||
qsort(vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
|
||||
return vec;
|
||||
return true;
|
||||
}
|
||||
|
||||
/*
|
||||
* Insert slices into the catalog.
|
||||
*/
|
||||
void
|
||||
dimension_vec_remove_slice(DimensionVec **vecptr, int32 index)
|
||||
dimension_slice_insert_multi(DimensionSlice **slices, Size num_slices)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
Catalog *catalog = catalog_get();
|
||||
Relation rel;
|
||||
Size i;
|
||||
|
||||
dimension_slice_free(vec->slices[index]);
|
||||
memcpy(vec->slices + index, vec->slices + (index + 1), sizeof(DimensionSlice *) * (vec->num_slices - index - 1));
|
||||
vec->num_slices--;
|
||||
}
|
||||
|
||||
|
||||
DimensionSlice *
|
||||
dimension_vec_find_slice(DimensionVec *vec, int64 coordinate)
|
||||
{
|
||||
DimensionSlice **res;
|
||||
|
||||
res = bsearch(&coordinate, vec->slices, vec->num_slices,
|
||||
sizeof(DimensionSlice *), cmp_coordinate_and_slice);
|
||||
|
||||
if (res == NULL)
|
||||
return NULL;
|
||||
|
||||
return *res;
|
||||
}
|
||||
|
||||
void
|
||||
dimension_vec_free(DimensionVec *vec)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vec->num_slices; i++)
|
||||
dimension_slice_free(vec->slices[i]);
|
||||
pfree(vec);
|
||||
rel = heap_open(catalog->tables[DIMENSION_SLICE].id, RowExclusiveLock);
|
||||
|
||||
for (i = 0; i < num_slices; i++)
|
||||
dimension_slice_insert_relation(rel, slices[i]);
|
||||
|
||||
heap_close(rel, RowExclusiveLock);
|
||||
}
|
||||
|
@ -11,54 +11,34 @@
|
||||
typedef struct DimensionSlice
|
||||
{
|
||||
FormData_dimension_slice fd;
|
||||
DimensionType type;
|
||||
void (*storage_free) (void *);
|
||||
void *storage;
|
||||
} DimensionSlice;
|
||||
|
||||
/*
|
||||
* Hypercube is a collection of slices from N distinct dimensions, i.e., the
|
||||
* N-dimensional analogue of a square or a cube.
|
||||
*/
|
||||
typedef struct Hypercube
|
||||
{
|
||||
int16 capacity; /* capacity of slices[] */
|
||||
int16 num_slices; /* actual number of slices (should equal
|
||||
* capacity after create) */
|
||||
/* Open slices are stored before closed slices */
|
||||
DimensionSlice *slices[0];
|
||||
} Hypercube;
|
||||
typedef struct DimensionVec DimensionVec;
|
||||
typedef struct Hypercube Hypercube;
|
||||
|
||||
#define HYPERCUBE_SIZE(num_dimensions) \
|
||||
(sizeof(Hypercube) + sizeof(DimensionSlice *) * (num_dimensions))
|
||||
|
||||
/*
|
||||
* DimensionVec is a collection of slices (ranges) along one dimension for a
|
||||
* time range.
|
||||
*/
|
||||
typedef struct DimensionVec
|
||||
{
|
||||
int32 capacity; /* The capacity of the slices array */
|
||||
int32 num_slices; /* The current number of slices in slices
|
||||
* array */
|
||||
DimensionSlice *slices[0];
|
||||
} DimensionVec;
|
||||
|
||||
#define DIMENSION_VEC_SIZE(num_slices) \
|
||||
(sizeof(DimensionVec) + sizeof(DimensionSlice *) * num_slices)
|
||||
|
||||
#define DIMENSION_VEC_DEFAULT_SIZE 10
|
||||
|
||||
extern DimensionVec *dimension_slice_scan(int32 dimension_id, int64 coordinate);
|
||||
extern DimensionVec *dimension_slice_scan_limit(int32 dimension_id, int64 coordinate, int limit);
|
||||
extern DimensionVec *dimension_slice_collision_scan_limit(int32 dimension_id, int64 range_start, int64 range_end, int limit);
|
||||
extern Hypercube *dimension_slice_point_scan(Hyperspace *space, int64 point[]);
|
||||
extern DimensionSlice *dimension_slice_scan_for_existing(DimensionSlice *slice);
|
||||
extern DimensionSlice *dimension_slice_scan_by_id(int32 dimension_slice_id);
|
||||
extern DimensionSlice *dimension_slice_create(int dimension_id, int64 range_start, int64 range_end);
|
||||
extern DimensionSlice *dimension_slice_copy(const DimensionSlice *original);
|
||||
extern bool dimension_slices_collide(DimensionSlice *slice1, DimensionSlice *slice2);
|
||||
extern bool dimension_slices_equal(DimensionSlice *slice1, DimensionSlice *slice2);
|
||||
extern bool dimension_slice_cut(DimensionSlice *to_cut, DimensionSlice *other, int64 coord);
|
||||
extern void dimension_slice_free(DimensionSlice *slice);
|
||||
extern DimensionVec *dimension_vec_create(int32 initial_num_slices);
|
||||
extern DimensionVec *dimension_vec_add_slice_sort(DimensionVec **vec, DimensionSlice *slice);
|
||||
extern void dimension_vec_remove_slice(DimensionVec **vecptr, int32 index);
|
||||
extern DimensionSlice *dimension_vec_find_slice(DimensionVec *vec, int64 coordinate);
|
||||
extern void dimension_vec_free(DimensionVec *vec);
|
||||
extern Hypercube *hypercube_from_constraints(ChunkConstraint constraints[], int16 num_constraints);
|
||||
extern Hypercube *hypercube_copy(Hypercube *hc);
|
||||
extern void dimension_slice_insert_multi(DimensionSlice **slice, Size num_slices);
|
||||
|
||||
#define dimension_slice_insert(slice) \
|
||||
dimension_slice_insert_multi(&(slice), 1)
|
||||
|
||||
#define dimension_slice_scan(dimension_id, coordinate) \
|
||||
dimension_slice_scan_limit(dimension_id, coordinate, 0)
|
||||
|
||||
#define dimension_slice_collision_scan(dimension_id, range_start, range_end) \
|
||||
dimension_slice_collision_scan_limit(dimension_id, range_start, range_end, 0)
|
||||
|
||||
|
||||
#endif /* TIMESCALEDB_DIMENSION_SLICE_H */
|
||||
|
157
src/dimension_vector.c
Normal file
157
src/dimension_vector.c
Normal file
@ -0,0 +1,157 @@
|
||||
#include "dimension_vector.h"
|
||||
|
||||
static int
|
||||
cmp_slices(const void *left, const void *right)
|
||||
{
|
||||
const DimensionSlice *left_slice = *((DimensionSlice **) left);
|
||||
const DimensionSlice *right_slice = *((DimensionSlice **) right);
|
||||
|
||||
if (left_slice->fd.range_start == right_slice->fd.range_start)
|
||||
{
|
||||
if (left_slice->fd.range_end == right_slice->fd.range_end)
|
||||
return 0;
|
||||
|
||||
if (left_slice->fd.range_end > right_slice->fd.range_end)
|
||||
return 1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (left_slice->fd.range_start > right_slice->fd.range_start)
|
||||
return 1;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
static int
|
||||
cmp_coordinate_and_slice(const void *left, const void *right)
|
||||
{
|
||||
int64 coord = *((int64 *) left);
|
||||
const DimensionSlice *slice = *((DimensionSlice **) right);
|
||||
|
||||
if (coord < slice->fd.range_start)
|
||||
return -1;
|
||||
|
||||
if (coord >= slice->fd.range_end)
|
||||
return 1;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static DimensionVec *
|
||||
dimension_vec_expand(DimensionVec *vec, int32 new_capacity)
|
||||
{
|
||||
if (vec != NULL && vec->capacity >= new_capacity)
|
||||
return vec;
|
||||
|
||||
if (NULL == vec)
|
||||
vec = palloc(DIMENSION_VEC_SIZE(new_capacity));
|
||||
else
|
||||
vec = repalloc(vec, DIMENSION_VEC_SIZE(new_capacity));
|
||||
|
||||
vec->capacity = new_capacity;
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_create(int32 initial_num_slices)
|
||||
{
|
||||
DimensionVec *vec = dimension_vec_expand(NULL, initial_num_slices);
|
||||
|
||||
vec->capacity = initial_num_slices;
|
||||
vec->num_slices = 0;
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_sort(DimensionVec **vecptr)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
qsort(vec->slices, vec->num_slices, sizeof(DimensionSlice *), cmp_slices);
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
/* Ensure consistent dimension */
|
||||
Assert(vec->num_slices == 0 || vec->slices[0]->fd.dimension_id == slice->fd.dimension_id);
|
||||
|
||||
if (vec->num_slices + 1 > vec->capacity)
|
||||
*vecptr = vec = dimension_vec_expand(vec, vec->capacity + 10);
|
||||
|
||||
vec->slices[vec->num_slices++] = slice;
|
||||
|
||||
return vec;
|
||||
}
|
||||
|
||||
DimensionVec *
|
||||
dimension_vec_add_slice_sort(DimensionVec **vecptr, DimensionSlice *slice)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
*vecptr = vec = dimension_vec_add_slice(vecptr, slice);
|
||||
return dimension_vec_sort(vecptr);
|
||||
}
|
||||
|
||||
void
|
||||
dimension_vec_remove_slice(DimensionVec **vecptr, int32 index)
|
||||
{
|
||||
DimensionVec *vec = *vecptr;
|
||||
|
||||
dimension_slice_free(vec->slices[index]);
|
||||
memcpy(vec->slices + index, vec->slices + (index + 1), sizeof(DimensionSlice *) * (vec->num_slices - index - 1));
|
||||
vec->num_slices--;
|
||||
}
|
||||
|
||||
#if defined(USE_ASSERT_CHECKING)
|
||||
static inline bool
|
||||
dimension_vec_is_sorted(DimensionVec *vec)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (vec->num_slices < 2)
|
||||
return true;
|
||||
|
||||
for (i = 1; i < vec->num_slices; i++)
|
||||
if (cmp_slices(&vec->slices[i - 1], &vec->slices[i]) > 0)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
DimensionSlice *
|
||||
dimension_vec_find_slice(DimensionVec *vec, int64 coordinate)
|
||||
{
|
||||
DimensionSlice **res;
|
||||
|
||||
if (vec->num_slices == 0)
|
||||
return NULL;
|
||||
|
||||
Assert(dimension_vec_is_sorted(vec));
|
||||
|
||||
res = bsearch(&coordinate, vec->slices, vec->num_slices,
|
||||
sizeof(DimensionSlice *), cmp_coordinate_and_slice);
|
||||
|
||||
if (res == NULL)
|
||||
return NULL;
|
||||
|
||||
return *res;
|
||||
}
|
||||
|
||||
void
|
||||
dimension_vec_free(DimensionVec *vec)
|
||||
{
|
||||
int i;
|
||||
|
||||
for (i = 0; i < vec->num_slices; i++)
|
||||
dimension_slice_free(vec->slices[i]);
|
||||
pfree(vec);
|
||||
}
|
33
src/dimension_vector.h
Normal file
33
src/dimension_vector.h
Normal file
@ -0,0 +1,33 @@
|
||||
#ifndef TIMESCALEDB_DIMENSION_VECTOR_H
|
||||
#define TIMESCALEDB_DIMENSION_VECTOR_H
|
||||
|
||||
#include <postgres.h>
|
||||
|
||||
#include "dimension_slice.h"
|
||||
|
||||
/*
|
||||
* DimensionVec is a collection of slices (ranges) along one dimension for a
|
||||
* time range.
|
||||
*/
|
||||
typedef struct DimensionVec
|
||||
{
|
||||
int32 capacity; /* The capacity of the slices array */
|
||||
int32 num_slices; /* The current number of slices in slices
|
||||
* array */
|
||||
DimensionSlice *slices[0];
|
||||
} DimensionVec;
|
||||
|
||||
#define DIMENSION_VEC_SIZE(num_slices) \
|
||||
(sizeof(DimensionVec) + sizeof(DimensionSlice *) * num_slices)
|
||||
|
||||
#define DIMENSION_VEC_DEFAULT_SIZE 10
|
||||
|
||||
extern DimensionVec *dimension_vec_create(int32 initial_num_slices);
|
||||
extern DimensionVec *dimension_vec_sort(DimensionVec **vec);
|
||||
extern DimensionVec *dimension_vec_add_slice_sort(DimensionVec **vec, DimensionSlice *slice);
|
||||
extern DimensionVec *dimension_vec_add_slice(DimensionVec **vecptr, DimensionSlice *slice);
|
||||
extern void dimension_vec_remove_slice(DimensionVec **vecptr, int32 index);
|
||||
extern DimensionSlice *dimension_vec_find_slice(DimensionVec *vec, int64 coordinate);
|
||||
extern void dimension_vec_free(DimensionVec *vec);
|
||||
|
||||
#endif /* TIMESCALEDB_DIMENSION_VECTOR_H */
|
240
src/hypercube.c
Normal file
240
src/hypercube.c
Normal file
@ -0,0 +1,240 @@
|
||||
#include "hypercube.h"
|
||||
#include "dimension_vector.h"
|
||||
|
||||
/*
|
||||
* A hypercube represents the partition bounds of a hypertable chunk.
|
||||
*
|
||||
* A hypercube consists of N slices that each represent a range in a particular
|
||||
* dimension that make up the hypercube. When a new tuple is inserted into a
|
||||
* hypertable, and no chunk exists that can hold that tuple, we need to
|
||||
* calculate a new hypercube that encloses the point corresponding to the
|
||||
* tuple. When calculating the hypercube, we need to account for alignment
|
||||
* requirements in dimensions marked as "aligned" and also ensure that there are
|
||||
* no collisions with existing chunks. Alignment issues and collisions can occur
|
||||
* when the partitioning configuration has changed (e.g., the time interval or
|
||||
* number of partitions in a particular dimension changed).
|
||||
*/
|
||||
Hypercube *
|
||||
hypercube_alloc(int16 num_dimensions)
|
||||
{
|
||||
Hypercube *hc = palloc0(HYPERCUBE_SIZE(num_dimensions));
|
||||
|
||||
hc->capacity = num_dimensions;
|
||||
return hc;
|
||||
}
|
||||
|
||||
#if defined(USE_ASSERT_CHECKING)
|
||||
static inline bool
|
||||
hypercube_is_sorted(Hypercube *hc)
|
||||
{
|
||||
int i;
|
||||
|
||||
if (hc->num_slices < 2)
|
||||
return true;
|
||||
|
||||
for (i = 1; i < hc->num_slices; i++)
|
||||
if (hc->slices[i]->fd.dimension_id < hc->slices[i - 1]->fd.dimension_id)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
#endif
|
||||
|
||||
Hypercube *
|
||||
hypercube_copy(Hypercube *hc)
|
||||
{
|
||||
Hypercube *copy;
|
||||
size_t nbytes = HYPERCUBE_SIZE(hc->capacity);
|
||||
int i;
|
||||
|
||||
copy = palloc(nbytes);
|
||||
memcpy(copy, hc, nbytes);
|
||||
|
||||
for (i = 0; i < hc->num_slices; i++)
|
||||
copy->slices[i] = dimension_slice_copy(hc->slices[i]);
|
||||
|
||||
return copy;
|
||||
}
|
||||
|
||||
static int
|
||||
cmp_slices_by_dimension_id(const void *left, const void *right)
|
||||
{
|
||||
const DimensionSlice *left_slice = *((DimensionSlice **) left);
|
||||
const DimensionSlice *right_slice = *((DimensionSlice **) right);
|
||||
|
||||
if (left_slice->fd.dimension_id == right_slice->fd.dimension_id)
|
||||
return 0;
|
||||
if (left_slice->fd.dimension_id < right_slice->fd.dimension_id)
|
||||
return -1;
|
||||
return 1;
|
||||
}
|
||||
|
||||
void
|
||||
hypercube_add_slice(Hypercube *hc, DimensionSlice *slice)
|
||||
{
|
||||
Assert(hc->capacity > hc->num_slices);
|
||||
|
||||
hc->slices[hc->num_slices++] = slice;
|
||||
|
||||
/* Check if we require a sort to maintain dimension order */
|
||||
if (hc->num_slices > 1 && slice->fd.dimension_id < hc->slices[hc->num_slices - 2]->fd.dimension_id)
|
||||
hypercube_slice_sort(hc);
|
||||
|
||||
Assert(hypercube_is_sorted(hc));
|
||||
}
|
||||
|
||||
/*
|
||||
* Sort the hypercubes slices in ascending dimension ID order. This allows us to
|
||||
* iterate slices in a consistent order.
|
||||
*/
|
||||
void
|
||||
hypercube_slice_sort(Hypercube *hc)
|
||||
{
|
||||
qsort(hc->slices, hc->num_slices, sizeof(DimensionSlice *), cmp_slices_by_dimension_id);
|
||||
}
|
||||
|
||||
DimensionSlice *
|
||||
hypercube_get_slice_by_dimension_id(Hypercube *hc, int32 dimension_id)
|
||||
{
|
||||
DimensionSlice slice = {
|
||||
.fd.dimension_id = dimension_id,
|
||||
};
|
||||
void *ptr = &slice;
|
||||
|
||||
if (hc->num_slices == 0)
|
||||
return NULL;
|
||||
|
||||
Assert(hypercube_is_sorted(hc));
|
||||
|
||||
ptr = bsearch(&ptr, hc->slices, hc->num_slices,
|
||||
sizeof(DimensionSlice *), cmp_slices_by_dimension_id);
|
||||
|
||||
if (NULL == ptr)
|
||||
return NULL;
|
||||
|
||||
return *((DimensionSlice **) ptr);
|
||||
}
|
||||
|
||||
/*
|
||||
* Given a set of constraints, build the corresponding hypercube.
|
||||
*/
|
||||
Hypercube *
|
||||
hypercube_from_constraints(ChunkConstraint constraints[], int16 num_constraints)
|
||||
{
|
||||
Hypercube *hc = hypercube_alloc(num_constraints);
|
||||
int i;
|
||||
|
||||
for (i = 0; i < num_constraints; i++)
|
||||
{
|
||||
DimensionSlice *slice = dimension_slice_scan_by_id(constraints[i].fd.dimension_slice_id);
|
||||
|
||||
Assert(slice != NULL);
|
||||
hc->slices[hc->num_slices++] = slice;
|
||||
}
|
||||
|
||||
hypercube_slice_sort(hc);
|
||||
|
||||
Assert(hypercube_is_sorted(hc));
|
||||
|
||||
return hc;
|
||||
}
|
||||
|
||||
/*
|
||||
* Calculate the hypercube that encloses the given point.
|
||||
*
|
||||
* The hypercube's dimensions are calculated one by one, and depend on the
|
||||
* current partitioning in each dimension of the N-dimensional hyperspace,
|
||||
* including any alignment requirements.
|
||||
*
|
||||
* For non-aligned dimensions, we simply calculate the hypercube's slice range
|
||||
* in that dimension given current partitioning configuration. If there is
|
||||
* already an identical slice for that dimension, we will reuse it rather than
|
||||
* creating a new one.
|
||||
*
|
||||
* For aligned dimensions, we first try to find an existing slice that covers
|
||||
* the insertion point. If an existing slice is found, we reuse it or otherwise
|
||||
* we calculate a new slice as described for non-aligned dimensions.
|
||||
*
|
||||
* If a hypercube has dimension slices that are not reused ones, we might need
|
||||
* to cut them to ensure alignment and avoid collisions with other chunk
|
||||
* hypercubes. This happens in a later step.
|
||||
*/
|
||||
Hypercube *
|
||||
hypercube_calculate_from_point(Hyperspace *hs, Point *p)
|
||||
{
|
||||
Hypercube *cube;
|
||||
int i;
|
||||
|
||||
cube = hypercube_alloc(hs->num_dimensions);
|
||||
|
||||
/* For each dimension, calculate the hypercube's slice in that dimension */
|
||||
for (i = 0; i < hs->num_dimensions; i++)
|
||||
{
|
||||
Dimension *dim = &hs->dimensions[i];
|
||||
int64 value = p->coordinates[i];
|
||||
bool found = false;
|
||||
|
||||
/* Assert that dimensions are in ascending order */
|
||||
Assert(i == 0 || dim->fd.id > hs->dimensions[i - 1].fd.id);
|
||||
|
||||
/*
|
||||
* If this is an aligned dimension, we'd like to reuse any existing
|
||||
* slice that covers the coordinate in the dimenion
|
||||
*/
|
||||
if (dim->fd.aligned)
|
||||
{
|
||||
DimensionVec *vec;
|
||||
|
||||
vec = dimension_slice_scan_limit(dim->fd.id, value, 1);
|
||||
|
||||
if (vec->num_slices > 0)
|
||||
{
|
||||
cube->slices[i] = vec->slices[0];
|
||||
found = true;
|
||||
}
|
||||
}
|
||||
|
||||
if (!found)
|
||||
{
|
||||
/*
|
||||
* No existing slice found, or we are not aligning, so calculate
|
||||
* the range of a new slice
|
||||
*/
|
||||
cube->slices[i] = dimension_calculate_default_slice(dim, value);
|
||||
|
||||
/*
|
||||
* Check if there's already an existing slice with the calculated
|
||||
* range. If a slice already exists, use that slice's ID instead
|
||||
* of a new one.
|
||||
*/
|
||||
dimension_slice_scan_for_existing(cube->slices[i]);
|
||||
}
|
||||
}
|
||||
|
||||
cube->num_slices = hs->num_dimensions;
|
||||
|
||||
Assert(hypercube_is_sorted(cube));
|
||||
|
||||
return cube;
|
||||
}
|
||||
|
||||
/*
|
||||
* Check if two hypercubes collide (overlap).
|
||||
*
|
||||
* This is basically an axis-aligned bounding box collision detection,
|
||||
* generalized to N dimensions. We check for dimension slice collisions in each
|
||||
* dimension and only if all dimensions collide there is a hypercube collision.
|
||||
*/
|
||||
bool
|
||||
hypercubes_collide(Hypercube *cube1, Hypercube *cube2)
|
||||
{
|
||||
int i;
|
||||
|
||||
Assert(cube1->num_slices == cube2->num_slices);
|
||||
|
||||
for (i = 0; i < cube1->num_slices; i++)
|
||||
if (!dimension_slices_collide(cube1->slices[i], cube2->slices[i]))
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
34
src/hypercube.h
Normal file
34
src/hypercube.h
Normal file
@ -0,0 +1,34 @@
|
||||
#ifndef TIMESCALEDB_HYPERCUBE_H
|
||||
#define TIMESCALEDB_HYPERCUBE_H
|
||||
|
||||
#include <postgres.h>
|
||||
|
||||
#include "dimension_slice.h"
|
||||
|
||||
/*
|
||||
* Hypercube is a collection of slices from N distinct dimensions, i.e., the
|
||||
* N-dimensional analogue of a cube.
|
||||
*/
|
||||
typedef struct Hypercube
|
||||
{
|
||||
int16 capacity; /* capacity of slices[] */
|
||||
int16 num_slices; /* actual number of slices (should equal
|
||||
* capacity after create) */
|
||||
/* Slices are stored in dimension order */
|
||||
DimensionSlice *slices[0];
|
||||
} Hypercube;
|
||||
|
||||
#define HYPERCUBE_SIZE(num_dimensions) \
|
||||
(sizeof(Hypercube) + sizeof(DimensionSlice *) * (num_dimensions))
|
||||
|
||||
|
||||
extern Hypercube *hypercube_alloc(int16 num_dimensions);
|
||||
extern void hypercube_add_slice(Hypercube *hc, DimensionSlice *slice);
|
||||
extern Hypercube *hypercube_from_constraints(ChunkConstraint constraints[], int16 num_constraints);
|
||||
extern Hypercube *hypercube_calculate_from_point(Hyperspace *hs, Point *p);
|
||||
extern bool hypercubes_collide(Hypercube *cube1, Hypercube *cube2);
|
||||
extern DimensionSlice *hypercube_get_slice_by_dimension_id(Hypercube *hc, int32 dimension_id);
|
||||
extern Hypercube *hypercube_copy(Hypercube *hc);
|
||||
extern void hypercube_slice_sort(Hypercube *hc);
|
||||
|
||||
#endif /* TIMESCALEDB_HYPERCUBE_H */
|
@ -22,7 +22,7 @@ hypertable_from_tuple(HeapTuple tuple)
|
||||
namespace_oid = get_namespace_oid(NameStr(h->fd.schema_name), false);
|
||||
h->main_table_relid = get_relname_relid(NameStr(h->fd.table_name), namespace_oid);
|
||||
h->space = dimension_scan(h->fd.id, h->main_table_relid, h->fd.num_dimensions);
|
||||
h->chunk_cache = subspace_store_init(HYPERSPACE_NUM_DIMENSIONS(h->space), CurrentMemoryContext);
|
||||
h->chunk_cache = subspace_store_init(h->space->num_dimensions, CurrentMemoryContext);
|
||||
|
||||
return h;
|
||||
}
|
||||
@ -43,13 +43,17 @@ hypertable_get_chunk(Hypertable *h, Point *point)
|
||||
*/
|
||||
chunk = chunk_find(h->space, point);
|
||||
|
||||
if (NULL == chunk)
|
||||
chunk = chunk_create(h->space, point,
|
||||
NameStr(h->fd.associated_schema_name),
|
||||
NameStr(h->fd.associated_table_prefix));
|
||||
|
||||
Assert(chunk != NULL);
|
||||
|
||||
old = MemoryContextSwitchTo(subspace_store_mcxt(h->chunk_cache));
|
||||
|
||||
if (NULL == chunk)
|
||||
chunk = chunk_create(h->space, point);
|
||||
else
|
||||
/* Make a copy which lives in the chunk cache's memory context */
|
||||
chunk = chunk_copy(chunk);
|
||||
/* Make a copy which lives in the chunk cache's memory context */
|
||||
chunk = chunk_copy(chunk);
|
||||
|
||||
Assert(NULL != chunk);
|
||||
subspace_store_add(h->chunk_cache, chunk->cube, chunk, pfree);
|
||||
|
@ -45,13 +45,6 @@ prepare_plan(const char *src, int nargs, Oid *argtypes)
|
||||
return plan; \
|
||||
}
|
||||
|
||||
#define INT8ARRAYOID 1016
|
||||
#define CHUNK_CREATE_ARGS (Oid[]) {INT4ARRAYOID, INT8ARRAYOID}
|
||||
#define CHUNK_CREATE "SELECT * FROM _timescaledb_internal.chunk_create($1, $2)"
|
||||
|
||||
/* plan for creating a chunk via create_chunk(). */
|
||||
DEFINE_PLAN(create_chunk_plan, CHUNK_CREATE, 2, CHUNK_CREATE_ARGS)
|
||||
|
||||
/* old_schema, old_name, new_schema, new_name */
|
||||
#define RENAME_HYPERTABLE_ARGS (Oid[]) {NAMEOID, NAMEOID, TEXTOID, TEXTOID}
|
||||
#define RENAME_HYPERTABLE "SELECT * FROM _timescaledb_internal.rename_hypertable($1, $2, $3, $4)"
|
||||
@ -66,64 +59,36 @@ DEFINE_PLAN(rename_hypertable_plan, RENAME_HYPERTABLE, 4, RENAME_HYPERTABLE_ARGS
|
||||
/* plan to truncate hypertable */
|
||||
DEFINE_PLAN(truncate_hypertable_plan, TRUNCATE_HYPERTABLE, 2, TRUNCATE_HYPERTABLE_ARGS)
|
||||
|
||||
static HeapTuple
|
||||
chunk_tuple_create_spi_connected(Hyperspace *hs, Point *p, SPIPlanPtr plan)
|
||||
#define CHUNK_INSERT_ARGS (Oid[]) {INT4OID, INT4OID, NAMEOID, NAMEOID}
|
||||
#define CHUNK_INSERT "INSERT INTO _timescaledb_catalog.chunk VALUES ($1, $2, $3, $4)"
|
||||
|
||||
DEFINE_PLAN(chunk_insert_plan, CHUNK_INSERT, 4, CHUNK_INSERT_ARGS)
|
||||
|
||||
void
|
||||
spi_chunk_insert(int32 chunk_id, int32 hypertable_id, const char *schema_name, const char *table_name)
|
||||
{
|
||||
int i,
|
||||
ret;
|
||||
HeapTuple tuple;
|
||||
Datum dimension_ids[HYPERSPACE_NUM_DIMENSIONS(hs)];
|
||||
Datum dimension_values[HYPERSPACE_NUM_DIMENSIONS(hs)];
|
||||
Datum args[2];
|
||||
|
||||
for (i = 0; i < HYPERSPACE_NUM_DIMENSIONS(hs); i++)
|
||||
{
|
||||
dimension_ids[i] = Int32GetDatum(hs->dimensions[i].fd.id);
|
||||
dimension_values[i] = Int64GetDatum(p->coordinates[i]);
|
||||
}
|
||||
|
||||
args[0] = PointerGetDatum(construct_array(dimension_ids, HYPERSPACE_NUM_DIMENSIONS(hs), INT4OID, 4, true, 'i'));
|
||||
args[1] = PointerGetDatum(construct_array(dimension_values, HYPERSPACE_NUM_DIMENSIONS(hs), INT8OID, 8, true, 'd'));
|
||||
|
||||
ret = SPI_execute_plan(plan, args, NULL, false, 4);
|
||||
|
||||
if (ret <= 0)
|
||||
elog(ERROR, "Got an SPI error %d", ret);
|
||||
|
||||
if (SPI_processed != 1)
|
||||
elog(ERROR, "Got not 1 row but %lu", SPI_processed);
|
||||
|
||||
tuple = SPI_tuptable->vals[0];
|
||||
|
||||
return tuple;
|
||||
}
|
||||
|
||||
Chunk *
|
||||
spi_chunk_create(Hyperspace *hs, Point *p)
|
||||
{
|
||||
HeapTuple tuple;
|
||||
Chunk *chunk;
|
||||
MemoryContext old,
|
||||
top = CurrentMemoryContext;
|
||||
SPIPlanPtr plan = create_chunk_plan();
|
||||
SPIPlanPtr plan = chunk_insert_plan();
|
||||
Datum args[4] = {
|
||||
Int32GetDatum(chunk_id),
|
||||
Int32GetDatum(hypertable_id),
|
||||
CStringGetDatum(schema_name),
|
||||
CStringGetDatum(table_name)
|
||||
};
|
||||
int ret;
|
||||
|
||||
if (SPI_connect() < 0)
|
||||
elog(ERROR, "Got an SPI connect error");
|
||||
|
||||
tuple = chunk_tuple_create_spi_connected(hs, p, plan);
|
||||
ret = SPI_execute_plan(plan, args, NULL, false, 0);
|
||||
|
||||
old = MemoryContextSwitchTo(top);
|
||||
chunk = chunk_create_from_tuple(tuple, HYPERSPACE_NUM_DIMENSIONS(hs));
|
||||
MemoryContextSwitchTo(old);
|
||||
if (ret <= 0)
|
||||
elog(ERROR, "Got an SPI error %d", ret);
|
||||
|
||||
SPI_finish();
|
||||
|
||||
return chunk;
|
||||
}
|
||||
|
||||
|
||||
static void
|
||||
hypertable_rename_spi_connected(Hypertable *ht, char *new_schema_name, char *new_table_name, SPIPlanPtr plan)
|
||||
hypertable_rename_spi_connected(Hypertable *ht, const char *new_schema_name, const char *new_table_name, SPIPlanPtr plan)
|
||||
{
|
||||
int ret;
|
||||
Datum args[4];
|
||||
@ -143,7 +108,7 @@ hypertable_rename_spi_connected(Hypertable *ht, char *new_schema_name, char *new
|
||||
}
|
||||
|
||||
void
|
||||
spi_hypertable_rename(Hypertable *ht, char *new_schema_name, char *new_table_name)
|
||||
spi_hypertable_rename(Hypertable *ht, const char *new_schema_name, const char *new_table_name)
|
||||
{
|
||||
SPIPlanPtr plan = rename_hypertable_plan();
|
||||
|
||||
|
@ -3,13 +3,10 @@
|
||||
|
||||
#include <postgres.h>
|
||||
|
||||
typedef struct Chunk Chunk;
|
||||
typedef struct Hyperspace Hyperspace;
|
||||
typedef struct Point Point;
|
||||
typedef struct Hypertable Hypertable;
|
||||
|
||||
extern Chunk *spi_chunk_create(Hyperspace *hs, Point *p);
|
||||
extern void spi_hypertable_rename(Hypertable *ht, char *new_schema_name, char *new_table_name);
|
||||
extern void spi_chunk_insert(int32 chunk_id, int32 hypertable_id, const char *schema_name, const char *table_name);
|
||||
extern void spi_hypertable_rename(Hypertable *ht, const char *new_schema_name, const char *new_table_name);
|
||||
extern void spi_hypertable_truncate(Hypertable *ht);
|
||||
|
||||
#endif /* TIMESCALEDB_METADATA_QUERIES_H */
|
||||
|
@ -154,7 +154,6 @@ scanner_scan(ScannerCtx *ctx)
|
||||
{
|
||||
TupleDesc tuple_desc;
|
||||
bool is_valid;
|
||||
int num_tuples = 0;
|
||||
Scanner *scanner = &scanners[ctx->scantype];
|
||||
InternalScannerCtx ictx = {
|
||||
.sctx = ctx,
|
||||
@ -179,7 +178,7 @@ scanner_scan(ScannerCtx *ctx)
|
||||
|
||||
if (ctx->filter == NULL || ctx->filter(&ictx.tinfo, ctx->data))
|
||||
{
|
||||
num_tuples++;
|
||||
ictx.tinfo.count++;
|
||||
|
||||
if (ctx->tuplock.enabled)
|
||||
{
|
||||
@ -209,10 +208,10 @@ scanner_scan(ScannerCtx *ctx)
|
||||
|
||||
/* Call post-scan handler, if any. */
|
||||
if (ctx->postscan != NULL)
|
||||
ctx->postscan(num_tuples, ctx->data);
|
||||
ctx->postscan(ictx.tinfo.count, ctx->data);
|
||||
|
||||
scanner->endscan(&ictx);
|
||||
scanner->close(&ictx);
|
||||
|
||||
return num_tuples;
|
||||
return ictx.tinfo.count;
|
||||
}
|
||||
|
@ -28,8 +28,11 @@ typedef struct TupleInfo
|
||||
* in lockresult.
|
||||
*/
|
||||
HTSU_Result lockresult;
|
||||
int count;
|
||||
} TupleInfo;
|
||||
|
||||
typedef bool (*tuple_found_func) (TupleInfo *ti, void *data);
|
||||
|
||||
typedef struct ScannerCtx
|
||||
{
|
||||
Oid table;
|
||||
|
@ -2,6 +2,8 @@
|
||||
|
||||
#include "dimension.h"
|
||||
#include "dimension_slice.h"
|
||||
#include "dimension_vector.h"
|
||||
#include "hypercube.h"
|
||||
#include "subspace_store.h"
|
||||
|
||||
/*
|
||||
|
@ -6,155 +6,113 @@ CREATE DATABASE single;
|
||||
\c single
|
||||
CREATE EXTENSION IF NOT EXISTS timescaledb;
|
||||
SET timescaledb.disable_optimizations = :DISABLE_OPTIMIZATIONS;
|
||||
CREATE TABLE chunk_test(
|
||||
time BIGINT,
|
||||
metric INTEGER,
|
||||
device_id TEXT
|
||||
);
|
||||
-- Test chunk closing/creation
|
||||
SELECT * FROM create_hypertable('chunk_test', 'time', 'device_id', 2, chunk_time_interval => 10);
|
||||
--
|
||||
-- This test will create chunks in two dimenisions, time (x) and
|
||||
-- space (y), where the time dimension is aligned. The figure below
|
||||
-- shows the expected result. The chunk number in the figure
|
||||
-- indicates the creation order.
|
||||
--
|
||||
-- +
|
||||
-- +
|
||||
-- + +-----+ +-----+
|
||||
-- + | 2 | | 3 |
|
||||
-- + | +---+-+ |
|
||||
-- + +-----+ 5 |6+-----+
|
||||
-- + | 1 +---+-+-----+ +---------+
|
||||
-- + | | |4| 7 | | 8 |
|
||||
-- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
|
||||
-- 0 5 10 15 20
|
||||
--
|
||||
-- Partitioning:
|
||||
--
|
||||
-- Chunk # | time | space
|
||||
-- 1 | 3 | 2
|
||||
-- 4 | 1 | 3
|
||||
-- 5 | 5 | 3
|
||||
--
|
||||
CREATE TABLE chunk_test(time integer, temp float8, tag integer, color integer);
|
||||
SELECT create_hypertable('chunk_test', 'time', 'tag', 2, chunk_time_interval => 3);
|
||||
create_hypertable
|
||||
-------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.hypertable;
|
||||
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+-------------+------------+------------------------+-------------------------+----------------
|
||||
1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
(1 row)
|
||||
INSERT INTO chunk_test VALUES (4, 24.3, 3, 1);
|
||||
SELECT * FROM _timescaledb_catalog.dimension_slice;
|
||||
id | dimension_id | range_start | range_end
|
||||
----+--------------+-------------+------------
|
||||
1 | 1 | 3 | 6
|
||||
2 | 2 | 0 | 1073741823
|
||||
(2 rows)
|
||||
|
||||
INSERT INTO chunk_test VALUES (1, 1, 'dev1'),
|
||||
(2, 2, 'dev2'),
|
||||
(45, 2, 'dev2'),
|
||||
(46, 2, 'dev2');
|
||||
SELECT * FROM set_chunk_time_interval('chunk_test', 40::bigint);
|
||||
INSERT INTO chunk_test VALUES (4, 24.3, 1, 1);
|
||||
INSERT INTO chunk_test VALUES (10, 24.3, 1, 1);
|
||||
SELECT c.table_name AS chunk_name, d.id AS dimension_id, ds.id AS slice_id, range_start, range_end FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
chunk_name | dimension_id | slice_id | range_start | range_end
|
||||
------------------+--------------+----------+-------------+------------
|
||||
_hyper_1_1_chunk | 1 | 1 | 3 | 6
|
||||
_hyper_1_1_chunk | 2 | 2 | 0 | 1073741823
|
||||
_hyper_1_2_chunk | 1 | 1 | 3 | 6
|
||||
_hyper_1_2_chunk | 2 | 3 | 1073741823 | 2147483647
|
||||
_hyper_1_3_chunk | 1 | 4 | 9 | 12
|
||||
_hyper_1_3_chunk | 2 | 3 | 1073741823 | 2147483647
|
||||
(6 rows)
|
||||
|
||||
UPDATE _timescaledb_catalog.dimension SET num_slices = 3 WHERE id = 2;
|
||||
SELECT set_chunk_time_interval('chunk_test', 1::bigint);
|
||||
set_chunk_time_interval
|
||||
-------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO chunk_test VALUES(23, 3, 'dev3');
|
||||
SELECT * FROM chunk_test order by time, metric, device_id;
|
||||
time | metric | device_id
|
||||
------+--------+-----------
|
||||
1 | 1 | dev1
|
||||
2 | 2 | dev2
|
||||
23 | 3 | dev3
|
||||
45 | 2 | dev2
|
||||
46 | 2 | dev2
|
||||
(5 rows)
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.chunk;
|
||||
id | hypertable_id | schema_name | table_name
|
||||
----+---------------+-----------------------+------------------
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk
|
||||
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk
|
||||
(4 rows)
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.hypertable;
|
||||
id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+-------------+------------+------------------------+-------------------------+----------------
|
||||
1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
(1 row)
|
||||
|
||||
SELECT * FROM ONLY chunk_test;
|
||||
time | metric | device_id
|
||||
------+--------+-----------
|
||||
(0 rows)
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
id | hypertable_id | schema_name | table_name | chunk_id | dimension_slice_id | id | dimension_id | range_start | range_end | id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+---------------+-----------------------+------------------+----------+--------------------+----+--------------+-------------+------------+----+---------------+-------------+-------------+---------+------------+--------------------------+-----------------------+-----------------+----+-------------+------------+------------------------+-------------------------+----------------
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 1 | 1 | 1 | 1 | 0 | 10 | 1 | 1 | time | bigint | t | | | | 40 | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
1 | 1 | _timescaledb_internal | _hyper_1_1_chunk | 1 | 2 | 2 | 2 | 1073741823 | 2147483647 | 2 | 1 | device_id | text | f | 2 | _timescaledb_internal | get_partition_for_key | | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | 2 | 1 | 1 | 1 | 0 | 10 | 1 | 1 | time | bigint | t | | | | 40 | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
2 | 1 | _timescaledb_internal | _hyper_1_2_chunk | 2 | 4 | 4 | 2 | 0 | 1073741823 | 2 | 1 | device_id | text | f | 2 | _timescaledb_internal | get_partition_for_key | | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | 3 | 5 | 5 | 1 | 40 | 50 | 1 | 1 | time | bigint | t | | | | 40 | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
3 | 1 | _timescaledb_internal | _hyper_1_3_chunk | 3 | 4 | 4 | 2 | 0 | 1073741823 | 2 | 1 | device_id | text | f | 2 | _timescaledb_internal | get_partition_for_key | | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | 4 | 7 | 7 | 1 | 10 | 40 | 1 | 1 | time | bigint | t | | | | 40 | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
4 | 1 | _timescaledb_internal | _hyper_1_4_chunk | 4 | 4 | 4 | 2 | 0 | 1073741823 | 2 | 1 | device_id | text | f | 2 | _timescaledb_internal | get_partition_for_key | | 1 | public | chunk_test | _timescaledb_internal | _hyper_1 | 2
|
||||
(8 rows)
|
||||
|
||||
-- Test chunk aligning between partitions
|
||||
CREATE TABLE chunk_align_test(
|
||||
time BIGINT,
|
||||
metric INTEGER,
|
||||
device_id TEXT
|
||||
);
|
||||
SELECT * FROM create_hypertable('chunk_align_test', 'time', 'device_id', 2, chunk_time_interval => 10);
|
||||
create_hypertable
|
||||
-------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO chunk_align_test VALUES (1, 1, 'dev1'); -- this should create a 10 wide chunk
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
id | hypertable_id | schema_name | table_name | chunk_id | dimension_slice_id | id | dimension_id | range_start | range_end | id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+---------------+-----------------------+------------------+----------+--------------------+----+--------------+-------------+-----------+----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+----+-------------+------------------+------------------------+-------------------------+----------------
|
||||
5 | 2 | _timescaledb_internal | _hyper_2_5_chunk | 5 | 9 | 9 | 3 | 0 | 10 | 3 | 2 | time | bigint | t | | | | 10 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
(1 row)
|
||||
|
||||
|
||||
SELECT * FROM set_chunk_time_interval('chunk_align_test', 40::bigint);
|
||||
INSERT INTO chunk_test VALUES (8, 24.3, 79669, 1);
|
||||
SELECT set_chunk_time_interval('chunk_test', 5::bigint);
|
||||
set_chunk_time_interval
|
||||
-------------------------
|
||||
|
||||
(1 row)
|
||||
|
||||
INSERT INTO chunk_align_test VALUES (5, 1, 'dev2'); -- this should still create a 10 wide chunk
|
||||
INSERT INTO chunk_align_test VALUES (45, 1, 'dev2'); -- this should create a 40 wide chunk
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
id | hypertable_id | schema_name | table_name | chunk_id | dimension_slice_id | id | dimension_id | range_start | range_end | id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+---------------+-----------------------+------------------+----------+--------------------+----+--------------+-------------+-----------+----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+----+-------------+------------------+------------------------+-------------------------+----------------
|
||||
5 | 2 | _timescaledb_internal | _hyper_2_5_chunk | 5 | 9 | 9 | 3 | 0 | 10 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
6 | 2 | _timescaledb_internal | _hyper_2_6_chunk | 6 | 9 | 9 | 3 | 0 | 10 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
7 | 2 | _timescaledb_internal | _hyper_2_7_chunk | 7 | 13 | 13 | 3 | 40 | 80 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
(3 rows)
|
||||
SELECT * FROM _timescaledb_catalog.dimension;
|
||||
id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length
|
||||
----+---------------+-------------+-------------+---------+------------+--------------------------+-----------------------+-----------------
|
||||
2 | 1 | tag | integer | f | 3 | _timescaledb_internal | get_partition_for_key |
|
||||
1 | 1 | time | integer | t | | | | 5
|
||||
(2 rows)
|
||||
|
||||
--check the cut-to-size with aligned dimensions code
|
||||
INSERT INTO chunk_align_test VALUES (35, 1, 'dev1');
|
||||
INSERT INTO chunk_align_test VALUES (35, 1, 'dev2');
|
||||
INSERT INTO chunk_align_test VALUES (81, 1, 'dev1');
|
||||
INSERT INTO chunk_align_test VALUES (81, 1, 'dev2');
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
id | hypertable_id | schema_name | table_name | chunk_id | dimension_slice_id | id | dimension_id | range_start | range_end | id | hypertable_id | column_name | column_type | aligned | num_slices | partitioning_func_schema | partitioning_func | interval_length | id | schema_name | table_name | associated_schema_name | associated_table_prefix | num_dimensions
|
||||
----+---------------+-----------------------+-------------------+----------+--------------------+----+--------------+-------------+-----------+----+---------------+-------------+-------------+---------+------------+--------------------------+-------------------+-----------------+----+-------------+------------------+------------------------+-------------------------+----------------
|
||||
5 | 2 | _timescaledb_internal | _hyper_2_5_chunk | 5 | 9 | 9 | 3 | 0 | 10 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
6 | 2 | _timescaledb_internal | _hyper_2_6_chunk | 6 | 9 | 9 | 3 | 0 | 10 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
7 | 2 | _timescaledb_internal | _hyper_2_7_chunk | 7 | 13 | 13 | 3 | 40 | 80 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
8 | 2 | _timescaledb_internal | _hyper_2_8_chunk | 8 | 15 | 15 | 3 | 10 | 40 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
9 | 2 | _timescaledb_internal | _hyper_2_9_chunk | 9 | 15 | 15 | 3 | 10 | 40 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
10 | 2 | _timescaledb_internal | _hyper_2_10_chunk | 10 | 19 | 19 | 3 | 80 | 120 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
11 | 2 | _timescaledb_internal | _hyper_2_11_chunk | 11 | 19 | 19 | 3 | 80 | 120 | 3 | 2 | time | bigint | t | | | | 40 | 2 | public | chunk_align_test | _timescaledb_internal | _hyper_2 | 2
|
||||
(7 rows)
|
||||
INSERT INTO chunk_test VALUES (7, 24.3, 11233, 1);
|
||||
INSERT INTO chunk_test VALUES (8, 24.3, 11233, 1);
|
||||
INSERT INTO chunk_test VALUES (10, 24.3, 79669, 1);
|
||||
INSERT INTO chunk_test VALUES (16, 24.3, 79669, 1);
|
||||
SELECT c.table_name AS chunk_name, d.id AS dimension_id, ds.id AS slice_id, range_start, range_end FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
chunk_name | dimension_id | slice_id | range_start | range_end
|
||||
------------------+--------------+----------+-------------+------------
|
||||
_hyper_1_1_chunk | 1 | 1 | 3 | 6
|
||||
_hyper_1_1_chunk | 2 | 2 | 0 | 1073741823
|
||||
_hyper_1_2_chunk | 1 | 1 | 3 | 6
|
||||
_hyper_1_2_chunk | 2 | 3 | 1073741823 | 2147483647
|
||||
_hyper_1_3_chunk | 1 | 4 | 9 | 12
|
||||
_hyper_1_3_chunk | 2 | 3 | 1073741823 | 2147483647
|
||||
_hyper_1_4_chunk | 1 | 5 | 8 | 9
|
||||
_hyper_1_4_chunk | 2 | 6 | 0 | 715827882
|
||||
_hyper_1_5_chunk | 1 | 7 | 6 | 8
|
||||
_hyper_1_5_chunk | 2 | 8 | 715827882 | 1431655764
|
||||
_hyper_1_6_chunk | 1 | 5 | 8 | 9
|
||||
_hyper_1_6_chunk | 2 | 8 | 715827882 | 1431655764
|
||||
_hyper_1_7_chunk | 1 | 4 | 9 | 12
|
||||
_hyper_1_7_chunk | 2 | 6 | 0 | 715827882
|
||||
_hyper_1_8_chunk | 1 | 9 | 15 | 20
|
||||
_hyper_1_8_chunk | 2 | 6 | 0 | 715827882
|
||||
(16 rows)
|
||||
|
||||
|
@ -293,7 +293,7 @@ Indexes:
|
||||
"21-two_Partitions_timeCustom_idx" btree ("timeCustom" DESC)
|
||||
Check constraints:
|
||||
"constraint_2" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 1073741823 AND _timescaledb_internal.get_partition_for_key(device_id) < 2147483647)
|
||||
"constraint_5" CHECK ("timeCustom" >= '1257985728000000000'::bigint AND "timeCustom" < '1257988320000000000'::bigint)
|
||||
"constraint_4" CHECK ("timeCustom" >= '1257985728000000000'::bigint AND "timeCustom" < '1257988320000000000'::bigint)
|
||||
Inherits: "two_Partitions"
|
||||
|
||||
Table "_timescaledb_internal._hyper_1_4_chunk"
|
||||
@ -315,7 +315,7 @@ Indexes:
|
||||
"28-two_Partitions_timeCustom_idx" btree ("timeCustom" DESC)
|
||||
Check constraints:
|
||||
"constraint_1" CHECK ("timeCustom" >= '1257892416000000000'::bigint AND "timeCustom" < '1257895008000000000'::bigint)
|
||||
"constraint_8" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 0 AND _timescaledb_internal.get_partition_for_key(device_id) < 1073741823)
|
||||
"constraint_5" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 0 AND _timescaledb_internal.get_partition_for_key(device_id) < 1073741823)
|
||||
Inherits: "two_Partitions"
|
||||
|
||||
-- Test that renaming hypertable works
|
||||
|
@ -301,7 +301,7 @@ Indexes:
|
||||
"21-two_Partitions_timeCustom_idx" btree ("timeCustom" DESC)
|
||||
Check constraints:
|
||||
"constraint_2" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 1073741823 AND _timescaledb_internal.get_partition_for_key(device_id) < 2147483647)
|
||||
"constraint_5" CHECK ("timeCustom" >= '1257985728000000000'::bigint AND "timeCustom" < '1257988320000000000'::bigint)
|
||||
"constraint_4" CHECK ("timeCustom" >= '1257985728000000000'::bigint AND "timeCustom" < '1257988320000000000'::bigint)
|
||||
Inherits: "two_Partitions"
|
||||
|
||||
Table "_timescaledb_internal._hyper_1_4_chunk"
|
||||
@ -323,7 +323,7 @@ Indexes:
|
||||
"28-two_Partitions_timeCustom_idx" btree ("timeCustom" DESC)
|
||||
Check constraints:
|
||||
"constraint_1" CHECK ("timeCustom" >= '1257892416000000000'::bigint AND "timeCustom" < '1257895008000000000'::bigint)
|
||||
"constraint_8" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 0 AND _timescaledb_internal.get_partition_for_key(device_id) < 1073741823)
|
||||
"constraint_5" CHECK (_timescaledb_internal.get_partition_for_key(device_id) >= 0 AND _timescaledb_internal.get_partition_for_key(device_id) < 1073741823)
|
||||
Inherits: "two_Partitions"
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.chunk;
|
||||
|
@ -43,7 +43,7 @@ SELECT count(*)
|
||||
AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
|
||||
count
|
||||
-------
|
||||
128
|
||||
120
|
||||
(1 row)
|
||||
|
||||
\c postgres
|
||||
@ -67,7 +67,7 @@ SELECT count(*)
|
||||
AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
|
||||
count
|
||||
-------
|
||||
128
|
||||
120
|
||||
(1 row)
|
||||
|
||||
\c single
|
||||
|
@ -33,8 +33,3 @@ FROM _timescaledb_internal.dimension_calculate_default_range_closed(0,2::smallin
|
||||
|
||||
SELECT assert_equal(1073741823::bigint, actual_range_start), assert_equal(2147483647::bigint, actual_range_end)
|
||||
FROM _timescaledb_internal.dimension_calculate_default_range_closed(1073741823,2::smallint) AS res(actual_range_start, actual_range_end);
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
@ -1,81 +1,65 @@
|
||||
\ir include/create_single_db.sql
|
||||
--
|
||||
-- This test will create chunks in two dimenisions, time (x) and
|
||||
-- space (y), where the time dimension is aligned. The figure below
|
||||
-- shows the expected result. The chunk number in the figure
|
||||
-- indicates the creation order.
|
||||
--
|
||||
-- +
|
||||
-- +
|
||||
-- + +-----+ +-----+
|
||||
-- + | 2 | | 3 |
|
||||
-- + | +---+-+ |
|
||||
-- + +-----+ 5 |6+-----+
|
||||
-- + | 1 +---+-+-----+ +---------+
|
||||
-- + | | |4| 7 | | 8 |
|
||||
-- +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-
|
||||
-- 0 5 10 15 20
|
||||
--
|
||||
-- Partitioning:
|
||||
--
|
||||
-- Chunk # | time | space
|
||||
-- 1 | 3 | 2
|
||||
-- 4 | 1 | 3
|
||||
-- 5 | 5 | 3
|
||||
--
|
||||
|
||||
CREATE TABLE chunk_test(
|
||||
time BIGINT,
|
||||
metric INTEGER,
|
||||
device_id TEXT
|
||||
);
|
||||
CREATE TABLE chunk_test(time integer, temp float8, tag integer, color integer);
|
||||
|
||||
-- Test chunk closing/creation
|
||||
SELECT * FROM create_hypertable('chunk_test', 'time', 'device_id', 2, chunk_time_interval => 10);
|
||||
SELECT * FROM _timescaledb_catalog.hypertable;
|
||||
SELECT create_hypertable('chunk_test', 'time', 'tag', 2, chunk_time_interval => 3);
|
||||
|
||||
INSERT INTO chunk_test VALUES (1, 1, 'dev1'),
|
||||
(2, 2, 'dev2'),
|
||||
(45, 2, 'dev2'),
|
||||
(46, 2, 'dev2');
|
||||
INSERT INTO chunk_test VALUES (4, 24.3, 3, 1);
|
||||
|
||||
SELECT * FROM set_chunk_time_interval('chunk_test', 40::bigint);
|
||||
SELECT * FROM _timescaledb_catalog.dimension_slice;
|
||||
|
||||
INSERT INTO chunk_test VALUES(23, 3, 'dev3');
|
||||
INSERT INTO chunk_test VALUES (4, 24.3, 1, 1);
|
||||
INSERT INTO chunk_test VALUES (10, 24.3, 1, 1);
|
||||
|
||||
SELECT * FROM chunk_test order by time, metric, device_id;
|
||||
SELECT * FROM _timescaledb_catalog.chunk;
|
||||
SELECT * FROM _timescaledb_catalog.hypertable;
|
||||
SELECT c.table_name AS chunk_name, d.id AS dimension_id, ds.id AS slice_id, range_start, range_end FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
|
||||
SELECT * FROM ONLY chunk_test;
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
UPDATE _timescaledb_catalog.dimension SET num_slices = 3 WHERE id = 2;
|
||||
SELECT set_chunk_time_interval('chunk_test', 1::bigint);
|
||||
|
||||
INSERT INTO chunk_test VALUES (8, 24.3, 79669, 1);
|
||||
|
||||
-- Test chunk aligning between partitions
|
||||
CREATE TABLE chunk_align_test(
|
||||
time BIGINT,
|
||||
metric INTEGER,
|
||||
device_id TEXT
|
||||
);
|
||||
SELECT * FROM create_hypertable('chunk_align_test', 'time', 'device_id', 2, chunk_time_interval => 10);
|
||||
SELECT set_chunk_time_interval('chunk_test', 5::bigint);
|
||||
|
||||
INSERT INTO chunk_align_test VALUES (1, 1, 'dev1'); -- this should create a 10 wide chunk
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
|
||||
SELECT * FROM set_chunk_time_interval('chunk_align_test', 40::bigint);
|
||||
INSERT INTO chunk_align_test VALUES (5, 1, 'dev2'); -- this should still create a 10 wide chunk
|
||||
INSERT INTO chunk_align_test VALUES (45, 1, 'dev2'); -- this should create a 40 wide chunk
|
||||
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
|
||||
--check the cut-to-size with aligned dimensions code
|
||||
INSERT INTO chunk_align_test VALUES (35, 1, 'dev1');
|
||||
INSERT INTO chunk_align_test VALUES (35, 1, 'dev2');
|
||||
INSERT INTO chunk_align_test VALUES (81, 1, 'dev1');
|
||||
INSERT INTO chunk_align_test VALUES (81, 1, 'dev2');
|
||||
SELECT * FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_align_test'
|
||||
AND d.column_name = 'time'
|
||||
ORDER BY c.id, d.id;
|
||||
SELECT * FROM _timescaledb_catalog.dimension;
|
||||
INSERT INTO chunk_test VALUES (7, 24.3, 11233, 1);
|
||||
INSERT INTO chunk_test VALUES (8, 24.3, 11233, 1);
|
||||
INSERT INTO chunk_test VALUES (10, 24.3, 79669, 1);
|
||||
INSERT INTO chunk_test VALUES (16, 24.3, 79669, 1);
|
||||
|
||||
SELECT c.table_name AS chunk_name, d.id AS dimension_id, ds.id AS slice_id, range_start, range_end FROM _timescaledb_catalog.chunk c
|
||||
LEFT JOIN _timescaledb_catalog.chunk_constraint cc ON (c.id = cc.chunk_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension_slice ds ON (ds.id = cc.dimension_slice_id)
|
||||
LEFT JOIN _timescaledb_catalog.dimension d ON (d.id = ds.dimension_id)
|
||||
LEFT JOIN _timescaledb_catalog.hypertable h ON (d.hypertable_id = h.id)
|
||||
WHERE h.schema_name = 'public' AND h.table_name = 'chunk_test'
|
||||
ORDER BY c.id, d.id;
|
||||
|
Loading…
x
Reference in New Issue
Block a user