diff --git a/Makefile b/Makefile
index 04b3d481b..e08a9520a 100644
--- a/Makefile
+++ b/Makefile
@@ -96,11 +96,14 @@ check-sql-files:
 
 install: $(EXT_SQL_FILE)
 
-clean: clean-sql-files
+clean: clean-sql-files clean-extra
 
 clean-sql-files:
 	@rm -f sql/$(EXTENSION)--*.sql
 
+clean-extra:
+	@rm -f src/*~
+
 package: clean $(EXT_SQL_FILE)
 	@mkdir -p package/lib
 	@mkdir -p package/extension
diff --git a/sql/chunk.sql b/sql/chunk.sql
index ba29d6cb0..dbc34febe 100644
--- a/sql/chunk.sql
+++ b/sql/chunk.sql
@@ -169,8 +169,10 @@ BEGIN
     SELECT free_slice.range_start
     INTO overlap_value
     FROM _timescaledb_catalog.chunk c
-    INNER JOIN _timescaledb_catalog.chunk_constraint cc ON (cc.chunk_id = c.id)
-    INNER JOIN _timescaledb_catalog.dimension_slice free_slice ON (free_slice.id = cc.dimension_slice_id AND free_slice.dimension_id = free_dimension_id)
+    INNER JOIN _timescaledb_catalog.chunk_constraint cc
+    ON (cc.chunk_id = c.id)
+    INNER JOIN _timescaledb_catalog.dimension_slice free_slice
+    ON (free_slice.id = cc.dimension_slice_id AND free_slice.dimension_id = free_dimension_id)
     WHERE c.id = (
         SELECT _timescaledb_internal.chunk_id_get_by_dimensions(free_dimension_id || fixed_dimension_ids,
@@ -190,11 +192,7 @@ BEGIN
 END
 $BODY$;
 
-
-
-
 -- creates the row in the chunk table. Prerequisite: appropriate lock.
--- static
 CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_create_after_lock(
     dimension_ids INTEGER[],
     dimension_values BIGINT[]
 )
@@ -295,34 +293,3 @@ BEGIN
     RETURN chunk_row;
 END
 $BODY$;
-
--- Return a chunk, creating one if it doesn't exist.
--- This is the only non-static function in this file.
-CREATE OR REPLACE FUNCTION _timescaledb_internal.chunk_get_or_create(
-    dimension_ids INTEGER[],
-    dimension_values BIGINT[]
-)
-    RETURNS _timescaledb_catalog.chunk LANGUAGE PLPGSQL VOLATILE AS
-$BODY$
-DECLARE
-    chunk_row _timescaledb_catalog.chunk;
-    dimension_ids INTEGER[];
-    dimension_values bigint[];
-BEGIN
-    CASE WHEN space_dimension_id IS NOT NULL AND space_dimension_id <> 0 THEN
-        dimension_ids := ARRAY[time_dimension_id, space_dimension_id];
-        dimension_values := ARRAY[time_value, space_value];
-    ELSE
-        dimension_ids := ARRAY[time_dimension_id];
-        dimension_values := ARRAY[time_value];
-    END CASE;
-
-    chunk_row := _timescaledb_internal.chunk_get(dimension_ids, dimension_values);
-
-    IF chunk_row IS NULL THEN
-        chunk_row := _timescaledb_internal.chunk_create(dimension_ids, dimension_values);
-    END IF;
-
-    RETURN chunk_row;
-END
-$BODY$;
diff --git a/sql/ddl_api.sql b/sql/ddl_api.sql
index 0da048a58..4aba97929 100644
--- a/sql/ddl_api.sql
+++ b/sql/ddl_api.sql
@@ -10,7 +10,6 @@
 -- associated_table_prefix - (Optional) Prefix for internal hypertable table names
 -- chunk_time_interval - (Optional) Initial time interval for a chunk
 -- create_default_indexes - (Optional) Whether or not to create the default indexes.
--- TODO: order of params doesn't match docs.
 CREATE OR REPLACE FUNCTION create_hypertable(
     main_table              REGCLASS,
     time_column_name        NAME,
diff --git a/sql/tables.sql b/sql/tables.sql
index 499693b8d..808256095 100644
--- a/sql/tables.sql
+++ b/sql/tables.sql
@@ -1,5 +1,5 @@
 -- This file contains table definitions for various abstractions and data
--- structures for representing hypertables and lower level concepts.
+-- structures for representing hypertables and lower-level concepts.
 
 -- Hypertable
 -- ==========
@@ -32,16 +32,10 @@
 -- reconfigured, but the new partitioning only affects newly created
 -- chunks.
 --
--- NOTE: Due to current restrictions, only two dimensions are allowed,
--- typically one open (time) and one closed (space) dimension.
+-- NOTE: Due to current restrictions, a maximum of two dimensions are
+-- allowed, typically one open (time) and one closed (space)
+-- dimension.
 --
---
--- Schema notes
----------------
--- The table representing the hypertable is named by `schema_name`.`table_name`
---
--- The name and type of the time column (used to partition on time) are defined
--- in `time_column_name` and `time_column_type`.
 CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable (
     id                      SERIAL    PRIMARY KEY,
     schema_name             NAME      NOT NULL CHECK (schema_name != '_timescaledb_catalog'),
@@ -55,6 +49,8 @@ CREATE TABLE IF NOT EXISTS _timescaledb_catalog.hypertable (
 SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.hypertable', '');
 SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.hypertable','id'), '');
 
+-- The tablespace table maps tablespaces to hypertables.
+-- This allows spreading a hypertable's chunks across multiple disks.
 CREATE TABLE IF NOT EXISTS _timescaledb_catalog.tablespace (
     id             SERIAL PRIMARY KEY,
     hypertable_id  INT    NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
@@ -63,9 +59,10 @@ CREATE TABLE IF NOT EXISTS _timescaledb_catalog.tablespace (
 );
 SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.tablespace', '');
 
-CREATE TABLE _timescaledb_catalog.dimension (
+-- A dimension represents an axis along which data is partitioned.
+CREATE TABLE _timescaledb_catalog.dimension (
     id            SERIAL   NOT NULL PRIMARY KEY,
-    hypertable_id INTEGER  NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
+    hypertable_id INTEGER  NOT NULL  REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
     column_name   NAME     NOT NULL,
     column_type   REGTYPE  NOT NULL,
     aligned       BOOLEAN  NOT NULL,
@@ -87,12 +84,12 @@ CREATE INDEX ON _timescaledb_catalog.dimension(hypertable_id);
 SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension', '');
 SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension','id'), '');
 
-
+-- A dimension slice defines a keyspace range along a dimension axis.
 CREATE TABLE _timescaledb_catalog.dimension_slice (
     id           SERIAL  NOT NULL PRIMARY KEY,
     dimension_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.dimension(id) ON DELETE CASCADE,
     range_start  BIGINT  NOT NULL CHECK (range_start >= 0),
-    range_end    BIGINT  NOT NULL CHECK (range_end >= 0),
+    range_end    BIGINT  NOT NULL  CHECK (range_end >= 0),
     CHECK (range_start <= range_end),
     UNIQUE (dimension_id, range_start, range_end)
 );
@@ -100,20 +97,25 @@ CREATE INDEX ON _timescaledb_catalog.dimension_slice(dimension_id, range_start,
 SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.dimension_slice', '');
 SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.dimension_slice','id'), '');
 
-
--- Represent a chunk of data, which is data in a hypertable that is
--- partitioned by both the partition_column and time.
+-- A chunk is a partition (hypercube) in an N-dimensional
+-- hyperspace. Each chunk is associated with N constraints that define
+-- the chunk's hypercube. Tuples that fall within the chunk's
+-- hypercube are stored in the chunk's data table, as given by
+-- 'schema_name' and 'table_name'.
 CREATE TABLE IF NOT EXISTS _timescaledb_catalog.chunk (
     id            SERIAL NOT NULL PRIMARY KEY,
     hypertable_id INT    NOT NULL REFERENCES _timescaledb_catalog.hypertable(id) ON DELETE CASCADE,
     schema_name   NAME   NOT NULL,
-    table_name    NAME   NOT NULL,
+    table_name    NAME   NOT NULL ,
     UNIQUE (schema_name, table_name)
 );
 CREATE INDEX ON _timescaledb_catalog.chunk(hypertable_id);
 SELECT pg_catalog.pg_extension_config_dump('_timescaledb_catalog.chunk', '');
 SELECT pg_catalog.pg_extension_config_dump(pg_get_serial_sequence('_timescaledb_catalog.chunk','id'), '');
 
+-- A chunk constraint maps a dimension slice to a chunk. Each
+-- constraint associated with a chunk will also be a table constraint
+-- on the chunk's data table.
 CREATE TABLE _timescaledb_catalog.chunk_constraint (
     chunk_id           INTEGER NOT NULL REFERENCES _timescaledb_catalog.chunk(id) ON DELETE CASCADE,
     dimension_slice_id INTEGER NOT NULL REFERENCES _timescaledb_catalog.dimension_slice(id) ON DELETE CASCADE,
diff --git a/sql/tablespace.sql b/sql/tablespace.sql
index 7863700fb..05ad2421b 100644
--- a/sql/tablespace.sql
+++ b/sql/tablespace.sql
@@ -1,3 +1,6 @@
+-- select_tablespace() is used to assign a tablespace to a chunk. A
+-- tablespace is selected from a set of tablespaces associated with
+-- the chunk's hypertable, if any.
 CREATE OR REPLACE FUNCTION _timescaledb_internal.select_tablespace(
     hypertable_id INTEGER,
     chunk_id      INTEGER
@@ -54,7 +57,8 @@ BEGIN
     WHERE dimension_slices[i] = chunk_slice_id
     INTO STRICT chunk_slice_index;
 
-    -- Use the chunk's dimension slice index to pick a tablespace in the tablespaces array
+    -- Use the chunk's dimension slice index to pick a tablespace in
+    -- the tablespaces array
     RETURN tablespaces[chunk_slice_index % array_length(tablespaces, 1) + 1];
 END
 $BODY$;
diff --git a/src/catalog.c b/src/catalog.c
index 59fd7cb27..cc794d415 100644
--- a/src/catalog.c
+++ b/src/catalog.c
@@ -59,7 +59,7 @@ const static TableIndexDef catalog_table_index_definitions[_MAX_CATALOG_TABLES]
 };
 
 /* Names for proxy tables used for cache invalidation. Must match names in
- * sql/common/caches.sql */
+ * sql/cache.sql */
 static const char *cache_proxy_table_names[_MAX_CACHE_TYPES] = {
     [CACHE_TYPE_HYPERTABLE] = "cache_inval_hypertable",
     [CACHE_TYPE_CHUNK] = "cache_inval_chunk",
diff --git a/src/chunk.c b/src/chunk.c
index 5aad16da6..2e9b2cb6c 100644
--- a/src/chunk.c
+++ b/src/chunk.c
@@ -42,7 +42,7 @@ chunk_create_from_tuple(HeapTuple tuple, int16 num_constraints)
 }
 
 Chunk *
-chunk_create_new(Hyperspace *hs, Point *p)
+chunk_create(Hyperspace *hs, Point *p)
 {
     Chunk      *chunk;
 
diff --git a/src/chunk.h b/src/chunk.h
index 0f1f488cf..276187575 100644
--- a/src/chunk.h
+++ b/src/chunk.h
@@ -62,7 +62,7 @@ typedef struct ChunkScanEntry
 } ChunkScanEntry;
 
 extern Chunk *chunk_create_from_tuple(HeapTuple tuple, int16 num_constraints);
-extern Chunk *chunk_create_new(Hyperspace *hs, Point *p);
+extern Chunk *chunk_create(Hyperspace *hs, Point *p);
 extern Chunk *chunk_create_stub(int32 id, int16 num_constraints);
 extern bool chunk_add_constraint(Chunk *chunk, ChunkConstraint *constraint);
 extern bool chunk_add_constraint_from_tuple(Chunk *chunk, HeapTuple constraint_tuple);
diff --git a/src/chunk_constraint.c b/src/chunk_constraint.c
index 4e4ee0dc7..7a94dcc90 100644
--- a/src/chunk_constraint.c
+++ b/src/chunk_constraint.c
@@ -29,19 +29,14 @@ chunk_constraint_from_tuple(HeapTuple tuple)
     return chunk_constraint_from_form_data((Form_chunk_constraint) GETSTRUCT(tuple));
 }
 
-typedef struct ChunkConstraintCtx
-{
-    Chunk      *chunk;
-} ChunkConstraintCtx;
-
 static bool
 chunk_constraint_tuple_found(TupleInfo *ti, void *data)
 {
-    ChunkConstraintCtx *ctx = data;
+    Chunk      *chunk = data;
 
-    chunk_constraint_fill(&ctx->chunk->constraints[ctx->chunk->num_constraints++], ti->tuple);
+    chunk_constraint_fill(&chunk->constraints[chunk->num_constraints++], ti->tuple);
 
-    if (ctx->chunk->capacity == ctx->chunk->num_constraints)
+    if (chunk->capacity == chunk->num_constraints)
         return false;
 
     return true;
@@ -58,9 +53,6 @@ chunk_constraint_scan_by_chunk_id(Chunk *chunk)
 {
     Catalog    *catalog = catalog_get();
     ScanKeyData scankey[1];
-    ChunkConstraintCtx data = {
-        .chunk = chunk,
-    };
     int         num_found;
     ScannerCtx  scanCtx = {
         .table = catalog->tables[CHUNK_CONSTRAINT].id,
@@ -68,7 +60,7 @@ chunk_constraint_scan_by_chunk_id(Chunk *chunk)
         .scantype = ScannerTypeIndex,
         .nkeys = 1,
         .scankey = scankey,
-        .data = &data,
+        .data = chunk,
         .tuple_found = chunk_constraint_tuple_found,
         .lockmode = AccessShareLock,
         .scandirection = ForwardScanDirection,
@@ -114,7 +106,7 @@ chunk_constraint_dimension_id_tuple_found(TupleInfo *ti, void *data)
 
     /*
      * If the chunk has N constraints, it is the chunk we are looking for and
-     * can abort the scan
+     * the scan can be aborted.
      */
     if (chunk->num_constraints == ctx->num_dimensions)
         return false;
diff --git a/src/dimension.c b/src/dimension.c
index b03eb7b71..3f8047a0a 100644
--- a/src/dimension.c
+++ b/src/dimension.c
@@ -38,7 +38,6 @@ static Hyperspace *
 hyperspace_create(int32 hypertable_id, Oid main_table_relid, uint16 num_dimensions)
 {
     Hyperspace *hs = palloc0(HYPERSPACE_SIZE(num_dimensions));
-
     hs->hypertable_id = hypertable_id;
     hs->main_table_relid = main_table_relid;
     hs->capacity = num_dimensions;
@@ -94,29 +93,11 @@ static Point *
 point_create(int16 num_dimensions)
 {
     Point      *p = palloc0(POINT_SIZE(num_dimensions));
-
     p->cardinality = num_dimensions;
     p->num_closed = p->num_open = 0;
     return p;
 }
 
-const char *
-point_to_string(Point *p)
-{
-    char       *buf = palloc(100);
-    int         i,
-                j = 1;
-
-    buf[0] = '(';
-
-    for (i = 0; i < p->cardinality; i++)
-        j += snprintf(buf + j, 100, "" INT64_FORMAT ",", p->coordinates[i]);
-
-    buf[j - 1] = ')';
-
-    return buf;
-}
-
 Point *
 hyperspace_calculate_point(Hyperspace *hs, HeapTuple tuple, TupleDesc tupdesc)
 {
diff --git a/src/dimension.h b/src/dimension.h
index d5ac2e275..0e63bb2a8 100644
--- a/src/dimension.h
+++ b/src/dimension.h
@@ -70,18 +70,7 @@ typedef struct Point
 } Point;
 
 #define POINT_SIZE(cardinality) \
     (sizeof(Point) + (sizeof(int64) * (cardinality)))
 
-#define point_coordinate_is_in_slice(slice, coord) \
-    (coord >= (slice)->range_start && coord < (slice)->range_end)
-
-#define point_get_open_dimension_coordinate(p, i) \
-    (p)->coordinates[i]
-
-#define point_get_closed_dimension_coordinate(p, i) \
-    (p)->coordinates[(p)->num_open + i]
-
 extern Hyperspace *dimension_scan(int32 hypertable_id, Oid main_table_relid, int16 num_dimension);
 extern Point *hyperspace_calculate_point(Hyperspace *h, HeapTuple tuple, TupleDesc tupdesc);
-extern const char *point_to_string(Point *p);
-
 #endif   /* TIMESCALEDB_DIMENSION_H */
diff --git a/src/dimension_slice.c b/src/dimension_slice.c
index 6ec604300..8bea55359 100644
--- a/src/dimension_slice.c
+++ b/src/dimension_slice.c
@@ -33,7 +33,6 @@ static inline Hypercube *
 hypercube_alloc(int16 num_dimensions)
 {
     Hypercube  *hc = palloc0(HYPERCUBE_SIZE(num_dimensions));
-
     hc->capacity = num_dimensions;
     return hc;
 }
@@ -94,8 +93,8 @@ dimension_slice_scan(int32 dimension_id, int64 coordinate)
     };
 
     /*
-     * Perform an index scan for slice matching the dimension's ID and which
-     * encloses the coordinate
+     * Perform an index scan for slices matching the dimension's ID and which
+     * encloses the coordinate.
     */
    ScanKeyInit(&scankey[0], Anum_dimension_slice_dimension_id_range_start_range_end_idx_dimension_id,
                BTEqualStrategyNumber, F_INT4EQ, Int32GetDatum(dimension_id));
@@ -288,8 +287,10 @@ dimension_vec_add_slice_sort(DimensionVec **vecptr, DimensionSlice *slice)
 DimensionSlice *
 dimension_vec_find_slice(DimensionVec *vec, int64 coordinate)
 {
-    DimensionSlice **res = bsearch(&coordinate, vec->slices, vec->num_slices,
-                                   sizeof(DimensionSlice *), cmp_coordinate_and_slice);
+    DimensionSlice **res;
+
+    res = bsearch(&coordinate, vec->slices, vec->num_slices,
+                  sizeof(DimensionSlice *), cmp_coordinate_and_slice);
 
     if (res == NULL)
         return NULL;
diff --git a/src/dimension_slice.h b/src/dimension_slice.h
index 088eaf96a..d74fc2d9a 100644
--- a/src/dimension_slice.h
+++ b/src/dimension_slice.h
@@ -14,7 +14,6 @@ typedef struct DimensionSlice
     DimensionType type;
     void        (*storage_free) (void *);
     void       *storage;
-    //used in the cache
 } DimensionSlice;
 
 /*
diff --git a/src/hypertable.c b/src/hypertable.c
index 7f33c0497..fc30dbc37 100644
--- a/src/hypertable.c
+++ b/src/hypertable.c
@@ -37,14 +37,15 @@ hypertable_get_chunk(Hypertable *h, Point *point)
 
     /*
      * chunk_find() must execute on the transaction memory context since
-     * it allocates a lot of transient data.
+     * it allocates a lot of transient data. We don't want this allocated
+     * on the cache's memory context.
      */
     chunk = chunk_find(h->space, point);
 
     old = MemoryContextSwitchTo(subspace_store_mcxt(h->chunk_cache));
 
     if (NULL == chunk)
-        chunk = chunk_create_new(h->space, point);
+        chunk = chunk_create(h->space, point);
     else
         /* Make a copy which lives in the chunk cache's memory context */
         chunk = chunk_copy(chunk);
diff --git a/src/hypertable_cache.c b/src/hypertable_cache.c
index 614fcdcdc..293e8386e 100644
--- a/src/hypertable_cache.c
+++ b/src/hypertable_cache.c
@@ -72,13 +72,10 @@ hypertable_cache_create()
 
 static Cache *hypertable_cache_current = NULL;
 
-/* Column numbers for 'hypertable' table in sql/common/tables.sql */
-
 static bool
 hypertable_tuple_found(TupleInfo *ti, void *data)
 {
     HypertableNameCacheEntry *entry = data;
-
     entry->hypertable = hypertable_from_tuple(ti->tuple);
     return false;
 }
diff --git a/src/insert.c b/src/insert.c
index 3701c70dc..81262d9a1 100644
--- a/src/insert.c
+++ b/src/insert.c
@@ -39,9 +39,8 @@ PG_FUNCTION_INFO_V1(insert_main_table_trigger_after);
 
 /*
  * This row-level trigger is called for every row INSERTed into a hypertable. We
- * use it to redirect inserted tuples to the correct hypertable chunk in space
- * and time.
- *
+ * use it to redirect inserted tuples to the correct hypertable chunk in an
+ * N-dimensional hyperspace.
 */
 Datum
 insert_main_table_trigger(PG_FUNCTION_ARGS)
@@ -80,6 +79,7 @@ insert_main_table_trigger(PG_FUNCTION_ARGS)
         /* Find or create the insert state matching the point */
         cstate = insert_statement_state_get_insert_chunk_state(insert_statement_state, ht->space, point);
 
+        /* Insert the tuple into the chunk */
         insert_chunk_state_insert_tuple(cstate, tuple);
     }
     PG_CATCH();
diff --git a/src/insert_statement_state.c b/src/insert_statement_state.c
index 06339e58a..835809326 100644
--- a/src/insert_statement_state.c
+++ b/src/insert_statement_state.c
@@ -54,13 +54,12 @@ static void
 destroy_insert_chunk_state(void *ics_ptr)
 {
     InsertChunkState *ics = ics_ptr;
-
     insert_chunk_state_destroy(ics);
 }
 
 /*
- * Get an insert context to the chunk corresponding to the partition and
- * timepoint of a tuple.
+ * Get the insert state for the chunk that matches the given point in the
+ * partitioned hyperspace.
 */
 extern InsertChunkState *
 insert_statement_state_get_insert_chunk_state(InsertStatementState *state, Hyperspace *hs, Point *point)
diff --git a/src/metadata_queries.c b/src/metadata_queries.c
index db8e3dbf3..81243f379 100644
--- a/src/metadata_queries.c
+++ b/src/metadata_queries.c
@@ -43,22 +43,9 @@ prepare_plan(const char *src, int nargs, Oid *argtypes)
         return plan; \
     }
 
-/*
- * Retrieving chunks:
- *
- * Locked chunk retrieval has to occur on every row. So we have a fast and slowpath.
- * The fastpath retrieves and locks the chunk only if it already exists locally. The
- * fastpath is faster since it does not call a plpgsql function but calls sql directly.
- * This was found to make a performance difference in tests.
- *
- * The slowpath calls get_or_create_chunk(), and is called only if the fastpath returned no rows.
- *
- */
 #define INT8ARRAYOID 1016
-
 #define CHUNK_CREATE_ARGS (Oid[]) {INT4ARRAYOID, INT8ARRAYOID}
-#define CHUNK_CREATE "SELECT * \
-    FROM _timescaledb_internal.chunk_create($1, $2)"
+#define CHUNK_CREATE "SELECT * FROM _timescaledb_internal.chunk_create($1, $2)"
 
 /* plan for creating a chunk via create_chunk(). */
 DEFINE_PLAN(create_chunk_plan, CHUNK_CREATE, 2, CHUNK_CREATE_ARGS)
diff --git a/src/partitioning.c b/src/partitioning.c
index 598ac5870..66851918b 100644
--- a/src/partitioning.c
+++ b/src/partitioning.c
@@ -4,11 +4,10 @@
 #include
 #include
 #include
+#include
 #include
 
 #include "partitioning.h"
-#include "metadata_queries.h"
-#include "scanner.h"
 #include "catalog.h"
 #include "utils.h"
 
@@ -20,9 +19,7 @@ partitioning_func_set_func_fmgr(PartitioningFunc *pf)
                                  1, NULL, false, false, false);
 
     if (funclist == NULL || funclist->next)
-    {
         elog(ERROR, "Could not resolve the partitioning function");
-    }
 
     fmgr_info_cxt(funclist->oid, &pf->func_fmgr, CurrentMemoryContext);
 }
@@ -64,9 +61,7 @@ partitioning_info_create(int num_partitions,
     strncpy(pi->column, partcol, NAMEDATALEN);
 
     if (schema != NULL)
-    {
         strncpy(pi->partfunc.schema, schema, NAMEDATALEN);
-    }
 
     partitioning_func_set_func_fmgr(&pi->partfunc);
     partitioning_info_set_textfunc_fmgr(pi, relid);
@@ -94,17 +89,14 @@ partitioning_func_apply_tuple(PartitioningInfo *pinfo, HeapTuple tuple, TupleDes
     value = heap_getattr(tuple, pinfo->column_attnum, desc, &isnull);
 
     if (isnull)
-    {
         return 0;
-    }
 
     return partitioning_func_apply(pinfo, value);
 }
 
 /* _timescaledb_catalog.get_partition_for_key(key TEXT) RETURNS INT */
-Datum       get_partition_for_key(PG_FUNCTION_ARGS);
-
 PG_FUNCTION_INFO_V1(get_partition_for_key);
+
 Datum
 get_partition_for_key(PG_FUNCTION_ARGS)
 {
diff --git a/src/subspace_store.c b/src/subspace_store.c
index f3c4bfa24..a5999c6ca 100644
--- a/src/subspace_store.c
+++ b/src/subspace_store.c
@@ -72,7 +72,6 @@ subspace_store_add(SubspaceStore *cache, const Hypercube *hc,
     if (match == NULL)
     {
         DimensionSlice *copy = dimension_slice_copy(target);
-
         dimension_vec_add_slice_sort(vecptr, copy);
         match = copy;
     }
diff --git a/src/subspace_store.h b/src/subspace_store.h
index 95f4e57db..93d72eb65 100644
--- a/src/subspace_store.h
+++ b/src/subspace_store.h
@@ -1,21 +1,17 @@
 #ifndef TIMESCALEDB_SUBSPACE_STORE_H
 #define TIMESCALEDB_SUBSPACE_STORE_H
+
 #include
 
-#include "dimension.h"
-#include "dimension_slice.h"
-
+typedef struct Point Point;
+typedef struct Hypercube Hypercube;
 typedef struct SubspaceStore SubspaceStore;
 
 extern SubspaceStore *subspace_store_init(int16 num_dimensions, MemoryContext mcxt);
-
 extern void subspace_store_add(SubspaceStore *cache, const Hypercube *hc,
                                void *end_store, void (*end_store_free) (void *));
-
 extern void *subspace_store_get(SubspaceStore *cache, Point *target);
-
 extern void subspace_store_free(SubspaceStore *cache);
-
 extern MemoryContext subspace_store_mcxt(SubspaceStore *cache);
 
 #endif   /* TIMESCALEDB_SUBSPACE_STORE_H */
diff --git a/test/expected/pg_dump.out b/test/expected/pg_dump.out
index bcb2446ab..c1b7241d0 100644
--- a/test/expected/pg_dump.out
+++ b/test/expected/pg_dump.out
@@ -42,7 +42,7 @@ SELECT count(*)
   AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
  count 
 -------
-   108
+   107
 (1 row)
 
 \c postgres
@@ -66,7 +66,7 @@ SELECT count(*)
   AND refobjid = (SELECT oid FROM pg_extension WHERE extname = 'timescaledb');
  count 
 -------
-   108
+   107
 (1 row)
 
 \c single
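
For illustration only, the modulo indexing documented in sql/tablespace.sql above can be exercised on its own. The plpgsql sketch below is hypothetical and not part of the patch: the tablespace names and the loop over slice indexes are invented; it only demonstrates how a chunk's 1-based dimension-slice index is mapped onto a 1-based tablespace array, as in select_tablespace().

DO $$
DECLARE
    -- Invented example data: three tablespaces attached to a hypertable.
    tablespaces TEXT[] := ARRAY['disk1', 'disk2', 'disk3'];
BEGIN
    -- Walk a few consecutive slice indexes and show which tablespace each maps to.
    FOR chunk_slice_index IN 1..5 LOOP
        RAISE NOTICE 'slice index % -> tablespace %',
            chunk_slice_index,
            -- Same expression as in select_tablespace(): wrap around the array (1-based).
            tablespaces[chunk_slice_index % array_length(tablespaces, 1) + 1];
    END LOOP;
END
$$;

With three tablespaces, slice indexes 1 through 5 map to disk2, disk3, disk1, disk2, disk3, so neighboring slices along the chosen dimension land on different tablespaces.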