Mirror of https://github.com/timescale/timescaledb.git, synced 2025-05-17 19:13:16 +08:00
Refactor chunk creation for future extension
Separates chunk preparation from the metadata update. Separates preparation of constraint names, since there is no overlap between preparing names for dimension constraints and for other constraints. Factors out creation of the JSON string describing the dimension slices of a chunk. This refactoring is preparation for implementing new functionality.
parent cc38ba3241
commit 639aef76a4
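For orientation, here is a sketch of the new creation path assembled from the diff below (it paraphrases the new body of chunk_create_from_hypercube_after_lock and is not an extra change in the commit): the old "create metadata, then create table" pair is split into smaller steps that are composed explicitly.

	/* Insert any new dimension slices into metadata */
	ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);

	/* Build the chunk object from the hypercube's dimensional constraints */
	chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix);
	Assert(chunk != NULL);

	/* Create the chunk table, then record and create its constraints */
	chunk_create_table(chunk, ht);

	chunk_add_constraints(chunk);
	chunk_insert_into_metadata_after_lock(chunk);

	chunk_create_table_constraints(chunk);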
@@ -514,7 +514,7 @@ catalog_get_table(Catalog *catalog, Oid relid)
  * Get the next serial ID for a catalog table, if one exists for the given table.
  */
 TSDLLEXPORT int64
-ts_catalog_table_next_seq_id(Catalog *catalog, CatalogTable table)
+ts_catalog_table_next_seq_id(const Catalog *catalog, CatalogTable table)
 {
 	Oid relid = catalog->tables[table].serial_relid;

@@ -1244,7 +1244,7 @@ catalog_get_index(Catalog *catalog, CatalogTable tableid, int indexid)
 	return (indexid == INVALID_INDEXID) ? InvalidOid : catalog->tables[tableid].index_ids[indexid];
 }

-extern TSDLLEXPORT int64 ts_catalog_table_next_seq_id(Catalog *catalog, CatalogTable table);
+extern TSDLLEXPORT int64 ts_catalog_table_next_seq_id(const Catalog *catalog, CatalogTable table);
 extern Oid ts_catalog_get_cache_proxy_id(Catalog *catalog, CacheType type);

 /* Functions that modify the actual catalog table on disk */
src/chunk.c (75 changed lines)
@@ -209,7 +209,7 @@ ts_chunk_primary_dimension_end(const Chunk *chunk)
 }

 static void
-chunk_insert_relation(Relation rel, Chunk *chunk)
+chunk_insert_relation(Relation rel, const Chunk *chunk)
 {
 	HeapTuple new_tuple;
 	CatalogSecurityContext sec_ctx;
@@ -224,7 +224,7 @@ chunk_insert_relation(Relation rel, Chunk *chunk)
 }

 void
-ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock)
+ts_chunk_insert_lock(const Chunk *chunk, LOCKMODE lock)
 {
 	Catalog *catalog = ts_catalog_get();
 	Relation rel;
@@ -862,7 +862,7 @@ ts_chunk_create_table(Chunk *chunk, Hypertable *ht, const char *tablespacename)
 }

 static List *
-chunk_assign_data_nodes(Chunk *chunk, Hypertable *ht)
+chunk_assign_data_nodes(Chunk *chunk, const Hypertable *ht)
 {
 	List *htnodes;
 	List *chunk_data_nodes = NIL;
@@ -921,7 +921,10 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk)
 }

 /*
- * Create a chunk from the dimensional constraints in the given hypercube.
+ * Create a chunk object from the dimensional constraints in the given hypercube.
+ *
+ * The chunk object is then used to create the actual chunk table and update the
+ * metadata separately.
  *
  * The table name for the chunk can be given explicitly, or generated if
  * table_name is NULL. If the table name is generated, it will use the given
@@ -930,11 +933,11 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk)
  * the chunk.
  */
 static Chunk *
-chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *schema_name,
-								 const char *table_name, const char *prefix)
+chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_name,
+					const char *table_name, const char *prefix)
 {
-	Hyperspace *hs = ht->space;
-	Catalog *catalog = ts_catalog_get();
+	const Hyperspace *hs = ht->space;
+	const Catalog *catalog = ts_catalog_get();
 	CatalogSecurityContext sec_ctx;
 	Chunk *chunk;
 	const char relkind = hypertable_chunk_relkind(ht);
@@ -970,34 +973,25 @@ chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *sc
 	else
 		namestrcpy(&chunk->fd.table_name, table_name);

-	/* Insert chunk */
-	ts_chunk_insert_lock(chunk, RowExclusiveLock);
-
-	/* Insert any new dimension slices */
-	ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
-
-	/* Add metadata for dimensional and inheritable constraints */
-	chunk_add_constraints(chunk);
-	ts_chunk_constraints_insert_metadata(chunk->constraints);
-
 	/* If this is a remote chunk we assign data nodes */
 	if (chunk->relkind == RELKIND_FOREIGN_TABLE)
 		chunk->data_nodes = chunk_assign_data_nodes(chunk, ht);

 	return chunk;
 }

-static Oid
-chunk_create_table_after_lock(Chunk *chunk, Hypertable *ht)
+static void
+chunk_insert_into_metadata_after_lock(const Chunk *chunk)
 {
-	/* Create the actual table relation for the chunk */
-	const char *tablespace = ts_hypertable_select_tablespace_name(ht, chunk);
+	/* Insert chunk */
+	ts_chunk_insert_lock(chunk, RowExclusiveLock);

-	chunk->table_id = ts_chunk_create_table(chunk, ht, tablespace);
-
-	if (!OidIsValid(chunk->table_id))
-		elog(ERROR, "could not create chunk table");
+	/* Add metadata for dimensional and inheritable constraints */
+	ts_chunk_constraints_insert_metadata(chunk->constraints);
+}

+static void
+chunk_create_table_constraints(Chunk *chunk)
+{
 	/* Create the chunk's constraints, triggers, and indexes */
 	ts_chunk_constraints_create(chunk->constraints,
 								chunk->table_id,
@@ -1013,6 +1007,17 @@ chunk_create_table_after_lock(Chunk *chunk, Hypertable *ht)
 								  chunk->fd.id,
 								  chunk->table_id);
 	}
+}
+
+static Oid
+chunk_create_table(Chunk *chunk, Hypertable *ht)
+{
+	/* Create the actual table relation for the chunk */
+	const char *tablespace = ts_hypertable_select_tablespace_name(ht, chunk);
+
+	chunk->table_id = ts_chunk_create_table(chunk, ht, tablespace);
+
+	Assert(OidIsValid(chunk->table_id));

 	return chunk->table_id;
 }
@@ -1034,9 +1039,18 @@ chunk_create_from_hypercube_after_lock(Hypertable *ht, Hypercube *cube, const ch
 {
 	Chunk *chunk;

-	chunk = chunk_create_metadata_after_lock(ht, cube, schema_name, table_name, prefix);
+	/* Insert any new dimension slices into metadata */
+	ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
+
+	chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix);
 	Assert(chunk != NULL);
-	chunk_create_table_after_lock(chunk, ht);
+
+	chunk_create_table(chunk, ht);
+
+	chunk_add_constraints(chunk);
+	chunk_insert_into_metadata_after_lock(chunk);
+
+	chunk_create_table_constraints(chunk);

 	return chunk;
 }
@@ -1614,7 +1628,8 @@ chunk_resurrect(Hypertable *ht, ChunkStub *stub)
 	/* Create data table and related objects */
 	chunk->hypertable_relid = ht->main_table_relid;
 	chunk->relkind = hypertable_chunk_relkind(ht);
-	chunk->table_id = chunk_create_table_after_lock(chunk, ht);
+	chunk->table_id = chunk_create_table(chunk, ht);
+	chunk_create_table_constraints(chunk);

 	if (chunk->relkind == RELKIND_FOREIGN_TABLE)
 		chunk->data_nodes = ts_chunk_data_node_scan_by_chunk_id(chunk->fd.id, ti->mctx);
@@ -119,7 +119,7 @@ extern TSDLLEXPORT Chunk *ts_chunk_get_by_name_with_memory_context(const char *s
 														   const char *table_name,
 														   MemoryContext mctx,
 														   bool fail_if_not_found);
-extern TSDLLEXPORT void ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock);
+extern TSDLLEXPORT void ts_chunk_insert_lock(const Chunk *chunk, LOCKMODE lock);

 extern TSDLLEXPORT Oid ts_chunk_create_table(Chunk *chunk, Hypertable *ht,
 											 const char *tablespacename);
@@ -80,31 +80,29 @@ chunk_constraints_expand(ChunkConstraints *ccs, int16 new_capacity)
 }

 static void
-chunk_constraint_choose_name(Name dst, bool is_dimension, int32 dimension_slice_id,
-							 const char *hypertable_constraint_name, int32 chunk_id)
+chunk_constraint_dimension_choose_name(Name dst, int32 dimension_slice_id)
 {
-	if (is_dimension)
-	{
-		snprintf(NameStr(*dst), NAMEDATALEN, "constraint_%d", dimension_slice_id);
-	}
-	else
-	{
-		char constrname[100];
-		CatalogSecurityContext sec_ctx;
+	snprintf(NameStr(*dst), NAMEDATALEN, "constraint_%d", dimension_slice_id);
+}

-		Assert(hypertable_constraint_name != NULL);
+static void
+chunk_constraint_choose_name(Name dst, const char *hypertable_constraint_name, int32 chunk_id)
+{
+	char constrname[NAMEDATALEN];
+	CatalogSecurityContext sec_ctx;

-		ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
-		snprintf(constrname,
-				 100,
-				 "%d_" INT64_FORMAT "_%s",
-				 chunk_id,
-				 ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_CONSTRAINT),
-				 hypertable_constraint_name);
-		ts_catalog_restore_user(&sec_ctx);
+	Assert(hypertable_constraint_name != NULL);

-		namestrcpy(dst, constrname);
-	}
+	ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
+	snprintf(constrname,
+			 NAMEDATALEN,
+			 "%d_" INT64_FORMAT "_%s",
+			 chunk_id,
+			 ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_CONSTRAINT),
+			 hypertable_constraint_name);
+	ts_catalog_restore_user(&sec_ctx);
+
+	namestrcpy(dst, constrname);
 }

 static ChunkConstraint *
@@ -120,14 +118,16 @@ chunk_constraints_add(ChunkConstraints *ccs, int32 chunk_id, int32 dimension_sli

 	if (NULL == constraint_name)
 	{
-		chunk_constraint_choose_name(&cc->fd.constraint_name,
-									 is_dimension_constraint(cc),
-									 cc->fd.dimension_slice_id,
-									 hypertable_constraint_name,
-									 cc->fd.chunk_id);
-
 		if (is_dimension_constraint(cc))
+		{
+			chunk_constraint_dimension_choose_name(&cc->fd.constraint_name,
+												   cc->fd.dimension_slice_id);
 			namestrcpy(&cc->fd.hypertable_constraint_name, "");
+		}
+		else
+			chunk_constraint_choose_name(&cc->fd.constraint_name,
+										 hypertable_constraint_name,
+										 cc->fd.chunk_id);
 	}
 	else
 		namestrcpy(&cc->fd.constraint_name, constraint_name);
@@ -837,7 +837,7 @@ chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname

 	chunk_id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_chunk_constraint_chunk_id)]);
 	namestrcpy(&new_hypertable_constraint_name, newname);
-	chunk_constraint_choose_name(&new_chunk_constraint_name, false, 0, newname, chunk_id);
+	chunk_constraint_choose_name(&new_chunk_constraint_name, newname, chunk_id);

 	values[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] =
 		NameGetDatum(&new_hypertable_constraint_name);
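As a side note on the naming split above, the two paths produce names of the following shapes. This is an illustration only, not code from the commit; the slice id, chunk id, sequence value and constraint name are invented.

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	char dimension_constraint[64];
	char other_constraint[64];

	/* Dimension constraints are named after the dimension slice id alone */
	snprintf(dimension_constraint, sizeof(dimension_constraint), "constraint_%d", 42);

	/* Other constraints combine the chunk id, a catalog sequence value and the
	 * hypertable constraint name (the "%d_" INT64_FORMAT "_%s" format in the diff) */
	snprintf(other_constraint, sizeof(other_constraint), "%d_%" PRId64 "_%s", 7, (int64_t) 1234, "my_fkey");

	/* Prints: constraint_42 and 7_1234_my_fkey */
	printf("%s\n%s\n", dimension_constraint, other_constraint);
	return 0;
}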
@@ -46,7 +46,7 @@ ts_hypercube_free(Hypercube *hc)

 #if defined(USE_ASSERT_CHECKING)
 static inline bool
-hypercube_is_sorted(Hypercube *hc)
+hypercube_is_sorted(const Hypercube *hc)
 {
 	int i;

@@ -131,7 +131,7 @@ ts_hypercube_slice_sort(Hypercube *hc)
 }

 DimensionSlice *
-ts_hypercube_get_slice_by_dimension_id(Hypercube *hc, int32 dimension_id)
+ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc, int32 dimension_id)
 {
 	DimensionSlice slice = {
 		.fd.dimension_id = dimension_id,
@@ -33,7 +33,7 @@ extern Hypercube *ts_hypercube_from_constraints(ChunkConstraints *constraints, M
 extern int ts_hypercube_find_existing_slices(Hypercube *cube, ScanTupLock *tuplock);
 extern Hypercube *ts_hypercube_calculate_from_point(Hyperspace *hs, Point *p, ScanTupLock *tuplock);
 extern bool ts_hypercubes_collide(Hypercube *cube1, Hypercube *cube2);
-extern TSDLLEXPORT DimensionSlice *ts_hypercube_get_slice_by_dimension_id(Hypercube *hc,
+extern TSDLLEXPORT DimensionSlice *ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc,
 																		  int32 dimension_id);
 extern Hypercube *ts_hypercube_copy(Hypercube *hc);
 extern bool ts_hypercube_equal(Hypercube *hc1, Hypercube *hc2);
@@ -1194,7 +1194,7 @@ ts_hypertable_has_tablespace(Hypertable *ht, Oid tspc_oid)
 }

 static int
-hypertable_get_chunk_slice_ordinal(Hypertable *ht, Hypercube *hc)
+hypertable_get_chunk_slice_ordinal(const Hypertable *ht, const Hypercube *hc)
 {
 	Dimension *dim;
 	DimensionSlice *slice;
@@ -2568,7 +2568,7 @@ assert_chunk_data_nodes_is_a_set(List *chunk_data_nodes)
  * happens similar to tablespaces, i.e., based on dimension type.
  */
 List *
-ts_hypertable_assign_chunk_data_nodes(Hypertable *ht, Hypercube *cube)
+ts_hypertable_assign_chunk_data_nodes(const Hypertable *ht, const Hypercube *cube)
 {
 	List *chunk_data_nodes = NIL;
 	List *available_nodes = ts_hypertable_get_available_data_nodes(ht, true);
@@ -2624,7 +2624,8 @@ get_hypertable_data_node(HypertableDataNode *node)
 }

 static List *
-get_hypertable_data_node_values(Hypertable *ht, hypertable_data_node_filter filter, get_value value)
+get_hypertable_data_node_values(const Hypertable *ht, hypertable_data_node_filter filter,
+								get_value value)
 {
 	List *list = NULL;
 	ListCell *cell;
@@ -2646,7 +2647,7 @@ ts_hypertable_get_data_node_name_list(Hypertable *ht)
 }

 List *
-ts_hypertable_get_available_data_nodes(Hypertable *ht, bool error_if_missing)
+ts_hypertable_get_available_data_nodes(const Hypertable *ht, bool error_if_missing)
 {
 	List *available_nodes = get_hypertable_data_node_values(ht,
 															filter_non_blocked_data_nodes,
@@ -158,10 +158,10 @@ extern TSDLLEXPORT bool ts_hypertable_set_compressed(Hypertable *ht,
 extern TSDLLEXPORT bool ts_hypertable_unset_compressed(Hypertable *ht);
 extern TSDLLEXPORT void ts_hypertable_clone_constraints_to_compressed(Hypertable *ht,
 																	  List *constraint_list);
-extern List *ts_hypertable_assign_chunk_data_nodes(Hypertable *ht, Hypercube *cube);
+extern List *ts_hypertable_assign_chunk_data_nodes(const Hypertable *ht, const Hypercube *cube);
 extern TSDLLEXPORT List *ts_hypertable_get_data_node_name_list(Hypertable *ht);
 extern TSDLLEXPORT List *ts_hypertable_get_data_node_serverids_list(Hypertable *ht);
-extern TSDLLEXPORT List *ts_hypertable_get_available_data_nodes(Hypertable *ht,
+extern TSDLLEXPORT List *ts_hypertable_get_available_data_nodes(const Hypertable *ht,
 																bool error_if_missing);
 extern TSDLLEXPORT List *ts_hypertable_get_available_data_node_server_oids(Hypertable *ht);
 extern TSDLLEXPORT HypertableType ts_hypertable_get_type(Hypertable *ht);
@@ -407,6 +407,16 @@ get_result_datums(Datum *values, bool *nulls, unsigned int numvals, AttInMetadat
 	}
 }

+static const char *
+chunk_api_dimension_slices_json(const Chunk *chunk, const Hypertable *ht)
+{
+	JsonbParseState *ps = NULL;
+	JsonbValue *jv = hypercube_to_jsonb_value(chunk->cube, ht->space, &ps);
+	Jsonb *hcjson = JsonbValueToJsonb(jv);
+
+	return JsonbToCString(NULL, &hcjson->root, ESTIMATE_JSON_STR_SIZE(ht->space->num_dimensions));
+}
+
 /*
  * Create a replica of a chunk on all its assigned data nodes.
  */
@@ -414,12 +424,9 @@ void
 chunk_api_create_on_data_nodes(Chunk *chunk, Hypertable *ht)
 {
 	AsyncRequestSet *reqset = async_request_set_create();
-	JsonbParseState *ps = NULL;
-	JsonbValue *jv = hypercube_to_jsonb_value(chunk->cube, ht->space, &ps);
-	Jsonb *hcjson = JsonbValueToJsonb(jv);
 	const char *params[4] = {
 		quote_qualified_identifier(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name)),
-		JsonbToCString(NULL, &hcjson->root, ESTIMATE_JSON_STR_SIZE(ht->space->num_dimensions)),
+		chunk_api_dimension_slices_json(chunk, ht),
 		NameStr(chunk->fd.schema_name),
 		NameStr(chunk->fd.table_name),
 	};