Refactor chunk creation for future extension

Separates chunk preparation from the metadata update. Separates the
preparation of constraint names, since there is no overlap between
preparing names for dimension constraints and for other constraints.
Factors out creation of the JSON string describing the dimension
slices of a chunk.

This refactoring is preparation for implementing new functionality.
Author: Ruslan Fomkin (committed by Ruslan Fomkin)
Date:   2021-03-24 09:48:50 +01:00
Commit: 639aef76a4
Parent: cc38ba3241
10 changed files with 97 additions and 74 deletions
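
With the pieces separated, chunk creation becomes a sequence of smaller steps. In rough outline (paraphrased from chunk_create_from_hypercube_after_lock in the diff below; not a verbatim copy of the function body):

    /* insert any new dimension slices into the metadata */
    ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);

    /* build the in-memory chunk object from the hypercube */
    chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix);

    /* create the chunk table, then record metadata and create table constraints */
    chunk_create_table(chunk, ht);
    chunk_add_constraints(chunk);
    chunk_insert_into_metadata_after_lock(chunk);
    chunk_create_table_constraints(chunk);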


@@ -514,7 +514,7 @@ catalog_get_table(Catalog *catalog, Oid relid)
  * Get the next serial ID for a catalog table, if one exists for the given table.
  */
 TSDLLEXPORT int64
-ts_catalog_table_next_seq_id(Catalog *catalog, CatalogTable table)
+ts_catalog_table_next_seq_id(const Catalog *catalog, CatalogTable table)
 {
     Oid relid = catalog->tables[table].serial_relid;


@@ -1244,7 +1244,7 @@ catalog_get_index(Catalog *catalog, CatalogTable tableid, int indexid)
     return (indexid == INVALID_INDEXID) ? InvalidOid : catalog->tables[tableid].index_ids[indexid];
 }
 
-extern TSDLLEXPORT int64 ts_catalog_table_next_seq_id(Catalog *catalog, CatalogTable table);
+extern TSDLLEXPORT int64 ts_catalog_table_next_seq_id(const Catalog *catalog, CatalogTable table);
 extern Oid ts_catalog_get_cache_proxy_id(Catalog *catalog, CacheType type);
 
 /* Functions that modify the actual catalog table on disk */


@@ -209,7 +209,7 @@ ts_chunk_primary_dimension_end(const Chunk *chunk)
 }
 
 static void
-chunk_insert_relation(Relation rel, Chunk *chunk)
+chunk_insert_relation(Relation rel, const Chunk *chunk)
 {
     HeapTuple new_tuple;
     CatalogSecurityContext sec_ctx;

@@ -224,7 +224,7 @@ chunk_insert_relation(Relation rel, Chunk *chunk)
 }
 
 void
-ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock)
+ts_chunk_insert_lock(const Chunk *chunk, LOCKMODE lock)
 {
     Catalog *catalog = ts_catalog_get();
     Relation rel;

@@ -862,7 +862,7 @@ ts_chunk_create_table(Chunk *chunk, Hypertable *ht, const char *tablespacename)
 }
 
 static List *
-chunk_assign_data_nodes(Chunk *chunk, Hypertable *ht)
+chunk_assign_data_nodes(Chunk *chunk, const Hypertable *ht)
 {
     List *htnodes;
     List *chunk_data_nodes = NIL;
@@ -921,7 +921,10 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk)
 }
 
 /*
- * Create a chunk from the dimensional constraints in the given hypercube.
+ * Create a chunk object from the dimensional constraints in the given hypercube.
+ *
+ * The chunk object is then used to create the actual chunk table and update the
+ * metadata separately.
  *
  * The table name for the chunk can be given explicitly, or generated if
  * table_name is NULL. If the table name is generated, it will use the given

@@ -930,11 +933,11 @@ ts_chunk_get_data_node_name_list(const Chunk *chunk)
  * the chunk.
  */
 static Chunk *
-chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *schema_name,
+chunk_create_object(const Hypertable *ht, Hypercube *cube, const char *schema_name,
                     const char *table_name, const char *prefix)
 {
-    Hyperspace *hs = ht->space;
-    Catalog *catalog = ts_catalog_get();
+    const Hyperspace *hs = ht->space;
+    const Catalog *catalog = ts_catalog_get();
     CatalogSecurityContext sec_ctx;
     Chunk *chunk;
     const char relkind = hypertable_chunk_relkind(ht);
@@ -970,34 +973,25 @@ chunk_create_metadata_after_lock(Hypertable *ht, Hypercube *cube, const char *sc
     else
         namestrcpy(&chunk->fd.table_name, table_name);
 
-    /* Insert chunk */
-    ts_chunk_insert_lock(chunk, RowExclusiveLock);
-
-    /* Insert any new dimension slices */
-    ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
-
-    /* Add metadata for dimensional and inheritable constraints */
-    chunk_add_constraints(chunk);
-    ts_chunk_constraints_insert_metadata(chunk->constraints);
-
-    /* If this is a remote chunk we assign data nodes */
     if (chunk->relkind == RELKIND_FOREIGN_TABLE)
         chunk->data_nodes = chunk_assign_data_nodes(chunk, ht);
 
     return chunk;
 }
 
-static Oid
-chunk_create_table_after_lock(Chunk *chunk, Hypertable *ht)
+static void
+chunk_insert_into_metadata_after_lock(const Chunk *chunk)
 {
-    /* Create the actual table relation for the chunk */
-    const char *tablespace = ts_hypertable_select_tablespace_name(ht, chunk);
-
-    chunk->table_id = ts_chunk_create_table(chunk, ht, tablespace);
-
-    if (!OidIsValid(chunk->table_id))
-        elog(ERROR, "could not create chunk table");
+    /* Insert chunk */
+    ts_chunk_insert_lock(chunk, RowExclusiveLock);
 
+    /* Add metadata for dimensional and inheritable constraints */
+    ts_chunk_constraints_insert_metadata(chunk->constraints);
+}
+
+static void
+chunk_create_table_constraints(Chunk *chunk)
+{
     /* Create the chunk's constraints, triggers, and indexes */
     ts_chunk_constraints_create(chunk->constraints,
                                 chunk->table_id,

@@ -1013,6 +1007,17 @@ chunk_create_table_after_lock(Chunk *chunk, Hypertable *ht)
                               chunk->fd.id,
                               chunk->table_id);
 }
+
+static Oid
+chunk_create_table(Chunk *chunk, Hypertable *ht)
+{
+    /* Create the actual table relation for the chunk */
+    const char *tablespace = ts_hypertable_select_tablespace_name(ht, chunk);
+
+    chunk->table_id = ts_chunk_create_table(chunk, ht, tablespace);
+
+    Assert(OidIsValid(chunk->table_id));
 
     return chunk->table_id;
 }
@@ -1034,9 +1039,18 @@ chunk_create_from_hypercube_after_lock(Hypertable *ht, Hypercube *cube, const ch
 {
     Chunk *chunk;
 
-    chunk = chunk_create_metadata_after_lock(ht, cube, schema_name, table_name, prefix);
+    /* Insert any new dimension slices into metadata */
+    ts_dimension_slice_insert_multi(cube->slices, cube->num_slices);
+
+    chunk = chunk_create_object(ht, cube, schema_name, table_name, prefix);
     Assert(chunk != NULL);
-    chunk_create_table_after_lock(chunk, ht);
+
+    chunk_create_table(chunk, ht);
+
+    chunk_add_constraints(chunk);
+    chunk_insert_into_metadata_after_lock(chunk);
+
+    chunk_create_table_constraints(chunk);
 
     return chunk;
 }

@@ -1614,7 +1628,8 @@ chunk_resurrect(Hypertable *ht, ChunkStub *stub)
     /* Create data table and related objects */
     chunk->hypertable_relid = ht->main_table_relid;
     chunk->relkind = hypertable_chunk_relkind(ht);
-    chunk->table_id = chunk_create_table_after_lock(chunk, ht);
+    chunk->table_id = chunk_create_table(chunk, ht);
+    chunk_create_table_constraints(chunk);
 
     if (chunk->relkind == RELKIND_FOREIGN_TABLE)
         chunk->data_nodes = ts_chunk_data_node_scan_by_chunk_id(chunk->fd.id, ti->mctx);


@@ -119,7 +119,7 @@ extern TSDLLEXPORT Chunk *ts_chunk_get_by_name_with_memory_context(const char *s
                                                                    const char *table_name,
                                                                    MemoryContext mctx,
                                                                    bool fail_if_not_found);
-extern TSDLLEXPORT void ts_chunk_insert_lock(Chunk *chunk, LOCKMODE lock);
+extern TSDLLEXPORT void ts_chunk_insert_lock(const Chunk *chunk, LOCKMODE lock);
 extern TSDLLEXPORT Oid ts_chunk_create_table(Chunk *chunk, Hypertable *ht,
                                              const char *tablespacename);


@@ -80,31 +80,29 @@ chunk_constraints_expand(ChunkConstraints *ccs, int16 new_capacity)
 }
 
 static void
-chunk_constraint_choose_name(Name dst, bool is_dimension, int32 dimension_slice_id,
-                             const char *hypertable_constraint_name, int32 chunk_id)
+chunk_constraint_dimension_choose_name(Name dst, int32 dimension_slice_id)
 {
-    if (is_dimension)
-    {
-        snprintf(NameStr(*dst), NAMEDATALEN, "constraint_%d", dimension_slice_id);
-    }
-    else
-    {
-        char constrname[100];
-        CatalogSecurityContext sec_ctx;
-
-        Assert(hypertable_constraint_name != NULL);
-
-        ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
-        snprintf(constrname,
-                 100,
-                 "%d_" INT64_FORMAT "_%s",
-                 chunk_id,
-                 ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_CONSTRAINT),
-                 hypertable_constraint_name);
-        ts_catalog_restore_user(&sec_ctx);
-
-        namestrcpy(dst, constrname);
-    }
+    snprintf(NameStr(*dst), NAMEDATALEN, "constraint_%d", dimension_slice_id);
+}
+
+static void
+chunk_constraint_choose_name(Name dst, const char *hypertable_constraint_name, int32 chunk_id)
+{
+    char constrname[NAMEDATALEN];
+    CatalogSecurityContext sec_ctx;
+
+    Assert(hypertable_constraint_name != NULL);
+
+    ts_catalog_database_info_become_owner(ts_catalog_database_info_get(), &sec_ctx);
+    snprintf(constrname,
+             NAMEDATALEN,
+             "%d_" INT64_FORMAT "_%s",
+             chunk_id,
+             ts_catalog_table_next_seq_id(ts_catalog_get(), CHUNK_CONSTRAINT),
+             hypertable_constraint_name);
+    ts_catalog_restore_user(&sec_ctx);
+
+    namestrcpy(dst, constrname);
 }
 
 static ChunkConstraint *
@@ -120,14 +118,16 @@ chunk_constraints_add(ChunkConstraints *ccs, int32 chunk_id, int32 dimension_sli
     if (NULL == constraint_name)
     {
-        chunk_constraint_choose_name(&cc->fd.constraint_name,
-                                     is_dimension_constraint(cc),
-                                     cc->fd.dimension_slice_id,
-                                     hypertable_constraint_name,
-                                     cc->fd.chunk_id);
-
         if (is_dimension_constraint(cc))
+        {
+            chunk_constraint_dimension_choose_name(&cc->fd.constraint_name,
+                                                   cc->fd.dimension_slice_id);
             namestrcpy(&cc->fd.hypertable_constraint_name, "");
+        }
+        else
+            chunk_constraint_choose_name(&cc->fd.constraint_name,
+                                         hypertable_constraint_name,
+                                         cc->fd.chunk_id);
     }
     else
         namestrcpy(&cc->fd.constraint_name, constraint_name);
@@ -837,7 +837,7 @@ chunk_constraint_rename_hypertable_from_tuple(TupleInfo *ti, const char *newname
     chunk_id = DatumGetInt32(values[AttrNumberGetAttrOffset(Anum_chunk_constraint_chunk_id)]);
     namestrcpy(&new_hypertable_constraint_name, newname);
-    chunk_constraint_choose_name(&new_chunk_constraint_name, false, 0, newname, chunk_id);
+    chunk_constraint_choose_name(&new_chunk_constraint_name, newname, chunk_id);
 
     values[AttrNumberGetAttrOffset(Anum_chunk_constraint_hypertable_constraint_name)] =
         NameGetDatum(&new_hypertable_constraint_name);
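
For illustration only (the identifiers and numbers below are hypothetical, not taken from the commit): after this split, a dimension constraint gets a name of the form constraint_<dimension_slice_id>, e.g. constraint_42, while any other chunk constraint gets <chunk_id>_<next chunk_constraint sequence id>_<hypertable_constraint_name>, e.g. 57_103_device_id_fkey, matching the two snprintf format strings above.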


@@ -46,7 +46,7 @@ ts_hypercube_free(Hypercube *hc)
 
 #if defined(USE_ASSERT_CHECKING)
 static inline bool
-hypercube_is_sorted(Hypercube *hc)
+hypercube_is_sorted(const Hypercube *hc)
 {
     int i;

@@ -131,7 +131,7 @@ ts_hypercube_slice_sort(Hypercube *hc)
 }
 
 DimensionSlice *
-ts_hypercube_get_slice_by_dimension_id(Hypercube *hc, int32 dimension_id)
+ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc, int32 dimension_id)
 {
     DimensionSlice slice = {
         .fd.dimension_id = dimension_id,


@@ -33,7 +33,7 @@ extern Hypercube *ts_hypercube_from_constraints(ChunkConstraints *constraints, M
 extern int ts_hypercube_find_existing_slices(Hypercube *cube, ScanTupLock *tuplock);
 extern Hypercube *ts_hypercube_calculate_from_point(Hyperspace *hs, Point *p, ScanTupLock *tuplock);
 extern bool ts_hypercubes_collide(Hypercube *cube1, Hypercube *cube2);
-extern TSDLLEXPORT DimensionSlice *ts_hypercube_get_slice_by_dimension_id(Hypercube *hc,
+extern TSDLLEXPORT DimensionSlice *ts_hypercube_get_slice_by_dimension_id(const Hypercube *hc,
                                                                            int32 dimension_id);
 extern Hypercube *ts_hypercube_copy(Hypercube *hc);
 extern bool ts_hypercube_equal(Hypercube *hc1, Hypercube *hc2);


@@ -1194,7 +1194,7 @@ ts_hypertable_has_tablespace(Hypertable *ht, Oid tspc_oid)
 }
 
 static int
-hypertable_get_chunk_slice_ordinal(Hypertable *ht, Hypercube *hc)
+hypertable_get_chunk_slice_ordinal(const Hypertable *ht, const Hypercube *hc)
 {
     Dimension *dim;
     DimensionSlice *slice;

@@ -2568,7 +2568,7 @@ assert_chunk_data_nodes_is_a_set(List *chunk_data_nodes)
  * happens similar to tablespaces, i.e., based on dimension type.
  */
 List *
-ts_hypertable_assign_chunk_data_nodes(Hypertable *ht, Hypercube *cube)
+ts_hypertable_assign_chunk_data_nodes(const Hypertable *ht, const Hypercube *cube)
 {
     List *chunk_data_nodes = NIL;
     List *available_nodes = ts_hypertable_get_available_data_nodes(ht, true);

@@ -2624,7 +2624,8 @@ get_hypertable_data_node(HypertableDataNode *node)
 }
 
 static List *
-get_hypertable_data_node_values(Hypertable *ht, hypertable_data_node_filter filter, get_value value)
+get_hypertable_data_node_values(const Hypertable *ht, hypertable_data_node_filter filter,
+                                get_value value)
 {
     List *list = NULL;
     ListCell *cell;

@@ -2646,7 +2647,7 @@ ts_hypertable_get_data_node_name_list(Hypertable *ht)
 }
 
 List *
-ts_hypertable_get_available_data_nodes(Hypertable *ht, bool error_if_missing)
+ts_hypertable_get_available_data_nodes(const Hypertable *ht, bool error_if_missing)
 {
     List *available_nodes = get_hypertable_data_node_values(ht,
                                                             filter_non_blocked_data_nodes,


@@ -158,10 +158,10 @@ extern TSDLLEXPORT bool ts_hypertable_set_compressed(Hypertable *ht,
 extern TSDLLEXPORT bool ts_hypertable_unset_compressed(Hypertable *ht);
 extern TSDLLEXPORT void ts_hypertable_clone_constraints_to_compressed(Hypertable *ht,
                                                                        List *constraint_list);
-extern List *ts_hypertable_assign_chunk_data_nodes(Hypertable *ht, Hypercube *cube);
+extern List *ts_hypertable_assign_chunk_data_nodes(const Hypertable *ht, const Hypercube *cube);
 extern TSDLLEXPORT List *ts_hypertable_get_data_node_name_list(Hypertable *ht);
 extern TSDLLEXPORT List *ts_hypertable_get_data_node_serverids_list(Hypertable *ht);
-extern TSDLLEXPORT List *ts_hypertable_get_available_data_nodes(Hypertable *ht,
+extern TSDLLEXPORT List *ts_hypertable_get_available_data_nodes(const Hypertable *ht,
                                                                  bool error_if_missing);
 extern TSDLLEXPORT List *ts_hypertable_get_available_data_node_server_oids(Hypertable *ht);
 extern TSDLLEXPORT HypertableType ts_hypertable_get_type(Hypertable *ht);


@@ -407,6 +407,16 @@ get_result_datums(Datum *values, bool *nulls, unsigned int numvals, AttInMetadat
     }
 }
 
+static const char *
+chunk_api_dimension_slices_json(const Chunk *chunk, const Hypertable *ht)
+{
+    JsonbParseState *ps = NULL;
+    JsonbValue *jv = hypercube_to_jsonb_value(chunk->cube, ht->space, &ps);
+    Jsonb *hcjson = JsonbValueToJsonb(jv);
+
+    return JsonbToCString(NULL, &hcjson->root, ESTIMATE_JSON_STR_SIZE(ht->space->num_dimensions));
+}
+
 /*
  * Create a replica of a chunk on all its assigned data nodes.
  */

@@ -414,12 +424,9 @@ void
 chunk_api_create_on_data_nodes(Chunk *chunk, Hypertable *ht)
 {
     AsyncRequestSet *reqset = async_request_set_create();
-    JsonbParseState *ps = NULL;
-    JsonbValue *jv = hypercube_to_jsonb_value(chunk->cube, ht->space, &ps);
-    Jsonb *hcjson = JsonbValueToJsonb(jv);
     const char *params[4] = {
         quote_qualified_identifier(NameStr(ht->fd.schema_name), NameStr(ht->fd.table_name)),
-        JsonbToCString(NULL, &hcjson->root, ESTIMATE_JSON_STR_SIZE(ht->space->num_dimensions)),
+        chunk_api_dimension_slices_json(chunk, ht),
         NameStr(chunk->fd.schema_name),
         NameStr(chunk->fd.table_name),
     };