mirror of https://github.com/timescale/timescaledb.git
Fix typos in comments
This commit is contained in:
parent
43eb4ffba6
commit
92586d8fc9
@@ -339,7 +339,7 @@ bookend_combinefunc(MemoryContext aggcontext, InternalCmpAggStore *state1, Inter
     cache = transcache_get(fcinfo);
 
     /*
-     * manually copy all fields from state2 to state1, as per other comine
+     * manually copy all fields from state2 to state1, as per other combine
     * func like int8_avg_combine
     */
     if (state1 == NULL)
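For context, the "combine func" pattern this comment cites is PostgreSQL's partial-aggregation combine step: when the first state is NULL, a fresh state must be allocated in the aggregate memory context and the second state copied into it. A minimal sketch modeled on Postgres's int8_avg_combine; MyAggState and my_agg_combine are illustrative stand-ins, not the extension's actual code:

#include "postgres.h"
#include "fmgr.h"
#include "utils/memutils.h"

/* Stand-in for the real transition-state struct (InternalCmpAggStore above) */
typedef struct MyAggState
{
    int64 count;
    int64 sum;
} MyAggState;

PG_FUNCTION_INFO_V1(my_agg_combine);

Datum
my_agg_combine(PG_FUNCTION_ARGS)
{
    MyAggState *state1 = PG_ARGISNULL(0) ? NULL : (MyAggState *) PG_GETARG_POINTER(0);
    MyAggState *state2 = PG_ARGISNULL(1) ? NULL : (MyAggState *) PG_GETARG_POINTER(1);
    MemoryContext aggcontext;

    if (!AggCheckCallContext(fcinfo, &aggcontext))
        elog(ERROR, "combine function called in non-aggregate context");

    if (state2 == NULL)
    {
        if (state1 == NULL)
            PG_RETURN_NULL();
        PG_RETURN_POINTER(state1);
    }

    if (state1 == NULL)
    {
        /* Allocate a fresh state in the aggregate's memory context and
         * manually copy all fields from state2, as the comment describes */
        state1 = (MyAggState *) MemoryContextAlloc(aggcontext, sizeof(MyAggState));
        *state1 = *state2;
        PG_RETURN_POINTER(state1);
    }

    state1->count += state2->count;
    state1->sum += state2->sum;
    PG_RETURN_POINTER(state1);
}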
@@ -139,7 +139,7 @@ static void
 worker_state_cleanup(ScheduledBgwJob *sjob)
 {
     /*
-     * This function needs to be safe wrt failures occuring at any point in
+     * This function needs to be safe wrt failures occurring at any point in
     * the job starting process.
     */
     if (sjob->handle != NULL)
@@ -230,7 +230,7 @@ scheduled_bgw_job_transition_state_to(ScheduledBgwJob *sjob, JobState new_state)
 
     /*
     * start the job before you can encounter any errors so that they
-     * are always registerd
+     * are always registered
     */
     mark_job_as_started(sjob);
     if (ts_bgw_job_has_timeout(&sjob->job))
@@ -206,7 +206,7 @@ release_all_pinned_caches()
     ListCell *lc;
 
     /*
-     * release once for every occurence of a cache in the pinned caches list.
+     * release once for every occurrence of a cache in the pinned caches list.
     * On abort, release irrespective of cache->release_on_commit.
     */
     foreach(lc, pinned_caches)
@@ -26,7 +26,7 @@
 * way to signal all backends that they should invalidate their caches. For this
 * we use the PostgreSQL relcache mechanism that propagates relation cache
 * invalidation events to all backends. We register a callback with this
- * mechanism to recieve events on all backends whenever a relation cache entry
+ * mechanism to receive events on all backends whenever a relation cache entry
 * is invalidated.
 *
 * To know which events should trigger invalidation of our caches, we use dummy
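The registration this comment describes is done through PostgreSQL's CacheRegisterRelcacheCallback (utils/inval.h). A minimal sketch of the mechanism; the callback name, watched relation, and cache logic are illustrative, not the extension's actual code:

#include "postgres.h"
#include "utils/inval.h"

static Oid watched_relid = InvalidOid;  /* hypothetical: set when the cache is built */

static void
my_cache_invalidate(void)
{
    /* hypothetical: mark the backend-local caches as stale */
}

/* Called in every backend whenever a relcache entry is invalidated;
 * relid is the affected relation, or InvalidOid for a full cache flush */
static void
my_relcache_callback(Datum arg, Oid relid)
{
    if (relid == InvalidOid || relid == watched_relid)
        my_cache_invalidate();
}

void
my_cache_init(void)
{
    /* Registration is per-backend and permanent; there is no unregister */
    CacheRegisterRelcacheCallback(my_relcache_callback, (Datum) 0);
}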
16 src/chunk.c
@@ -1227,7 +1227,7 @@ chunk_get_chunks_in_time_range(Oid table_relid, Datum older_than_datum, Datum ne
     Oid time_dim_type = InvalidOid;
 
     /*
-     * contains the list of hypertables which need to be considred. this is a
+     * contains the list of hypertables which need to be considered. this is a
     * list containing a single hypertable if we are passed an invalid table
     * OID. Otherwise, it will have the list of all hypertables in the system
     */
@@ -1285,7 +1285,7 @@ chunk_get_chunks_in_time_range(Oid table_relid, Datum older_than_datum, Datum ne
     * time dimension constraint given as an argument (older_than or
     * newer_than) we make sure all hypertables have the time dimension
     * type of the given type or through an error. This check is done
-     * accross hypertables that is why it is not in the helper function
+     * across hypertables that is why it is not in the helper function
     * below.
     */
     if (time_dim_type != time_dim->fd.column_type &&
@@ -1387,14 +1387,14 @@ chunk_scan_internal(int indexid,
 }
 
 /*
- * Get a window of chunks that "preceed" the given dimensional point.
+ * Get a window of chunks that "precede" the given dimensional point.
 *
 * For instance, if the dimension is "time", then given a point in time the
 * function returns the recent chunks that come before the chunk that includes
 * that point. The count parameter determines the number or slices the window
 * should include in the given dimension. Note, that with multi-dimensional
 * partitioning, there might be multiple chunks in each dimensional slice that
- * all preceed the given point. For instance, the example below shows two
+ * all precede the given point. For instance, the example below shows two
 * different situations that each go "back" two slices (count = 2) in the
 * x-dimension, but returns two vs. eight chunks due to different
 * partitioning.
@@ -1415,7 +1415,7 @@ chunk_scan_internal(int indexid,
 * '|___|___|___|
 *
 * Note that the returned chunks will be allocated on the given memory
- * context, inlcuding the list itself. So, beware of not leaking the list if
+ * context, including the list itself. So, beware of not leaking the list if
 * the chunks are later cached somewhere else.
 */
 List *
@@ -1425,7 +1425,7 @@ ts_chunk_get_window(int32 dimension_id, int64 point, int count, MemoryContext mc
     DimensionVec *dimvec;
     int i;
 
-     /* Scan for "count" slices that preceeds the point in the given dimension */
+     /* Scan for "count" slices that precede the point in the given dimension */
     dimvec = ts_dimension_slice_scan_by_dimension_before_point(dimension_id,
                                                                point,
                                                                count,
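A hedged usage sketch of ts_chunk_get_window, based only on the signature visible in the hunk header above; dimension_id and point are placeholders:

    /* Fetch the chunks in the two dimension slices that precede `point` in
     * the given dimension. The list and the chunks are allocated on the
     * memory context passed in, so take care not to leak it (see comment
     * above). */
    List *chunks = ts_chunk_get_window(dimension_id, point, 2, CurrentMemoryContext);
    ListCell *lc;

    foreach(lc, chunks)
    {
        Chunk *chunk = lfirst(lc);

        /* ... inspect or cache the chunk ... */
    }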
@@ -1927,10 +1927,10 @@ ts_chunk_drop_chunks(PG_FUNCTION_ARGS)
     * locks on C and PAR. If we have a query as "select * from
     * hypertable", this acquires a lock on C and PAR as well. But the
     * order of the locks is not the same and results in deadlocks. -
-     * github issue 865 We hope to alleviate the problem by aquiring a
+     * github issue 865 We hope to alleviate the problem by acquiring a
     * lock on PAR before executing the drop table stmt. This is not
     * fool-proof as we could have multiple fkrelids and the order of lock
-     * acquistion for these could differ as well. Do not unlock - let the
+     * acquisition for these could differ as well. Do not unlock - let the
     * transaction semantics take care of it.
     */
     foreach(lf, fk_relids)
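The fix this comment describes amounts to taking the referencing tables' locks explicitly before the drop, so the acquisition order matches what concurrent queries take. A sketch of that idea, assuming fk_relids holds the OIDs of the referencing tables; the lock level is chosen for illustration:

#include "storage/lmgr.h"

    /* Lock the referencing (foreign-key) tables before dropping the chunk,
     * in a fixed order; do not unlock, transaction end releases them */
    foreach(lf, fk_relids)
        LockRelationOid(lfirst_oid(lf), AccessExclusiveLock);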
@@ -44,7 +44,7 @@ static CustomScanMethods chunk_dispatch_plan_methods = {
 * custom_private field.
 *
 * The chunk dispatch plan takes the original tuple-producing subplan, which
- * was part of a ModifyTable node, and imposes itself inbetween the
+ * was part of a ModifyTable node, and imposes itself between the
 * ModifyTable plan and the subplan. During execution, the subplan will
 * produce the new tuples that the chunk dispatch node routes before passing
 * them up to the ModifyTable node.
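For context, a custom plan node like chunk_dispatch_plan_methods is declared through PostgreSQL's CustomScanMethods (nodes/extensible.h); a minimal sketch with illustrative names, not the extension's actual declaration:

#include "postgres.h"
#include "nodes/extensible.h"

static Node *my_dispatch_state_create(CustomScan *cscan);

/* The planner hands back a CustomScan whose methods point here; at executor
 * startup, CreateCustomScanState builds the node's runtime state */
static CustomScanMethods my_dispatch_plan_methods = {
    .CustomName = "MyChunkDispatch",
    .CreateCustomScanState = my_dispatch_state_create,
};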
@@ -556,18 +556,18 @@ ts_chunk_insert_state_destroy(ChunkInsertState *state)
 
     /*
     * Postgres stores cached row types from `get_cached_rowtype` in the
-     * contraint expression and tries to free this type via a callback from
+     * constraint expression and tries to free this type via a callback from
     * the `per_tuple_exprcontext`. Since we create constraint expressions
     * within the chunk insert state memory context, this leads to a series of
-     * pointers strutured like: `per_tuple_exprcontext -> constraint expr (in
+     * pointers structured like: `per_tuple_exprcontext -> constraint expr (in
     * chunk insert state) -> cached row type` if we try to free the the chunk
     * insert state MemoryContext while the `es_per_tuple_exprcontext` is
     * live, postgres tries to dereference a dangling pointer in one of
     * `es_per_tuple_exprcontext`'s callbacks. Normally postgres allocates the
     * constraint expressions in a parent context of per_tuple_exprcontext so
-     * there is no issue, however we've run into excessive memory ussage due
-     * to too many constraints, and want to allocate them for a shorter
-     * lifetime so we free them when SubspaceStore gets to full.
+     * there is no issue, however we've run into excessive memory usage due to
+     * too many constraints, and want to allocate them for a shorter lifetime
+     * so we free them when SubspaceStore gets to full.
     *
     * To ensure this doesn't create dangling pointers, we don't free the
     * ChunkInsertState immediately, but rather register it to be freed when
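The deferred-free pattern this comment alludes to matches PostgreSQL's MemoryContextRegisterResetCallback (utils/memutils.h); a sketch under that assumption, with illustrative names:

#include "postgres.h"
#include "utils/memutils.h"

/* Delete the insert state's context only when a context known to outlive
 * es_per_tuple_exprcontext is reset or deleted, so no dangling pointer
 * is ever dereferenced */
static void
insert_state_free(void *arg)
{
    MemoryContextDelete((MemoryContext) arg);
}

static void
defer_destroy(MemoryContext long_lived, MemoryContext state_mctx)
{
    MemoryContextCallback *cb;

    /* The callback struct must live as long as the context it watches */
    cb = MemoryContextAlloc(long_lived, sizeof(MemoryContextCallback));
    cb->func = insert_state_free;
    cb->arg = state_mctx;
    MemoryContextRegisterResetCallback(long_lived, cb);
}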
@@ -1228,7 +1228,7 @@ ts_dimension_add(PG_FUNCTION_ARGS)
     /*
     * The hypertable catalog table has a CHECK(num_dimensions > 0), which
     * means, that when this function is called from create_hypertable()
-     * instaed of directly, num_dimension is already set to one. We therefore
+     * instead of directly, num_dimension is already set to one. We therefore
     * need to lock the hypertable tuple here so that we can set the correct
     * number of dimensions once we've added the new dimension
     */
@@ -1281,7 +1281,7 @@ ts_dimension_add(PG_FUNCTION_ARGS)
         /*
         * Need to get a fresh copy of hypertable from the database as cache
         * does not reflect the changes in the previous 2 lines which add a
-         * new dimenison
+         * new dimension
         */
         info.ht = ts_hypertable_get_by_id(info.ht->fd.id);
         ts_indexing_verify_indexes(info.ht);
@@ -44,7 +44,7 @@ static Oid extension_proxy_oid = InvalidOid;
 * We use a proxy_table to be notified of extension drops/creates. Namely,
 * we rely on the fact that postgres will internally create RelCacheInvalidation
 * events when any tables are created or dropped. We rely on the following properties
- * of Postgres's dependency managment:
+ * of Postgres's dependency management:
 * * The proxy table will be created before the extension itself.
 * * The proxy table will be dropped before the extension itself.
 */
@@ -177,7 +177,7 @@ ts_hist_deserializefunc(PG_FUNCTION_ARGS)
     PG_RETURN_BYTEA_P(state);
 }
 
-/* hist_funalfunc(internal, val REAL, MIN REAL, MAX REAL, nbuckets INTEGER) => INTEGER[] */
+/* hist_finalfunc(internal, val REAL, MIN REAL, MAX REAL, nbuckets INTEGER) => INTEGER[] */
 Datum
 ts_hist_finalfunc(PG_FUNCTION_ARGS)
 {
@@ -208,7 +208,7 @@ ts_hypercube_calculate_from_point(Hyperspace *hs, Point *p)
 
         /*
         * If this is an aligned dimension, we'd like to reuse any existing
-         * slice that covers the coordinate in the dimenion
+         * slice that covers the coordinate in the dimension
         */
         if (dim->fd.aligned)
         {
@@ -493,8 +493,8 @@ ts_hypertable_lock_tuple_simple(Oid table_relid)
 
             /*
             * Updated by the current transaction already. We equate this with
-             * a successul lock since the tuple should be locked if updated by
-             * us.
+             * a successful lock since the tuple should be locked if updated
+             * by us.
             */
             return true;
         case HeapTupleMayBeUpdated:
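The switch shown here is over the result of heap_lock_tuple as it looked in the PG 10/11 era this code targets; a hedged sketch of the surrounding call, with setup elided and details assumed rather than taken from the extension's source:

    HTSU_Result result;
    Buffer buffer;
    HeapUpdateFailureData hufd;

    result = heap_lock_tuple(rel, &tuple, GetCurrentCommandId(false),
                             LockTupleExclusive, LockWaitBlock,
                             false, &buffer, &hufd);
    /* heap_lock_tuple returns with the buffer pinned */
    ReleaseBuffer(buffer);

    switch (result)
    {
        case HeapTupleSelfUpdated:  /* updated by us: treat as a successful lock */
        case HeapTupleMayBeUpdated: /* lock acquired */
            return true;
        default:
            return false;
    }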
@@ -1325,7 +1325,7 @@ ts_hypertable_create(PG_FUNCTION_ARGS)
     * with itself and RowExclusive, to prevent simultaneous inserts on the
     * table. Also since TRUNCATE (part of data migrations) takes an
     * AccessExclusiveLock take that lock level here too so that we don't have
-     * lock upgrades, which are suceptible to deadlocks. If we aren't
+     * lock upgrades, which are susceptible to deadlocks. If we aren't
     * migrating data, then shouldn't have much contention on the table thus
     * not worth optimizing.
     */
@@ -1335,7 +1335,7 @@ ts_hypertable_create(PG_FUNCTION_ARGS)
     if (ts_is_hypertable(table_relid))
     {
         /*
-         * Unlock and return. Note that unlocking is analagous to what PG does
+         * Unlock and return. Note that unlocking is analogous to what PG does
         * for ALTER TABLE ADD COLUMN IF NOT EXIST
         */
         heap_close(rel, AccessExclusiveLock);
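Both hunks reflect the same rule: take the strongest lock that will ever be needed at open, so no later lock upgrade is required. A sketch of that shape using the pre-PG12 heap_open/heap_close API seen above; the function name and return values are illustrative:

static bool
convert_to_hypertable(Oid table_relid)
{
    /* Take the strongest lock we may need (TRUNCATE during data migration
     * needs AccessExclusiveLock) right away, so no later lock upgrade can
     * deadlock against a concurrent session */
    Relation rel = heap_open(table_relid, AccessExclusiveLock);

    if (ts_is_hypertable(table_relid))
    {
        /* Unlock and bail, as PG does for ALTER TABLE ADD COLUMN IF NOT EXISTS */
        heap_close(rel, AccessExclusiveLock);
        return false;
    }

    /* ... perform the conversion while holding the lock ... */
    heap_close(rel, NoLock);    /* keep the lock until transaction end */
    return true;
}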
@@ -45,7 +45,7 @@
 *    (src/init.c:post_analyze_hook) and stores it in
 *    extension_post_parse_analyze_hook.
 * d. Sets the post_parse_analyze_hook back to what it was before we
- *    loaded the versioned extention (this hook eventually called our
+ *    loaded the versioned extension (this hook eventually called our
 *    post_analyze_hook, but may not be our function, for instance, if
 *    another extension is loaded).
 * e. Calls extension_post_parse_analyze_hook.
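Steps c and d are the standard PostgreSQL hook save-and-chain pattern; a minimal sketch with illustrative names, using the hook signature of the PG 10/11 era:

#include "postgres.h"
#include "parser/analyze.h"

static post_parse_analyze_hook_type prev_post_parse_analyze_hook = NULL;

static void
loader_post_parse_analyze(ParseState *pstate, Query *query)
{
    /* Chain to whatever hook was installed before us, then do our work */
    if (prev_post_parse_analyze_hook != NULL)
        prev_post_parse_analyze_hook(pstate, query);

    /* ... decide whether the versioned extension must be loaded ... */
}

void
_PG_init(void)
{
    prev_post_parse_analyze_hook = post_parse_analyze_hook;
    post_parse_analyze_hook = loader_post_parse_analyze;
}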
@@ -57,7 +57,7 @@
 * 1) We probably can't
 *    - The shared_preload_libraries is called in PostmasterMain which is way before InitPostgres is called.
 *      (Note: This happens even before the fork of the backend) -- so we don't even know which database this is for.
- *      -- This means we cannot query for the existance of the extension yet because the caches are initialized in InitPostgres.
+ *      -- This means we cannot query for the existence of the extension yet because the caches are initialized in InitPostgres.
 * 2) We actually don't want to load the extension in two cases:
 *    a) We are upgrading the extension.
 *    b) We set the guc timescaledb.disable_load.
@@ -15,4 +15,4 @@ extern bool ts_loader_extension_exists(void);
 
 extern void ts_loader_extension_check(void);
 
-#endif /* TIMESCALDB_LOADER_H */
+#endif /* TIMESCALEDB_LOADER_H */
@@ -54,7 +54,7 @@ ts_http_version_string(HttpVersion version)
 /*
 * Send an HTTP request and receive the HTTP response on the given connection.
 *
- * Returns HTTP_ERROR_NONE (0) on success or a HTTP-specfic error on failure.
+ * Returns HTTP_ERROR_NONE (0) on success or a HTTP-specific error on failure.
 */
 HttpError
 ts_http_send_and_recv(Connection *conn, HttpRequest *req, HttpResponseState *state)
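A short usage sketch based only on the signature and return convention above; conn, req, and state are assumed to have been created elsewhere:

    HttpError err = ts_http_send_and_recv(conn, req, state);

    if (err != HTTP_ERROR_NONE)
        elog(ERROR, "HTTP request failed with error code %d", err);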
@@ -381,10 +381,10 @@ resolve_function_argtype(FunctionCallInfo fcinfo)
 * struct. For partitioning functions invoked on the insert path, this is
 * typically the Hypertable cache's memory context. Hence, the type cache lives
 * for the duration of the hypertable cache and can be reused across multiple
- * invokations of the partitioning function, even across transactions.
+ * invocations of the partitioning function, even across transactions.
 *
 * If the partitioning function is invoked outside the insert path, the FmgrInfo
- * and its memory context has a lifetime corresponding to that invokation.
+ * and its memory context has a lifetime corresponding to that invocation.
 */
 typedef struct PartFuncCache
 {
@@ -230,7 +230,7 @@ contains_first_last_node(List *sortClause, List *targetList)
 *
 * Most of the code is borrowed from: preprocess_minmax_aggregates (planagg.c). Few
 * major differences:
- * - generate FirstLastAggInfo that wrapps MinMaxAggInfo
+ * - generate FirstLastAggInfo that wraps MinMaxAggInfo
 * - generate subquery (path) for FIRST/LAST (we reuse MinMaxAggPath)
 * - replace Aggref node with Param node
 * - reject ORDER BY on FIRST/LAST
@@ -611,7 +611,7 @@ build_first_last_path(PlannerInfo *root, FirstLastAggInfo *fl_info,
     */
 
     /*
-     * Value and sort target entries but sort target is eleminated later on
+     * Value and sort target entries but sort target is eliminated later on
     * from target list
     */
     value_target = makeTargetEntry(copyObject(mminfo->target), (AttrNumber) 1, pstrdup("value"), false);
@@ -412,7 +412,7 @@ involves_hypertable(PlannerInfo *root, RelOptInfo *rel)
 * table we'd like to insert into.
 *
 * The way we redirect tuples to chunks is to insert an intermediate "chunk
- * dispatch" plan node, inbetween the ModifyTable and its subplan that produces
+ * dispatch" plan node, between the ModifyTable and its subplan that produces
 * the tuples. When the ModifyTable plan is executed, it tries to read a tuple
 * from the intermediate chunk dispatch plan instead of the original
 * subplan. The chunk plan reads the tuple from the original subplan, looks up
@@ -1478,8 +1478,8 @@ process_create_table_end(Node *parsetree)
     verify_constraint_list(stmt->relation, stmt->constraints);
 
     /*
-     * Only after parse analyis does tableElts contain only ColumnDefs. So, if
-     * we capture this in processUtility, we should be prepared to have
+     * Only after parse analysis does tableElts contain only ColumnDefs. So,
+     * if we capture this in processUtility, we should be prepared to have
     * constraint nodes and TableLikeClauses intermixed
     */
     foreach(lc, stmt->tableElts)
@@ -293,7 +293,7 @@ sort_transform_ec(PlannerInfo *root, EquivalenceClass *orig)
     ListCell *lc_member;
     EquivalenceClass *newec = NULL;
 
-     /* check all members, adding only tranformable members to new ec */
+     /* check all members, adding only transformable members to new ec */
     foreach(lc_member, orig->ec_members)
     {
         EquivalenceMember *ec_mem = (EquivalenceMember *) lfirst(lc_member);
@@ -78,7 +78,7 @@ ts_subspace_store_init(Hyperspace *space, MemoryContext mcxt, int16 max_items)
 
     /*
     * make sure that the first dimension is a time dimension, otherwise the
-     * tree will grow in a way that makes prunning less effective.
+     * tree will grow in a way that makes pruning less effective.
     */
     Assert(space->num_dimensions < 1 || space->dimensions[0].type == DIMENSION_TYPE_OPEN);
 