Free ChunkInsertStates when the es_per_tuple_exprcontext is freed

Instead of freeing ChunkInsertStates immediately when they are flushed
from the SubspaceStore, defer the freeing until the
es_per_tuple_exprcontext is cleaned up. This ensures that pointers into
the CIS don't become invalidated, without us freeing any of postgres's
memory ourselves as the prior fix did; this fixes a segfault in COPY.

In the normal INSERT case these frees happen at approximately the same
time, so there should be no regression in memory usage; and if freeing
the es_per_tuple_exprcontext was ever delayed, our old strategy of
clearing the context ourselves was invalid anyway.
Joshua Lockerman 2018-12-14 11:07:15 -05:00 committed by JLockerman
parent 9cff0495b5
commit 4b42b301a7
4 changed files with 79 additions and 16 deletions
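
The fix relies on PostgreSQL's `MemoryContextCallback` mechanism: a callback node allocated in a memory context is invoked when that context is reset or deleted. A minimal sketch of the pattern, separate from the diff below (the `deferred_free` and `register_deferred_free` names are illustrative, not from the commit; only the memutils API is real):

```c
#include "postgres.h"
#include "utils/memutils.h"

/* Invoked when the context the callback is registered on is reset or deleted. */
static void
deferred_free(void *arg)
{
	MemoryContextDelete((MemoryContext) arg);
}

/*
 * Arrange for `victim` to be deleted the next time `owner` is reset or
 * deleted. The callback node itself is allocated in `owner`, so the
 * registration cannot outlive the context it is attached to.
 */
static void
register_deferred_free(MemoryContext owner, MemoryContext victim)
{
	MemoryContextCallback *cb = MemoryContextAlloc(owner, sizeof(*cb));

	cb->func = deferred_free;
	cb->arg = victim;
	MemoryContextRegisterResetCallback(owner, cb);
}
```

This is the same shape the diff below gives `chunk_insert_state_free` and `ts_chunk_insert_state_destroy`.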


@@ -534,9 +534,20 @@ ts_chunk_insert_state_create(Chunk *chunk, ChunkDispatch *dispatch)
 	return state;
 }
 
+static void
+chunk_insert_state_free(void *arg)
+{
+	ChunkInsertState *state = arg;
+
+	MemoryContextDelete(state->mctx);
+}
+
 extern void
 ts_chunk_insert_state_destroy(ChunkInsertState *state)
 {
+	MemoryContext deletion_context;
+	MemoryContextCallback *free_callback;
+
 	if (state == NULL)
 		return;
@@ -556,27 +567,30 @@ ts_chunk_insert_state_destroy(ChunkInsertState *state)
 	 * constraint expressions in a parent context of per_tuple_exprcontext so
 	 * there is no issue, however we've run into excessive memory usage due
 	 * to too many constraints, and want to allocate them for a shorter
-	 * lifetime. To ensure this doesn't create dangling pointers, we free the
-	 * `per_tuple_exprcontext`, and call all its callbacks, here, before the
-	 * chunk insert state memory context.
+	 * lifetime, so we free them when the SubspaceStore gets too full.
 	 *
-	 * This may not be entirely safe. Right now it appears that in the case
-	 * where both the `es_per_tuple_exprcontext` and the chunk insert state
-	 * exist, they are freed at approximately the same time. However,
-	 * postgres does not guarantee this (and indeed does not know anything
-	 * about `chunk_insert_state`) so this may not hold true in the future.
-	 * A better fix may be to tie the `chunk_insert_state` lifetime to the
-	 * lifetime of a known postgres memory context (possibly
-	 * tuple_exprcontext's?) and handle cases where the insert state is
-	 * freed early.
+	 * To ensure this doesn't create dangling pointers, we don't free the
+	 * ChunkInsertState immediately, but rather register it to be freed when
+	 * the current `es_per_tuple_exprcontext` or `es_query_cxt` is cleaned
+	 * up, deferring deletion of the ChunkInsertState until that context is
+	 * freed.
 	 */
 	if (state->estate->es_per_tuple_exprcontext != NULL)
-		FreeExprContext(state->estate->es_per_tuple_exprcontext, true);
-	state->estate->es_per_tuple_exprcontext = NULL;
+	{
+		deletion_context = state->estate->es_per_tuple_exprcontext->ecxt_per_tuple_memory;
+	}
+	else
+	{
+		deletion_context = state->estate->es_query_cxt;
+	}
+
+	free_callback = MemoryContextAlloc(deletion_context, sizeof(*free_callback));
+	*free_callback = (MemoryContextCallback)
+	{
+		.func = chunk_insert_state_free,
+		.arg = state,
+	};
+	MemoryContextRegisterResetCallback(deletion_context, free_callback);
 
 	if (NULL != state->slot)
 		ExecDropSingleTupleTableSlot(state->slot);
-	MemoryContextDelete(state->mctx);
 }
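
Note that reset callbacks fire on MemoryContextReset as well as MemoryContextDelete, so a callback registered on `ecxt_per_tuple_memory` runs no later than the next per-tuple reset; this is why, in the normal INSERT path, the deferred free happens at approximately the same time as the old immediate free. A hedged illustration of that timing (the function name is mine, not the commit's):

```c
#include "postgres.h"
#include "executor/executor.h"

/*
 * Illustration only: resetting the per-tuple expression context invokes any
 * reset callbacks registered on ecxt_per_tuple_memory, which is what frees
 * a ChunkInsertState whose deletion was deferred as above.
 */
static void
reset_per_tuple_context(EState *estate)
{
	ExprContext *econtext = GetPerTupleExprContext(estate);

	/* Expands to MemoryContextReset(econtext->ecxt_per_tuple_memory). */
	ResetExprContext(econtext);
}
```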


@@ -85,3 +85,16 @@ ERROR: insert or update on table "_hyper_2_6_chunk" violates foreign key constr
 COPY (SELECT * FROM hyper ORDER BY time, meta_id) TO STDOUT;
 1	1	1
 1	2	1
+--test that copy works with a low setting for max_open_chunks_per_insert
+set timescaledb.max_open_chunks_per_insert = 1;
+CREATE TABLE "hyper2" (
+    "time" bigint NOT NULL,
+    "value" double precision NOT NULL
+);
+SELECT create_hypertable('hyper2', 'time', chunk_time_interval => 10);
+  create_hypertable  
+---------------------
+ (3,public,hyper2,t)
+(1 row)
+
+\copy hyper2 from data/copy_data.csv with csv header ;


@@ -54,3 +54,13 @@ COPY hyper (time, meta_id, value) FROM STDIN DELIMITER ',';
 \set ON_ERROR_STOP 1
 
 COPY (SELECT * FROM hyper ORDER BY time, meta_id) TO STDOUT;
+
+--test that copy works with a low setting for max_open_chunks_per_insert
+set timescaledb.max_open_chunks_per_insert = 1;
+CREATE TABLE "hyper2" (
+    "time" bigint NOT NULL,
+    "value" double precision NOT NULL
+);
+SELECT create_hypertable('hyper2', 'time', chunk_time_interval => 10);
+\copy hyper2 from data/copy_data.csv with csv header ;


@@ -0,0 +1,26 @@
+generate_series,random
+1,0.951734602451324
+2,0.717823888640851
+3,0.543408489786088
+4,0.641131274402142
+5,0.12689296528697
+6,0.0126486560329795
+7,0.213605496101081
+8,0.132784110959619
+9,0.381155731156468
+10,0.284836102742702
+11,0.795640022493899
+12,0.631451691035181
+13,0.0958626130595803
+14,0.929304684977978
+15,0.524866581428796
+16,0.919249163009226
+17,0.878917074296623
+18,0.68551931809634
+19,0.594833800103515
+20,0.819584367796779
+21,0.474171321373433
+22,0.938535195309669
+23,0.333933369256556
+24,0.274582070298493
+25,0.602348630782217