Mirror of https://github.com/timescale/timescaledb.git
Fix memory handling during scans

Scan functions cannot be called on a per-tuple memory context, since they might allocate data that needs to live until the end of the scan. Fix this in a couple of places to ensure correct memory handling.

Fixes #4148, #4145
Parent: a759b2b2b9
Commit: b954c00fa8
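The fix follows the usual PostgreSQL pattern of switching to a longer-lived memory context around allocations that must outlive per-tuple processing. Below is a minimal sketch of that pattern; it is not TimescaleDB code, the helper name and the 64-byte allocation are illustrative, and only MemoryContextSwitchTo()/palloc() are real PostgreSQL APIs.

```c
#include "postgres.h"
#include "utils/memutils.h"

/*
 * Illustration only (hypothetical helper): anything palloc'd while a
 * short-lived per-tuple context is current disappears at the next
 * MemoryContextReset(), so state that must survive the whole scan has to
 * be allocated on a longer-lived context such as work_mcxt.
 */
static char *
allocate_scan_state(MemoryContext work_mcxt)
{
	/* Save the caller's (possibly per-tuple) context and switch away. */
	MemoryContext oldmcxt = MemoryContextSwitchTo(work_mcxt);
	char	   *state = palloc(64);	/* e.g., a scan key or iterator state */

	/* Restore the caller's context for per-tuple work. */
	MemoryContextSwitchTo(oldmcxt);

	return state;				/* remains valid across per-tuple resets */
}
```

Each hunk below applies this same save/switch/restore shape, either around an individual scan call or inside a scan-iterator helper.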
@@ -8,6 +8,10 @@ accidentally triggering the load of a previous DB version.**
 
 **Bugfixes**
 * #4122 Fix segfault on INSERT into distributed hypertable
+* #4161 Fix memory handling during scans
+
+**Thanks**
+* @abrownsword for reporting a crash in the telemetry reporter
 
 ## 2.6.0 (2022-02-16)
 This release is medium priority for upgrade. We recommend that you upgrade at the next available opportunity.
@@ -252,9 +252,9 @@ ts_chunk_scan_by_constraints(const Hyperspace *hs, const List *dimension_vecs,
 			prev_chunk_oid = chunk->table_id;
 		}
 
+		MemoryContextSwitchTo(work_mcxt);
 		/* Only one chunk should match */
 		Assert(ts_scan_iterator_next(&chunk_it) == NULL);
-		MemoryContextSwitchTo(work_mcxt);
 	}
 }
 
@@ -56,5 +56,7 @@ ts_scan_iterator_scan_key_init(ScanIterator *iterator, AttrNumber attributeNumbe
 TSDLLEXPORT void
 ts_scan_iterator_rescan(ScanIterator *iterator)
 {
+	MemoryContext oldmcxt = MemoryContextSwitchTo(iterator->scankey_mcxt);
 	ts_scanner_rescan(&iterator->ctx, NULL);
+	MemoryContextSwitchTo(oldmcxt);
 }
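With the hunk above, ts_scan_iterator_rescan() runs the underlying rescan on the iterator's own scankey_mcxt. The sketch below shows how a caller might rely on that; the loop, the per-tuple context, and rescan_in_loop() itself are hypothetical, while ScanIterator, ts_scan_iterator_rescan() and ts_scan_iterator_next() come from the diff.

```c
#include "postgres.h"
#include "utils/memutils.h"
#include "scan_iterator.h"		/* TimescaleDB header; include path assumed */

/*
 * Hypothetical caller: it can stay on a short-lived context that is reset
 * every iteration, because the rescan's allocations now land on the
 * iterator's scankey_mcxt rather than on CurrentMemoryContext.
 */
static void
rescan_in_loop(ScanIterator *iterator, MemoryContext per_tuple_mcxt)
{
	MemoryContext oldmcxt = MemoryContextSwitchTo(per_tuple_mcxt);

	for (int i = 0; i < 10; i++)
	{
		MemoryContextReset(per_tuple_mcxt);
		ts_scan_iterator_rescan(iterator);

		/* ... consume tuples with ts_scan_iterator_next(iterator) ... */
	}

	MemoryContextSwitchTo(oldmcxt);
}
```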
@@ -27,7 +27,6 @@ typedef struct StatsContext
 {
 	TelemetryStats *stats;
 	ScanIterator compressed_chunk_stats_iterator;
-	bool iterator_valid;
 } StatsContext;
 
 /*
@@ -338,27 +337,25 @@ static bool
 get_chunk_compression_stats(StatsContext *statsctx, const Chunk *chunk,
 							Form_compression_chunk_size compr_stats)
 {
+	TupleInfo *ti;
+	MemoryContext oldmcxt;
+
 	if (!ts_chunk_is_compressed(chunk))
 		return false;
 
+	/* Need to execute the scan functions on the long-lived memory context */
+	oldmcxt = MemoryContextSwitchTo(statsctx->compressed_chunk_stats_iterator.scankey_mcxt);
 	ts_scan_iterator_scan_key_reset(&statsctx->compressed_chunk_stats_iterator);
 	ts_scan_iterator_scan_key_init(&statsctx->compressed_chunk_stats_iterator,
								   Anum_compression_chunk_size_pkey_chunk_id,
								   BTEqualStrategyNumber,
								   F_INT4EQ,
								   Int32GetDatum(chunk->fd.id));
+	ts_scan_iterator_start_or_restart_scan(&statsctx->compressed_chunk_stats_iterator);
+	ti = ts_scan_iterator_next(&statsctx->compressed_chunk_stats_iterator);
+	MemoryContextSwitchTo(oldmcxt);
 
-	if (statsctx->iterator_valid)
-	{
-		ts_scan_iterator_rescan(&statsctx->compressed_chunk_stats_iterator);
-	}
-	else
-	{
-		ts_scan_iterator_start_scan(&statsctx->compressed_chunk_stats_iterator);
-		statsctx->iterator_valid = true;
-	}
-
-	if (ts_scan_iterator_next(&statsctx->compressed_chunk_stats_iterator))
+	if (ti)
 	{
 		Form_compression_chunk_size fd;
 		bool should_free;
@@ -379,9 +376,8 @@ get_chunk_compression_stats(StatsContext *statsctx, const Chunk *chunk,
 	/*
 	 * Should only get here if a compressed chunk is missing stats for some
 	 * reason. The iterator will automatically close if no tuple is found, so
-	 * need to make sure it is re-opened next time this function is called.
+	 * it will be re-opened next time this function is called.
 	 */
-	statsctx->iterator_valid = false;
 
 	return false;
 }
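Taken together, the two hunks above drop the iterator_valid bookkeeping in favor of ts_scan_iterator_start_or_restart_scan() plus a check on the returned TupleInfo, and run all iterator calls on the iterator's scankey_mcxt. The sketch below is just the new code path condensed into one hypothetical helper (lookup_compression_stats_sketch is not a real function, the required TimescaleDB headers are omitted, and the copy-out of the stats tuple is elided).

```c
/* Condensed restatement of the new lookup flow (illustration only).
 * Requires the TimescaleDB scan-iterator and compression catalog headers. */
static bool
lookup_compression_stats_sketch(StatsContext *statsctx, int32 chunk_id)
{
	ScanIterator *it = &statsctx->compressed_chunk_stats_iterator;

	/* Run the scan functions on the long-lived scan-key context so their
	 * allocations survive the caller's per-relation resets. */
	MemoryContext oldmcxt = MemoryContextSwitchTo(it->scankey_mcxt);
	TupleInfo  *ti;

	ts_scan_iterator_scan_key_reset(it);
	ts_scan_iterator_scan_key_init(it,
								   Anum_compression_chunk_size_pkey_chunk_id,
								   BTEqualStrategyNumber,
								   F_INT4EQ,
								   Int32GetDatum(chunk_id));
	ts_scan_iterator_start_or_restart_scan(it);
	ti = ts_scan_iterator_next(it);
	MemoryContextSwitchTo(oldmcxt);

	/* The caller would copy the compression stats out of ti here. */
	return ti != NULL;
}
```

Removing the flag also removes the risk of it getting out of sync with the iterator state when the iterator auto-closes on an empty result, which is what the comment in the hunk just above alludes to.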
@@ -496,7 +492,6 @@ ts_telemetry_stats_gather(TelemetryStats *stats)
 	MemoryContext oldmcxt, relmcxt;
 	StatsContext statsctx = {
 		.stats = stats,
-		.iterator_valid = false,
 	};
 
 	MemSet(stats, 0, sizeof(*stats));
@@ -511,12 +506,6 @@ ts_telemetry_stats_gather(TelemetryStats *stats)
 
 	relmcxt = AllocSetContextCreate(CurrentMemoryContext, "RelationStats", ALLOCSET_DEFAULT_SIZES);
 
-	/*
-	 * Use temporary per-tuple memory context to not accumulate cruft during
-	 * processing of pg_class.
-	 */
-	oldmcxt = MemoryContextSwitchTo(relmcxt);
-
 	while (true)
 	{
 		HeapTuple tup;
@@ -526,7 +515,6 @@ ts_telemetry_stats_gather(TelemetryStats *stats)
 		const Hypertable *ht = NULL;
 		const ContinuousAgg *cagg = NULL;
 
-		MemoryContextReset(relmcxt);
 		tup = systable_getnext(scan);
 
 		if (!HeapTupleIsValid(tup))
@@ -537,6 +525,13 @@ ts_telemetry_stats_gather(TelemetryStats *stats)
 		if (should_ignore_relation(catalog, class))
 			continue;
 
+		/*
+		 * Use temporary per-relation memory context to not accumulate cruft
+		 * during processing of pg_class.
+		 */
+		oldmcxt = MemoryContextSwitchTo(relmcxt);
+		MemoryContextReset(relmcxt);
+
 		reltype = classify_relation(class, htcache, &ht, &chunk, &cagg);
 
 		switch (reltype)
@@ -592,9 +587,10 @@ ts_telemetry_stats_gather(TelemetryStats *stats)
 			case RELTYPE_OTHER:
 				break;
 		}
+
+		MemoryContextSwitchTo(oldmcxt);
 	}
 
-	MemoryContextSwitchTo(oldmcxt);
 	systable_endscan(scan);
 	table_close(rel, AccessShareLock);
 	ts_scan_iterator_close(&statsctx.compressed_chunk_stats_iterator);
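Assembled from the last four hunks, the per-relation context handling in ts_telemetry_stats_gather() now has roughly the shape sketched below (gather_sketch() is hypothetical; the pg_class scan setup, should_ignore_relation() and the stats bookkeeping are elided). The short-lived context is entered and reset per row, and control returns to the caller's context before the next systable_getnext(), so the catalog scan itself never runs on the context that gets reset.

```c
#include "postgres.h"
#include "access/genam.h"
#include "access/htup.h"
#include "utils/memutils.h"

/* Illustration only: condensed shape of the loop after this commit. */
static void
gather_sketch(SysScanDesc scan)
{
	MemoryContext relmcxt = AllocSetContextCreate(CurrentMemoryContext,
												  "RelationStats",
												  ALLOCSET_DEFAULT_SIZES);

	while (true)
	{
		MemoryContext oldmcxt;
		HeapTuple	tup = systable_getnext(scan);

		if (!HeapTupleIsValid(tup))
			break;

		/* Per-relation work runs on a context that is reset every row. */
		oldmcxt = MemoryContextSwitchTo(relmcxt);
		MemoryContextReset(relmcxt);

		/* ... classify the relation and update the telemetry counters ... */

		MemoryContextSwitchTo(oldmcxt);
	}

	systable_endscan(scan);
	MemoryContextDelete(relmcxt);
}
```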