Refactor adaptive chunking scan code for PG12

This change adopts the PG12 table/index scan API for the adaptive
chunking functions that scan for min/max values in a table.

PostgreSQL versions older than 12 use compatibility functions and
wrappers for the new API.
This commit is contained in:
Erik Nordström 2020-02-21 15:57:40 +01:00 committed by Erik Nordström
parent 053040e3fd
commit 6d12b89b1e
4 changed files with 74 additions and 81 deletions

View File

@ -135,8 +135,8 @@ typedef enum MinMaxResult
static MinMaxResult
minmax_heapscan(Relation rel, Oid atttype, AttrNumber attnum, Datum minmax[2])
{
TupleTableSlot *slot = table_slot_create(rel, NULL);
TableScanDesc scan;
HeapTuple tuple;
TypeCacheEntry *tce;
bool nulls[2] = { true, true };
@ -148,10 +148,10 @@ minmax_heapscan(Relation rel, Oid atttype, AttrNumber attnum, Datum minmax[2])
scan = table_beginscan(rel, GetTransactionSnapshot(), 0, NULL);
while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL)
while (table_scan_getnextslot(scan, ForwardScanDirection, slot))
{
bool isnull;
Datum value = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
Datum value = slot_getattr(slot, attnum, &isnull);
if (isnull)
continue;
@ -171,7 +171,8 @@ minmax_heapscan(Relation rel, Oid atttype, AttrNumber attnum, Datum minmax[2])
}
}
heap_endscan(scan);
table_endscan(scan);
ExecDropSingleTupleTableSlot(slot);
return (nulls[0] || nulls[1]) ? MINMAX_NO_TUPLES : MINMAX_FOUND;
}
@ -183,69 +184,32 @@ static MinMaxResult
minmax_indexscan(Relation rel, Relation idxrel, AttrNumber attnum, Datum minmax[2])
{
IndexScanDesc scan = index_beginscan(rel, idxrel, GetTransactionSnapshot(), 0, 0);
HeapTuple tuple;
#if PG12_GE /* TODO we should not materialize a HeapTuple unless needed */
TupleTableSlot *slot = MakeSingleTupleTableSlot(RelationGetDescr(rel), &TTSOpsBufferHeapTuple);
bool should_free = false;
#endif
bool isnull;
TupleTableSlot *slot = table_slot_create(rel, NULL);
bool nulls[2] = { true, true };
int n = 0;
bool found_tuple;
int i;
#if PG12_LT
tuple = index_getnext(scan, BackwardScanDirection);
found_tuple = HeapTupleIsValid(tuple);
#else /* TODO we should not materialize a HeapTuple unless needed */
index_rescan(scan, NULL, 0, NULL, 0);
found_tuple = index_getnext_slot(scan, BackwardScanDirection, slot);
if (found_tuple)
tuple = ExecFetchSlotHeapTuple(slot, false, &should_free);
#endif
if (found_tuple)
for (i = 0; i < 2; i++)
{
minmax[n] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
nulls[n++] = false;
static ScanDirection directions[2] = { BackwardScanDirection, ForwardScanDirection };
bool found_tuple;
bool isnull;
index_rescan(scan, NULL, 0, NULL, 0);
found_tuple = index_getnext_slot(scan, directions[i], slot);
if (!found_tuple)
break;
minmax[i] = slot_getattr(slot, attnum, &isnull);
nulls[i] = isnull;
}
#if PG12_GE
if (should_free)
{
heap_freetuple(tuple);
should_free = false;
}
#endif
index_rescan(scan, NULL, 0, NULL, 0);
#if PG12_LT
tuple = index_getnext(scan, ForwardScanDirection);
found_tuple = HeapTupleIsValid(tuple);
#else
found_tuple = index_getnext_slot(scan, ForwardScanDirection, slot);
if (found_tuple)
tuple = ExecFetchSlotHeapTuple(slot, false, NULL);
#endif
if (found_tuple)
{
minmax[n] = heap_getattr(tuple, attnum, RelationGetDescr(rel), &isnull);
nulls[n++] = false;
}
#if PG12_GE
if (should_free)
heap_freetuple(tuple);
#endif
index_endscan(scan);
#if PG12_GE
ExecDropSingleTupleTableSlot(slot);
#endif
return (nulls[0] || nulls[1]) ? MINMAX_NO_TUPLES : MINMAX_FOUND;
Assert((nulls[0] && nulls[1]) || (!nulls[0] && !nulls[1]));
return nulls[0] ? MINMAX_NO_TUPLES : MINMAX_FOUND;
}
/*

View File

@ -695,12 +695,6 @@ get_attname_compat(Oid relid, AttrNumber attnum, bool missing_ok)
#define PG_RETURN_JSONB_P PG_RETURN_JSONB
#endif
#if PG12_LT
#define table_open(r, l) heap_open(r, l)
#define table_openrv(r, l) heap_openrv(r, l)
#define table_close(r, l) heap_close(r, l)
#endif
/*
* PG11 introduced a new level of nodes inside of ResultRelInfo for dealing with
* ON CONFLICT behavior in partitions (see:
@ -762,21 +756,6 @@ get_attname_compat(Oid relid, AttrNumber attnum, bool missing_ok)
#define PreventInTransactionBlock PreventTransactionChain
#endif
/*
* table_beginscan
* PG12 generalizes table scans to those not directly dependent on the heap.
* These are the functions we should generally use for PG12 and later versions. For
* earlier versions, we can assume that all tables are backed by the heap, so
* we forward it to heap_beginscan.
* see
* https://github.com/postgres/postgres/commit/c2fe139c201c48f1133e9fbea2dd99b8efe2fadd#diff-79a1a60cd631a1067199e0296de47ec4
*/
#if PG12_LT
#define TableScanDesc HeapScanDesc
#define table_beginscan heap_beginscan
#define table_beginscan_catalog heap_beginscan_catalog
#endif
/*
* TupleDescAttr
*

View File

@ -11,7 +11,6 @@
* directory for a copy of the PostgreSQL License.
*/
#include <postgres.h>
#include <access/heapam.h>
#include <utils/rel.h>
#include "tableam.h"
@ -44,3 +43,35 @@ ts_table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int opt
if (should_free)
pfree(tuple);
}
/*
 * Compatibility shim for PG12's table_scan_getnextslot().
 *
 * Advances the heap scan in the given direction and stores the fetched
 * tuple in the provided slot. Returns true when a tuple was found, false
 * when the scan is exhausted (the slot is left untouched in that case).
 */
bool
ts_table_scan_getnextslot(TableScanDesc scan, const ScanDirection direction, TupleTableSlot *slot)
{
	HeapTuple next = heap_getnext(scan, direction);

	if (!HeapTupleIsValid(next))
		return false;

	/* The heap scan owns the tuple's memory, so the slot must not free
	 * it when the tuple is later replaced */
	ExecStoreTuple(next, slot, InvalidBuffer, false);

	return true;
}
/*
 * Compatibility shim for PG12's index_getnext_slot().
 *
 * Advances the index scan in the given direction and stores the fetched
 * heap tuple in the provided slot. Returns true when a tuple was found,
 * false when the scan is exhausted (the slot is left untouched then).
 */
bool
ts_index_getnext_slot(IndexScanDesc scan, const ScanDirection direction, TupleTableSlot *slot)
{
	HeapTuple next = index_getnext(scan, direction);

	if (!HeapTupleIsValid(next))
		return false;

	/* The index scan owns the tuple's memory, so the slot must not free
	 * it when the tuple is later replaced */
	ExecStoreTuple(next, slot, InvalidBuffer, false);

	return true;
}

View File

@ -10,13 +10,32 @@
#include <executor/tuptable.h>
#include <nodes/pg_list.h>
#include <utils/relcache.h>
#include <access/heapam.h>
#include <access/genam.h>
/*
 * Compatibility layer mapping the PG12 table access method (tableam) API
 * onto the pre-PG12 heap functions, where every table is heap-backed.
 * NOTE(review): presumably this header is only included for PG < 12 —
 * confirm the include is guarded at the call sites or by the build.
 */

/* Simple one-to-one renames of the heap API */
#define TableScanDesc HeapScanDesc
#define table_open(r, l) heap_open(r, l)
#define table_openrv(r, l) heap_openrv(r, l)
#define table_close(r, l) heap_close(r, l)
#define table_beginscan(rel, snapshot, nkeys, keys) heap_beginscan(rel, snapshot, nkeys, keys)
#define table_beginscan_catalog(rel, nkeys, keys) heap_beginscan_catalog(rel, nkeys, keys)
#define table_endscan(scan) heap_endscan(scan)

/* Slot-based operations have no direct pre-PG12 equivalent; these map to
 * wrapper functions (declared below) that emulate the PG12 behavior */
#define table_slot_create(rel, reglist) ts_table_slot_create(rel, reglist)
#define table_tuple_insert(rel, slot, cid, options, bistate) \
ts_table_tuple_insert(rel, slot, cid, options, bistate)
#define table_scan_getnextslot(scan, direction, slot) \
ts_table_scan_getnextslot(scan, direction, slot)
#define index_getnext_slot(scan, direction, slot) ts_index_getnext_slot(scan, direction, slot)

/* Wrapper implementations live in the corresponding tableam compat .c file */
extern TupleTableSlot *ts_table_slot_create(Relation rel, List **reglist);
extern void ts_table_tuple_insert(Relation rel, TupleTableSlot *slot, CommandId cid, int options,
struct BulkInsertStateData *bistate);
extern bool ts_table_scan_getnextslot(TableScanDesc scan, const ScanDirection direction,
TupleTableSlot *slot);
extern bool ts_index_getnext_slot(IndexScanDesc scan, const ScanDirection direction,
TupleTableSlot *slot);

#endif /* TIMESCALEDB_COMPAT_TABLEAM_H */