Mirror of https://github.com/timescale/timescaledb.git (synced 2025-05-22 13:40:56 +08:00)

commit 32c45b75b2 (parent 73f4dcaaf0)

    formatting with pgindent
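pgindent is the PostgreSQL source formatter this commit applies. A minimal sketch of the style it enforces, as visible throughout the diff below: when a type name is missing from pgindent's typedefs list it renders pointer parameters as "Cache * cache", it puts a function's name at the start of its own line, moves braces onto their own lines, and block comments replace "//" comments. The Cache stand-in type and get_refcount helper here are illustrative only, not code from the repository.

/* Illustrative stand-in type, not from the repository */
typedef struct Cache
{
	int refcount;
} Cache;

#if 0
/* before pgindent */
static int get_refcount(Cache *cache) {
	// count of current pins
	return cache->refcount;
}
#else
/* after pgindent, with "Cache" absent from the typedefs list */
static int
get_refcount(Cache * cache)
{
	/* count of current pins */
	return cache->refcount;
}
#endif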
src/cache.c (diffstat 22)
@@ -2,7 +2,7 @@
 
 
 void
-cache_init(Cache *cache)
+cache_init(Cache * cache)
 {
 if (cache->htab != NULL)
 {
@@ -16,8 +16,10 @@ cache_init(Cache *cache)
 }
 
 static void
-cache_destroy(Cache *cache) {
-if (cache->refcount > 0) {
+cache_destroy(Cache * cache)
+{
+if (cache->refcount > 0)
+{
 /* will be destroyed later */
 return;
 }
@@ -32,7 +34,7 @@ cache_destroy(Cache *cache) {
 }
 
 void
-cache_invalidate(Cache *cache)
+cache_invalidate(Cache * cache)
 {
 if (cache == NULL)
 return;
@@ -49,13 +51,15 @@ cache_invalidate(Cache *cache)
 * Each call to cache_pin MUST BE paired with a call to cache_release.
 *
 */
-extern Cache *cache_pin(Cache *cache)
+extern Cache *
+cache_pin(Cache * cache)
 {
 cache->refcount++;
 return cache;
 }
 
-extern void cache_release(Cache *cache)
+extern void
+cache_release(Cache * cache)
 {
 Assert(cache->refcount > 0);
 cache->refcount--;
@@ -64,19 +68,19 @@ extern void cache_release(Cache *cache)
 
 
 MemoryContext
-cache_memory_ctx(Cache *cache)
+cache_memory_ctx(Cache * cache)
 {
 return cache->hctl.hcxt;
 }
 
 MemoryContext
-cache_switch_to_memory_context(Cache *cache)
+cache_switch_to_memory_context(Cache * cache)
 {
 return MemoryContextSwitchTo(cache->hctl.hcxt);
 }
 
 void *
-cache_fetch(Cache *cache, CacheQueryCtx *ctx)
+cache_fetch(Cache * cache, CacheQueryCtx * ctx)
 {
 bool found;
 
src/cache.h (diffstat 14)
@@ -25,14 +25,14 @@ typedef struct Cache
 void (*pre_destroy_hook) (struct Cache *);
 } Cache;
 
-extern void cache_init(Cache *cache);
-extern void cache_invalidate(Cache *cache);
-extern void *cache_fetch(Cache *cache, CacheQueryCtx *ctx);
+extern void cache_init(Cache * cache);
+extern void cache_invalidate(Cache * cache);
+extern void *cache_fetch(Cache * cache, CacheQueryCtx * ctx);
 
-extern MemoryContext cache_memory_ctx(Cache *cache);
-extern MemoryContext cache_switch_to_memory_context(Cache *cache);
+extern MemoryContext cache_memory_ctx(Cache * cache);
+extern MemoryContext cache_switch_to_memory_context(Cache * cache);
 
-extern Cache *cache_pin(Cache *cache);
-extern void cache_release(Cache *cache);
+extern Cache *cache_pin(Cache * cache);
+extern void cache_release(Cache * cache);
 
 #endif /* _TIMESCALEDB_CACHE_H_ */
@@ -26,7 +26,8 @@ static Catalog catalog = {
 .database_id = InvalidOid,
 };
 
-Catalog *catalog_get(void)
+Catalog *
+catalog_get(void)
 {
 AclResult aclresult;
 int i;
@@ -34,8 +35,10 @@ Catalog *catalog_get(void)
 if (MyDatabaseId == InvalidOid)
 elog(ERROR, "Invalid database ID");
 
-/* Check that the user has CREATE permissions on the database, since the
-operation may involve creating chunks and inserting into them. */
+/*
+ * Check that the user has CREATE permissions on the database, since the
+ * operation may involve creating chunks and inserting into them.
+ */
 aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE);
 
 if (aclresult != ACLCHECK_OK)
@@ -3,7 +3,8 @@
 
 #include <postgres.h>
 
-enum catalog_table {
+enum catalog_table
+{
 HYPERTABLE = 0,
 CHUNK,
 PARTITION,
@@ -32,11 +33,13 @@ enum catalog_table {
 #define CHUNK_INDEX_NAME "chunk_pkey"
 #define CHUNK_PARTITION_TIME_INDEX_NAME "chunk_partition_id_start_time_end_time_idx"
 
-typedef struct Catalog {
+typedef struct Catalog
+{
 char database_name[NAMEDATALEN];
 Oid database_id;
 Oid schema_id;
-struct {
+struct
+{
 const char *name;
 Oid id;
 Oid index_id;
@@ -46,22 +46,27 @@ typedef struct ChunkCacheQueryCtx
 } ChunkCacheQueryCtx;
 
 static void *
-chunk_crn_set_cache_get_key(CacheQueryCtx *ctx)
+chunk_crn_set_cache_get_key(CacheQueryCtx * ctx)
 {
 return &((ChunkCacheQueryCtx *) ctx)->chunk_id;
 }
 
-static void *chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx);
-static void *chunk_crn_set_cache_update_entry(Cache *cache, CacheQueryCtx *ctx);
+static void *chunk_crn_set_cache_create_entry(Cache * cache, CacheQueryCtx * ctx);
+static void *chunk_crn_set_cache_update_entry(Cache * cache, CacheQueryCtx * ctx);
 
-static Cache *chunk_crn_set_cache_create() {
+static Cache *
+chunk_crn_set_cache_create()
+{
 MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext,
 CHUNK_CACHE_INVAL_PROXY_TABLE,
 ALLOCSET_DEFAULT_SIZES);
 
 Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache));
-*cache = (Cache) {
-.hctl = {
+
+Cache tmp = (Cache)
+{
+.hctl =
+{
 .keysize = sizeof(int32),
 .entrysize = sizeof(chunk_crn_set_htable_entry),
 .hcxt = ctx,
@@ -74,6 +79,8 @@ static Cache *chunk_crn_set_cache_create() {
 .update_entry = chunk_crn_set_cache_update_entry,
 };
 
+*cache = tmp;
+
 cache_init(cache);
 
 return cache;
@@ -82,7 +89,7 @@ static Cache *chunk_crn_set_cache_create() {
 static Cache *chunk_crn_set_cache_current = NULL;
 
 static void *
-chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx)
+chunk_crn_set_cache_create_entry(Cache * cache, CacheQueryCtx * ctx)
 {
 ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx;
 chunk_crn_set_htable_entry *pe = ctx->entry;
@@ -101,7 +108,7 @@ chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx)
 }
 
 static void *
-chunk_crn_set_cache_update_entry(Cache *cache, CacheQueryCtx *ctx)
+chunk_crn_set_cache_update_entry(Cache * cache, CacheQueryCtx * ctx)
 {
 ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx;
 chunk_crn_set_htable_entry *pe = ctx->entry;
@@ -127,7 +134,7 @@ chunk_crn_set_cache_invalidate_callback(void)
 }
 
 static chunk_crn_set_htable_entry *
-chunk_crn_set_cache_get_entry(Cache *cache, int32 chunk_id, int64 chunk_start_time, int64 chunk_end_time)
+chunk_crn_set_cache_get_entry(Cache * cache, int32 chunk_id, int64 chunk_start_time, int64 chunk_end_time)
 {
 if (cache == NULL)
 {
@@ -179,15 +186,18 @@ typedef struct ChunkScanCtx
 chunk_row *chunk;
 Oid chunk_tbl_id;
 int32 partition_id;
-int64 starttime, endtime, timepoint;
+int64 starttime,
+endtime,
+timepoint;
 bool should_lock;
 } ChunkScanCtx;
 
 static bool
-chunk_tuple_timepoint_filter(TupleInfo *ti, void *arg)
+chunk_tuple_timepoint_filter(TupleInfo * ti, void *arg)
 {
 ChunkScanCtx *ctx = arg;
-bool starttime_is_null, endtime_is_null;
+bool starttime_is_null,
+endtime_is_null;
 Datum datum;
 
 datum = heap_getattr(ti->tuple, CHUNK_TBL_COL_STARTTIME, ti->desc, &starttime_is_null);
@@ -203,7 +213,7 @@ chunk_tuple_timepoint_filter(TupleInfo *ti, void *arg)
 }
 
 static bool
-chunk_tuple_found(TupleInfo *ti, void *arg)
+chunk_tuple_found(TupleInfo * ti, void *arg)
 {
 ChunkScanCtx *ctx = arg;
 bool is_null;
@@ -242,8 +252,9 @@ chunk_scan(int32 partition_id, int64 timepoint, bool tuplock)
 .scandirection = ForwardScanDirection,
 };
 
-/* Perform an index scan on epoch ID to find the partitions for the
-* epoch. */
+/*
+ * Perform an index scan on epoch ID to find the partitions for the epoch.
+ */
 ScanKeyInit(&scankey[0], CHUNK_IDX_COL_PARTITION_ID, BTEqualStrategyNumber,
 F_INT4EQ, Int32GetDatum(partition_id));
 
@@ -257,7 +268,7 @@ chunk_scan(int32 partition_id, int64 timepoint, bool tuplock)
 * The cache parameter is a chunk_crn_set_cache (can be null to use current cache).
 */
 chunk_cache_entry *
-get_chunk_cache_entry(Cache *cache, Partition *part, int64 timepoint, bool lock)
+get_chunk_cache_entry(Cache * cache, Partition * part, int64 timepoint, bool lock)
 {
 chunk_crn_set_htable_entry *chunk_crn_cache;
 chunk_cache_entry *entry;
@@ -24,7 +24,7 @@ typedef struct chunk_cache_entry
 } chunk_cache_entry;
 
 
-extern chunk_cache_entry *get_chunk_cache_entry(Cache *cache, Partition *part, int64 timepoint, bool lock);
+extern chunk_cache_entry *get_chunk_cache_entry(Cache * cache, Partition * part, int64 timepoint, bool lock);
 
 extern void chunk_crn_set_cache_invalidate_callback(void);
 
@@ -14,7 +14,7 @@
 #include "scanner.h"
 #include "partitioning.h"
 
-static void *hypertable_cache_create_entry(Cache *cache, CacheQueryCtx *ctx);
+static void *hypertable_cache_create_entry(Cache * cache, CacheQueryCtx * ctx);
 
 typedef struct HypertableCacheQueryCtx
 {
@@ -23,20 +23,25 @@ typedef struct HypertableCacheQueryCtx
 } HypertableCacheQueryCtx;
 
 static void *
-hypertable_cache_get_key(CacheQueryCtx *ctx)
+hypertable_cache_get_key(CacheQueryCtx * ctx)
 {
 return &((HypertableCacheQueryCtx *) ctx)->hypertable_id;
 }
 
 
-static Cache *hypertable_cache_create() {
+static Cache *
+hypertable_cache_create()
+{
 MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext,
 HYPERTABLE_CACHE_INVAL_PROXY_TABLE,
 ALLOCSET_DEFAULT_SIZES);
 
 Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache));
-*cache = (Cache) {
-.hctl = {
+
+Cache tmp = (Cache)
+{
+.hctl =
+{
 .keysize = sizeof(int32),
 .entrysize = sizeof(hypertable_cache_entry),
 .hcxt = ctx,
@@ -48,6 +53,7 @@ static Cache *hypertable_cache_create() {
 .create_entry = hypertable_cache_create_entry,
 };
 
+*cache = tmp;
 cache_init(cache);
 
 return cache;
@@ -64,7 +70,7 @@ static Cache *hypertable_cache_current = NULL;
 #define HT_IDX_COL_ID 1
 
 static bool
-hypertable_tuple_found(TupleInfo *ti, void *data)
+hypertable_tuple_found(TupleInfo * ti, void *data)
 {
 bool is_null;
 HypertableCacheQueryCtx *hctx = data;
@@ -88,7 +94,7 @@ hypertable_tuple_found(TupleInfo *ti, void *data)
 }
 
 static void *
-hypertable_cache_create_entry(Cache *cache, CacheQueryCtx *ctx)
+hypertable_cache_create_entry(Cache * cache, CacheQueryCtx * ctx)
 {
 HypertableCacheQueryCtx *hctx = (HypertableCacheQueryCtx *) ctx;
 Catalog *catalog = catalog_get();
@@ -125,7 +131,7 @@ hypertable_cache_invalidate_callback(void)
 
 /* Get hypertable cache entry. If the entry is not in the cache, add it. */
 hypertable_cache_entry *
-hypertable_cache_get_entry(Cache *cache, int32 hypertable_id)
+hypertable_cache_get_entry(Cache * cache, int32 hypertable_id)
 {
 HypertableCacheQueryCtx ctx = {
 .hypertable_id = hypertable_id,
@@ -155,7 +161,7 @@ cmp_epochs(const void *time_pt_pointer, const void *test)
 }
 
 epoch_and_partitions_set *
-hypertable_cache_get_partition_epoch(Cache *cache, hypertable_cache_entry *hce, int64 time_pt, Oid relid)
+hypertable_cache_get_partition_epoch(Cache * cache, hypertable_cache_entry * hce, int64 time_pt, Oid relid)
 {
 MemoryContext old;
 epoch_and_partitions_set *epoch,
@@ -213,7 +219,8 @@ hypertable_cache_pin()
 }
 
 
-void _hypertable_cache_init(void)
+void
+_hypertable_cache_init(void)
 {
 CreateCacheMemoryContext();
 hypertable_cache_current = hypertable_cache_create();
@@ -27,7 +27,7 @@ typedef struct hypertable_cache_entry
 hypertable_cache_entry *hypertable_cache_get_entry(Cache * cache, int32 hypertable_id);
 
 epoch_and_partitions_set *
-hypertable_cache_get_partition_epoch(Cache *cache, hypertable_cache_entry *hce, int64 time_pt, Oid relid);
+hypertable_cache_get_partition_epoch(Cache * cache, hypertable_cache_entry * hce, int64 time_pt, Oid relid);
 
 void hypertable_cache_invalidate_callback(void);
 
src/insert.c (diffstat 60)
@ -59,8 +59,8 @@
|
||||
|
||||
/* private funcs */
|
||||
|
||||
static ObjectAddress create_insert_index(int32 hypertable_id, char * time_field, PartitioningInfo *part_info,epoch_and_partitions_set *epoch);
|
||||
static Node *get_keyspace_fn_call(PartitioningInfo *part_info);
|
||||
static ObjectAddress create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo * part_info, epoch_and_partitions_set * epoch);
|
||||
static Node *get_keyspace_fn_call(PartitioningInfo * part_info);
|
||||
|
||||
/*
|
||||
* Inserts rows from the temporary copy table into correct hypertable child tables.
|
||||
@ -102,8 +102,9 @@ typedef struct ChunkInsertCtxRel
|
||||
BulkInsertState bistate;
|
||||
} ChunkInsertCtxRel;
|
||||
|
||||
static ChunkInsertCtxRel*
|
||||
chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *range_table) {
|
||||
static ChunkInsertCtxRel *
|
||||
chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *range_table)
|
||||
{
|
||||
TupleDesc tupDesc;
|
||||
ChunkInsertCtxRel *rel_ctx = palloc(sizeof(ChunkInsertCtxRel));
|
||||
|
||||
@ -125,7 +126,7 @@ chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *ra
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel *rel_ctx)
|
||||
chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel * rel_ctx)
|
||||
{
|
||||
FreeBulkInsertState(rel_ctx->bistate);
|
||||
ExecCloseIndices(rel_ctx->resultRelInfo);
|
||||
@ -136,7 +137,7 @@ chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel *rel_ctx)
|
||||
|
||||
|
||||
static void
|
||||
chunk_insert_ctx_rel_insert_tuple(ChunkInsertCtxRel *rel_ctx, HeapTuple tuple)
|
||||
chunk_insert_ctx_rel_insert_tuple(ChunkInsertCtxRel * rel_ctx, HeapTuple tuple)
|
||||
{
|
||||
int hi_options = 0; /* no optimization */
|
||||
CommandId mycid = GetCurrentCommandId(true);
|
||||
@ -169,7 +170,7 @@ typedef struct ChunkInsertCtx
|
||||
} ChunkInsertCtx;
|
||||
|
||||
static ChunkInsertCtx *
|
||||
chunk_insert_ctx_new(chunk_cache_entry *chunk, Cache *pinned)
|
||||
chunk_insert_ctx_new(chunk_cache_entry * chunk, Cache * pinned)
|
||||
{
|
||||
ListCell *lc;
|
||||
List *rel_ctx_list = NIL;
|
||||
@ -246,7 +247,7 @@ chunk_insert_ctx_new(chunk_cache_entry *chunk, Cache *pinned)
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_insert_ctx_destroy(ChunkInsertCtx *ctx)
|
||||
chunk_insert_ctx_destroy(ChunkInsertCtx * ctx)
|
||||
{
|
||||
ListCell *lc;
|
||||
|
||||
@ -260,23 +261,26 @@ chunk_insert_ctx_destroy(ChunkInsertCtx *ctx)
|
||||
foreach(lc, ctx->ctxs)
|
||||
{
|
||||
ChunkInsertCtxRel *rel_ctx = lfirst(lc);
|
||||
|
||||
chunk_insert_ctx_rel_destroy(rel_ctx);
|
||||
}
|
||||
}
|
||||
|
||||
static void
|
||||
chunk_insert_ctx_insert_tuple(ChunkInsertCtx *ctx, HeapTuple tup)
|
||||
chunk_insert_ctx_insert_tuple(ChunkInsertCtx * ctx, HeapTuple tup)
|
||||
{
|
||||
ListCell *lc;
|
||||
|
||||
foreach(lc, ctx->ctxs)
|
||||
{
|
||||
ChunkInsertCtxRel *rel_ctx = lfirst(lc);
|
||||
|
||||
chunk_insert_ctx_rel_insert_tuple(rel_ctx, tup);
|
||||
}
|
||||
}
|
||||
|
||||
typedef struct CopyTableQueryCtx {
|
||||
typedef struct CopyTableQueryCtx
|
||||
{
|
||||
Partition *part;
|
||||
ChunkInsertCtx *chunk_ctx;
|
||||
epoch_and_partitions_set *pe;
|
||||
@ -284,7 +288,7 @@ typedef struct CopyTableQueryCtx {
|
||||
} CopyTableQueryCtx;
|
||||
|
||||
static bool
|
||||
copy_table_tuple_found(TupleInfo *ti, void *data)
|
||||
copy_table_tuple_found(TupleInfo * ti, void *data)
|
||||
{
|
||||
bool is_null;
|
||||
CopyTableQueryCtx *ctx = data;
|
||||
@ -293,7 +297,10 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
|
||||
|
||||
if (ctx->pe->num_partitions > 1)
|
||||
{
|
||||
/* first element is partition index (used for sorting but not necessary here) */
|
||||
/*
|
||||
* first element is partition index (used for sorting but not
|
||||
* necessary here)
|
||||
*/
|
||||
Datum time_datum = index_getattr(ti->ituple, 2, ti->ituple_desc, &is_null);
|
||||
Datum keyspace_datum = index_getattr(ti->ituple, 3, ti->ituple_desc, &is_null);
|
||||
|
||||
@ -303,6 +310,7 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
|
||||
else
|
||||
{
|
||||
Datum time_datum = index_getattr(ti->ituple, 1, ti->ituple_desc, &is_null);
|
||||
|
||||
time_pt = time_value_to_internal(time_datum, ctx->hci->time_column_type);
|
||||
keyspace_pt = KEYSPACE_PT_NO_PARTITIONING;
|
||||
}
|
||||
@ -333,13 +341,18 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
|
||||
Datum was_closed_datum;
|
||||
chunk_cache_entry *chunk;
|
||||
Cache *pinned = chunk_crn_set_cache_pin();
|
||||
|
||||
/*
|
||||
* TODO: this first call should be non-locking and use a cache(for
|
||||
* performance)
|
||||
*/
|
||||
chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, false);
|
||||
was_closed_datum = FunctionCall1(get_close_if_needed_fn(), Int32GetDatum(chunk->id));
|
||||
/* chunk may have been closed and thus changed /or/ need to get share lock */
|
||||
|
||||
/*
|
||||
* chunk may have been closed and thus changed /or/ need to get share
|
||||
* lock
|
||||
*/
|
||||
chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, true);
|
||||
|
||||
ctx->chunk_ctx = chunk_insert_ctx_new(chunk, pinned);
|
||||
@ -347,19 +360,22 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
|
||||
|
||||
/* insert here: */
|
||||
/* has to be a copy(not sure why) */
|
||||
chunk_insert_ctx_insert_tuple(ctx->chunk_ctx,heap_copytuple(ti->tuple));
|
||||
chunk_insert_ctx_insert_tuple(ctx->chunk_ctx, heap_copytuple(ti->tuple));
|
||||
return true;
|
||||
}
|
||||
|
||||
static void scan_copy_table_and_insert_post(int num_tuples, void *data)
|
||||
static void
|
||||
scan_copy_table_and_insert_post(int num_tuples, void *data)
|
||||
{
|
||||
CopyTableQueryCtx *ctx = data;
|
||||
|
||||
if (ctx->chunk_ctx != NULL)
|
||||
chunk_insert_ctx_destroy(ctx->chunk_ctx);
|
||||
}
|
||||
|
||||
static void scan_copy_table_and_insert( hypertable_cache_entry *hci,
|
||||
epoch_and_partitions_set *pe,
|
||||
static void
|
||||
scan_copy_table_and_insert(hypertable_cache_entry * hci,
|
||||
epoch_and_partitions_set * pe,
|
||||
Oid table, Oid index)
|
||||
{
|
||||
CopyTableQueryCtx query_ctx = {
|
||||
@ -537,10 +553,12 @@ create_copy_table(int32 hypertable_id, Oid root_oid)
|
||||
}
|
||||
|
||||
static IndexElem *
|
||||
makeIndexElem(char *name, Node *expr){
|
||||
Assert((name ==NULL || expr == NULL) && (name !=NULL || expr !=NULL));
|
||||
makeIndexElem(char *name, Node *expr)
|
||||
{
|
||||
Assert((name == NULL || expr == NULL) && (name != NULL || expr != NULL));
|
||||
|
||||
IndexElem *time_elem = makeNode(IndexElem);
|
||||
|
||||
time_elem->name = name;
|
||||
time_elem->expr = expr;
|
||||
time_elem->indexcolname = NULL;
|
||||
@ -575,7 +593,7 @@ makeIndexElem(char *name, Node *expr){
|
||||
*
|
||||
* */
|
||||
static ObjectAddress
|
||||
create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo *part_info, epoch_and_partitions_set *epoch)
|
||||
create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo * part_info, epoch_and_partitions_set * epoch)
|
||||
{
|
||||
IndexStmt *index_stmt = makeNode(IndexStmt);
|
||||
IndexElem *time_elem;
|
||||
@ -657,7 +675,7 @@ create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo *par
|
||||
*
|
||||
*/
|
||||
static Node *
|
||||
get_keyspace_fn_call(PartitioningInfo *part_info)
|
||||
get_keyspace_fn_call(PartitioningInfo * part_info)
|
||||
{
|
||||
ColumnRef *col_ref = makeNode(ColumnRef);
|
||||
A_Const *mod_const;
|
||||
|
@ -49,7 +49,7 @@ prepare_plan(const char *src, int nargs, Oid *argtypes)
|
||||
}
|
||||
|
||||
void
|
||||
free_epoch(epoch_and_partitions_set *epoch)
|
||||
free_epoch(epoch_and_partitions_set * epoch)
|
||||
{
|
||||
if (epoch->partitioning != NULL)
|
||||
pfree(epoch->partitioning);
|
||||
@ -64,7 +64,7 @@ free_epoch(epoch_and_partitions_set *epoch)
|
||||
DEFINE_PLAN(get_crn_plan, CRN_QUERY, 1, CRN_QUERY_ARGS)
|
||||
|
||||
crn_set *
|
||||
fetch_crn_set(crn_set *entry, int32 chunk_id)
|
||||
fetch_crn_set(crn_set * entry, int32 chunk_id)
|
||||
{
|
||||
SPIPlanPtr plan = get_crn_plan();
|
||||
Datum args[1] = {Int32GetDatum(chunk_id)};
|
||||
@ -174,7 +174,7 @@ chunk_tuple_create_spi_connected(int32 partition_id, int64 timepoint, bool lock,
|
||||
}
|
||||
|
||||
static chunk_row *
|
||||
chunk_row_fill_in(chunk_row *chunk, HeapTuple tuple, TupleDesc tupdesc)
|
||||
chunk_row_fill_in(chunk_row * chunk, HeapTuple tuple, TupleDesc tupdesc)
|
||||
{
|
||||
int64 time_ret;
|
||||
bool is_null;
|
||||
@ -225,6 +225,8 @@ chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock)
|
||||
}
|
||||
|
||||
|
||||
bool chunk_row_timepoint_is_member(const chunk_row *row, const int64 time_pt){
|
||||
bool
|
||||
chunk_row_timepoint_is_member(const chunk_row * row, const int64 time_pt)
|
||||
{
|
||||
return row->start_time <= time_pt && row->end_time >= time_pt;
|
||||
}
|
||||
|
@ -37,16 +37,16 @@ extern SPIPlanPtr prepare_plan(const char *src, int nargs, Oid *argtypes);
|
||||
|
||||
|
||||
/* db access func */
|
||||
extern epoch_and_partitions_set *fetch_epoch_and_partitions_set(epoch_and_partitions_set *entry,
|
||||
extern epoch_and_partitions_set *fetch_epoch_and_partitions_set(epoch_and_partitions_set * entry,
|
||||
int32 hypertable_id, int64 time_pt, Oid relid);
|
||||
|
||||
extern void free_epoch(epoch_and_partitions_set *epoch);
|
||||
extern void free_epoch(epoch_and_partitions_set * epoch);
|
||||
|
||||
extern crn_set *fetch_crn_set(crn_set *entry, int32 chunk_id);
|
||||
extern crn_set *fetch_crn_set(crn_set * entry, int32 chunk_id);
|
||||
|
||||
chunk_row *
|
||||
chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock);
|
||||
chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock);
|
||||
|
||||
bool chunk_row_timepoint_is_member(const chunk_row *row, const int64 time_pt);
|
||||
bool chunk_row_timepoint_is_member(const chunk_row * row, const int64 time_pt);
|
||||
|
||||
#endif /* TIMESCALEDB_METADATA_QUERIES_H */
|
||||
|
@ -1,36 +1,39 @@
|
||||
/* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
|
||||
//-----------------------------------------------------------------------------
|
||||
// MurmurHash3 was written by Austin Appleby, and is placed in the public
|
||||
// domain. The author hereby disclaims copyright to this source code.
|
||||
/* ----------------------------------------------------------------------------- */
|
||||
/* MurmurHash3 was written by Austin Appleby, and is placed in the public */
|
||||
/* domain. The author hereby disclaims copyright to this source code. */
|
||||
|
||||
// Note - The x86 and x64 versions do _not_ produce the same results, as the
|
||||
// algorithms are optimized for their respective platforms. You can still
|
||||
// compile and run any of them on any platform, but your performance with the
|
||||
// non-native version will be less than optimal.
|
||||
/* Note - The x86 and x64 versions do _not_ produce the same results, as the */
|
||||
/* algorithms are optimized for their respective platforms. You can still */
|
||||
/* compile and run any of them on any platform, but your performance with the */
|
||||
/* non-native version will be less than optimal. */
|
||||
|
||||
#include "pgmurmur3.h"
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Platform-specific functions and macros
|
||||
/* ----------------------------------------------------------------------------- */
|
||||
/* Platform-specific functions and macros */
|
||||
|
||||
static inline uint32_t rotl32(uint32_t x, int8_t r)
|
||||
static inline uint32_t
|
||||
rotl32(uint32_t x, int8_t r)
|
||||
{
|
||||
return (x << r) | (x >> (32 - r));
|
||||
}
|
||||
|
||||
#define ROTL32(x,y) rotl32(x,y)
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Block read - if your platform needs to do endian-swapping or can only
|
||||
// handle aligned reads, do the conversion here
|
||||
static inline uint32_t getblock(const uint32_t * p, int i)
|
||||
/* ----------------------------------------------------------------------------- */
|
||||
/* Block read - if your platform needs to do endian-swapping or can only */
|
||||
/* handle aligned reads, do the conversion here */
|
||||
static inline uint32_t
|
||||
getblock(const uint32_t *p, int i)
|
||||
{
|
||||
return p[i];
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
// Finalization mix - force all bits of a hash block to avalanche
|
||||
static inline uint32_t fmix(uint32_t h)
|
||||
/* ----------------------------------------------------------------------------- */
|
||||
/* Finalization mix - force all bits of a hash block to avalanche */
|
||||
static inline uint32_t
|
||||
fmix(uint32_t h)
|
||||
{
|
||||
h ^= h >> 16;
|
||||
h *= 0x85ebca6b;
|
||||
@ -40,8 +43,9 @@ static inline uint32_t fmix(uint32_t h)
|
||||
return h;
|
||||
}
|
||||
|
||||
//-----------------------------------------------------------------------------
|
||||
void hlib_murmur3(const void *key, size_t len, uint64_t *io)
|
||||
/* ----------------------------------------------------------------------------- */
|
||||
void
|
||||
hlib_murmur3(const void *key, size_t len, uint64_t * io)
|
||||
{
|
||||
const uint8_t *data = (const uint8_t *) key;
|
||||
const int nblocks = len / 4;
|
||||
@ -53,10 +57,11 @@ void hlib_murmur3(const void *key, size_t len, uint64_t *io)
|
||||
int i;
|
||||
uint32_t k1;
|
||||
|
||||
//----------
|
||||
// body
|
||||
/* ---------- */
|
||||
/* body */
|
||||
blocks = (const uint32_t *) (data + nblocks * 4);
|
||||
for (i = -nblocks; i; i++) {
|
||||
for (i = -nblocks; i; i++)
|
||||
{
|
||||
k1 = getblock(blocks, i);
|
||||
k1 *= c1;
|
||||
k1 = ROTL32(k1, 15);
|
||||
@ -65,11 +70,12 @@ void hlib_murmur3(const void *key, size_t len, uint64_t *io)
|
||||
h1 = ROTL32(h1, 13);
|
||||
h1 = h1 * 5 + 0xe6546b64;
|
||||
}
|
||||
//----------
|
||||
// tail
|
||||
/* ---------- */
|
||||
/* tail */
|
||||
tail = (const uint8_t *) (data + nblocks * 4);
|
||||
k1 = 0;
|
||||
switch (len & 3) {
|
||||
switch (len & 3)
|
||||
{
|
||||
case 3:
|
||||
k1 ^= tail[2] << 16;
|
||||
case 2:
|
||||
@ -82,10 +88,9 @@ void hlib_murmur3(const void *key, size_t len, uint64_t *io)
|
||||
h1 ^= k1;
|
||||
};
|
||||
|
||||
//----------
|
||||
// finalization
|
||||
/* ---------- */
|
||||
/* finalization */
|
||||
h1 ^= len;
|
||||
h1 = fmix(h1);
|
||||
io[0] = h1;
|
||||
}
|
||||
|
||||
|
@ -6,7 +6,8 @@
|
||||
#include "scanner.h"
|
||||
#include "catalog.h"
|
||||
|
||||
static void partitioning_func_set_func_fmgr(PartitioningFunc *pf)
|
||||
static void
|
||||
partitioning_func_set_func_fmgr(PartitioningFunc * pf)
|
||||
{
|
||||
FuncCandidateList funclist =
|
||||
FuncnameGetCandidates(list_make2(makeString(pf->schema), makeString(pf->name)),
|
||||
@ -20,10 +21,13 @@ static void partitioning_func_set_func_fmgr(PartitioningFunc *pf)
|
||||
fmgr_info_cxt(funclist->oid, &pf->func_fmgr, CurrentMemoryContext);
|
||||
}
|
||||
|
||||
static void partitioning_info_set_textfunc_fmgr(PartitioningInfo *pi, Oid relid)
|
||||
static void
|
||||
partitioning_info_set_textfunc_fmgr(PartitioningInfo * pi, Oid relid)
|
||||
{
|
||||
Oid type_id, func_id;
|
||||
Oid type_id,
|
||||
func_id;
|
||||
bool isVarlena;
|
||||
|
||||
pi->column_attnum = get_attnum(relid, pi->column);
|
||||
type_id = get_atttype(relid, pi->column_attnum);
|
||||
getTypeOutputInfo(type_id, &func_id, &isVarlena);
|
||||
@ -56,13 +60,15 @@ partitioning_info_create(int num_partitions,
|
||||
return pi;
|
||||
}
|
||||
|
||||
int16 partitioning_func_apply(PartitioningFunc *pf, Datum value)
|
||||
int16
|
||||
partitioning_func_apply(PartitioningFunc * pf, Datum value)
|
||||
{
|
||||
Datum text = FunctionCall1(&pf->textfunc_fmgr, value);
|
||||
char *partition_val = DatumGetCString(text);
|
||||
Datum keyspace_datum = FunctionCall2(&pf->func_fmgr,
|
||||
CStringGetTextDatum(partition_val),
|
||||
Int32GetDatum(pf->modulos));
|
||||
|
||||
return DatumGetInt16(keyspace_datum);
|
||||
}
|
||||
|
||||
@ -85,20 +91,23 @@ int16 partitioning_func_apply(PartitioningFunc *pf, Datum value)
|
||||
|
||||
/* PartitionEpochCtx is used to pass on information during a partition epoch and
|
||||
* partition scans. */
|
||||
typedef struct {
|
||||
typedef struct
|
||||
{
|
||||
epoch_and_partitions_set *pe;
|
||||
int16 num_partitions;
|
||||
int32 hypertable_id;
|
||||
int64 starttime, endtime, timepoint;
|
||||
int64 starttime,
|
||||
endtime,
|
||||
timepoint;
|
||||
Oid relid;
|
||||
} PartitionEpochCtx;
|
||||
|
||||
static int
|
||||
partition_scan(PartitionEpochCtx *pctx);
|
||||
partition_scan(PartitionEpochCtx * pctx);
|
||||
|
||||
/* Filter partition epoch tuples based on hypertable ID and start/end time. */
|
||||
static bool
|
||||
partition_epoch_filter(TupleInfo *ti, void *arg)
|
||||
partition_epoch_filter(TupleInfo * ti, void *arg)
|
||||
{
|
||||
bool is_null;
|
||||
PartitionEpochCtx *pctx = arg;
|
||||
@ -106,7 +115,8 @@ partition_epoch_filter(TupleInfo *ti, void *arg)
|
||||
|
||||
if (DatumGetInt32(id) == pctx->hypertable_id)
|
||||
{
|
||||
bool starttime_is_null, endtime_is_null;
|
||||
bool starttime_is_null,
|
||||
endtime_is_null;
|
||||
Datum starttime = heap_getattr(ti->tuple, PE_TBL_COL_STARTTIME, ti->desc, &starttime_is_null);
|
||||
Datum endtime = heap_getattr(ti->tuple, PE_TBL_COL_ENDTIME, ti->desc, &endtime_is_null);
|
||||
|
||||
@ -123,9 +133,10 @@ partition_epoch_filter(TupleInfo *ti, void *arg)
|
||||
sizeof(epoch_and_partitions_set) + (sizeof(Partition) * num_partitions)
|
||||
|
||||
static epoch_and_partitions_set *
|
||||
partition_epoch_create(int32 epoch_id, PartitionEpochCtx *ctx)
|
||||
partition_epoch_create(int32 epoch_id, PartitionEpochCtx * ctx)
|
||||
{
|
||||
epoch_and_partitions_set *pe;
|
||||
|
||||
pe = palloc(PARTITION_EPOCH_SIZE(ctx->num_partitions));
|
||||
pe->id = epoch_id;
|
||||
pe->num_partitions = ctx->num_partitions;
|
||||
@ -138,7 +149,7 @@ partition_epoch_create(int32 epoch_id, PartitionEpochCtx *ctx)
|
||||
/* Callback for partition epoch scan. For every epoch tuple found, create a
|
||||
* partition epoch entry and scan for associated partitions. */
|
||||
static bool
|
||||
partition_epoch_tuple_found(TupleInfo *ti, void *arg)
|
||||
partition_epoch_tuple_found(TupleInfo * ti, void *arg)
|
||||
{
|
||||
PartitionEpochCtx *pctx = arg;
|
||||
epoch_and_partitions_set *pe;
|
||||
@ -156,8 +167,13 @@ partition_epoch_tuple_found(TupleInfo *ti, void *arg)
|
||||
|
||||
if (pctx->num_partitions > 1)
|
||||
{
|
||||
Datum partfunc, partmod, partcol;
|
||||
bool partfunc_is_null, partmod_is_null, partcol_is_null;
|
||||
Datum partfunc,
|
||||
partmod,
|
||||
partcol;
|
||||
bool partfunc_is_null,
|
||||
partmod_is_null,
|
||||
partcol_is_null;
|
||||
|
||||
partfunc = heap_getattr(ti->tuple, PE_TBL_COL_PARTFUNC, ti->desc, &partfunc_is_null);
|
||||
partmod = heap_getattr(ti->tuple, PE_TBL_COL_PARTMOD, ti->desc, &partmod_is_null);
|
||||
partcol = heap_getattr(ti->tuple, PE_TBL_COL_PARTCOL, ti->desc, &partcol_is_null);
|
||||
@ -175,7 +191,9 @@ partition_epoch_tuple_found(TupleInfo *ti, void *arg)
|
||||
DatumGetCString(partcol),
|
||||
DatumGetInt16(partmod),
|
||||
pctx->relid);
|
||||
} else {
|
||||
}
|
||||
else
|
||||
{
|
||||
pe->partitioning = NULL;
|
||||
}
|
||||
|
||||
@ -196,7 +214,7 @@ partition_epoch_tuple_found(TupleInfo *ti, void *arg)
|
||||
#define PARTITION_IDX_COL_ID 1
|
||||
|
||||
static bool
|
||||
partition_tuple_found(TupleInfo *ti, void *arg)
|
||||
partition_tuple_found(TupleInfo * ti, void *arg)
|
||||
{
|
||||
PartitionEpochCtx *pctx = arg;
|
||||
epoch_and_partitions_set *pe = pctx->pe;
|
||||
@ -219,7 +237,7 @@ partition_tuple_found(TupleInfo *ti, void *arg)
|
||||
}
|
||||
|
||||
static int
|
||||
partition_scan(PartitionEpochCtx *pctx)
|
||||
partition_scan(PartitionEpochCtx * pctx)
|
||||
{
|
||||
ScanKeyData scankey[1];
|
||||
Catalog *catalog = catalog_get();
|
||||
@ -236,15 +254,18 @@ partition_scan(PartitionEpochCtx *pctx)
|
||||
.scandirection = ForwardScanDirection,
|
||||
};
|
||||
|
||||
/* Perform an index scan on epoch ID to find the partitions for the
|
||||
* epoch. */
|
||||
/*
|
||||
* Perform an index scan on epoch ID to find the partitions for the epoch.
|
||||
*/
|
||||
ScanKeyInit(&scankey[0], PARTITION_IDX_COL_ID, BTEqualStrategyNumber,
|
||||
F_INT4EQ, Int32GetDatum(pctx->pe->id));
|
||||
|
||||
scanner_scan(&scanCtx);
|
||||
|
||||
/* The scan decremented the number of partitions in the context, so check
|
||||
that it is zero for correct number of partitions scanned. */
|
||||
/*
|
||||
* The scan decremented the number of partitions in the context, so check
|
||||
* that it is zero for correct number of partitions scanned.
|
||||
*/
|
||||
if (pctx->num_partitions != 0)
|
||||
{
|
||||
elog(ERROR, "%d partitions found for epoch %d, expected %d",
|
||||
@ -277,8 +298,10 @@ partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid)
|
||||
.scandirection = ForwardScanDirection,
|
||||
};
|
||||
|
||||
/* Perform an index scan on hypertable ID. We filter on start and end
|
||||
* time. */
|
||||
/*
|
||||
* Perform an index scan on hypertable ID. We filter on start and end
|
||||
* time.
|
||||
*/
|
||||
ScanKeyInit(&scankey[0], PE_IDX_COL_HTID, BTEqualStrategyNumber,
|
||||
F_INT4EQ, Int32GetDatum(hypertable_id));
|
||||
|
||||
@ -309,7 +332,7 @@ cmp_partitions(const void *keyspace_pt_arg, const void *value)
|
||||
}
|
||||
|
||||
Partition *
|
||||
partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt)
|
||||
partition_epoch_get_partition(epoch_and_partitions_set * epoch, int16 keyspace_pt)
|
||||
{
|
||||
Partition *part;
|
||||
|
||||
@ -340,7 +363,8 @@ partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt
|
||||
return part;
|
||||
}
|
||||
|
||||
bool partition_keyspace_pt_is_member(const Partition *part, const int16 keyspace_pt)
|
||||
bool
|
||||
partition_keyspace_pt_is_member(const Partition * part, const int16 keyspace_pt)
|
||||
{
|
||||
return keyspace_pt == KEYSPACE_PT_NO_PARTITIONING || (part->keyspace_start <= keyspace_pt && part->keyspace_end >= keyspace_pt);
|
||||
}
|
||||
|
@ -19,12 +19,16 @@ typedef struct PartitioningFunc
|
||||
char schema[NAMEDATALEN];
|
||||
char name[NAMEDATALEN];
|
||||
|
||||
/* Function manager info to call the function to convert a row's
|
||||
* partitioning column value to a text string */
|
||||
/*
|
||||
* Function manager info to call the function to convert a row's
|
||||
* partitioning column value to a text string
|
||||
*/
|
||||
FmgrInfo textfunc_fmgr;
|
||||
|
||||
/* Function manager info to call the partitioning function on the
|
||||
partitioning column's text representation */
|
||||
/*
|
||||
* Function manager info to call the partitioning function on the
|
||||
* partitioning column's text representation
|
||||
*/
|
||||
FmgrInfo func_fmgr;
|
||||
int32 modulos;
|
||||
} PartitioningFunc;
|
||||
@ -51,9 +55,9 @@ typedef struct epoch_and_partitions_set
|
||||
typedef struct epoch_and_partitions_set epoch_and_partitions_set;
|
||||
|
||||
epoch_and_partitions_set *partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid);
|
||||
int16 partitioning_func_apply(PartitioningFunc *pf, Datum value);
|
||||
int16 partitioning_func_apply(PartitioningFunc * pf, Datum value);
|
||||
|
||||
Partition *partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt);
|
||||
Partition *partition_epoch_get_partition(epoch_and_partitions_set * epoch, int16 keyspace_pt);
|
||||
|
||||
bool partition_keyspace_pt_is_member(const Partition *part, const int16 keyspace_pt);
|
||||
bool partition_keyspace_pt_is_member(const Partition * part, const int16 keyspace_pt);
|
||||
#endif /* TIMESCALEDB_PARTITIONING_H */
|
||||
|
@ -36,7 +36,7 @@
|
||||
#define MAX_IO_VALUES 2
|
||||
|
||||
/* hash function signatures */
|
||||
void hlib_murmur3(const void *data, size_t len, uint64_t *io);
|
||||
void hlib_murmur3(const void *data, size_t len, uint64_t * io);
|
||||
|
||||
/* SQL function */
|
||||
Datum pg_murmur3_hash_string(PG_FUNCTION_ARGS);
|
||||
|
@ -8,7 +8,8 @@
|
||||
|
||||
#include "scanner.h"
|
||||
|
||||
typedef union ScanDesc {
|
||||
typedef union ScanDesc
|
||||
{
|
||||
IndexScanDesc index_scan;
|
||||
HeapScanDesc heap_scan;
|
||||
} ScanDesc;
|
||||
@ -18,8 +19,10 @@ typedef union ScanDesc {
|
||||
* It holds a pointer to the user-given ScannerCtx as well as
|
||||
* internal state used during scanning.
|
||||
*/
|
||||
typedef struct InternalScannerCtx {
|
||||
Relation tablerel, indexrel;
|
||||
typedef struct InternalScannerCtx
|
||||
{
|
||||
Relation tablerel,
|
||||
indexrel;
|
||||
TupleInfo tinfo;
|
||||
ScanDesc scan;
|
||||
ScannerCtx *sctx;
|
||||
@ -28,56 +31,66 @@ typedef struct InternalScannerCtx {
|
||||
/*
|
||||
* Scanner can implement both index and heap scans in a single interface.
|
||||
*/
|
||||
typedef struct Scanner {
|
||||
Relation (*open)(InternalScannerCtx *ctx);
|
||||
ScanDesc (*beginscan)(InternalScannerCtx *ctx);
|
||||
bool (*getnext)(InternalScannerCtx *ctx);
|
||||
void (*endscan)(InternalScannerCtx *ctx);
|
||||
void (*close)(InternalScannerCtx *ctx);
|
||||
typedef struct Scanner
|
||||
{
|
||||
Relation (*open) (InternalScannerCtx * ctx);
|
||||
ScanDesc(*beginscan) (InternalScannerCtx * ctx);
|
||||
bool (*getnext) (InternalScannerCtx * ctx);
|
||||
void (*endscan) (InternalScannerCtx * ctx);
|
||||
void (*close) (InternalScannerCtx * ctx);
|
||||
} Scanner;
|
||||
|
||||
/* Functions implementing heap scans */
|
||||
static Relation heap_scanner_open(InternalScannerCtx *ctx)
|
||||
static Relation
|
||||
heap_scanner_open(InternalScannerCtx * ctx)
|
||||
{
|
||||
ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode);
|
||||
return ctx->tablerel;
|
||||
}
|
||||
|
||||
static ScanDesc heap_scanner_beginscan(InternalScannerCtx *ctx)
|
||||
static ScanDesc
|
||||
heap_scanner_beginscan(InternalScannerCtx * ctx)
|
||||
{
|
||||
ScannerCtx *sctx = ctx->sctx;
|
||||
|
||||
ctx->scan.heap_scan = heap_beginscan(ctx->tablerel, SnapshotSelf,
|
||||
sctx->nkeys, sctx->scankey);
|
||||
return ctx->scan;
|
||||
}
|
||||
|
||||
static bool heap_scanner_getnext(InternalScannerCtx *ctx)
|
||||
static bool
|
||||
heap_scanner_getnext(InternalScannerCtx * ctx)
|
||||
{
|
||||
ctx->tinfo.tuple = heap_getnext(ctx->scan.heap_scan, ctx->sctx->scandirection);
|
||||
return HeapTupleIsValid(ctx->tinfo.tuple);
|
||||
}
|
||||
|
||||
static void heap_scanner_endscan(InternalScannerCtx *ctx)
|
||||
static void
|
||||
heap_scanner_endscan(InternalScannerCtx * ctx)
|
||||
{
|
||||
heap_endscan(ctx->scan.heap_scan);
|
||||
}
|
||||
|
||||
static void heap_scanner_close(InternalScannerCtx *ctx)
|
||||
static void
|
||||
heap_scanner_close(InternalScannerCtx * ctx)
|
||||
{
|
||||
heap_close(ctx->tablerel, ctx->sctx->lockmode);
|
||||
}
|
||||
|
||||
/* Functions implementing index scans */
|
||||
static Relation index_scanner_open(InternalScannerCtx *ctx)
|
||||
static Relation
|
||||
index_scanner_open(InternalScannerCtx * ctx)
|
||||
{
|
||||
ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode);
|
||||
ctx->indexrel = index_open(ctx->sctx->index, ctx->sctx->lockmode);
|
||||
return ctx->indexrel;
|
||||
}
|
||||
|
||||
static ScanDesc index_scanner_beginscan(InternalScannerCtx *ctx)
|
||||
static ScanDesc
|
||||
index_scanner_beginscan(InternalScannerCtx * ctx)
|
||||
{
|
||||
ScannerCtx *sctx = ctx->sctx;
|
||||
|
||||
ctx->scan.index_scan = index_beginscan(ctx->tablerel, ctx->indexrel,
|
||||
SnapshotSelf, sctx->nkeys,
|
||||
sctx->norderbys);
|
||||
@ -87,7 +100,8 @@ static ScanDesc index_scanner_beginscan(InternalScannerCtx *ctx)
|
||||
return ctx->scan;
|
||||
}
|
||||
|
||||
static bool index_scanner_getnext(InternalScannerCtx *ctx)
|
||||
static bool
|
||||
index_scanner_getnext(InternalScannerCtx * ctx)
|
||||
{
|
||||
ctx->tinfo.tuple = index_getnext(ctx->scan.index_scan, ctx->sctx->scandirection);
|
||||
ctx->tinfo.ituple = ctx->scan.index_scan->xs_itup;
|
||||
@ -95,12 +109,14 @@ static bool index_scanner_getnext(InternalScannerCtx *ctx)
|
||||
return HeapTupleIsValid(ctx->tinfo.tuple);
|
||||
}
|
||||
|
||||
static void index_scanner_endscan(InternalScannerCtx *ctx)
|
||||
static void
|
||||
index_scanner_endscan(InternalScannerCtx * ctx)
|
||||
{
|
||||
index_endscan(ctx->scan.index_scan);
|
||||
}
|
||||
|
||||
static void index_scanner_close(InternalScannerCtx *ctx)
|
||||
static void
|
||||
index_scanner_close(InternalScannerCtx * ctx)
|
||||
{
|
||||
heap_close(ctx->tablerel, ctx->sctx->lockmode);
|
||||
index_close(ctx->indexrel, ctx->sctx->lockmode);
|
||||
@ -133,7 +149,8 @@ static Scanner scanners[] = {
|
||||
*
|
||||
* Return the number of tuples that where found.
|
||||
*/
|
||||
int scanner_scan(ScannerCtx *ctx)
|
||||
int
|
||||
scanner_scan(ScannerCtx * ctx)
|
||||
{
|
||||
TupleDesc tuple_desc;
|
||||
bool is_valid;
|
||||
@ -175,7 +192,10 @@ int scanner_scan(ScannerCtx *ctx)
|
||||
ctx->tuplock.waitpolicy,
|
||||
false, &buffer, &hufd);
|
||||
|
||||
/* A tuple lock pins the underlying buffer, so we need to unpin it. */
|
||||
/*
|
||||
* A tuple lock pins the underlying buffer, so we need to
|
||||
* unpin it.
|
||||
*/
|
||||
ReleaseBuffer(buffer);
|
||||
}
|
||||
|
||||
|
@ -7,7 +7,8 @@
|
||||
#include <access/heapam.h>
|
||||
#include <nodes/lockoptions.h>
|
||||
|
||||
typedef enum ScannerType {
|
||||
typedef enum ScannerType
|
||||
{
|
||||
ScannerTypeHeap,
|
||||
ScannerTypeIndex,
|
||||
} ScannerType;
|
||||
@ -21,6 +22,7 @@ typedef struct TupleInfo
|
||||
/* return index tuple if it was requested -- only for index scans */
|
||||
IndexTuple ituple;
|
||||
TupleDesc ituple_desc;
|
||||
|
||||
/*
|
||||
* If the user requested a tuple lock, the result of the lock is passed on
|
||||
* in lockresult.
|
||||
@ -28,41 +30,53 @@ typedef struct TupleInfo
|
||||
HTSU_Result lockresult;
|
||||
} TupleInfo;
|
||||
|
||||
typedef struct ScannerCtx {
|
||||
typedef struct ScannerCtx
|
||||
{
|
||||
Oid table;
|
||||
Oid index;
|
||||
ScannerType scantype;
|
||||
ScanKey scankey;
|
||||
int nkeys, norderbys;
|
||||
int nkeys,
|
||||
norderbys;
|
||||
bool want_itup;
|
||||
LOCKMODE lockmode;
|
||||
struct {
|
||||
struct
|
||||
{
|
||||
LockTupleMode lockmode;
|
||||
LockWaitPolicy waitpolicy;
|
||||
bool enabled;
|
||||
} tuplock;
|
||||
ScanDirection scandirection;
|
||||
void *data; /* User-provided data passed on to filter() and tuple_found() */
|
||||
void *data; /* User-provided data passed on to filter()
|
||||
* and tuple_found() */
|
||||
|
||||
/* Optional handler called before a scan starts, but relation locks are
|
||||
* acquired. */
|
||||
void (*prescan)(void *data);
|
||||
/*
|
||||
* Optional handler called before a scan starts, but relation locks are
|
||||
* acquired.
|
||||
*/
|
||||
void (*prescan) (void *data);
|
||||
|
||||
/* Optional handler called after a scan finishes and before relation locks
|
||||
* are released. Passes on the number of tuples found. */
|
||||
void (*postscan)(int num_tuples, void *data);
|
||||
/*
|
||||
* Optional handler called after a scan finishes and before relation locks
|
||||
* are released. Passes on the number of tuples found.
|
||||
*/
|
||||
void (*postscan) (int num_tuples, void *data);
|
||||
|
||||
/* Optional handler to filter tuples. Should return true for tuples that
|
||||
* should be passed on to tuple_found, or false otherwise. */
|
||||
bool (*filter)(TupleInfo *ti, void *data);
|
||||
/*
|
||||
* Optional handler to filter tuples. Should return true for tuples that
|
||||
* should be passed on to tuple_found, or false otherwise.
|
||||
*/
|
||||
bool (*filter) (TupleInfo * ti, void *data);
|
||||
|
||||
/* Handler for found tuples. Should return true to continue the scan or
|
||||
* false to abort. */
|
||||
bool (*tuple_found)(TupleInfo *ti, void *data);
|
||||
/*
|
||||
* Handler for found tuples. Should return true to continue the scan or
|
||||
* false to abort.
|
||||
*/
|
||||
bool (*tuple_found) (TupleInfo * ti, void *data);
|
||||
} ScannerCtx;
|
||||
|
||||
/* Performs an index scan or heap scan and returns the number of matching
|
||||
* tuples. */
|
||||
int scanner_scan(ScannerCtx *ctx);
|
||||
int scanner_scan(ScannerCtx * ctx);
|
||||
|
||||
#endif /* TIMESCALEDB_SCANNER_H */
|
||||
|
@ -106,11 +106,11 @@ typedef struct add_partitioning_func_qual_context
|
||||
|
||||
hypertable_info *get_hypertable_info(Oid mainRelationOid);
|
||||
static void add_partitioning_func_qual(Query *parse, List *hypertable_info_list);
|
||||
static Node *add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context *context);
|
||||
static Node *add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context * context);
|
||||
static partitioning_info *
|
||||
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List * hypertable_info_list);
|
||||
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List *hypertable_info_list);
|
||||
static Expr *
|
||||
create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod);
|
||||
create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod);
|
||||
SPIPlanPtr get_hypertable_info_plan(void);
|
||||
|
||||
|
||||
@ -165,11 +165,13 @@ _PG_fini(void)
|
||||
_chunk_cache_fini();
|
||||
}
|
||||
|
||||
SPIPlanPtr get_hypertable_info_plan()
|
||||
SPIPlanPtr
|
||||
get_hypertable_info_plan()
|
||||
{
|
||||
Oid hypertable_info_plan_args[2] = {TEXTOID, TEXTOID};
|
||||
|
||||
if (hypertable_info_plan != NULL) {
|
||||
if (hypertable_info_plan != NULL)
|
||||
{
|
||||
return hypertable_info_plan;
|
||||
}
|
||||
|
||||
@ -194,12 +196,11 @@ SPIPlanPtr get_hypertable_info_plan()
|
||||
bool
|
||||
IobeamLoaded(void)
|
||||
{
|
||||
|
||||
if (!isLoaded)
|
||||
{
|
||||
Oid id;
|
||||
|
||||
if(!IsTransactionState())
|
||||
if (!IsTransactionState())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
@ -231,17 +232,22 @@ change_table_name_walker(Node *node, void *context)
|
||||
if (IsA(node, RangeTblEntry))
|
||||
{
|
||||
RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;
|
||||
change_table_name_context* ctx = (change_table_name_context *)context;
|
||||
change_table_name_context *ctx = (change_table_name_context *) context;
|
||||
|
||||
if (rangeTableEntry->rtekind == RTE_RELATION && rangeTableEntry->inh)
|
||||
{
|
||||
hypertable_info* hinfo = get_hypertable_info(rangeTableEntry->relid);
|
||||
hypertable_info *hinfo = get_hypertable_info(rangeTableEntry->relid);
|
||||
|
||||
if (hinfo != NULL)
|
||||
{
|
||||
ctx->hypertable_info = lappend(ctx->hypertable_info, hinfo);
|
||||
rangeTableEntry->relid = hinfo->replica_oid;
|
||||
}
|
||||
} else if (rangeTableEntry->rtekind == RTE_RELATION && ctx->parse->commandType == CMD_INSERT){
|
||||
hypertable_info* hinfo = get_hypertable_info(rangeTableEntry->relid);
|
||||
}
|
||||
else if (rangeTableEntry->rtekind == RTE_RELATION && ctx->parse->commandType == CMD_INSERT)
|
||||
{
|
||||
hypertable_info *hinfo = get_hypertable_info(rangeTableEntry->relid);
|
||||
|
||||
if (hinfo != NULL)
|
||||
{
|
||||
rangeTableEntry->relid = create_copy_table(hinfo->hypertable_id, hinfo->root_oid);
|
||||
@ -267,7 +273,8 @@ timescaledb_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
|
||||
if (IobeamLoaded())
|
||||
{
|
||||
change_table_name_context context;
|
||||
char* printParse = GetConfigOptionByName("io.print_parse", NULL, true);
|
||||
char *printParse = GetConfigOptionByName("io.print_parse", NULL, true);
|
||||
|
||||
/* set to false to not print all internal actions */
|
||||
SetConfigOption("io.print_parse", "false", PGC_USERSET, PGC_S_SESSION);
|
||||
|
||||
@ -289,8 +296,9 @@ timescaledb_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
|
||||
if (prev_planner_hook != NULL)
|
||||
{
|
||||
/* Call any earlier hooks */
|
||||
rv = (prev_planner_hook)(parse, cursorOptions, boundParams);
|
||||
} else
|
||||
rv = (prev_planner_hook) (parse, cursorOptions, boundParams);
|
||||
}
|
||||
else
|
||||
{
|
||||
/* Call the standard planner */
|
||||
rv = standard_planner(parse, cursorOptions, boundParams);
|
||||
@ -342,14 +350,20 @@ get_hypertable_info(Oid mainRelationOid)
|
||||
bool isnull;
|
||||
int total_rows = SPI_processed;
|
||||
int j;
|
||||
/* do not populate list until SPI_finish because the list cannot be populated in the SPI memory context */
|
||||
|
||||
/*
|
||||
* do not populate list until SPI_finish because the list cannot be
|
||||
* populated in the SPI memory context
|
||||
*/
|
||||
List *partitioning_info_list;
|
||||
|
||||
/* used to track list stuff til list can be populated */
|
||||
partitioning_info **partitioning_info_array = SPI_palloc(total_rows * sizeof(partitioning_info *));
|
||||
hypertable_info *hinfo = SPI_palloc(sizeof(hypertable_info));
|
||||
|
||||
TupleDesc tupdesc = SPI_tuptable->tupdesc;
|
||||
HeapTuple tuple = SPI_tuptable->vals[0];
|
||||
|
||||
hinfo->replica_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 1, &isnull));
|
||||
hinfo->root_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 6, &isnull));
|
||||
hinfo->hypertable_id = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 7, &isnull));
|
||||
@ -357,37 +371,43 @@ get_hypertable_info(Oid mainRelationOid)
|
||||
for (j = 0; j < total_rows; j++)
|
||||
{
|
||||
HeapTuple tuple = SPI_tuptable->vals[j];
|
||||
Name partitioning_func_schema, partitioning_func, partitioning_column;
|
||||
Name partitioning_func_schema,
|
||||
partitioning_func,
|
||||
partitioning_column;
|
||||
int32 partitioning_mod;
|
||||
|
||||
partitioning_info* info = (partitioning_info *) SPI_palloc(sizeof(partitioning_info));
|
||||
partitioning_info *info = (partitioning_info *) SPI_palloc(sizeof(partitioning_info));
|
||||
|
||||
memset(info, 0, sizeof(partitioning_info));
|
||||
|
||||
partitioning_column = DatumGetName(SPI_getbinval(tuple, tupdesc, 2, &isnull));
|
||||
|
||||
if (!isnull) {
|
||||
if (!isnull)
|
||||
{
|
||||
info->partitioning_column = SPI_palloc(sizeof(NameData));
|
||||
memcpy(info->partitioning_column, partitioning_column, sizeof(NameData));
|
||||
}
|
||||
|
||||
partitioning_func_schema = DatumGetName(SPI_getbinval(tuple, tupdesc, 3, &isnull));
|
||||
|
||||
if (!isnull) {
|
||||
if (!isnull)
|
||||
{
|
||||
info->partitioning_func_schema = SPI_palloc(sizeof(NameData));
|
||||
memcpy(info->partitioning_func_schema, partitioning_func_schema, sizeof(NameData));
|
||||
}
|
||||
|
||||
partitioning_func = DatumGetName(SPI_getbinval(tuple, tupdesc, 4, &isnull));
|
||||
|
||||
if (!isnull) {
|
||||
if (!isnull)
|
||||
{
|
||||
info->partitioning_func = SPI_palloc(sizeof(NameData));
|
||||
memcpy(info->partitioning_func, partitioning_func, sizeof(NameData));
|
||||
}
|
||||
|
||||
partitioning_mod = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 5, &isnull));
|
||||
|
||||
if (!isnull) {
|
||||
if (!isnull)
|
||||
{
|
||||
info->partitioning_mod = partitioning_mod;
|
||||
}
|
||||
|
||||
@ -414,8 +434,11 @@ get_hypertable_info(Oid mainRelationOid)
|
||||
|
||||
|
||||
|
||||
char * copy_table_name(int32 hypertable_id) {
|
||||
char *
|
||||
copy_table_name(int32 hypertable_id)
|
||||
{
|
||||
StringInfo temp_table_name = makeStringInfo();
|
||||
|
||||
appendStringInfo(temp_table_name, "_copy_temp_%d", hypertable_id);
|
||||
return temp_table_name->data;
|
||||
}
|
||||
@ -433,24 +456,26 @@ char * copy_table_name(int32 hypertable_id) {
|
||||
* This tranformation helps because the check constraint on a table is of the form CHECK(partitioning_func(partition_column, partitioning_mod) BETWEEN X AND Y).
|
||||
*/
|
||||
static void
|
||||
add_partitioning_func_qual(Query *parse, List* hypertable_info_list)
|
||||
add_partitioning_func_qual(Query *parse, List *hypertable_info_list)
|
||||
{
|
||||
add_partitioning_func_qual_context context;
|
||||
|
||||
context.parse = parse;
|
||||
context.hypertable_info_list = hypertable_info_list;
|
||||
parse->jointree->quals = add_partitioning_func_qual_mutator(parse->jointree->quals, &context);
|
||||
}
|
||||
|
||||
static Node *
|
||||
add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context *context)
|
||||
add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context * context)
|
||||
{
|
||||
if (node == NULL)
|
||||
return NULL;
|
||||
|
||||
/* Detect partitioning_column = const. If not fall-thru.
|
||||
* If detected, replace with
|
||||
* partitioning_column = const AND
|
||||
* partitioning_func(partition_column, partitioning_mod) = partitioning_func(const, partitioning_mod)
|
||||
/*
|
||||
* Detect partitioning_column = const. If not fall-thru. If detected,
|
||||
* replace with partitioning_column = const AND
|
||||
* partitioning_func(partition_column, partitioning_mod) =
|
||||
* partitioning_func(const, partitioning_mod)
|
||||
*/
|
||||
if (IsA(node, OpExpr))
|
||||
{
|
||||
@ -458,7 +483,7 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
|
||||
|
||||
if (list_length(exp->args) == 2)
|
||||
{
|
||||
//only look at var op const or const op var;
|
||||
/* only look at var op const or const op var; */
|
||||
Node *left = (Node *) linitial(exp->args);
|
||||
Node *right = (Node *) lsecond(exp->args);
|
||||
Var *var_expr = NULL;
|
||||
@ -468,9 +493,10 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
|
||||
{
|
||||
var_expr = (Var *) left;
|
||||
other_expr = right;
|
||||
} else if (IsA(right, Var))
|
||||
}
|
||||
else if (IsA(right, Var))
|
||||
{
|
||||
var_expr = (Var *)right;
|
||||
var_expr = (Var *) right;
|
||||
other_expr = left;
|
||||
}
|
||||
|
||||
@ -489,16 +515,20 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
|
||||
|
||||
if (eq_oid == exp->opno)
|
||||
{
|
||||
/* I now have a var = const. Make sure var is a partitioning column */
|
||||
/*
|
||||
* I now have a var = const. Make sure var is a
|
||||
* partitioning column
|
||||
*/
|
||||
partitioning_info *pi = get_partitioning_info_for_partition_column_var(var_expr,
|
||||
context->parse,
|
||||
context->hypertable_info_list);
|
||||
|
||||
if (pi != NULL
|
||||
&& pi->partitioning_column != NULL
|
||||
&& pi->partitioning_func != NULL) {
|
||||
&& pi->partitioning_func != NULL)
|
||||
{
|
||||
/* The var is a partitioning column */
|
||||
Expr * partitioning_clause = create_partition_func_equals_const(var_expr, const_expr,
|
||||
Expr *partitioning_clause = create_partition_func_equals_const(var_expr, const_expr,
|
||||
pi->partitioning_func_schema,
|
||||
pi->partitioning_func,
|
||||
pi->partitioning_mod);
|
||||
@ -519,7 +549,8 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
|
||||
/* Returns the partitioning info for a var if the var is a partitioning column. If the var is not a partitioning
|
||||
* column return NULL */
|
||||
static partitioning_info *
|
||||
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List * hypertable_info_list) {
|
||||
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List *hypertable_info_list)
|
||||
{
|
||||
RangeTblEntry *rte = rt_fetch(var_expr->varno, parse->rtable);
|
||||
char *varname = get_rte_attribute_name(rte, var_expr->varattno);
|
||||
ListCell *hicell;
|
||||
@ -527,14 +558,17 @@ get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List
|
||||
foreach(hicell, hypertable_info_list)
|
||||
{
|
||||
hypertable_info *info = lfirst(hicell);
|
||||
|
||||
if (rte->relid == info->replica_oid)
|
||||
{
|
||||
ListCell *picell;
|
||||
|
||||
foreach(picell, info->partitioning_info)
|
||||
{
|
||||
partitioning_info *pi = lfirst(picell);
|
||||
|
||||
if (pi->partitioning_column != NULL &&
|
||||
strcmp(NameStr(*(pi->partitioning_column)), varname)==0)
|
||||
strcmp(NameStr(*(pi->partitioning_column)), varname) == 0)
|
||||
{
|
||||
return pi;
|
||||
}
|
||||
@ -590,7 +624,7 @@ create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partit
|
||||
f_var = ParseFuncOrColumn(NULL, func_name, args_func_var, fc_var, -1);
|
||||
f_const = ParseFuncOrColumn(NULL, func_name, args_func_const, fc_const, -1);
|
||||
|
||||
op_expr = make_op(NULL,list_make2(makeString("pg_catalog"), makeString("=")),f_var,f_const,-1);
|
||||
op_expr = make_op(NULL, list_make2(makeString("pg_catalog"), makeString("=")), f_var, f_const, -1);
|
||||
|
||||
return op_expr;
|
||||
}
|
||||
@ -601,10 +635,14 @@ PG_FUNCTION_INFO_V1(register_dblink_precommit_connection);
|
||||
Datum
|
||||
register_dblink_precommit_connection(PG_FUNCTION_ARGS)
|
||||
{
|
||||
/* allocate this stuff in top-level transaction context, so that it survives till commit */
|
||||
/*
|
||||
* allocate this stuff in top-level transaction context, so that it
|
||||
* survives till commit
|
||||
*/
|
||||
MemoryContext old = MemoryContextSwitchTo(TopTransactionContext);
|
||||
|
||||
char *connectionName = text_to_cstring(PG_GETARG_TEXT_PP(0));
|
||||
|
||||
callbackConnections = lappend(callbackConnections, connectionName);
|
||||
|
||||
MemoryContextSwitchTo(old);
|
||||
@ -617,7 +655,8 @@ register_dblink_precommit_connection(PG_FUNCTION_ARGS)
|
||||
* Look at meta_commands.sql for example usage. Remote commits happen in pre-commit.
|
||||
* Remote aborts happen on abort.
|
||||
* */
|
||||
static void io_xact_callback(XactEvent event, void *arg)
|
||||
static void
|
||||
io_xact_callback(XactEvent event, void *arg)
|
||||
{
|
||||
ListCell *cell;
|
||||
|
||||
@ -628,9 +667,10 @@ static void io_xact_callback(XactEvent event, void *arg)
|
||||
{
|
||||
case XACT_EVENT_PARALLEL_PRE_COMMIT:
|
||||
case XACT_EVENT_PRE_COMMIT:
|
||||
foreach (cell, callbackConnections)
|
||||
foreach(cell, callbackConnections)
|
||||
{
|
||||
char *connection = (char *) lfirst(cell);
|
||||
|
||||
DirectFunctionCall3(dblink_exec,
|
||||
PointerGetDatum(cstring_to_text(connection)),
|
||||
PointerGetDatum(cstring_to_text("COMMIT")),
|
||||
@ -640,11 +680,16 @@ static void io_xact_callback(XactEvent event, void *arg)
|
||||
break;
|
||||
case XACT_EVENT_PARALLEL_ABORT:
|
||||
case XACT_EVENT_ABORT:
|
||||
/* Be quite careful here. Cannot throw any errors (or infinite loop) and cannot use PG_TRY either.
|
||||
* Make sure to test with c-asserts on. */
|
||||
foreach (cell, callbackConnections)
|
||||
|
||||
/*
|
||||
* Be quite careful here. Cannot throw any errors (or infinite
|
||||
* loop) and cannot use PG_TRY either. Make sure to test with
|
||||
* c-asserts on.
|
||||
*/
|
||||
foreach(cell, callbackConnections)
|
||||
{
|
||||
char *connection = (char *) lfirst(cell);
|
||||
|
||||
DirectFunctionCall3(dblink_exec,
|
||||
PointerGetDatum(cstring_to_text(connection)),
|
||||
PointerGetDatum(cstring_to_text("ABORT")),
|
||||
@ -685,12 +730,12 @@ pg_gethostname(PG_FUNCTION_ARGS)
|
||||
SET_VARSIZE(t, VARHDRSZ);
|
||||
memset(VARDATA(t), '\0', hostname_max_len + 1);
|
||||
|
||||
if (gethostname((char *)VARDATA(t), hostname_max_len) == -1)
|
||||
if (gethostname((char *) VARDATA(t), hostname_max_len) == -1)
|
||||
{
|
||||
PG_RETURN_TEXT_P(NULL);
|
||||
}
|
||||
|
||||
length = strnlen((char *)VARDATA(t), hostname_max_len);
|
||||
length = strnlen((char *) VARDATA(t), hostname_max_len);
|
||||
SET_VARSIZE(t, VARHDRSZ + length);
|
||||
|
||||
PG_RETURN_TEXT_P(t);
|
||||
@ -708,7 +753,7 @@ prev_ProcessUtility(Node *parsetree,
|
||||
if (prev_ProcessUtility_hook != NULL)
|
||||
{
|
||||
/* Call any earlier hooks */
|
||||
(prev_ProcessUtility_hook)(parsetree, queryString, context, params, dest, completionTag);
|
||||
(prev_ProcessUtility_hook) (parsetree, queryString, context, params, dest, completionTag);
|
||||
}
|
||||
else
|
||||
{
|
||||
@ -720,14 +765,16 @@ prev_ProcessUtility(Node *parsetree,
|
||||
|
||||
/* Hook-intercept for ProcessUtility. Used to make COPY use a temp copy table and */
|
||||
/* blocking renaming of hypertables. */
|
||||
void timescaledb_ProcessUtility(Node *parsetree,
|
||||
void
|
||||
timescaledb_ProcessUtility(Node *parsetree,
|
||||
const char *queryString,
|
||||
ProcessUtilityContext context,
|
||||
ParamListInfo params,
|
||||
DestReceiver *dest,
|
||||
char *completionTag)
|
||||
{
|
||||
if (!IobeamLoaded()){
|
||||
if (!IobeamLoaded())
|
||||
{
|
||||
prev_ProcessUtility(parsetree, queryString, context, params, dest, completionTag);
|
||||
return;
|
||||
}
|
||||
@ -736,14 +783,17 @@ void timescaledb_ProcessUtility(Node *parsetree,
|
||||
{
|
||||
CopyStmt *copystmt = (CopyStmt *) parsetree;
|
||||
Oid relId = RangeVarGetRelid(copystmt->relation, NoLock, true);
|
||||
if (OidIsValid(relId)) {
|
||||
hypertable_info* hinfo = get_hypertable_info(relId);
|
||||
|
||||
if (OidIsValid(relId))
|
||||
{
|
||||
hypertable_info *hinfo = get_hypertable_info(relId);
|
||||
|
||||
if (hinfo != NULL)
|
||||
{
|
||||
copystmt->relation = makeRangeVarFromRelid(create_copy_table(hinfo->hypertable_id, hinfo->root_oid));
|
||||
}
|
||||
}
|
||||
prev_ProcessUtility((Node *)copystmt, queryString, context, params, dest, completionTag);
|
||||
prev_ProcessUtility((Node *) copystmt, queryString, context, params, dest, completionTag);
|
||||
return;
|
||||
}
|
||||
|
||||
@ -752,14 +802,17 @@ void timescaledb_ProcessUtility(Node *parsetree,
|
||||
{
|
||||
RenameStmt *renamestmt = (RenameStmt *) parsetree;
|
||||
Oid relId = RangeVarGetRelid(renamestmt->relation, NoLock, true);
|
||||
if (OidIsValid(relId)) {
|
||||
hypertable_info* hinfo = get_hypertable_info(relId);
|
||||
|
||||
if (OidIsValid(relId))
|
||||
{
|
||||
hypertable_info *hinfo = get_hypertable_info(relId);
|
||||
|
||||
if (hinfo != NULL && renamestmt->renameType == OBJECT_TABLE)
|
||||
ereport(ERROR,
|
||||
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
|
||||
errmsg("Renaming hypertables is not yet supported")));
|
||||
}
|
||||
prev_ProcessUtility((Node *)renamestmt, queryString, context, params, dest, completionTag);
|
||||
prev_ProcessUtility((Node *) renamestmt, queryString, context, params, dest, completionTag);
|
||||
return;
|
||||
}
|
||||
|
||||
|