formatting with pgindent

Matvey Arye 2017-03-06 15:20:00 -05:00
parent 73f4dcaaf0
commit 32c45b75b2
20 changed files with 775 additions and 607 deletions
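
The commit runs PostgreSQL's pgindent formatter over the C sources. The hunks below all follow the same pattern: function return types and opening braces move onto their own lines, pointer declarators in prototypes are re-spaced, //-style and single-line comments become block comments, and multi-variable declarations are split one per line. As a rough, hypothetical illustration of that layout (example_strlen and main are invented for this sketch and do not appear in the commit):

#include <stdio.h>
#include <stddef.h>

/*
 * Hypothetical example (not from this commit) laid out in the style the
 * hunks below converge on: return type on its own line, braces on their
 * own lines, block comments, a blank line after local declarations.
 */
static size_t
example_strlen(const char *s)
{
	size_t n = 0;

	while (s != NULL && s[n] != '\0')
		n++;
	return n;
}

int
main(void)
{
	printf("%zu\n", example_strlen("pgindent"));
	return 0;
}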

View File

@@ -2,7 +2,7 @@
 void
-cache_init(Cache *cache)
+cache_init(Cache * cache)
 {
 	if (cache->htab != NULL)
 	{
@@ -15,9 +15,11 @@ cache_init(Cache *cache)
 	cache->refcount = 1;
 }
 
 static void
-cache_destroy(Cache *cache) {
-	if (cache->refcount > 0) {
+cache_destroy(Cache * cache)
+{
+	if (cache->refcount > 0)
+	{
 		/* will be destroyed later */
 		return;
 	}
@@ -32,7 +34,7 @@ cache_destroy(Cache *cache) {
 }
 
 void
-cache_invalidate(Cache *cache)
+cache_invalidate(Cache * cache)
 {
 	if (cache == NULL)
 		return;
@@ -40,7 +42,7 @@ cache_invalidate(Cache *cache)
 	cache_destroy(cache);
 }
 
 /*
  * Pinning is needed if any items returned by the cache
  * may need to survive invalidation events (i.e. AcceptInvalidationMessages() may be called).
  *
@@ -49,13 +51,15 @@ cache_invalidate(Cache *cache)
  * Each call to cache_pin MUST BE paired with a call to cache_release.
  *
  */
-extern Cache *cache_pin(Cache *cache)
+extern Cache *
+cache_pin(Cache * cache)
 {
 	cache->refcount++;
 	return cache;
 }
 
-extern void cache_release(Cache *cache)
+extern void
+cache_release(Cache * cache)
 {
 	Assert(cache->refcount > 0);
 	cache->refcount--;
@@ -64,19 +68,19 @@ extern void cache_release(Cache *cache)
 MemoryContext
-cache_memory_ctx(Cache *cache)
+cache_memory_ctx(Cache * cache)
 {
 	return cache->hctl.hcxt;
 }
 
 MemoryContext
-cache_switch_to_memory_context(Cache *cache)
+cache_switch_to_memory_context(Cache * cache)
 {
 	return MemoryContextSwitchTo(cache->hctl.hcxt);
 }
 
 void *
-cache_fetch(Cache *cache, CacheQueryCtx *ctx)
+cache_fetch(Cache * cache, CacheQueryCtx * ctx)
 {
 	bool found;

View File

@ -9,30 +9,30 @@ typedef struct CacheQueryCtx
{ {
void *entry; void *entry;
void *private[0]; void *private[0];
} CacheQueryCtx; } CacheQueryCtx;
typedef struct Cache typedef struct Cache
{ {
HASHCTL hctl; HASHCTL hctl;
HTAB *htab; HTAB *htab;
int refcount; int refcount;
const char *name; const char *name;
long numelements; long numelements;
int flags; int flags;
void *(*get_key) (struct CacheQueryCtx *); void *(*get_key) (struct CacheQueryCtx *);
void *(*create_entry) (struct Cache *, CacheQueryCtx *); void *(*create_entry) (struct Cache *, CacheQueryCtx *);
void *(*update_entry) (struct Cache *, CacheQueryCtx *); void *(*update_entry) (struct Cache *, CacheQueryCtx *);
void (*pre_destroy_hook) (struct Cache *); void (*pre_destroy_hook) (struct Cache *);
} Cache; } Cache;
extern void cache_init(Cache *cache); extern void cache_init(Cache * cache);
extern void cache_invalidate(Cache *cache); extern void cache_invalidate(Cache * cache);
extern void *cache_fetch(Cache *cache, CacheQueryCtx *ctx); extern void *cache_fetch(Cache * cache, CacheQueryCtx * ctx);
extern MemoryContext cache_memory_ctx(Cache *cache); extern MemoryContext cache_memory_ctx(Cache * cache);
extern MemoryContext cache_switch_to_memory_context(Cache *cache); extern MemoryContext cache_switch_to_memory_context(Cache * cache);
extern Cache *cache_pin(Cache *cache); extern Cache *cache_pin(Cache * cache);
extern void cache_release(Cache *cache); extern void cache_release(Cache * cache);
#endif /* _TIMESCALEDB_CACHE_H_ */ #endif /* _TIMESCALEDB_CACHE_H_ */

View File

@@ -93,15 +93,15 @@ invalidate_relcache_trigger(PG_FUNCTION_ARGS)
 }
 
 /*
  * This is similar to invalidate_relcache_trigger but not a trigger.
  * Not used regularly but useful for debugging.
  *
  */
 Datum
 invalidate_relcache(PG_FUNCTION_ARGS)
 {
 	Oid proxy_oid = PG_GETARG_OID(0);
 
 	/* arg 0 = relid of the cache_inval_proxy table */
 	CacheInvalidateRelcacheByRelid(proxy_oid);

View File

@@ -26,18 +26,21 @@ static Catalog catalog = {
 	.database_id = InvalidOid,
 };
 
-Catalog *catalog_get(void)
+Catalog *
+catalog_get(void)
 {
 	AclResult aclresult;
 	int i;
 
 	if (MyDatabaseId == InvalidOid)
 		elog(ERROR, "Invalid database ID");
 
-	/* Check that the user has CREATE permissions on the database, since the
-	operation may involve creating chunks and inserting into them. */
+	/*
+	 * Check that the user has CREATE permissions on the database, since the
+	 * operation may involve creating chunks and inserting into them.
+	 */
 	aclresult = pg_database_aclcheck(MyDatabaseId, GetUserId(), ACL_CREATE);
 	if (aclresult != ACLCHECK_OK)
 		aclcheck_error(aclresult, ACL_KIND_DATABASE,
 					   get_database_name(MyDatabaseId));
@@ -57,10 +60,10 @@ Catalog *catalog_get(void)
 	for (i = 0; i < _MAX_CATALOG_TABLES; i++)
 	{
 		Oid id;
 
 		id = get_relname_relid(catalog_table_names[i], catalog.schema_id);
 		if (id == InvalidOid)
 		{
 			elog(ERROR, "Oid lookup failed for table %s", catalog_table_names[i]);
@@ -69,7 +72,7 @@ Catalog *catalog_get(void)
 		catalog.tables[i].id = id;
 
 		id = get_relname_relid(catalog_table_index_names[i], catalog.schema_id);
 		if (id == InvalidOid)
 		{
 			elog(ERROR, "Oid lookup failed for table index %s", catalog_table_index_names[i]);
@@ -77,7 +80,7 @@ Catalog *catalog_get(void)
 		catalog.tables[i].index_id = id;
 		catalog.tables[i].name = catalog_table_names[i];
 	}
 
 	return &catalog;
 }

View File

@@ -3,7 +3,8 @@
 #include <postgres.h>
 
-enum catalog_table {
+enum catalog_table
+{
 	HYPERTABLE = 0,
 	CHUNK,
 	PARTITION,
@@ -32,17 +33,19 @@ enum catalog_table {
 #define CHUNK_INDEX_NAME "chunk_pkey"
 #define CHUNK_PARTITION_TIME_INDEX_NAME "chunk_partition_id_start_time_end_time_idx"
 
-typedef struct Catalog {
-	char database_name[NAMEDATALEN];
-	Oid database_id;
-	Oid schema_id;
-	struct {
+typedef struct Catalog
+{
+	char database_name[NAMEDATALEN];
+	Oid database_id;
+	Oid schema_id;
+	struct
+	{
 		const char *name;
 		Oid id;
 		Oid index_id;
 	} tables[_MAX_CATALOG_TABLES];
 } Catalog;
 
 Catalog *catalog_get(void);
 
 #endif /* TIMESCALEDB_CATALOG_H */

View File

@ -35,54 +35,61 @@ typedef struct chunk_crn_set_htable_entry
int64 start_time; int64 start_time;
int64 end_time; int64 end_time;
crn_set *crns; crn_set *crns;
} chunk_crn_set_htable_entry; } chunk_crn_set_htable_entry;
typedef struct ChunkCacheQueryCtx typedef struct ChunkCacheQueryCtx
{ {
CacheQueryCtx cctx; CacheQueryCtx cctx;
int32 chunk_id; int32 chunk_id;
int64 chunk_start_time; int64 chunk_start_time;
int64 chunk_end_time; int64 chunk_end_time;
} ChunkCacheQueryCtx; } ChunkCacheQueryCtx;
static void * static void *
chunk_crn_set_cache_get_key(CacheQueryCtx *ctx) chunk_crn_set_cache_get_key(CacheQueryCtx * ctx)
{ {
return &((ChunkCacheQueryCtx *) ctx)->chunk_id; return &((ChunkCacheQueryCtx *) ctx)->chunk_id;
} }
static void *chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx); static void *chunk_crn_set_cache_create_entry(Cache * cache, CacheQueryCtx * ctx);
static void *chunk_crn_set_cache_update_entry(Cache *cache, CacheQueryCtx *ctx); static void *chunk_crn_set_cache_update_entry(Cache * cache, CacheQueryCtx * ctx);
static Cache *chunk_crn_set_cache_create() { static Cache *
MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext, chunk_crn_set_cache_create()
CHUNK_CACHE_INVAL_PROXY_TABLE, {
ALLOCSET_DEFAULT_SIZES); MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext,
CHUNK_CACHE_INVAL_PROXY_TABLE,
ALLOCSET_DEFAULT_SIZES);
Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache)); Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache));
*cache = (Cache) {
.hctl = { Cache tmp = (Cache)
{
.hctl =
{
.keysize = sizeof(int32), .keysize = sizeof(int32),
.entrysize = sizeof(chunk_crn_set_htable_entry), .entrysize = sizeof(chunk_crn_set_htable_entry),
.hcxt = ctx, .hcxt = ctx,
}, },
.name = CHUNK_CACHE_INVAL_PROXY_TABLE, .name = CHUNK_CACHE_INVAL_PROXY_TABLE,
.numelements = 16, .numelements = 16,
.flags = HASH_ELEM | HASH_CONTEXT | HASH_BLOBS, .flags = HASH_ELEM | HASH_CONTEXT | HASH_BLOBS,
.get_key = chunk_crn_set_cache_get_key, .get_key = chunk_crn_set_cache_get_key,
.create_entry = chunk_crn_set_cache_create_entry, .create_entry = chunk_crn_set_cache_create_entry,
.update_entry = chunk_crn_set_cache_update_entry, .update_entry = chunk_crn_set_cache_update_entry,
}; };
*cache = tmp;
cache_init(cache); cache_init(cache);
return cache; return cache;
} }
static Cache *chunk_crn_set_cache_current = NULL; static Cache *chunk_crn_set_cache_current = NULL;
static void * static void *
chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx) chunk_crn_set_cache_create_entry(Cache * cache, CacheQueryCtx * ctx)
{ {
ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx; ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx;
chunk_crn_set_htable_entry *pe = ctx->entry; chunk_crn_set_htable_entry *pe = ctx->entry;
@ -101,7 +108,7 @@ chunk_crn_set_cache_create_entry(Cache *cache, CacheQueryCtx *ctx)
} }
static void * static void *
chunk_crn_set_cache_update_entry(Cache *cache, CacheQueryCtx *ctx) chunk_crn_set_cache_update_entry(Cache * cache, CacheQueryCtx * ctx)
{ {
ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx; ChunkCacheQueryCtx *cctx = (ChunkCacheQueryCtx *) ctx;
chunk_crn_set_htable_entry *pe = ctx->entry; chunk_crn_set_htable_entry *pe = ctx->entry;
@ -127,7 +134,7 @@ chunk_crn_set_cache_invalidate_callback(void)
} }
static chunk_crn_set_htable_entry * static chunk_crn_set_htable_entry *
chunk_crn_set_cache_get_entry(Cache *cache, int32 chunk_id, int64 chunk_start_time, int64 chunk_end_time) chunk_crn_set_cache_get_entry(Cache * cache, int32 chunk_id, int64 chunk_start_time, int64 chunk_end_time)
{ {
if (cache == NULL) if (cache == NULL)
{ {
@ -143,7 +150,7 @@ chunk_crn_set_cache_get_entry(Cache *cache, int32 chunk_id, int64 chunk_start_ti
} }
extern Cache * extern Cache *
chunk_crn_set_cache_pin() chunk_crn_set_cache_pin()
{ {
return cache_pin(chunk_crn_set_cache_current); return cache_pin(chunk_crn_set_cache_current);
} }
@ -152,7 +159,7 @@ chunk_crn_set_cache_pin()
static chunk_row * static chunk_row *
chunk_row_create(int32 id, int32 partition_id, int64 starttime, int64 endtime) chunk_row_create(int32 id, int32 partition_id, int64 starttime, int64 endtime)
{ {
chunk_row *chunk; chunk_row *chunk;
chunk = palloc(sizeof(chunk_row)); chunk = palloc(sizeof(chunk_row));
chunk->id = id; chunk->id = id;
@ -176,19 +183,22 @@ chunk_row_create(int32 id, int32 partition_id, int64 starttime, int64 endtime)
typedef struct ChunkScanCtx typedef struct ChunkScanCtx
{ {
chunk_row *chunk; chunk_row *chunk;
Oid chunk_tbl_id; Oid chunk_tbl_id;
int32 partition_id; int32 partition_id;
int64 starttime, endtime, timepoint; int64 starttime,
bool should_lock; endtime,
} ChunkScanCtx; timepoint;
bool should_lock;
} ChunkScanCtx;
static bool static bool
chunk_tuple_timepoint_filter(TupleInfo *ti, void *arg) chunk_tuple_timepoint_filter(TupleInfo * ti, void *arg)
{ {
ChunkScanCtx *ctx = arg; ChunkScanCtx *ctx = arg;
bool starttime_is_null, endtime_is_null; bool starttime_is_null,
Datum datum; endtime_is_null;
Datum datum;
datum = heap_getattr(ti->tuple, CHUNK_TBL_COL_STARTTIME, ti->desc, &starttime_is_null); datum = heap_getattr(ti->tuple, CHUNK_TBL_COL_STARTTIME, ti->desc, &starttime_is_null);
ctx->starttime = starttime_is_null ? OPEN_START_TIME : DatumGetInt64(datum); ctx->starttime = starttime_is_null ? OPEN_START_TIME : DatumGetInt64(datum);
@ -203,11 +213,11 @@ chunk_tuple_timepoint_filter(TupleInfo *ti, void *arg)
} }
static bool static bool
chunk_tuple_found(TupleInfo *ti, void *arg) chunk_tuple_found(TupleInfo * ti, void *arg)
{ {
ChunkScanCtx *ctx = arg; ChunkScanCtx *ctx = arg;
bool is_null; bool is_null;
Datum id; Datum id;
id = heap_getattr(ti->tuple, CHUNK_TBL_COL_ID, ti->desc, &is_null); id = heap_getattr(ti->tuple, CHUNK_TBL_COL_ID, ti->desc, &is_null);
ctx->chunk = chunk_row_create(DatumGetInt32(id), ctx->partition_id, ctx->chunk = chunk_row_create(DatumGetInt32(id), ctx->partition_id,
@ -219,13 +229,13 @@ static chunk_row *
chunk_scan(int32 partition_id, int64 timepoint, bool tuplock) chunk_scan(int32 partition_id, int64 timepoint, bool tuplock)
{ {
ScanKeyData scankey[1]; ScanKeyData scankey[1];
Catalog *catalog = catalog_get(); Catalog *catalog = catalog_get();
ChunkScanCtx cctx = { ChunkScanCtx cctx = {
.chunk_tbl_id = catalog->tables[CHUNK].id, .chunk_tbl_id = catalog->tables[CHUNK].id,
.partition_id = partition_id, .partition_id = partition_id,
.timepoint = timepoint, .timepoint = timepoint,
}; };
ScannerCtx ctx = { ScannerCtx ctx = {
.table = catalog->tables[CHUNK].id, .table = catalog->tables[CHUNK].id,
.index = get_relname_relid(CHUNK_PARTITION_TIME_INDEX_NAME, catalog->schema_id), .index = get_relname_relid(CHUNK_PARTITION_TIME_INDEX_NAME, catalog->schema_id),
.scantype = ScannerTypeIndex, .scantype = ScannerTypeIndex,
@ -242,8 +252,9 @@ chunk_scan(int32 partition_id, int64 timepoint, bool tuplock)
.scandirection = ForwardScanDirection, .scandirection = ForwardScanDirection,
}; };
/* Perform an index scan on epoch ID to find the partitions for the /*
* epoch. */ * Perform an index scan on epoch ID to find the partitions for the epoch.
*/
ScanKeyInit(&scankey[0], CHUNK_IDX_COL_PARTITION_ID, BTEqualStrategyNumber, ScanKeyInit(&scankey[0], CHUNK_IDX_COL_PARTITION_ID, BTEqualStrategyNumber,
F_INT4EQ, Int32GetDatum(partition_id)); F_INT4EQ, Int32GetDatum(partition_id));
@ -257,11 +268,11 @@ chunk_scan(int32 partition_id, int64 timepoint, bool tuplock)
* The cache parameter is a chunk_crn_set_cache (can be null to use current cache). * The cache parameter is a chunk_crn_set_cache (can be null to use current cache).
*/ */
chunk_cache_entry * chunk_cache_entry *
get_chunk_cache_entry(Cache *cache, Partition *part, int64 timepoint, bool lock) get_chunk_cache_entry(Cache * cache, Partition * part, int64 timepoint, bool lock)
{ {
chunk_crn_set_htable_entry *chunk_crn_cache; chunk_crn_set_htable_entry *chunk_crn_cache;
chunk_cache_entry *entry; chunk_cache_entry *entry;
chunk_row *chunk; chunk_row *chunk;
chunk = chunk_scan(part->id, timepoint, lock); chunk = chunk_scan(part->id, timepoint, lock);
@ -274,7 +285,7 @@ get_chunk_cache_entry(Cache *cache, Partition *part, int64 timepoint, bool lock)
entry->chunk = chunk; entry->chunk = chunk;
entry->id = chunk->id; entry->id = chunk->id;
chunk_crn_cache = chunk_crn_set_cache_get_entry(cache, chunk->id, chunk_crn_cache = chunk_crn_set_cache_get_entry(cache, chunk->id,
chunk->start_time, chunk->end_time); chunk->start_time, chunk->end_time);
entry->crns = chunk_crn_cache->crns; entry->crns = chunk_crn_cache->crns;
return entry; return entry;
} }

View File

@@ -8,7 +8,7 @@
 #include "cache.h"
 
 #define CHUNK_CACHE_INVAL_PROXY_TABLE "cache_inval_chunk"
 #define CHUNK_CACHE_INVAL_PROXY_OID \
 	get_relname_relid(CHUNK_CACHE_INVAL_PROXY_TABLE, CACHE_INVAL_PROXY_SCHEMA_OID)
 
 typedef struct hypertable_cache_entry hypertable_cache_entry;
@@ -18,17 +18,17 @@ typedef struct chunk_row chunk_row;
 typedef struct chunk_cache_entry
 {
 	int32 id;
 	chunk_row *chunk;
 	crn_set *crns;
 } chunk_cache_entry;
 
-extern chunk_cache_entry *get_chunk_cache_entry(Cache *cache, Partition *part, int64 timepoint, bool lock);
+extern chunk_cache_entry *get_chunk_cache_entry(Cache * cache, Partition * part, int64 timepoint, bool lock);
 
 extern void chunk_crn_set_cache_invalidate_callback(void);
 
 extern Cache *chunk_crn_set_cache_pin(void);
 
 extern void _chunk_cache_init(void);
 extern void _chunk_cache_fini(void);

View File

@ -14,47 +14,53 @@
#include "scanner.h" #include "scanner.h"
#include "partitioning.h" #include "partitioning.h"
static void *hypertable_cache_create_entry(Cache *cache, CacheQueryCtx *ctx); static void *hypertable_cache_create_entry(Cache * cache, CacheQueryCtx * ctx);
typedef struct HypertableCacheQueryCtx typedef struct HypertableCacheQueryCtx
{ {
CacheQueryCtx cctx; CacheQueryCtx cctx;
int32 hypertable_id; int32 hypertable_id;
} HypertableCacheQueryCtx; } HypertableCacheQueryCtx;
static void * static void *
hypertable_cache_get_key(CacheQueryCtx *ctx) hypertable_cache_get_key(CacheQueryCtx * ctx)
{ {
return &((HypertableCacheQueryCtx *) ctx)->hypertable_id; return &((HypertableCacheQueryCtx *) ctx)->hypertable_id;
} }
static Cache *hypertable_cache_create() { static Cache *
MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext, hypertable_cache_create()
HYPERTABLE_CACHE_INVAL_PROXY_TABLE, {
ALLOCSET_DEFAULT_SIZES); MemoryContext ctx = AllocSetContextCreate(CacheMemoryContext,
HYPERTABLE_CACHE_INVAL_PROXY_TABLE,
ALLOCSET_DEFAULT_SIZES);
Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache)); Cache *cache = MemoryContextAlloc(ctx, sizeof(Cache));
*cache = (Cache) {
.hctl = { Cache tmp = (Cache)
{
.hctl =
{
.keysize = sizeof(int32), .keysize = sizeof(int32),
.entrysize = sizeof(hypertable_cache_entry), .entrysize = sizeof(hypertable_cache_entry),
.hcxt = ctx, .hcxt = ctx,
}, },
.name = HYPERTABLE_CACHE_INVAL_PROXY_TABLE, .name = HYPERTABLE_CACHE_INVAL_PROXY_TABLE,
.numelements = 16, .numelements = 16,
.flags = HASH_ELEM | HASH_CONTEXT | HASH_BLOBS, .flags = HASH_ELEM | HASH_CONTEXT | HASH_BLOBS,
.get_key = hypertable_cache_get_key, .get_key = hypertable_cache_get_key,
.create_entry = hypertable_cache_create_entry, .create_entry = hypertable_cache_create_entry,
}; };
*cache = tmp;
cache_init(cache); cache_init(cache);
return cache; return cache;
} }
static Cache *hypertable_cache_current = NULL; static Cache *hypertable_cache_current = NULL;
/* Column numbers for 'hypertable' table in sql/common/tables.sql */ /* Column numbers for 'hypertable' table in sql/common/tables.sql */
#define HT_TBL_COL_ID 1 #define HT_TBL_COL_ID 1
#define HT_TBL_COL_TIME_COL_NAME 10 #define HT_TBL_COL_TIME_COL_NAME 10
@ -64,17 +70,17 @@ static Cache *hypertable_cache_current = NULL;
#define HT_IDX_COL_ID 1 #define HT_IDX_COL_ID 1
static bool static bool
hypertable_tuple_found(TupleInfo *ti, void *data) hypertable_tuple_found(TupleInfo * ti, void *data)
{ {
bool is_null; bool is_null;
HypertableCacheQueryCtx *hctx = data; HypertableCacheQueryCtx *hctx = data;
hypertable_cache_entry *he = hctx->cctx.entry; hypertable_cache_entry *he = hctx->cctx.entry;
Datum id_datum = heap_getattr(ti->tuple, HT_TBL_COL_ID, ti->desc, &is_null); Datum id_datum = heap_getattr(ti->tuple, HT_TBL_COL_ID, ti->desc, &is_null);
Datum time_col_datum = heap_getattr(ti->tuple, HT_TBL_COL_TIME_COL_NAME, ti->desc, &is_null); Datum time_col_datum = heap_getattr(ti->tuple, HT_TBL_COL_TIME_COL_NAME, ti->desc, &is_null);
Datum time_type_datum = heap_getattr(ti->tuple, HT_TBL_COL_TIME_TYPE, ti->desc, &is_null); Datum time_type_datum = heap_getattr(ti->tuple, HT_TBL_COL_TIME_TYPE, ti->desc, &is_null);
int32 id = DatumGetInt32(id_datum); int32 id = DatumGetInt32(id_datum);
if (id != hctx->hypertable_id) if (id != hctx->hypertable_id)
{ {
elog(ERROR, "Expected hypertable ID %u, got %u", hctx->hypertable_id, id); elog(ERROR, "Expected hypertable ID %u, got %u", hctx->hypertable_id, id);
} }
@ -88,12 +94,12 @@ hypertable_tuple_found(TupleInfo *ti, void *data)
} }
static void * static void *
hypertable_cache_create_entry(Cache *cache, CacheQueryCtx *ctx) hypertable_cache_create_entry(Cache * cache, CacheQueryCtx * ctx)
{ {
HypertableCacheQueryCtx *hctx = (HypertableCacheQueryCtx *) ctx; HypertableCacheQueryCtx *hctx = (HypertableCacheQueryCtx *) ctx;
Catalog *catalog = catalog_get(); Catalog *catalog = catalog_get();
ScanKeyData scankey[1]; ScanKeyData scankey[1];
ScannerCtx scanCtx = { ScannerCtx scanCtx = {
.table = catalog->tables[HYPERTABLE].id, .table = catalog->tables[HYPERTABLE].id,
.index = catalog->tables[HYPERTABLE].index_id, .index = catalog->tables[HYPERTABLE].index_id,
.scantype = ScannerTypeIndex, .scantype = ScannerTypeIndex,
@ -125,7 +131,7 @@ hypertable_cache_invalidate_callback(void)
/* Get hypertable cache entry. If the entry is not in the cache, add it. */ /* Get hypertable cache entry. If the entry is not in the cache, add it. */
hypertable_cache_entry * hypertable_cache_entry *
hypertable_cache_get_entry(Cache *cache, int32 hypertable_id) hypertable_cache_get_entry(Cache * cache, int32 hypertable_id)
{ {
HypertableCacheQueryCtx ctx = { HypertableCacheQueryCtx ctx = {
.hypertable_id = hypertable_id, .hypertable_id = hypertable_id,
@ -139,7 +145,7 @@ static int
cmp_epochs(const void *time_pt_pointer, const void *test) cmp_epochs(const void *time_pt_pointer, const void *test)
{ {
/* note reverse order; assume oldest stuff last */ /* note reverse order; assume oldest stuff last */
int64 *time_pt = (int64 *) time_pt_pointer; int64 *time_pt = (int64 *) time_pt_pointer;
epoch_and_partitions_set **entry = (epoch_and_partitions_set **) test; epoch_and_partitions_set **entry = (epoch_and_partitions_set **) test;
if ((*entry)->start_time <= *time_pt && (*entry)->end_time >= *time_pt) if ((*entry)->start_time <= *time_pt && (*entry)->end_time >= *time_pt)
@ -155,12 +161,12 @@ cmp_epochs(const void *time_pt_pointer, const void *test)
} }
epoch_and_partitions_set * epoch_and_partitions_set *
hypertable_cache_get_partition_epoch(Cache *cache, hypertable_cache_entry *hce, int64 time_pt, Oid relid) hypertable_cache_get_partition_epoch(Cache * cache, hypertable_cache_entry * hce, int64 time_pt, Oid relid)
{ {
MemoryContext old; MemoryContext old;
epoch_and_partitions_set *epoch, epoch_and_partitions_set *epoch,
**cache_entry; **cache_entry;
int j; int j;
/* fastpath: check latest entry */ /* fastpath: check latest entry */
if (hce->num_epochs > 0) if (hce->num_epochs > 0)
@ -213,7 +219,8 @@ hypertable_cache_pin()
} }
void _hypertable_cache_init(void) void
_hypertable_cache_init(void)
{ {
CreateCacheMemoryContext(); CreateCacheMemoryContext();
hypertable_cache_current = hypertable_cache_create(); hypertable_cache_current = hypertable_cache_create();

View File

@@ -9,31 +9,31 @@ typedef struct epoch_and_partitions_set epoch_and_partitions_set;
 typedef struct partition_info partition_info;
 
 #define HYPERTABLE_CACHE_INVAL_PROXY_TABLE "cache_inval_hypertable"
 #define HYPERTABLE_CACHE_INVAL_PROXY_OID \
 	get_relname_relid(HYPERTABLE_CACHE_INVAL_PROXY_TABLE, CACHE_INVAL_PROXY_SCHEMA_OID)
 
 #define MAX_EPOCHS_PER_HYPERTABLE_CACHE_ENTRY 20
 
 typedef struct hypertable_cache_entry
 {
 	int32 id;
 	char time_column_name[NAMEDATALEN];
 	Oid time_column_type;
 	int num_epochs;
 
 	/* Array of epoch_and_partitions_set*. Order by start_time */
 	epoch_and_partitions_set *epochs[MAX_EPOCHS_PER_HYPERTABLE_CACHE_ENTRY];
 } hypertable_cache_entry;
 
 hypertable_cache_entry *hypertable_cache_get_entry(Cache * cache, int32 hypertable_id);
 
 epoch_and_partitions_set *
-hypertable_cache_get_partition_epoch(Cache *cache, hypertable_cache_entry *hce, int64 time_pt, Oid relid);
+hypertable_cache_get_partition_epoch(Cache * cache, hypertable_cache_entry * hce, int64 time_pt, Oid relid);
 
 void hypertable_cache_invalidate_callback(void);
 
 extern Cache *hypertable_cache_pin(void);
 
 void _hypertable_cache_init(void);
 void _hypertable_cache_fini(void);
 
 #endif /* TIMESCALEDB_HYPERTABLE_CACHE_H */

View File

@ -59,8 +59,8 @@
/* private funcs */ /* private funcs */
static ObjectAddress create_insert_index(int32 hypertable_id, char * time_field, PartitioningInfo *part_info,epoch_and_partitions_set *epoch); static ObjectAddress create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo * part_info, epoch_and_partitions_set * epoch);
static Node *get_keyspace_fn_call(PartitioningInfo *part_info); static Node *get_keyspace_fn_call(PartitioningInfo * part_info);
/* /*
* Inserts rows from the temporary copy table into correct hypertable child tables. * Inserts rows from the temporary copy table into correct hypertable child tables.
@ -100,10 +100,11 @@ typedef struct ChunkInsertCtxRel
EState *estate; EState *estate;
ResultRelInfo *resultRelInfo; ResultRelInfo *resultRelInfo;
BulkInsertState bistate; BulkInsertState bistate;
} ChunkInsertCtxRel; } ChunkInsertCtxRel;
static ChunkInsertCtxRel* static ChunkInsertCtxRel *
chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *range_table) { chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *range_table)
{
TupleDesc tupDesc; TupleDesc tupDesc;
ChunkInsertCtxRel *rel_ctx = palloc(sizeof(ChunkInsertCtxRel)); ChunkInsertCtxRel *rel_ctx = palloc(sizeof(ChunkInsertCtxRel));
@ -125,7 +126,7 @@ chunk_insert_ctx_rel_new(Relation rel, ResultRelInfo *resultRelInfo, List *ra
} }
static void static void
chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel *rel_ctx) chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel * rel_ctx)
{ {
FreeBulkInsertState(rel_ctx->bistate); FreeBulkInsertState(rel_ctx->bistate);
ExecCloseIndices(rel_ctx->resultRelInfo); ExecCloseIndices(rel_ctx->resultRelInfo);
@ -136,7 +137,7 @@ chunk_insert_ctx_rel_destroy(ChunkInsertCtxRel *rel_ctx)
static void static void
chunk_insert_ctx_rel_insert_tuple(ChunkInsertCtxRel *rel_ctx, HeapTuple tuple) chunk_insert_ctx_rel_insert_tuple(ChunkInsertCtxRel * rel_ctx, HeapTuple tuple)
{ {
int hi_options = 0; /* no optimization */ int hi_options = 0; /* no optimization */
CommandId mycid = GetCurrentCommandId(true); CommandId mycid = GetCurrentCommandId(true);
@ -164,12 +165,12 @@ chunk_insert_ctx_rel_insert_tuple(ChunkInsertCtxRel *rel_ctx, HeapTuple tuple)
typedef struct ChunkInsertCtx typedef struct ChunkInsertCtx
{ {
chunk_cache_entry *chunk; chunk_cache_entry *chunk;
Cache *pinned; Cache *pinned;
List *ctxs; List *ctxs;
} ChunkInsertCtx; } ChunkInsertCtx;
static ChunkInsertCtx * static ChunkInsertCtx *
chunk_insert_ctx_new(chunk_cache_entry *chunk, Cache *pinned) chunk_insert_ctx_new(chunk_cache_entry * chunk, Cache * pinned)
{ {
ListCell *lc; ListCell *lc;
List *rel_ctx_list = NIL; List *rel_ctx_list = NIL;
@ -246,7 +247,7 @@ chunk_insert_ctx_new(chunk_cache_entry *chunk, Cache *pinned)
} }
static void static void
chunk_insert_ctx_destroy(ChunkInsertCtx *ctx) chunk_insert_ctx_destroy(ChunkInsertCtx * ctx)
{ {
ListCell *lc; ListCell *lc;
@ -260,40 +261,46 @@ chunk_insert_ctx_destroy(ChunkInsertCtx *ctx)
foreach(lc, ctx->ctxs) foreach(lc, ctx->ctxs)
{ {
ChunkInsertCtxRel *rel_ctx = lfirst(lc); ChunkInsertCtxRel *rel_ctx = lfirst(lc);
chunk_insert_ctx_rel_destroy(rel_ctx); chunk_insert_ctx_rel_destroy(rel_ctx);
} }
} }
static void static void
chunk_insert_ctx_insert_tuple(ChunkInsertCtx *ctx, HeapTuple tup) chunk_insert_ctx_insert_tuple(ChunkInsertCtx * ctx, HeapTuple tup)
{ {
ListCell *lc; ListCell *lc;
foreach(lc, ctx->ctxs) foreach(lc, ctx->ctxs)
{ {
ChunkInsertCtxRel *rel_ctx = lfirst(lc); ChunkInsertCtxRel *rel_ctx = lfirst(lc);
chunk_insert_ctx_rel_insert_tuple(rel_ctx, tup); chunk_insert_ctx_rel_insert_tuple(rel_ctx, tup);
} }
} }
typedef struct CopyTableQueryCtx { typedef struct CopyTableQueryCtx
Partition *part; {
Partition *part;
ChunkInsertCtx *chunk_ctx; ChunkInsertCtx *chunk_ctx;
epoch_and_partitions_set *pe; epoch_and_partitions_set *pe;
hypertable_cache_entry *hci; hypertable_cache_entry *hci;
} CopyTableQueryCtx; } CopyTableQueryCtx;
static bool static bool
copy_table_tuple_found(TupleInfo *ti, void *data) copy_table_tuple_found(TupleInfo * ti, void *data)
{ {
bool is_null; bool is_null;
CopyTableQueryCtx *ctx = data; CopyTableQueryCtx *ctx = data;
int16 keyspace_pt; int16 keyspace_pt;
int64 time_pt; int64 time_pt;
if (ctx->pe->num_partitions > 1) if (ctx->pe->num_partitions > 1)
{ {
/* first element is partition index (used for sorting but not necessary here) */ /*
* first element is partition index (used for sorting but not
* necessary here)
*/
Datum time_datum = index_getattr(ti->ituple, 2, ti->ituple_desc, &is_null); Datum time_datum = index_getattr(ti->ituple, 2, ti->ituple_desc, &is_null);
Datum keyspace_datum = index_getattr(ti->ituple, 3, ti->ituple_desc, &is_null); Datum keyspace_datum = index_getattr(ti->ituple, 3, ti->ituple_desc, &is_null);
@ -303,6 +310,7 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
else else
{ {
Datum time_datum = index_getattr(ti->ituple, 1, ti->ituple_desc, &is_null); Datum time_datum = index_getattr(ti->ituple, 1, ti->ituple_desc, &is_null);
time_pt = time_value_to_internal(time_datum, ctx->hci->time_column_type); time_pt = time_value_to_internal(time_datum, ctx->hci->time_column_type);
keyspace_pt = KEYSPACE_PT_NO_PARTITIONING; keyspace_pt = KEYSPACE_PT_NO_PARTITIONING;
} }
@ -332,14 +340,19 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
{ {
Datum was_closed_datum; Datum was_closed_datum;
chunk_cache_entry *chunk; chunk_cache_entry *chunk;
Cache *pinned = chunk_crn_set_cache_pin(); Cache *pinned = chunk_crn_set_cache_pin();
/* /*
* TODO: this first call should be non-locking and use a cache(for * TODO: this first call should be non-locking and use a cache(for
* performance) * performance)
*/ */
chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, false); chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, false);
was_closed_datum = FunctionCall1(get_close_if_needed_fn(), Int32GetDatum(chunk->id)); was_closed_datum = FunctionCall1(get_close_if_needed_fn(), Int32GetDatum(chunk->id));
/* chunk may have been closed and thus changed /or/ need to get share lock */
/*
* chunk may have been closed and thus changed /or/ need to get share
* lock
*/
chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, true); chunk = get_chunk_cache_entry(pinned, ctx->part, time_pt, true);
ctx->chunk_ctx = chunk_insert_ctx_new(chunk, pinned); ctx->chunk_ctx = chunk_insert_ctx_new(chunk, pinned);
@ -347,27 +360,30 @@ copy_table_tuple_found(TupleInfo *ti, void *data)
/* insert here: */ /* insert here: */
/* has to be a copy(not sure why) */ /* has to be a copy(not sure why) */
chunk_insert_ctx_insert_tuple(ctx->chunk_ctx,heap_copytuple(ti->tuple)); chunk_insert_ctx_insert_tuple(ctx->chunk_ctx, heap_copytuple(ti->tuple));
return true; return true;
} }
static void scan_copy_table_and_insert_post(int num_tuples, void *data) static void
scan_copy_table_and_insert_post(int num_tuples, void *data)
{ {
CopyTableQueryCtx *ctx = data; CopyTableQueryCtx *ctx = data;
if (ctx->chunk_ctx != NULL) if (ctx->chunk_ctx != NULL)
chunk_insert_ctx_destroy(ctx->chunk_ctx); chunk_insert_ctx_destroy(ctx->chunk_ctx);
} }
static void scan_copy_table_and_insert( hypertable_cache_entry *hci, static void
epoch_and_partitions_set *pe, scan_copy_table_and_insert(hypertable_cache_entry * hci,
Oid table, Oid index) epoch_and_partitions_set * pe,
Oid table, Oid index)
{ {
CopyTableQueryCtx query_ctx = { CopyTableQueryCtx query_ctx = {
.pe = pe, .pe = pe,
.hci = hci, .hci = hci,
}; };
ScannerCtx scanCtx = { ScannerCtx scanCtx = {
.table = table, .table = table,
.index = index, .index = index,
.scantype = ScannerTypeIndex, .scantype = ScannerTypeIndex,
@ -399,7 +415,7 @@ insert_trigger_on_copy_table_c(PG_FUNCTION_ARGS)
hypertable_cache_entry *hci; hypertable_cache_entry *hci;
epoch_and_partitions_set *pe; epoch_and_partitions_set *pe;
Cache *hypertable_cache; Cache *hypertable_cache;
ObjectAddress idx; ObjectAddress idx;
DropStmt *drop = makeNode(DropStmt); DropStmt *drop = makeNode(DropStmt);
@ -537,10 +553,12 @@ create_copy_table(int32 hypertable_id, Oid root_oid)
} }
static IndexElem * static IndexElem *
makeIndexElem(char *name, Node *expr){ makeIndexElem(char *name, Node *expr)
Assert((name ==NULL || expr == NULL) && (name !=NULL || expr !=NULL)); {
Assert((name == NULL || expr == NULL) && (name != NULL || expr != NULL));
IndexElem *time_elem = makeNode(IndexElem);
IndexElem *time_elem = makeNode(IndexElem);
time_elem->name = name; time_elem->name = name;
time_elem->expr = expr; time_elem->expr = expr;
time_elem->indexcolname = NULL; time_elem->indexcolname = NULL;
@ -575,7 +593,7 @@ makeIndexElem(char *name, Node *expr){
* *
* */ * */
static ObjectAddress static ObjectAddress
create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo *part_info, epoch_and_partitions_set *epoch) create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo * part_info, epoch_and_partitions_set * epoch)
{ {
IndexStmt *index_stmt = makeNode(IndexStmt); IndexStmt *index_stmt = makeNode(IndexStmt);
IndexElem *time_elem; IndexElem *time_elem;
@ -657,7 +675,7 @@ create_insert_index(int32 hypertable_id, char *time_field, PartitioningInfo *par
* *
*/ */
static Node * static Node *
get_keyspace_fn_call(PartitioningInfo *part_info) get_keyspace_fn_call(PartitioningInfo * part_info)
{ {
ColumnRef *col_ref = makeNode(ColumnRef); ColumnRef *col_ref = makeNode(ColumnRef);
A_Const *mod_const; A_Const *mod_const;

View File

@@ -49,7 +49,7 @@ prepare_plan(const char *src, int nargs, Oid *argtypes)
 }
 
 void
-free_epoch(epoch_and_partitions_set *epoch)
+free_epoch(epoch_and_partitions_set * epoch)
 {
 	if (epoch->partitioning != NULL)
 		pfree(epoch->partitioning);
@@ -64,7 +64,7 @@ free_epoch(epoch_and_partitions_set *epoch)
 DEFINE_PLAN(get_crn_plan, CRN_QUERY, 1, CRN_QUERY_ARGS)
 
 crn_set *
-fetch_crn_set(crn_set *entry, int32 chunk_id)
+fetch_crn_set(crn_set * entry, int32 chunk_id)
 {
 	SPIPlanPtr plan = get_crn_plan();
 	Datum args[1] = {Int32GetDatum(chunk_id)};
@@ -174,7 +174,7 @@ chunk_tuple_create_spi_connected(int32 partition_id, int64 timepoint, bool lock,
 }
 
 static chunk_row *
-chunk_row_fill_in(chunk_row *chunk, HeapTuple tuple, TupleDesc tupdesc)
+chunk_row_fill_in(chunk_row * chunk, HeapTuple tuple, TupleDesc tupdesc)
 {
 	int64 time_ret;
 	bool is_null;
@@ -208,10 +208,10 @@ chunk_row_fill_in(chunk_row *chunk, HeapTuple tuple, TupleDesc tupdesc)
 chunk_row *
 chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock)
 {
 	HeapTuple tuple;
 	TupleDesc desc;
 	chunk_row *chunk = palloc(sizeof(chunk_row));
 	SPIPlanPtr plan = get_chunk_plan();
 
 	if (SPI_connect() < 0)
 		elog(ERROR, "Got an SPI connect error");
@@ -225,6 +225,8 @@ chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock)
 }
 
-bool chunk_row_timepoint_is_member(const chunk_row *row, const int64 time_pt){
-	return row->start_time <= time_pt && row->end_time >= time_pt;
+bool
+chunk_row_timepoint_is_member(const chunk_row * row, const int64 time_pt)
+{
+	return row->start_time <= time_pt && row->end_time >= time_pt;
 }

View File

@@ -18,35 +18,35 @@ typedef struct chunk_row
 	int32 partition_id;
 	int64 start_time;
 	int64 end_time;
 } chunk_row;
 
 typedef struct crn_row
 {
 	NameData schema_name;
 	NameData table_name;
 } crn_row;
 
 typedef struct crn_set
 {
 	int32 chunk_id;
 	List *tables;
 } crn_set;
 
 /* utility func */
 extern SPIPlanPtr prepare_plan(const char *src, int nargs, Oid *argtypes);
 
 /* db access func */
-extern epoch_and_partitions_set *fetch_epoch_and_partitions_set(epoch_and_partitions_set *entry,
+extern epoch_and_partitions_set *fetch_epoch_and_partitions_set(epoch_and_partitions_set * entry,
 				int32 hypertable_id, int64 time_pt, Oid relid);
-extern void free_epoch(epoch_and_partitions_set *epoch);
-extern crn_set *fetch_crn_set(crn_set *entry, int32 chunk_id);
+extern void free_epoch(epoch_and_partitions_set * epoch);
+extern crn_set *fetch_crn_set(crn_set * entry, int32 chunk_id);
 
 chunk_row *
 chunk_row_insert_new(int32 partition_id, int64 timepoint, bool lock);
 
-bool chunk_row_timepoint_is_member(const chunk_row *row, const int64 time_pt);
+bool chunk_row_timepoint_is_member(const chunk_row * row, const int64 time_pt);
 
 #endif /* TIMESCALEDB_METADATA_QUERIES_H */

View File

@@ -1,91 +1,96 @@
 /* -*- Mode: C; tab-width: 4; indent-tabs-mode: t; c-basic-offset: 4 -*- */
-//-----------------------------------------------------------------------------
-// MurmurHash3 was written by Austin Appleby, and is placed in the public
-// domain. The author hereby disclaims copyright to this source code.
-// Note - The x86 and x64 versions do _not_ produce the same results, as the
-// algorithms are optimized for their respective platforms. You can still
-// compile and run any of them on any platform, but your performance with the
-// non-native version will be less than optimal.
+/* ----------------------------------------------------------------------------- */
+/* MurmurHash3 was written by Austin Appleby, and is placed in the public */
+/* domain. The author hereby disclaims copyright to this source code. */
+/* Note - The x86 and x64 versions do _not_ produce the same results, as the */
+/* algorithms are optimized for their respective platforms. You can still */
+/* compile and run any of them on any platform, but your performance with the */
+/* non-native version will be less than optimal. */
 
 #include "pgmurmur3.h"
 
-//-----------------------------------------------------------------------------
-// Platform-specific functions and macros
+/* ----------------------------------------------------------------------------- */
+/* Platform-specific functions and macros */
 
-static inline uint32_t rotl32(uint32_t x, int8_t r)
+static inline uint32_t
+rotl32(uint32_t x, int8_t r)
 {
 	return (x << r) | (x >> (32 - r));
 }
 
 #define ROTL32(x,y) rotl32(x,y)
 
-//-----------------------------------------------------------------------------
-// Block read - if your platform needs to do endian-swapping or can only
-// handle aligned reads, do the conversion here
+/* ----------------------------------------------------------------------------- */
+/* Block read - if your platform needs to do endian-swapping or can only */
+/* handle aligned reads, do the conversion here */
 
-static inline uint32_t getblock(const uint32_t * p, int i)
+static inline uint32_t
+getblock(const uint32_t *p, int i)
 {
 	return p[i];
 }
 
-//-----------------------------------------------------------------------------
-// Finalization mix - force all bits of a hash block to avalanche
+/* ----------------------------------------------------------------------------- */
+/* Finalization mix - force all bits of a hash block to avalanche */
 
-static inline uint32_t fmix(uint32_t h)
+static inline uint32_t
+fmix(uint32_t h)
 {
 	h ^= h >> 16;
 	h *= 0x85ebca6b;
 	h ^= h >> 13;
 	h *= 0xc2b2ae35;
 	h ^= h >> 16;
 	return h;
 }
 
-//-----------------------------------------------------------------------------
+/* ----------------------------------------------------------------------------- */
 
-void hlib_murmur3(const void *key, size_t len, uint64_t *io)
+void
+hlib_murmur3(const void *key, size_t len, uint64_t * io)
 {
 	const uint8_t *data = (const uint8_t *) key;
 	const int nblocks = len / 4;
 	uint32_t h1 = io[0];
 	uint32_t c1 = 0xcc9e2d51;
 	uint32_t c2 = 0x1b873593;
 	const uint32_t *blocks;
 	const uint8_t *tail;
 	int i;
 	uint32_t k1;
 
-	//----------
-	// body
+	/* ---------- */
+	/* body */
 	blocks = (const uint32_t *) (data + nblocks * 4);
-	for (i = -nblocks; i; i++) {
-		k1 = getblock(blocks, i);
-		k1 *= c1;
-		k1 = ROTL32(k1, 15);
-		k1 *= c2;
-		h1 ^= k1;
-		h1 = ROTL32(h1, 13);
-		h1 = h1 * 5 + 0xe6546b64;
-	}
-	//----------
-	// tail
-	tail = (const uint8_t *) (data + nblocks * 4);
-	k1 = 0;
-	switch (len & 3) {
-	case 3:
-		k1 ^= tail[2] << 16;
-	case 2:
-		k1 ^= tail[1] << 8;
-	case 1:
-		k1 ^= tail[0];
-		k1 *= c1;
-		k1 = ROTL32(k1, 15);
-		k1 *= c2;
-		h1 ^= k1;
-	};
+	for (i = -nblocks; i; i++)
+	{
+		k1 = getblock(blocks, i);
+		k1 *= c1;
+		k1 = ROTL32(k1, 15);
+		k1 *= c2;
+		h1 ^= k1;
+		h1 = ROTL32(h1, 13);
+		h1 = h1 * 5 + 0xe6546b64;
+	}
+	/* ---------- */
+	/* tail */
+	tail = (const uint8_t *) (data + nblocks * 4);
+	k1 = 0;
+	switch (len & 3)
+	{
+		case 3:
+			k1 ^= tail[2] << 16;
+		case 2:
+			k1 ^= tail[1] << 8;
+		case 1:
+			k1 ^= tail[0];
+			k1 *= c1;
+			k1 = ROTL32(k1, 15);
+			k1 *= c2;
+			h1 ^= k1;
+	};
 
-	//----------
-	// finalization
+	/* ---------- */
+	/* finalization */
 	h1 ^= len;
 	h1 = fmix(h1);
 	io[0] = h1;
 }

View File

@ -6,24 +6,28 @@
#include "scanner.h" #include "scanner.h"
#include "catalog.h" #include "catalog.h"
static void partitioning_func_set_func_fmgr(PartitioningFunc *pf) static void
partitioning_func_set_func_fmgr(PartitioningFunc * pf)
{ {
FuncCandidateList funclist = FuncCandidateList funclist =
FuncnameGetCandidates(list_make2(makeString(pf->schema), makeString(pf->name)), FuncnameGetCandidates(list_make2(makeString(pf->schema), makeString(pf->name)),
2, NULL, false, false, false); 2, NULL, false, false, false);
if (funclist == NULL || funclist->next) if (funclist == NULL || funclist->next)
{ {
elog(ERROR, "Could not resolve the partitioning function"); elog(ERROR, "Could not resolve the partitioning function");
} }
fmgr_info_cxt(funclist->oid, &pf->func_fmgr, CurrentMemoryContext); fmgr_info_cxt(funclist->oid, &pf->func_fmgr, CurrentMemoryContext);
} }
static void partitioning_info_set_textfunc_fmgr(PartitioningInfo *pi, Oid relid) static void
partitioning_info_set_textfunc_fmgr(PartitioningInfo * pi, Oid relid)
{ {
Oid type_id, func_id; Oid type_id,
bool isVarlena; func_id;
bool isVarlena;
pi->column_attnum = get_attnum(relid, pi->column); pi->column_attnum = get_attnum(relid, pi->column);
type_id = get_atttype(relid, pi->column_attnum); type_id = get_atttype(relid, pi->column_attnum);
getTypeOutputInfo(type_id, &func_id, &isVarlena); getTypeOutputInfo(type_id, &func_id, &isVarlena);
@ -39,12 +43,12 @@ partitioning_info_create(int num_partitions,
Oid relid) Oid relid)
{ {
PartitioningInfo *pi; PartitioningInfo *pi;
pi = palloc0(sizeof(PartitioningInfo)); pi = palloc0(sizeof(PartitioningInfo));
pi->partfunc.modulos = partmod; pi->partfunc.modulos = partmod;
strncpy(pi->partfunc.name, partfunc, NAMEDATALEN); strncpy(pi->partfunc.name, partfunc, NAMEDATALEN);
strncpy(pi->column, partcol, NAMEDATALEN); strncpy(pi->column, partcol, NAMEDATALEN);
if (schema != NULL) if (schema != NULL)
{ {
strncpy(pi->partfunc.schema, schema, NAMEDATALEN); strncpy(pi->partfunc.schema, schema, NAMEDATALEN);
@ -56,13 +60,15 @@ partitioning_info_create(int num_partitions,
return pi; return pi;
} }
int16 partitioning_func_apply(PartitioningFunc *pf, Datum value) int16
partitioning_func_apply(PartitioningFunc * pf, Datum value)
{ {
Datum text = FunctionCall1(&pf->textfunc_fmgr, value); Datum text = FunctionCall1(&pf->textfunc_fmgr, value);
char *partition_val = DatumGetCString(text); char *partition_val = DatumGetCString(text);
Datum keyspace_datum = FunctionCall2(&pf->func_fmgr, Datum keyspace_datum = FunctionCall2(&pf->func_fmgr,
CStringGetTextDatum(partition_val), CStringGetTextDatum(partition_val),
Int32GetDatum(pf->modulos)); Int32GetDatum(pf->modulos));
return DatumGetInt16(keyspace_datum); return DatumGetInt16(keyspace_datum);
} }
@ -85,30 +91,34 @@ int16 partitioning_func_apply(PartitioningFunc *pf, Datum value)
/* PartitionEpochCtx is used to pass on information during a partition epoch and /* PartitionEpochCtx is used to pass on information during a partition epoch and
* partition scans. */ * partition scans. */
typedef struct { typedef struct
{
epoch_and_partitions_set *pe; epoch_and_partitions_set *pe;
int16 num_partitions; int16 num_partitions;
int32 hypertable_id; int32 hypertable_id;
int64 starttime, endtime, timepoint; int64 starttime,
Oid relid; endtime,
} PartitionEpochCtx; timepoint;
Oid relid;
} PartitionEpochCtx;
static int static int
partition_scan(PartitionEpochCtx *pctx); partition_scan(PartitionEpochCtx * pctx);
/* Filter partition epoch tuples based on hypertable ID and start/end time. */ /* Filter partition epoch tuples based on hypertable ID and start/end time. */
static bool static bool
partition_epoch_filter(TupleInfo *ti, void *arg) partition_epoch_filter(TupleInfo * ti, void *arg)
{ {
bool is_null; bool is_null;
PartitionEpochCtx *pctx = arg; PartitionEpochCtx *pctx = arg;
Datum id = heap_getattr(ti->tuple, PE_TBL_COL_HT_ID, ti->desc, &is_null); Datum id = heap_getattr(ti->tuple, PE_TBL_COL_HT_ID, ti->desc, &is_null);
if (DatumGetInt32(id) == pctx->hypertable_id) if (DatumGetInt32(id) == pctx->hypertable_id)
{ {
bool starttime_is_null, endtime_is_null; bool starttime_is_null,
Datum starttime = heap_getattr(ti->tuple, PE_TBL_COL_STARTTIME, ti->desc, &starttime_is_null); endtime_is_null;
Datum endtime = heap_getattr(ti->tuple, PE_TBL_COL_ENDTIME, ti->desc, &endtime_is_null); Datum starttime = heap_getattr(ti->tuple, PE_TBL_COL_STARTTIME, ti->desc, &starttime_is_null);
Datum endtime = heap_getattr(ti->tuple, PE_TBL_COL_ENDTIME, ti->desc, &endtime_is_null);
pctx->starttime = starttime_is_null ? OPEN_START_TIME : DatumGetInt64(starttime); pctx->starttime = starttime_is_null ? OPEN_START_TIME : DatumGetInt64(starttime);
pctx->endtime = endtime_is_null ? OPEN_END_TIME : DatumGetInt64(endtime); pctx->endtime = endtime_is_null ? OPEN_END_TIME : DatumGetInt64(endtime);
@ -123,9 +133,10 @@ partition_epoch_filter(TupleInfo *ti, void *arg)
sizeof(epoch_and_partitions_set) + (sizeof(Partition) * num_partitions) sizeof(epoch_and_partitions_set) + (sizeof(Partition) * num_partitions)
static epoch_and_partitions_set * static epoch_and_partitions_set *
partition_epoch_create(int32 epoch_id, PartitionEpochCtx *ctx) partition_epoch_create(int32 epoch_id, PartitionEpochCtx * ctx)
{ {
epoch_and_partitions_set *pe; epoch_and_partitions_set *pe;
pe = palloc(PARTITION_EPOCH_SIZE(ctx->num_partitions)); pe = palloc(PARTITION_EPOCH_SIZE(ctx->num_partitions));
pe->id = epoch_id; pe->id = epoch_id;
pe->num_partitions = ctx->num_partitions; pe->num_partitions = ctx->num_partitions;
@ -138,44 +149,51 @@ partition_epoch_create(int32 epoch_id, PartitionEpochCtx *ctx)
/* Callback for partition epoch scan. For every epoch tuple found, create a /* Callback for partition epoch scan. For every epoch tuple found, create a
* partition epoch entry and scan for associated partitions. */ * partition epoch entry and scan for associated partitions. */
static bool static bool
partition_epoch_tuple_found(TupleInfo *ti, void *arg) partition_epoch_tuple_found(TupleInfo * ti, void *arg)
{ {
PartitionEpochCtx *pctx = arg; PartitionEpochCtx *pctx = arg;
epoch_and_partitions_set *pe; epoch_and_partitions_set *pe;
int32 epoch_id; int32 epoch_id;
Datum datum; Datum datum;
bool is_null; bool is_null;
datum = heap_getattr(ti->tuple, PE_TBL_COL_NUMPARTITIONS, ti->desc, &is_null); datum = heap_getattr(ti->tuple, PE_TBL_COL_NUMPARTITIONS, ti->desc, &is_null);
pctx->num_partitions = DatumGetInt16(datum); pctx->num_partitions = DatumGetInt16(datum);
datum = heap_getattr(ti->tuple, PE_TBL_COL_ID, ti->desc, &is_null); datum = heap_getattr(ti->tuple, PE_TBL_COL_ID, ti->desc, &is_null);
epoch_id = DatumGetInt32(datum); epoch_id = DatumGetInt32(datum);
pe = partition_epoch_create(epoch_id, pctx); pe = partition_epoch_create(epoch_id, pctx);
pctx->pe = pe; pctx->pe = pe;
if (pctx->num_partitions > 1) if (pctx->num_partitions > 1)
{ {
Datum partfunc, partmod, partcol; Datum partfunc,
bool partfunc_is_null, partmod_is_null, partcol_is_null; partmod,
partfunc = heap_getattr(ti->tuple, PE_TBL_COL_PARTFUNC, ti->desc, &partfunc_is_null); partcol;
bool partfunc_is_null,
partmod_is_null,
partcol_is_null;
partfunc = heap_getattr(ti->tuple, PE_TBL_COL_PARTFUNC, ti->desc, &partfunc_is_null);
partmod = heap_getattr(ti->tuple, PE_TBL_COL_PARTMOD, ti->desc, &partmod_is_null); partmod = heap_getattr(ti->tuple, PE_TBL_COL_PARTMOD, ti->desc, &partmod_is_null);
partcol = heap_getattr(ti->tuple, PE_TBL_COL_PARTCOL, ti->desc, &partcol_is_null); partcol = heap_getattr(ti->tuple, PE_TBL_COL_PARTCOL, ti->desc, &partcol_is_null);
if (partfunc_is_null || partmod_is_null || partcol_is_null) if (partfunc_is_null || partmod_is_null || partcol_is_null)
{ {
elog(ERROR, "Invalid partitioning configuration for partition epoch %d", epoch_id); elog(ERROR, "Invalid partitioning configuration for partition epoch %d", epoch_id);
} }
datum = heap_getattr(ti->tuple, PE_TBL_COL_PARTFUNC_SCHEMA, ti->desc, &is_null); datum = heap_getattr(ti->tuple, PE_TBL_COL_PARTFUNC_SCHEMA, ti->desc, &is_null);
pe->partitioning = partitioning_info_create(pctx->num_partitions, pe->partitioning = partitioning_info_create(pctx->num_partitions,
is_null ? NULL : DatumGetCString(datum), is_null ? NULL : DatumGetCString(datum),
DatumGetCString(partfunc), DatumGetCString(partfunc),
DatumGetCString(partcol), DatumGetCString(partcol),
DatumGetInt16(partmod), DatumGetInt16(partmod),
pctx->relid); pctx->relid);
} else { }
else
{
pe->partitioning = NULL; pe->partitioning = NULL;
} }
@ -196,14 +214,14 @@ partition_epoch_tuple_found(TupleInfo *ti, void *arg)
#define PARTITION_IDX_COL_ID 1 #define PARTITION_IDX_COL_ID 1
static bool static bool
partition_tuple_found(TupleInfo *ti, void *arg) partition_tuple_found(TupleInfo * ti, void *arg)
{ {
PartitionEpochCtx *pctx = arg; PartitionEpochCtx *pctx = arg;
epoch_and_partitions_set *pe = pctx->pe; epoch_and_partitions_set *pe = pctx->pe;
Datum datum; Datum datum;
bool is_null; bool is_null;
pctx->num_partitions--; pctx->num_partitions--;
datum = heap_getattr(ti->tuple, PARTITION_TBL_COL_ID, ti->desc, &is_null); datum = heap_getattr(ti->tuple, PARTITION_TBL_COL_ID, ti->desc, &is_null);
pe->partitions[pctx->num_partitions].id = DatumGetInt32(datum); pe->partitions[pctx->num_partitions].id = DatumGetInt32(datum);
datum = heap_getattr(ti->tuple, PARTITION_TBL_COL_KEYSPACE_START, ti->desc, &is_null); datum = heap_getattr(ti->tuple, PARTITION_TBL_COL_KEYSPACE_START, ti->desc, &is_null);
@ -219,12 +237,12 @@ partition_tuple_found(TupleInfo *ti, void *arg)
} }
static int static int
partition_scan(PartitionEpochCtx *pctx) partition_scan(PartitionEpochCtx * pctx)
{ {
ScanKeyData scankey[1]; ScanKeyData scankey[1];
Catalog *catalog = catalog_get(); Catalog *catalog = catalog_get();
int num_partitions = pctx->num_partitions; int num_partitions = pctx->num_partitions;
ScannerCtx scanCtx = { ScannerCtx scanCtx = {
.table = catalog->tables[PARTITION].id, .table = catalog->tables[PARTITION].id,
.index = get_relname_relid(PARTITION_EPOCH_ID_INDEX_NAME, catalog->schema_id), .index = get_relname_relid(PARTITION_EPOCH_ID_INDEX_NAME, catalog->schema_id),
.scantype = ScannerTypeIndex, .scantype = ScannerTypeIndex,
@ -236,19 +254,22 @@ partition_scan(PartitionEpochCtx *pctx)
.scandirection = ForwardScanDirection, .scandirection = ForwardScanDirection,
}; };
/* Perform an index scan on epoch ID to find the partitions for the /*
* epoch. */ * Perform an index scan on epoch ID to find the partitions for the epoch.
*/
ScanKeyInit(&scankey[0], PARTITION_IDX_COL_ID, BTEqualStrategyNumber, ScanKeyInit(&scankey[0], PARTITION_IDX_COL_ID, BTEqualStrategyNumber,
F_INT4EQ, Int32GetDatum(pctx->pe->id)); F_INT4EQ, Int32GetDatum(pctx->pe->id));
scanner_scan(&scanCtx); scanner_scan(&scanCtx);
/* The scan decremented the number of partitions in the context, so check /*
that it is zero for correct number of partitions scanned. */ * The scan decremented the number of partitions in the context, so check
* that it is zero for correct number of partitions scanned.
*/
if (pctx->num_partitions != 0) if (pctx->num_partitions != 0)
{ {
elog(ERROR, "%d partitions found for epoch %d, expected %d", elog(ERROR, "%d partitions found for epoch %d, expected %d",
num_partitions - pctx->num_partitions, pctx->pe->id, num_partitions); num_partitions - pctx->num_partitions, pctx->pe->id, num_partitions);
} }
return num_partitions; return num_partitions;
@ -258,13 +279,13 @@ epoch_and_partitions_set *
partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid) partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid)
{ {
ScanKeyData scankey[1]; ScanKeyData scankey[1];
Catalog *catalog = catalog_get(); Catalog *catalog = catalog_get();
PartitionEpochCtx pctx = { PartitionEpochCtx pctx = {
.hypertable_id = hypertable_id, .hypertable_id = hypertable_id,
.timepoint = timepoint, .timepoint = timepoint,
.relid = relid, .relid = relid,
}; };
ScannerCtx scanctx = { ScannerCtx scanctx = {
.table = catalog->tables[PARTITION_EPOCH].id, .table = catalog->tables[PARTITION_EPOCH].id,
.index = get_relname_relid(PARTITION_EPOCH_TIME_INDEX_NAME, catalog->schema_id), .index = get_relname_relid(PARTITION_EPOCH_TIME_INDEX_NAME, catalog->schema_id),
.scantype = ScannerTypeIndex, .scantype = ScannerTypeIndex,
@ -272,16 +293,18 @@ partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid)
.scankey = scankey, .scankey = scankey,
.data = &pctx, .data = &pctx,
.filter = partition_epoch_filter, .filter = partition_epoch_filter,
.tuple_found = partition_epoch_tuple_found, .tuple_found = partition_epoch_tuple_found,
.lockmode = AccessShareLock, .lockmode = AccessShareLock,
.scandirection = ForwardScanDirection, .scandirection = ForwardScanDirection,
}; };
/* Perform an index scan on hypertable ID. We filter on start and end /*
* time. */ * Perform an index scan on hypertable ID. We filter on start and end
* time.
*/
ScanKeyInit(&scankey[0], PE_IDX_COL_HTID, BTEqualStrategyNumber, ScanKeyInit(&scankey[0], PE_IDX_COL_HTID, BTEqualStrategyNumber,
F_INT4EQ, Int32GetDatum(hypertable_id)); F_INT4EQ, Int32GetDatum(hypertable_id));
scanner_scan(&scanctx); scanner_scan(&scanctx);
return pctx.pe; return pctx.pe;
@ -293,9 +316,9 @@ static int
cmp_partitions(const void *keyspace_pt_arg, const void *value) cmp_partitions(const void *keyspace_pt_arg, const void *value)
{ {
/* note in keyspace asc; assume oldest stuff last */ /* note in keyspace asc; assume oldest stuff last */
int16 keyspace_pt = *((int16 *) keyspace_pt_arg); int16 keyspace_pt = *((int16 *) keyspace_pt_arg);
const Partition *part = value; const Partition *part = value;
if (partition_keyspace_pt_is_member(part, keyspace_pt)) if (partition_keyspace_pt_is_member(part, keyspace_pt))
{ {
return 0; return 0;
@ -309,16 +332,16 @@ cmp_partitions(const void *keyspace_pt_arg, const void *value)
} }
Partition * Partition *
partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt) partition_epoch_get_partition(epoch_and_partitions_set * epoch, int16 keyspace_pt)
{ {
Partition *part; Partition *part;
if (epoch == NULL) if (epoch == NULL)
{ {
elog(ERROR, "No partitioning information for epoch"); elog(ERROR, "No partitioning information for epoch");
return NULL; return NULL;
} }
if (keyspace_pt == KEYSPACE_PT_NO_PARTITIONING) if (keyspace_pt == KEYSPACE_PT_NO_PARTITIONING)
{ {
if (epoch->num_partitions > 1) if (epoch->num_partitions > 1)
@ -340,7 +363,8 @@ partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt
return part; return part;
} }
bool partition_keyspace_pt_is_member(const Partition *part, const int16 keyspace_pt) bool
partition_keyspace_pt_is_member(const Partition * part, const int16 keyspace_pt)
{ {
return keyspace_pt == KEYSPACE_PT_NO_PARTITIONING || (part->keyspace_start <= keyspace_pt && part->keyspace_end >= keyspace_pt); return keyspace_pt == KEYSPACE_PT_NO_PARTITIONING || (part->keyspace_start <= keyspace_pt && part->keyspace_end >= keyspace_pt);
} }
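The three functions above, together with partitioning_func_apply() from the header below, form the lookup path from a time point and a raw key value to a concrete Partition. An illustrative caller (not part of this commit), using only the prototypes and struct fields visible in this diff:

/* Illustrative caller, written as if inside partitioning.c. */
static Partition *
resolve_partition_sketch(int32 hypertable_id, int64 timepoint, Oid relid, Datum keyval)
{
    epoch_and_partitions_set *epoch = partition_epoch_scan(hypertable_id, timepoint, relid);
    int16       keyspace_pt = KEYSPACE_PT_NO_PARTITIONING;

    /* Only hash the key if the epoch actually has partitioning configured */
    if (epoch->partitioning != NULL)
        keyspace_pt = partitioning_func_apply(&epoch->partitioning->partfunc, keyval);

    return partition_epoch_get_partition(epoch, keyspace_pt);
}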

View File

@ -9,51 +9,55 @@
typedef struct Partition typedef struct Partition
{ {
int32 id; int32 id;
int16 keyspace_start; int16 keyspace_start;
int16 keyspace_end; int16 keyspace_end;
} Partition; } Partition;
typedef struct PartitioningFunc typedef struct PartitioningFunc
{ {
char schema[NAMEDATALEN]; char schema[NAMEDATALEN];
char name[NAMEDATALEN]; char name[NAMEDATALEN];
/* Function manager info to call the function to convert a row's /*
* partitioning column value to a text string */ * Function manager info to call the function to convert a row's
FmgrInfo textfunc_fmgr; * partitioning column value to a text string
*/
FmgrInfo textfunc_fmgr;
/* Function manager info to call the partitioning function on the /*
partitioning column's text representation */ * Function manager info to call the partitioning function on the
FmgrInfo func_fmgr; * partitioning column's text representation
int32 modulos; */
} PartitioningFunc; FmgrInfo func_fmgr;
int32 modulos;
} PartitioningFunc;
typedef struct PartitioningInfo typedef struct PartitioningInfo
{ {
char column[NAMEDATALEN]; char column[NAMEDATALEN];
AttrNumber column_attnum; AttrNumber column_attnum;
PartitioningFunc partfunc; PartitioningFunc partfunc;
} PartitioningInfo; } PartitioningInfo;
typedef struct epoch_and_partitions_set typedef struct epoch_and_partitions_set
{ {
int32 id; int32 id;
int32 hypertable_id; int32 hypertable_id;
int64 start_time; int64 start_time;
int64 end_time; int64 end_time;
PartitioningInfo *partitioning; PartitioningInfo *partitioning;
int16 num_partitions; int16 num_partitions;
Partition partitions[0]; Partition partitions[0];
} epoch_and_partitions_set; } epoch_and_partitions_set;
typedef struct epoch_and_partitions_set epoch_and_partitions_set; typedef struct epoch_and_partitions_set epoch_and_partitions_set;
epoch_and_partitions_set *partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid); epoch_and_partitions_set *partition_epoch_scan(int32 hypertable_id, int64 timepoint, Oid relid);
int16 partitioning_func_apply(PartitioningFunc *pf, Datum value); int16 partitioning_func_apply(PartitioningFunc * pf, Datum value);
Partition *partition_epoch_get_partition(epoch_and_partitions_set *epoch, int16 keyspace_pt); Partition *partition_epoch_get_partition(epoch_and_partitions_set * epoch, int16 keyspace_pt);
bool partition_keyspace_pt_is_member(const Partition *part, const int16 keyspace_pt); bool partition_keyspace_pt_is_member(const Partition * part, const int16 keyspace_pt);
#endif /* TIMESCALEDB_PARTITIONING_H */ #endif /* TIMESCALEDB_PARTITIONING_H */
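partitioning_func_apply() is only declared here; its body is not part of this diff. Given the two FmgrInfo members and the modulos field in PartitioningFunc, a plausible shape is to convert the column value to text and then call the partitioning function on that text plus the modulos. The call convention, the header file name, and the smallint return type of the SQL-level function are assumptions, not taken from this commit:

#include <postgres.h>
#include <fmgr.h>
#include "partitioning.h"       /* the header shown above (file name assumed) */

/* Hedged sketch of partitioning_func_apply(), not the commit's implementation. */
int16
partitioning_func_apply_sketch(PartitioningFunc *pf, Datum value)
{
    /* Convert the partitioning column's value to its text representation */
    Datum       text_key = FunctionCall1(&pf->textfunc_fmgr, value);

    /* Apply the partitioning function to the text key and the modulos */
    Datum       keyspace = FunctionCall2(&pf->func_fmgr, text_key,
                                         Int32GetDatum(pf->modulos));

    /* Assumes the partitioning function returns a smallint keyspace point */
    return DatumGetInt16(keyspace);
}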

View File

@ -36,7 +36,7 @@
#define MAX_IO_VALUES 2 #define MAX_IO_VALUES 2
/* hash function signatures */ /* hash function signatures */
void hlib_murmur3(const void *data, size_t len, uint64_t *io); void hlib_murmur3(const void *data, size_t len, uint64_t * io);
/* SQL function */ /* SQL function */
Datum pg_murmur3_hash_string(PG_FUNCTION_ARGS); Datum pg_murmur3_hash_string(PG_FUNCTION_ARGS);
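The only change to this header is pgindent's pointer-declaration spacing on hlib_murmur3(). For context, a usage sketch; treating io[] as a zero-seeded input that receives the 128-bit hash follows common MurmurHash3 x64/128 conventions and is an assumption, not something stated in this diff:

#include <string.h>
#include <stdint.h>
/* hlib_murmur3() and MAX_IO_VALUES come from the header shown above */

/* Hedged usage sketch: hash a key into two 64-bit words and use the low word. */
static uint64_t
hash_key_sketch(const char *key)
{
    uint64_t    io[MAX_IO_VALUES] = {0, 0};

    hlib_murmur3(key, strlen(key), io);

    return io[0];
}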

View File

@ -8,76 +8,89 @@
#include "scanner.h" #include "scanner.h"
typedef union ScanDesc { typedef union ScanDesc
{
IndexScanDesc index_scan; IndexScanDesc index_scan;
HeapScanDesc heap_scan; HeapScanDesc heap_scan;
} ScanDesc; } ScanDesc;
/* /*
* InternalScannerCtx is the context passed to Scanner functions. * InternalScannerCtx is the context passed to Scanner functions.
* It holds a pointer to the user-given ScannerCtx as well as * It holds a pointer to the user-given ScannerCtx as well as
* internal state used during scanning. * internal state used during scanning.
*/ */
typedef struct InternalScannerCtx { typedef struct InternalScannerCtx
Relation tablerel, indexrel; {
TupleInfo tinfo; Relation tablerel,
ScanDesc scan; indexrel;
TupleInfo tinfo;
ScanDesc scan;
ScannerCtx *sctx; ScannerCtx *sctx;
} InternalScannerCtx; } InternalScannerCtx;
/* /*
* Scanner can implement both index and heap scans in a single interface. * Scanner can implement both index and heap scans in a single interface.
*/ */
typedef struct Scanner { typedef struct Scanner
Relation (*open)(InternalScannerCtx *ctx); {
ScanDesc (*beginscan)(InternalScannerCtx *ctx); Relation (*open) (InternalScannerCtx * ctx);
bool (*getnext)(InternalScannerCtx *ctx); ScanDesc(*beginscan) (InternalScannerCtx * ctx);
void (*endscan)(InternalScannerCtx *ctx); bool (*getnext) (InternalScannerCtx * ctx);
void (*close)(InternalScannerCtx *ctx); void (*endscan) (InternalScannerCtx * ctx);
} Scanner; void (*close) (InternalScannerCtx * ctx);
} Scanner;
/* Functions implementing heap scans */ /* Functions implementing heap scans */
static Relation heap_scanner_open(InternalScannerCtx *ctx) static Relation
heap_scanner_open(InternalScannerCtx * ctx)
{ {
ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode); ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode);
return ctx->tablerel; return ctx->tablerel;
} }
static ScanDesc heap_scanner_beginscan(InternalScannerCtx *ctx) static ScanDesc
heap_scanner_beginscan(InternalScannerCtx * ctx)
{ {
ScannerCtx *sctx = ctx->sctx; ScannerCtx *sctx = ctx->sctx;
ctx->scan.heap_scan = heap_beginscan(ctx->tablerel, SnapshotSelf, ctx->scan.heap_scan = heap_beginscan(ctx->tablerel, SnapshotSelf,
sctx->nkeys, sctx->scankey); sctx->nkeys, sctx->scankey);
return ctx->scan; return ctx->scan;
} }
static bool heap_scanner_getnext(InternalScannerCtx *ctx) static bool
heap_scanner_getnext(InternalScannerCtx * ctx)
{ {
ctx->tinfo.tuple = heap_getnext(ctx->scan.heap_scan, ctx->sctx->scandirection); ctx->tinfo.tuple = heap_getnext(ctx->scan.heap_scan, ctx->sctx->scandirection);
return HeapTupleIsValid(ctx->tinfo.tuple); return HeapTupleIsValid(ctx->tinfo.tuple);
} }
static void heap_scanner_endscan(InternalScannerCtx *ctx) static void
heap_scanner_endscan(InternalScannerCtx * ctx)
{ {
heap_endscan(ctx->scan.heap_scan); heap_endscan(ctx->scan.heap_scan);
} }
static void heap_scanner_close(InternalScannerCtx *ctx) static void
heap_scanner_close(InternalScannerCtx * ctx)
{ {
heap_close(ctx->tablerel, ctx->sctx->lockmode); heap_close(ctx->tablerel, ctx->sctx->lockmode);
} }
/* Functions implementing index scans */ /* Functions implementing index scans */
static Relation index_scanner_open(InternalScannerCtx *ctx) static Relation
index_scanner_open(InternalScannerCtx * ctx)
{ {
ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode); ctx->tablerel = heap_open(ctx->sctx->table, ctx->sctx->lockmode);
ctx->indexrel = index_open(ctx->sctx->index, ctx->sctx->lockmode); ctx->indexrel = index_open(ctx->sctx->index, ctx->sctx->lockmode);
return ctx->indexrel; return ctx->indexrel;
} }
static ScanDesc index_scanner_beginscan(InternalScannerCtx *ctx) static ScanDesc
index_scanner_beginscan(InternalScannerCtx * ctx)
{ {
ScannerCtx *sctx = ctx->sctx; ScannerCtx *sctx = ctx->sctx;
ctx->scan.index_scan = index_beginscan(ctx->tablerel, ctx->indexrel, ctx->scan.index_scan = index_beginscan(ctx->tablerel, ctx->indexrel,
SnapshotSelf, sctx->nkeys, SnapshotSelf, sctx->nkeys,
sctx->norderbys); sctx->norderbys);
@ -87,7 +100,8 @@ static ScanDesc index_scanner_beginscan(InternalScannerCtx *ctx)
return ctx->scan; return ctx->scan;
} }
static bool index_scanner_getnext(InternalScannerCtx *ctx) static bool
index_scanner_getnext(InternalScannerCtx * ctx)
{ {
ctx->tinfo.tuple = index_getnext(ctx->scan.index_scan, ctx->sctx->scandirection); ctx->tinfo.tuple = index_getnext(ctx->scan.index_scan, ctx->sctx->scandirection);
ctx->tinfo.ituple = ctx->scan.index_scan->xs_itup; ctx->tinfo.ituple = ctx->scan.index_scan->xs_itup;
@ -95,12 +109,14 @@ static bool index_scanner_getnext(InternalScannerCtx *ctx)
return HeapTupleIsValid(ctx->tinfo.tuple); return HeapTupleIsValid(ctx->tinfo.tuple);
} }
static void index_scanner_endscan(InternalScannerCtx *ctx) static void
index_scanner_endscan(InternalScannerCtx * ctx)
{ {
index_endscan(ctx->scan.index_scan); index_endscan(ctx->scan.index_scan);
} }
static void index_scanner_close(InternalScannerCtx *ctx) static void
index_scanner_close(InternalScannerCtx * ctx)
{ {
heap_close(ctx->tablerel, ctx->sctx->lockmode); heap_close(ctx->tablerel, ctx->sctx->lockmode);
index_close(ctx->indexrel, ctx->sctx->lockmode); index_close(ctx->indexrel, ctx->sctx->lockmode);
@ -133,12 +149,13 @@ static Scanner scanners[] = {
* *
 * Return the number of tuples that were found. * Return the number of tuples that were found.
*/ */
int scanner_scan(ScannerCtx *ctx) int
scanner_scan(ScannerCtx * ctx)
{ {
TupleDesc tuple_desc; TupleDesc tuple_desc;
bool is_valid; bool is_valid;
int num_tuples = 0; int num_tuples = 0;
Scanner *scanner = &scanners[ctx->scantype]; Scanner *scanner = &scanners[ctx->scantype];
InternalScannerCtx ictx = { InternalScannerCtx ictx = {
.sctx = ctx, .sctx = ctx,
}; };
@ -147,7 +164,7 @@ int scanner_scan(ScannerCtx *ctx)
scanner->beginscan(&ictx); scanner->beginscan(&ictx);
tuple_desc = RelationGetDescr(ictx.tablerel); tuple_desc = RelationGetDescr(ictx.tablerel);
ictx.tinfo.scanrel = ictx.tablerel; ictx.tinfo.scanrel = ictx.tablerel;
ictx.tinfo.desc = tuple_desc; ictx.tinfo.desc = tuple_desc;
@ -166,16 +183,19 @@ int scanner_scan(ScannerCtx *ctx)
if (ctx->tuplock.enabled) if (ctx->tuplock.enabled)
{ {
Buffer buffer; Buffer buffer;
HeapUpdateFailureData hufd; HeapUpdateFailureData hufd;
ictx.tinfo.lockresult = heap_lock_tuple(ictx.tablerel, ictx.tinfo.tuple, ictx.tinfo.lockresult = heap_lock_tuple(ictx.tablerel, ictx.tinfo.tuple,
GetCurrentCommandId(false), GetCurrentCommandId(false),
ctx->tuplock.lockmode, ctx->tuplock.lockmode,
ctx->tuplock.waitpolicy, ctx->tuplock.waitpolicy,
false, &buffer, &hufd); false, &buffer, &hufd);
/* A tuple lock pins the underlying buffer, so we need to unpin it. */ /*
* A tuple lock pins the underlying buffer, so we need to
* unpin it.
*/
ReleaseBuffer(buffer); ReleaseBuffer(buffer);
} }
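The heap_lock_tuple()/ReleaseBuffer() path above only runs when the caller enables ctx->tuplock. A hedged sketch of such a caller (written as if in a file that includes scanner.h); the lock mode, wait policy and table-level lock are illustrative choices, and the handler reads the outcome through ti->lockresult as declared in scanner.h below:

/* Illustrative row-locking scan, not part of this commit. */
static bool
locked_tuple_found_sketch(TupleInfo *ti, void *data)
{
    if (ti->lockresult != HeapTupleMayBeUpdated)
        return true;            /* could not lock this tuple version; skip it */

    /* ... modify or delete the locked tuple here ... */
    return true;
}

static int
scan_with_lock_sketch(Oid table, ScanKey keys, int nkeys)
{
    ScannerCtx  ctx = {
        .table = table,
        .scantype = ScannerTypeHeap,
        .scankey = keys,
        .nkeys = nkeys,
        .lockmode = RowExclusiveLock,
        .tuplock = {
            .enabled = true,
            .lockmode = LockTupleExclusive,
            .waitpolicy = LockWaitBlock,
        },
        .scandirection = ForwardScanDirection,
        .tuple_found = locked_tuple_found_sketch,
    };

    return scanner_scan(&ctx);
}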

View File

@ -7,62 +7,76 @@
#include <access/heapam.h> #include <access/heapam.h>
#include <nodes/lockoptions.h> #include <nodes/lockoptions.h>
typedef enum ScannerType { typedef enum ScannerType
{
ScannerTypeHeap, ScannerTypeHeap,
ScannerTypeIndex, ScannerTypeIndex,
} ScannerType; } ScannerType;
/* Tuple information passed on to handlers when scanning for tuples. */ /* Tuple information passed on to handlers when scanning for tuples. */
typedef struct TupleInfo typedef struct TupleInfo
{ {
Relation scanrel; Relation scanrel;
HeapTuple tuple; HeapTuple tuple;
TupleDesc desc; TupleDesc desc;
/* return index tuple if it was requested -- only for index scans */ /* return index tuple if it was requested -- only for index scans */
IndexTuple ituple; IndexTuple ituple;
TupleDesc ituple_desc; TupleDesc ituple_desc;
/* /*
* If the user requested a tuple lock, the result of the lock is passed on * If the user requested a tuple lock, the result of the lock is passed on
* in lockresult. * in lockresult.
*/ */
HTSU_Result lockresult; HTSU_Result lockresult;
} TupleInfo; } TupleInfo;
typedef struct ScannerCtx { typedef struct ScannerCtx
Oid table; {
Oid index; Oid table;
Oid index;
ScannerType scantype; ScannerType scantype;
ScanKey scankey; ScanKey scankey;
int nkeys, norderbys; int nkeys,
bool want_itup; norderbys;
LOCKMODE lockmode; bool want_itup;
struct { LOCKMODE lockmode;
struct
{
LockTupleMode lockmode; LockTupleMode lockmode;
LockWaitPolicy waitpolicy; LockWaitPolicy waitpolicy;
bool enabled; bool enabled;
} tuplock; } tuplock;
ScanDirection scandirection; ScanDirection scandirection;
void *data; /* User-provided data passed on to filter() and tuple_found() */ void *data; /* User-provided data passed on to filter()
* and tuple_found() */
/* Optional handler called before a scan starts, but relation locks are /*
* acquired. */ * Optional handler called before a scan starts, but relation locks are
void (*prescan)(void *data); * acquired.
*/
void (*prescan) (void *data);
/* Optional handler called after a scan finishes and before relation locks /*
* are released. Passes on the number of tuples found. */ * Optional handler called after a scan finishes and before relation locks
void (*postscan)(int num_tuples, void *data); * are released. Passes on the number of tuples found.
*/
void (*postscan) (int num_tuples, void *data);
/* Optional handler to filter tuples. Should return true for tuples that /*
* should be passed on to tuple_found, or false otherwise. */ * Optional handler to filter tuples. Should return true for tuples that
bool (*filter)(TupleInfo *ti, void *data); * should be passed on to tuple_found, or false otherwise.
*/
bool (*filter) (TupleInfo * ti, void *data);
/* Handler for found tuples. Should return true to continue the scan or /*
* false to abort. */ * Handler for found tuples. Should return true to continue the scan or
bool (*tuple_found)(TupleInfo *ti, void *data); * false to abort.
} ScannerCtx; */
bool (*tuple_found) (TupleInfo * ti, void *data);
} ScannerCtx;
/* Performs an index scan or heap scan and returns the number of matching /* Performs an index scan or heap scan and returns the number of matching
* tuples. */ * tuples. */
int scanner_scan(ScannerCtx *ctx); int scanner_scan(ScannerCtx * ctx);
#endif /* TIMESCALEDB_SCANNER_H */ #endif /* TIMESCALEDB_SCANNER_H */
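The handler slots above (data, filter, tuple_found) are what partition_scan() and partition_epoch_scan() plug into earlier in this commit. A small self-contained example of the same pattern, counting tuples that pass a filter; it uses only fields visible in this diff plus standard PostgreSQL macros:

#include <postgres.h>
#include <access/htup_details.h>
#include "scanner.h"

typedef struct CountCtx
{
    int         matches;
} CountCtx;

static bool
non_null_filter(TupleInfo *ti, void *data)
{
    /* Pass only tuples without NULL attributes on to tuple_found */
    return !HeapTupleHasNulls(ti->tuple);
}

static bool
count_tuple_found(TupleInfo *ti, void *data)
{
    ((CountCtx *) data)->matches++;
    return true;                /* keep scanning */
}

static int
count_non_null_rows(Oid table)
{
    CountCtx    cctx = {0};
    ScannerCtx  ctx = {
        .table = table,
        .scantype = ScannerTypeHeap,
        .nkeys = 0,
        .lockmode = AccessShareLock,
        .scandirection = ForwardScanDirection,
        .data = &cctx,
        .filter = non_null_filter,
        .tuple_found = count_tuple_found,
    };

    scanner_scan(&ctx);
    return cctx.matches;
}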

View File

@ -45,7 +45,7 @@
#ifdef PG_MODULE_MAGIC #ifdef PG_MODULE_MAGIC
PG_MODULE_MAGIC; PG_MODULE_MAGIC;
#endif #endif
#define HYPERTABLE_INFO_QUERY "\ #define HYPERTABLE_INFO_QUERY "\
SELECT format('%I.%I', hr.schema_name, hr.table_name)::regclass::oid, \ SELECT format('%I.%I', hr.schema_name, hr.table_name)::regclass::oid, \
pe.partitioning_column, pe.partitioning_func_schema, pe.partitioning_func, pe.partitioning_mod, \ pe.partitioning_column, pe.partitioning_func_schema, pe.partitioning_func, pe.partitioning_mod, \
format('%I.%I', h.root_schema_name, h.root_table_name)::regclass::oid, \ format('%I.%I', h.root_schema_name, h.root_table_name)::regclass::oid, \
@ -56,15 +56,15 @@ PG_MODULE_MAGIC;
INNER JOIN _timescaledb_catalog.partition_epoch pe ON (pe.hypertable_id = h.id) \ INNER JOIN _timescaledb_catalog.partition_epoch pe ON (pe.hypertable_id = h.id) \
WHERE h.schema_name = $1 AND h.table_name = $2" WHERE h.schema_name = $1 AND h.table_name = $2"
void _PG_init(void); void _PG_init(void);
void _PG_fini(void); void _PG_fini(void);
/* Postgres hook interface */ /* Postgres hook interface */
static planner_hook_type prev_planner_hook = NULL; static planner_hook_type prev_planner_hook = NULL;
static ProcessUtility_hook_type prev_ProcessUtility_hook = NULL; static ProcessUtility_hook_type prev_ProcessUtility_hook = NULL;
/* cached plans */ /* cached plans */
static SPIPlanPtr hypertable_info_plan = NULL; static SPIPlanPtr hypertable_info_plan = NULL;
/* variables */ /* variables */
static bool isLoaded = false; static bool isLoaded = false;
@ -77,55 +77,55 @@ static List *callbackConnections = NIL;
typedef struct hypertable_info typedef struct hypertable_info
{ {
Oid replica_oid; Oid replica_oid;
Oid root_oid; Oid root_oid;
int32 hypertable_id; int32 hypertable_id;
List *partitioning_info; List *partitioning_info;
} hypertable_info; } hypertable_info;
typedef struct partitioning_info typedef struct partitioning_info
{ {
Name partitioning_column; Name partitioning_column;
Name partitioning_func_schema; Name partitioning_func_schema;
Name partitioning_func; Name partitioning_func;
int32 partitioning_mod; int32 partitioning_mod;
} partitioning_info; } partitioning_info;
typedef struct change_table_name_context typedef struct change_table_name_context
{ {
List *hypertable_info; List *hypertable_info;
Query *parse; Query *parse;
} change_table_name_context; } change_table_name_context;
typedef struct add_partitioning_func_qual_context typedef struct add_partitioning_func_qual_context
{ {
Query *parse; Query *parse;
List *hypertable_info_list; List *hypertable_info_list;
} add_partitioning_func_qual_context; } add_partitioning_func_qual_context;
hypertable_info *get_hypertable_info(Oid mainRelationOid); hypertable_info *get_hypertable_info(Oid mainRelationOid);
static void add_partitioning_func_qual(Query *parse, List *hypertable_info_list); static void add_partitioning_func_qual(Query *parse, List *hypertable_info_list);
static Node *add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context *context); static Node *add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context * context);
static partitioning_info * static partitioning_info *
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List * hypertable_info_list); get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List *hypertable_info_list);
static Expr * static Expr *
create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod); create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod);
SPIPlanPtr get_hypertable_info_plan(void); SPIPlanPtr get_hypertable_info_plan(void);
void timescaledb_ProcessUtility(Node *parsetree, void timescaledb_ProcessUtility(Node *parsetree,
const char *queryString, const char *queryString,
ProcessUtilityContext context, ProcessUtilityContext context,
ParamListInfo params, ParamListInfo params,
DestReceiver *dest, DestReceiver *dest,
char *completionTag); char *completionTag);
void prev_ProcessUtility(Node *parsetree, void prev_ProcessUtility(Node *parsetree,
const char *queryString, const char *queryString,
ProcessUtilityContext context, ProcessUtilityContext context,
ParamListInfo params, ParamListInfo params,
DestReceiver *dest, DestReceiver *dest,
char *completionTag); char *completionTag);
extern void _hypertable_cache_init(void); extern void _hypertable_cache_init(void);
extern void _hypertable_cache_fini(void); extern void _hypertable_cache_fini(void);
@ -165,11 +165,13 @@ _PG_fini(void)
_chunk_cache_fini(); _chunk_cache_fini();
} }
SPIPlanPtr get_hypertable_info_plan() SPIPlanPtr
get_hypertable_info_plan()
{ {
Oid hypertable_info_plan_args[2] = {TEXTOID, TEXTOID}; Oid hypertable_info_plan_args[2] = {TEXTOID, TEXTOID};
if (hypertable_info_plan != NULL) { if (hypertable_info_plan != NULL)
{
return hypertable_info_plan; return hypertable_info_plan;
} }
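The branch above returns the cached plan; the preparation step sits in the lines this diff does not show. A hedged sketch of the usual SPI plan-caching pattern it presumably follows, using the HYPERTABLE_INFO_QUERY define and the two TEXTOID argument types declared above:

#include <executor/spi.h>

/* Sketch of the elided preparation step; the commit's exact code is not shown. */
static SPIPlanPtr
prepare_hypertable_info_plan_sketch(Oid *argtypes)
{
    SPIPlanPtr  plan;

    SPI_connect();

    plan = SPI_prepare(HYPERTABLE_INFO_QUERY, 2, argtypes);
    if (plan == NULL)
        elog(ERROR, "could not prepare hypertable info query");

    /* Move the plan out of the SPI procedure context so it can be cached */
    SPI_keepplan(plan);

    SPI_finish();

    return plan;
}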
@ -194,12 +196,11 @@ SPIPlanPtr get_hypertable_info_plan()
bool bool
IobeamLoaded(void) IobeamLoaded(void)
{ {
if (!isLoaded) if (!isLoaded)
{ {
Oid id; Oid id;
if(!IsTransactionState()) if (!IsTransactionState())
{ {
return false; return false;
} }
@ -231,17 +232,22 @@ change_table_name_walker(Node *node, void *context)
if (IsA(node, RangeTblEntry)) if (IsA(node, RangeTblEntry))
{ {
RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node; RangeTblEntry *rangeTableEntry = (RangeTblEntry *) node;
change_table_name_context* ctx = (change_table_name_context *)context; change_table_name_context *ctx = (change_table_name_context *) context;
if (rangeTableEntry->rtekind == RTE_RELATION && rangeTableEntry->inh) if (rangeTableEntry->rtekind == RTE_RELATION && rangeTableEntry->inh)
{ {
hypertable_info* hinfo = get_hypertable_info(rangeTableEntry->relid); hypertable_info *hinfo = get_hypertable_info(rangeTableEntry->relid);
if (hinfo != NULL) if (hinfo != NULL)
{ {
ctx->hypertable_info = lappend(ctx->hypertable_info, hinfo); ctx->hypertable_info = lappend(ctx->hypertable_info, hinfo);
rangeTableEntry->relid = hinfo->replica_oid; rangeTableEntry->relid = hinfo->replica_oid;
} }
} else if (rangeTableEntry->rtekind == RTE_RELATION && ctx->parse->commandType == CMD_INSERT){ }
hypertable_info* hinfo = get_hypertable_info(rangeTableEntry->relid); else if (rangeTableEntry->rtekind == RTE_RELATION && ctx->parse->commandType == CMD_INSERT)
{
hypertable_info *hinfo = get_hypertable_info(rangeTableEntry->relid);
if (hinfo != NULL) if (hinfo != NULL)
{ {
rangeTableEntry->relid = create_copy_table(hinfo->hypertable_id, hinfo->root_oid); rangeTableEntry->relid = create_copy_table(hinfo->hypertable_id, hinfo->root_oid);
@ -256,7 +262,7 @@ change_table_name_walker(Node *node, void *context)
context, QTW_EXAMINE_RTES); context, QTW_EXAMINE_RTES);
} }
return expression_tree_walker(node, change_table_name_walker, context); return expression_tree_walker(node, change_table_name_walker, context);
} }
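change_table_name_walker() handles both the Query recursion (via query_tree_walker with QTW_EXAMINE_RTES) and the RangeTblEntry rewrites. Its call site in timescaledb_planner() is elided from this diff; a hedged sketch of how the walker is presumably kicked off and its results collected, written as if in the same file:

/* Sketch of the presumed invocation; the actual call site is not in this diff. */
static List *
collect_hypertable_info_sketch(Query *parse)
{
    change_table_name_context context = {
        .hypertable_info = NIL,
        .parse = parse,
    };

    change_table_name_walker((Node *) parse, &context);

    /* One hypertable_info entry per hypertable referenced by the query */
    return context.hypertable_info;
}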
PlannedStmt * PlannedStmt *
@ -267,7 +273,8 @@ timescaledb_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
if (IobeamLoaded()) if (IobeamLoaded())
{ {
change_table_name_context context; change_table_name_context context;
char* printParse = GetConfigOptionByName("io.print_parse", NULL, true); char *printParse = GetConfigOptionByName("io.print_parse", NULL, true);
/* set to false to not print all internal actions */ /* set to false to not print all internal actions */
SetConfigOption("io.print_parse", "false", PGC_USERSET, PGC_S_SESSION); SetConfigOption("io.print_parse", "false", PGC_USERSET, PGC_S_SESSION);
@ -289,8 +296,9 @@ timescaledb_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
if (prev_planner_hook != NULL) if (prev_planner_hook != NULL)
{ {
/* Call any earlier hooks */ /* Call any earlier hooks */
rv = (prev_planner_hook)(parse, cursorOptions, boundParams); rv = (prev_planner_hook) (parse, cursorOptions, boundParams);
} else }
else
{ {
/* Call the standard planner */ /* Call the standard planner */
rv = standard_planner(parse, cursorOptions, boundParams); rv = standard_planner(parse, cursorOptions, boundParams);
@ -308,20 +316,20 @@ timescaledb_planner(Query *parse, int cursorOptions, ParamListInfo boundParams)
hypertable_info * hypertable_info *
get_hypertable_info(Oid mainRelationOid) get_hypertable_info(Oid mainRelationOid)
{ {
Oid namespace = get_rel_namespace(mainRelationOid); Oid namespace = get_rel_namespace(mainRelationOid);
Oid hypertable_meta = get_relname_relid("hypertable", get_namespace_oid("_timescaledb_catalog", false)); Oid hypertable_meta = get_relname_relid("hypertable", get_namespace_oid("_timescaledb_catalog", false));
char *tableName = get_rel_name(mainRelationOid); char *tableName = get_rel_name(mainRelationOid);
char *schemaName = get_namespace_name(namespace); char *schemaName = get_namespace_name(namespace);
Datum args[2] = {CStringGetTextDatum(schemaName), CStringGetTextDatum(tableName)}; Datum args[2] = {CStringGetTextDatum(schemaName), CStringGetTextDatum(tableName)};
int ret; int ret;
SPIPlanPtr plan = get_hypertable_info_plan(); SPIPlanPtr plan = get_hypertable_info_plan();
/* prevents infinite recursion, don't check hypertable meta tables */ /* prevents infinite recursion, don't check hypertable meta tables */
if ( if (
hypertable_meta == InvalidOid hypertable_meta == InvalidOid
|| namespace == PG_CATALOG_NAMESPACE || namespace == PG_CATALOG_NAMESPACE
|| namespace == get_namespace_oid("_timescaledb_catalog", false) || namespace == get_namespace_oid("_timescaledb_catalog", false)
) )
{ {
return NULL; return NULL;
@ -339,55 +347,67 @@ get_hypertable_info(Oid mainRelationOid)
if (SPI_processed > 0) if (SPI_processed > 0)
{ {
bool isnull; bool isnull;
int total_rows = SPI_processed; int total_rows = SPI_processed;
int j; int j;
/* do not populate list until SPI_finish because the list cannot be populated in the SPI memory context */
List *partitioning_info_list; /*
* do not populate list until SPI_finish because the list cannot be
* populated in the SPI memory context
*/
List *partitioning_info_list;
/* used to track list stuff til list can be populated */ /* used to track list stuff til list can be populated */
partitioning_info **partitioning_info_array = SPI_palloc(total_rows * sizeof(partitioning_info *)); partitioning_info **partitioning_info_array = SPI_palloc(total_rows * sizeof(partitioning_info *));
hypertable_info *hinfo = SPI_palloc(sizeof(hypertable_info)); hypertable_info *hinfo = SPI_palloc(sizeof(hypertable_info));
TupleDesc tupdesc = SPI_tuptable->tupdesc; TupleDesc tupdesc = SPI_tuptable->tupdesc;
HeapTuple tuple = SPI_tuptable->vals[0]; HeapTuple tuple = SPI_tuptable->vals[0];
hinfo->replica_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 1, &isnull)); hinfo->replica_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 1, &isnull));
hinfo->root_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 6, &isnull)); hinfo->root_oid = DatumGetObjectId(SPI_getbinval(tuple, tupdesc, 6, &isnull));
hinfo->hypertable_id = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 7, &isnull)); hinfo->hypertable_id = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 7, &isnull));
for (j = 0; j < total_rows; j++) for (j = 0; j < total_rows; j++)
{ {
HeapTuple tuple = SPI_tuptable->vals[j]; HeapTuple tuple = SPI_tuptable->vals[j];
Name partitioning_func_schema, partitioning_func, partitioning_column; Name partitioning_func_schema,
int32 partitioning_mod; partitioning_func,
partitioning_column;
int32 partitioning_mod;
partitioning_info* info = (partitioning_info *) SPI_palloc(sizeof(partitioning_info)); partitioning_info *info = (partitioning_info *) SPI_palloc(sizeof(partitioning_info));
memset(info, 0, sizeof(partitioning_info)); memset(info, 0, sizeof(partitioning_info));
partitioning_column = DatumGetName(SPI_getbinval(tuple, tupdesc, 2, &isnull)); partitioning_column = DatumGetName(SPI_getbinval(tuple, tupdesc, 2, &isnull));
if (!isnull) { if (!isnull)
{
info->partitioning_column = SPI_palloc(sizeof(NameData)); info->partitioning_column = SPI_palloc(sizeof(NameData));
memcpy(info->partitioning_column, partitioning_column, sizeof(NameData)); memcpy(info->partitioning_column, partitioning_column, sizeof(NameData));
} }
partitioning_func_schema = DatumGetName(SPI_getbinval(tuple, tupdesc, 3, &isnull)); partitioning_func_schema = DatumGetName(SPI_getbinval(tuple, tupdesc, 3, &isnull));
if (!isnull) { if (!isnull)
{
info->partitioning_func_schema = SPI_palloc(sizeof(NameData)); info->partitioning_func_schema = SPI_palloc(sizeof(NameData));
memcpy(info->partitioning_func_schema, partitioning_func_schema, sizeof(NameData)); memcpy(info->partitioning_func_schema, partitioning_func_schema, sizeof(NameData));
} }
partitioning_func = DatumGetName(SPI_getbinval(tuple, tupdesc, 4, &isnull)); partitioning_func = DatumGetName(SPI_getbinval(tuple, tupdesc, 4, &isnull));
if (!isnull) { if (!isnull)
{
info->partitioning_func = SPI_palloc(sizeof(NameData)); info->partitioning_func = SPI_palloc(sizeof(NameData));
memcpy(info->partitioning_func, partitioning_func, sizeof(NameData)); memcpy(info->partitioning_func, partitioning_func, sizeof(NameData));
} }
partitioning_mod = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 5, &isnull)); partitioning_mod = DatumGetInt32(SPI_getbinval(tuple, tupdesc, 5, &isnull));
if (!isnull) { if (!isnull)
{
info->partitioning_mod = partitioning_mod; info->partitioning_mod = partitioning_mod;
} }
@ -414,8 +434,11 @@ get_hypertable_info(Oid mainRelationOid)
char * copy_table_name(int32 hypertable_id) { char *
StringInfo temp_table_name = makeStringInfo(); copy_table_name(int32 hypertable_id)
{
StringInfo temp_table_name = makeStringInfo();
appendStringInfo(temp_table_name, "_copy_temp_%d", hypertable_id); appendStringInfo(temp_table_name, "_copy_temp_%d", hypertable_id);
return temp_table_name->data; return temp_table_name->data;
} }
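copy_table_name() just formats a per-hypertable staging table name; a tiny worked example of the result (the wrapper function and Assert are illustrative only):

/* For hypertable 7 the generated COPY staging table name is "_copy_temp_7". */
static void
copy_table_name_example(void)
{
    char       *name = copy_table_name(7);

    Assert(strcmp(name, "_copy_temp_7") == 0);
}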
@ -426,51 +449,54 @@ char * copy_table_name(int32 hypertable_id) {
* the query contains equivalence qualifiers on the space partition key. * the query contains equivalence qualifiers on the space partition key.
* *
* This function goes through the upper-level qual of a parse tree and finds quals of the form: * This function goes through the upper-level qual of a parse tree and finds quals of the form:
* partitioning_column = const * partitioning_column = const
* It transforms them into the qual: * It transforms them into the qual:
* partitioning_column = const AND partitioning_func(partition_column, partitioning_mod) = partitioning_func(const, partitioning_mod) * partitioning_column = const AND partitioning_func(partition_column, partitioning_mod) = partitioning_func(const, partitioning_mod)
* *
 * This transformation helps because the check constraint on a table is of the form CHECK(partitioning_func(partition_column, partitioning_mod) BETWEEN X AND Y). * This transformation helps because the check constraint on a table is of the form CHECK(partitioning_func(partition_column, partitioning_mod) BETWEEN X AND Y).
*/ */
static void static void
add_partitioning_func_qual(Query *parse, List* hypertable_info_list) add_partitioning_func_qual(Query *parse, List *hypertable_info_list)
{ {
add_partitioning_func_qual_context context; add_partitioning_func_qual_context context;
context.parse = parse; context.parse = parse;
context.hypertable_info_list = hypertable_info_list; context.hypertable_info_list = hypertable_info_list;
parse->jointree->quals = add_partitioning_func_qual_mutator(parse->jointree->quals, &context); parse->jointree->quals = add_partitioning_func_qual_mutator(parse->jointree->quals, &context);
} }
static Node * static Node *
add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context *context) add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_context * context)
{ {
if (node == NULL) if (node == NULL)
return NULL; return NULL;
/* Detect partitioning_column = const. If not fall-thru. /*
* If detected, replace with * Detect partitioning_column = const. If not fall-thru. If detected,
* partitioning_column = const AND * replace with partitioning_column = const AND
* partitioning_func(partition_column, partitioning_mod) = partitioning_func(const, partitioning_mod) * partitioning_func(partition_column, partitioning_mod) =
* partitioning_func(const, partitioning_mod)
*/ */
if (IsA(node, OpExpr)) if (IsA(node, OpExpr))
{ {
OpExpr *exp = (OpExpr *) node; OpExpr *exp = (OpExpr *) node;
if (list_length(exp->args) == 2) if (list_length(exp->args) == 2)
{ {
//only look at var op const or const op var; /* only look at var op const or const op var; */
Node *left = (Node *) linitial(exp->args); Node *left = (Node *) linitial(exp->args);
Node *right = (Node *) lsecond(exp->args); Node *right = (Node *) lsecond(exp->args);
Var *var_expr = NULL; Var *var_expr = NULL;
Node *other_expr = NULL; Node *other_expr = NULL;
if (IsA(left, Var)) if (IsA(left, Var))
{ {
var_expr = (Var *) left; var_expr = (Var *) left;
other_expr = right; other_expr = right;
} else if (IsA(right, Var)) }
else if (IsA(right, Var))
{ {
var_expr = (Var *)right; var_expr = (Var *) right;
other_expr = left; other_expr = left;
} }
@ -484,24 +510,28 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
if (IsA(other_expr, Const)) if (IsA(other_expr, Const))
{ {
/* have a var and const, make sure the op is = */ /* have a var and const, make sure the op is = */
Const *const_expr = (Const *) other_expr; Const *const_expr = (Const *) other_expr;
Oid eq_oid = OpernameGetOprid(list_make2(makeString("pg_catalog"), makeString("=")), exprType(left), exprType(right)); Oid eq_oid = OpernameGetOprid(list_make2(makeString("pg_catalog"), makeString("=")), exprType(left), exprType(right));
if (eq_oid == exp->opno) if (eq_oid == exp->opno)
{ {
/* I now have a var = const. Make sure var is a partitioning column */ /*
* I now have a var = const. Make sure var is a
* partitioning column
*/
partitioning_info *pi = get_partitioning_info_for_partition_column_var(var_expr, partitioning_info *pi = get_partitioning_info_for_partition_column_var(var_expr,
context->parse, context->parse,
context->hypertable_info_list); context->hypertable_info_list);
if (pi != NULL if (pi != NULL
&& pi->partitioning_column != NULL && pi->partitioning_column != NULL
&& pi->partitioning_func != NULL) { && pi->partitioning_func != NULL)
{
/* The var is a partitioning column */ /* The var is a partitioning column */
Expr * partitioning_clause = create_partition_func_equals_const(var_expr, const_expr, Expr *partitioning_clause = create_partition_func_equals_const(var_expr, const_expr,
pi->partitioning_func_schema, pi->partitioning_func_schema,
pi->partitioning_func, pi->partitioning_func,
pi->partitioning_mod); pi->partitioning_mod);
return (Node *) make_andclause(list_make2(node, partitioning_clause)); return (Node *) make_andclause(list_make2(node, partitioning_clause));
@ -519,22 +549,26 @@ add_partitioning_func_qual_mutator(Node *node, add_partitioning_func_qual_contex
/* Returns the partitioning info for a var if the var is a partitioning column. If the var is not a partitioning /* Returns the partitioning info for a var if the var is a partitioning column. If the var is not a partitioning
* column return NULL */ * column return NULL */
static partitioning_info * static partitioning_info *
get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List * hypertable_info_list) { get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List *hypertable_info_list)
{
RangeTblEntry *rte = rt_fetch(var_expr->varno, parse->rtable); RangeTblEntry *rte = rt_fetch(var_expr->varno, parse->rtable);
char *varname = get_rte_attribute_name(rte, var_expr->varattno); char *varname = get_rte_attribute_name(rte, var_expr->varattno);
ListCell *hicell; ListCell *hicell;
foreach(hicell, hypertable_info_list) foreach(hicell, hypertable_info_list)
{ {
hypertable_info *info = lfirst(hicell); hypertable_info *info = lfirst(hicell);
if (rte->relid == info->replica_oid) if (rte->relid == info->replica_oid)
{ {
ListCell *picell; ListCell *picell;
foreach(picell, info->partitioning_info) foreach(picell, info->partitioning_info)
{ {
partitioning_info *pi = lfirst(picell); partitioning_info *pi = lfirst(picell);
if (pi->partitioning_column != NULL && if (pi->partitioning_column != NULL &&
strcmp(NameStr(*(pi->partitioning_column)), varname)==0) strcmp(NameStr(*(pi->partitioning_column)), varname) == 0)
{ {
return pi; return pi;
} }
@ -551,18 +585,18 @@ get_partitioning_info_for_partition_column_var(Var *var_expr, Query *parse, List
static Expr * static Expr *
create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod) create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partitioning_func_schema, Name partitioning_func, int32 partitioning_mod)
{ {
Expr *op_expr; Expr *op_expr;
List *func_name = list_make2(makeString(NameStr(*(partitioning_func_schema))), makeString(NameStr(*(partitioning_func)))); List *func_name = list_make2(makeString(NameStr(*(partitioning_func_schema))), makeString(NameStr(*(partitioning_func))));
Var *var_for_fn_call; Var *var_for_fn_call;
Const *const_for_fn_call; Const *const_for_fn_call;
Const *mod_const_var_call; Const *mod_const_var_call;
Const *mod_const_const_call; Const *mod_const_const_call;
List *args_func_var; List *args_func_var;
List *args_func_const; List *args_func_const;
FuncCall *fc_var; FuncCall *fc_var;
FuncCall *fc_const; FuncCall *fc_const;
Node *f_var; Node *f_var;
Node *f_const; Node *f_const;
mod_const_var_call = makeConst(INT4OID, mod_const_var_call = makeConst(INT4OID,
-1, -1,
@ -590,7 +624,7 @@ create_partition_func_equals_const(Var *var_expr, Const *const_expr, Name partit
f_var = ParseFuncOrColumn(NULL, func_name, args_func_var, fc_var, -1); f_var = ParseFuncOrColumn(NULL, func_name, args_func_var, fc_var, -1);
f_const = ParseFuncOrColumn(NULL, func_name, args_func_const, fc_const, -1); f_const = ParseFuncOrColumn(NULL, func_name, args_func_const, fc_const, -1);
op_expr = make_op(NULL,list_make2(makeString("pg_catalog"), makeString("=")),f_var,f_const,-1); op_expr = make_op(NULL, list_make2(makeString("pg_catalog"), makeString("=")), f_var, f_const, -1);
return op_expr; return op_expr;
} }
@ -601,10 +635,14 @@ PG_FUNCTION_INFO_V1(register_dblink_precommit_connection);
Datum Datum
register_dblink_precommit_connection(PG_FUNCTION_ARGS) register_dblink_precommit_connection(PG_FUNCTION_ARGS)
{ {
/* allocate this stuff in top-level transaction context, so that it survives till commit */ /*
* allocate this stuff in top-level transaction context, so that it
* survives till commit
*/
MemoryContext old = MemoryContextSwitchTo(TopTransactionContext); MemoryContext old = MemoryContextSwitchTo(TopTransactionContext);
char *connectionName = text_to_cstring(PG_GETARG_TEXT_PP(0)); char *connectionName = text_to_cstring(PG_GETARG_TEXT_PP(0));
callbackConnections = lappend(callbackConnections, connectionName); callbackConnections = lappend(callbackConnections, connectionName);
MemoryContextSwitchTo(old); MemoryContextSwitchTo(old);
@ -617,9 +655,10 @@ register_dblink_precommit_connection(PG_FUNCTION_ARGS)
* Look at meta_commands.sql for example usage. Remote commits happen in pre-commit. * Look at meta_commands.sql for example usage. Remote commits happen in pre-commit.
* Remote aborts happen on abort. * Remote aborts happen on abort.
* */ * */
static void io_xact_callback(XactEvent event, void *arg) static void
io_xact_callback(XactEvent event, void *arg)
{ {
ListCell *cell; ListCell *cell;
if (list_length(callbackConnections) == 0) if (list_length(callbackConnections) == 0)
return; return;
@ -628,25 +667,31 @@ static void io_xact_callback(XactEvent event, void *arg)
{ {
case XACT_EVENT_PARALLEL_PRE_COMMIT: case XACT_EVENT_PARALLEL_PRE_COMMIT:
case XACT_EVENT_PRE_COMMIT: case XACT_EVENT_PRE_COMMIT:
foreach (cell, callbackConnections) foreach(cell, callbackConnections)
{ {
char *connection = (char *) lfirst(cell); char *connection = (char *) lfirst(cell);
DirectFunctionCall3(dblink_exec, DirectFunctionCall3(dblink_exec,
PointerGetDatum(cstring_to_text(connection)), PointerGetDatum(cstring_to_text(connection)),
PointerGetDatum(cstring_to_text("COMMIT")), PointerGetDatum(cstring_to_text("COMMIT")),
BoolGetDatum(true)); /* throw error */ BoolGetDatum(true)); /* throw error */
DirectFunctionCall1(dblink_disconnect, PointerGetDatum(cstring_to_text(connection))); DirectFunctionCall1(dblink_disconnect, PointerGetDatum(cstring_to_text(connection)));
} }
break; break;
case XACT_EVENT_PARALLEL_ABORT: case XACT_EVENT_PARALLEL_ABORT:
case XACT_EVENT_ABORT: case XACT_EVENT_ABORT:
/* Be quite careful here. Cannot throw any errors (or infinite loop) and cannot use PG_TRY either.
* Make sure to test with c-asserts on. */ /*
foreach (cell, callbackConnections) * Be quite careful here. Cannot throw any errors (or infinite
* loop) and cannot use PG_TRY either. Make sure to test with
* c-asserts on.
*/
foreach(cell, callbackConnections)
{ {
char *connection = (char *) lfirst(cell); char *connection = (char *) lfirst(cell);
DirectFunctionCall3(dblink_exec, DirectFunctionCall3(dblink_exec,
PointerGetDatum(cstring_to_text(connection)), PointerGetDatum(cstring_to_text(connection)),
PointerGetDatum(cstring_to_text("ABORT")), PointerGetDatum(cstring_to_text("ABORT")),
BoolGetDatum(false)); BoolGetDatum(false));
DirectFunctionCall1(dblink_disconnect, PointerGetDatum(cstring_to_text(connection))); DirectFunctionCall1(dblink_disconnect, PointerGetDatum(cstring_to_text(connection)));
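io_xact_callback() above only fires if it has been registered with the transaction system. The registration call is not part of this diff; RegisterXactCallback() is the standard API for it, presumably invoked once at module load:

#include <access/xact.h>

/* Sketch of the presumed one-time registration; not shown in this diff. */
static void
register_commit_callback_sketch(void)
{
    RegisterXactCallback(io_xact_callback, NULL);
}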
@ -672,9 +717,9 @@ PG_FUNCTION_INFO_V1(pg_gethostname);
Datum Datum
pg_gethostname(PG_FUNCTION_ARGS) pg_gethostname(PG_FUNCTION_ARGS)
{ {
text *t; text *t;
long hostname_max_len = sysconf(_SC_HOST_NAME_MAX); long hostname_max_len = sysconf(_SC_HOST_NAME_MAX);
size_t length; size_t length;
if (hostname_max_len == -1) if (hostname_max_len == -1)
{ {
@ -685,12 +730,12 @@ pg_gethostname(PG_FUNCTION_ARGS)
SET_VARSIZE(t, VARHDRSZ); SET_VARSIZE(t, VARHDRSZ);
memset(VARDATA(t), '\0', hostname_max_len + 1); memset(VARDATA(t), '\0', hostname_max_len + 1);
if (gethostname((char *)VARDATA(t), hostname_max_len) == -1) if (gethostname((char *) VARDATA(t), hostname_max_len) == -1)
{ {
PG_RETURN_TEXT_P(NULL); PG_RETURN_TEXT_P(NULL);
} }
length = strnlen((char *)VARDATA(t), hostname_max_len); length = strnlen((char *) VARDATA(t), hostname_max_len);
SET_VARSIZE(t, VARHDRSZ + length); SET_VARSIZE(t, VARHDRSZ + length);
PG_RETURN_TEXT_P(t); PG_RETURN_TEXT_P(t);
@ -708,7 +753,7 @@ prev_ProcessUtility(Node *parsetree,
if (prev_ProcessUtility_hook != NULL) if (prev_ProcessUtility_hook != NULL)
{ {
/* Call any earlier hooks */ /* Call any earlier hooks */
(prev_ProcessUtility_hook)(parsetree, queryString, context, params, dest, completionTag); (prev_ProcessUtility_hook) (parsetree, queryString, context, params, dest, completionTag);
} }
else else
{ {
@ -720,30 +765,35 @@ prev_ProcessUtility(Node *parsetree,
/* Hook-intercept for ProcessUtility. Used to make COPY use a temp copy table and */ /* Hook-intercept for ProcessUtility. Used to make COPY use a temp copy table and */
/* to block renaming of hypertables. */ /* to block renaming of hypertables. */
void timescaledb_ProcessUtility(Node *parsetree, void
const char *queryString, timescaledb_ProcessUtility(Node *parsetree,
ProcessUtilityContext context, const char *queryString,
ParamListInfo params, ProcessUtilityContext context,
DestReceiver *dest, ParamListInfo params,
char *completionTag) DestReceiver *dest,
char *completionTag)
{ {
if (!IobeamLoaded()){ if (!IobeamLoaded())
{
prev_ProcessUtility(parsetree, queryString, context, params, dest, completionTag); prev_ProcessUtility(parsetree, queryString, context, params, dest, completionTag);
return; return;
} }
if (IsA(parsetree, CopyStmt)) if (IsA(parsetree, CopyStmt))
{ {
CopyStmt *copystmt = (CopyStmt *) parsetree; CopyStmt *copystmt = (CopyStmt *) parsetree;
Oid relId = RangeVarGetRelid(copystmt->relation, NoLock, true); Oid relId = RangeVarGetRelid(copystmt->relation, NoLock, true);
if (OidIsValid(relId)) {
hypertable_info* hinfo = get_hypertable_info(relId); if (OidIsValid(relId))
{
hypertable_info *hinfo = get_hypertable_info(relId);
if (hinfo != NULL) if (hinfo != NULL)
{ {
copystmt->relation = makeRangeVarFromRelid(create_copy_table(hinfo->hypertable_id, hinfo->root_oid)); copystmt->relation = makeRangeVarFromRelid(create_copy_table(hinfo->hypertable_id, hinfo->root_oid));
} }
} }
prev_ProcessUtility((Node *)copystmt, queryString, context, params, dest, completionTag); prev_ProcessUtility((Node *) copystmt, queryString, context, params, dest, completionTag);
return; return;
} }
@ -751,15 +801,18 @@ void timescaledb_ProcessUtility(Node *parsetree,
if (IsA(parsetree, RenameStmt)) if (IsA(parsetree, RenameStmt))
{ {
RenameStmt *renamestmt = (RenameStmt *) parsetree; RenameStmt *renamestmt = (RenameStmt *) parsetree;
Oid relId = RangeVarGetRelid(renamestmt->relation, NoLock, true); Oid relId = RangeVarGetRelid(renamestmt->relation, NoLock, true);
if (OidIsValid(relId)) {
hypertable_info* hinfo = get_hypertable_info(relId); if (OidIsValid(relId))
{
hypertable_info *hinfo = get_hypertable_info(relId);
if (hinfo != NULL && renamestmt->renameType == OBJECT_TABLE) if (hinfo != NULL && renamestmt->renameType == OBJECT_TABLE)
ereport(ERROR, ereport(ERROR,
(errcode(ERRCODE_FEATURE_NOT_SUPPORTED), (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
errmsg("Renaming hypertables is not yet supported"))); errmsg("Renaming hypertables is not yet supported")));
} }
prev_ProcessUtility((Node *)renamestmt, queryString, context, params, dest, completionTag); prev_ProcessUtility((Node *) renamestmt, queryString, context, params, dest, completionTag);
return; return;
} }
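timescaledb_planner() and timescaledb_ProcessUtility() are installed through the prev_planner_hook and prev_ProcessUtility_hook variables declared near the top of this file. The _PG_init()/_PG_fini() bodies are not part of this diff (they also initialize the caches and, presumably, register the transaction callback sketched earlier), so the following shows only the standard hook-chaining pattern they would use:

/* Hook wiring sketch; the commit's _PG_init()/_PG_fini() bodies are not shown. */
static void
install_hooks_sketch(void)
{
    /* Save whatever was installed before so it can be chained to */
    prev_planner_hook = planner_hook;
    planner_hook = timescaledb_planner;

    prev_ProcessUtility_hook = ProcessUtility_hook;
    ProcessUtility_hook = timescaledb_ProcessUtility;
}

static void
remove_hooks_sketch(void)
{
    /* Restore the previous hooks on unload */
    planner_hook = prev_planner_hook;
    ProcessUtility_hook = prev_ProcessUtility_hook;
}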

View File

@ -4,14 +4,14 @@
#include <postgres.h> #include <postgres.h>
#define TIMESCALEDB_CATALOG_SCHEMA "_timescaledb_catalog" #define TIMESCALEDB_CATALOG_SCHEMA "_timescaledb_catalog"
#define TIMESCALEDB_INTERNAL_SCHEMA "_timescaledb_internal" #define TIMESCALEDB_INTERNAL_SCHEMA "_timescaledb_internal"
#define TIMESCALEDB_HYPERTABLE_TABLE "hypertable" #define TIMESCALEDB_HYPERTABLE_TABLE "hypertable"
typedef struct Node Node; typedef struct Node Node;
bool IobeamLoaded(void); bool IobeamLoaded(void);
char *copy_table_name(int32 hypertable_id); char *copy_table_name(int32 hypertable_id);
#endif /* timescaledb_H */ #endif /* timescaledb_H */