Clean up the table schema to get rid of legacy tables and functionality that makes it more difficult to provide an upgrade path. Notable changes:
* Get rid of legacy tables and code
* Simplify directory structure for SQL code
* Simplify table hierarchy: remove root table and make chunk tables inherit directly from main table
* Change chunk table suffix from _data to _chunk
* Simplify schema usage: _timescaledb_internal for internal functions, _timescaledb_catalog for metadata tables
* Remove postgres_fdw dependency
* Improve code comments in SQL code
--parallel queries require big-ish tables, so collect them all here
--so that we only need to generate the data once.
\ir include/create_single_db.sql
SET client_min_messages = WARNING;
DROP DATABASE IF EXISTS single;
SET client_min_messages = NOTICE;
CREATE DATABASE single;
\c single
CREATE EXTENSION IF NOT EXISTS timescaledb;
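--note: test below is a plain PostgreSQL table (create_hypertable is never called),
--so only TimescaleDB's functions are exercised here, not chunking.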
CREATE TABLE test (i int, j double precision, ts timestamp);
--has to be big enough to force at least 2 workers below.
INSERT INTO test SELECT x, x+0.1, _timescaledb_internal.to_timestamp(x*1000) FROM generate_series(1,1000000) AS x;
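--judging by the bucketed results further down, _timescaledb_internal.to_timestamp(x*1000)
--yields timestamps roughly 1 ms apart, so the 1,000,000 rows span about 1000 seconds.
--force_parallel_mode makes the planner produce parallel plans for testing even when no
--performance benefit is expected; max_parallel_workers_per_gather caps the workers a
--single Gather node may launch. With a table this size the planner still plans only 2 workers.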
SET force_parallel_mode = 'on';
SET max_parallel_workers_per_gather = 4;
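--first(value, time) and last(value, time) are TimescaleDB aggregates that return the value
--from the row with the smallest/largest time. The Partial/Finalize Aggregate split under a
--Gather node in the plans below shows they can participate in parallel aggregation.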
EXPLAIN (costs off) SELECT first(i, j) FROM "test";
                 QUERY PLAN                  
---------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Partial Aggregate
               ->  Parallel Seq Scan on test
(5 rows)

SELECT first(i, j) FROM "test";
 first 
-------
     1
(1 row)

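--the same parallel plan shape is expected for last().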
EXPLAIN (costs off) SELECT last(i, j) FROM "test";
                 QUERY PLAN                  
---------------------------------------------
 Finalize Aggregate
   ->  Gather
         Workers Planned: 2
         ->  Partial Aggregate
               ->  Parallel Seq Scan on test
(5 rows)

SELECT last(i, j) FROM "test";
  last   
---------
 1000000
(1 row)

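--time_bucket('1 second', ts) truncates each timestamp to the start of its one-second
--bucket, so the GROUP BY aggregates the ~1000 rows that fall into each second.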
EXPLAIN (costs off) SELECT time_bucket('1 second', ts) sec, last(i, j) FROM "test" GROUP BY sec ORDER BY sec LIMIT 5;
                                QUERY PLAN                                 
---------------------------------------------------------------------------
 Limit
   ->  Finalize GroupAggregate
         Group Key: (time_bucket('@ 1 sec'::interval, ts))
         ->  Sort
               Sort Key: (time_bucket('@ 1 sec'::interval, ts))
               ->  Gather
                     Workers Planned: 2
                     ->  Partial HashAggregate
                           Group Key: time_bucket('@ 1 sec'::interval, ts)
                           ->  Parallel Seq Scan on test
(10 rows)

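--the timestamps start just after the UNIX epoch; the "Dec 31 1969 16:00" rendering
--presumably reflects the Pacific (PST8PDT) time zone used by the regression setup.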
SELECT time_bucket('1 second', ts) sec, last(i, j) FROM "test" GROUP BY sec ORDER BY sec LIMIT 5;
           sec            | last 
--------------------------+------
 Wed Dec 31 16:00:00 1969 |  999
 Wed Dec 31 16:00:01 1969 | 1999
 Wed Dec 31 16:00:02 1969 | 2999
 Wed Dec 31 16:00:03 1969 | 3999
 Wed Dec 31 16:00:04 1969 | 4999
(5 rows)