-- This file and its contents are licensed under the Apache License 2.0.
-- Please see the included NOTICE for copyright information and
-- LICENSE-APACHE for a copy of the license.
-- Test that transaction memory usage with COPY doesn't grow.
-- We need memory usage in PortalContext after the completion of the query, so
-- we'll have to log it from a trigger that runs after the query is completed.
\c :TEST_DBNAME :ROLE_CLUSTER_SUPERUSER;
create table uk_price_paid(price integer, "date" date, postcode1 text,
    postcode2 text, type smallint, is_new bool, duration smallint, addr1 text,
    addr2 text, street text, locality text, town text, district text,
    country text, category smallint);
-- Aim for about 100 partitions, the data is from 1995 to 2022.
select create_hypertable('uk_price_paid', 'date', chunk_time_interval => interval '90 day');
NOTICE:  adding not-null constraint to column "date"
     create_hypertable      
----------------------------
 (1,public,uk_price_paid,t)
(1 row)

-- This is where we log the memory usage.
create table portal_memory_log(id serial, bytes bigint);
-- Returns the amount of memory currently allocated in a given
-- memory context. Only works for PortalContext, and doesn't work for PG 12.
create or replace function ts_debug_allocated_bytes(text) returns bigint
    as :MODULE_PATHNAME, 'ts_debug_allocated_bytes'
    language c strict volatile;
-- Log current memory usage into the log table.
create function log_memory() returns trigger as $$
    begin
        insert into portal_memory_log
            values (default, ts_debug_allocated_bytes('PortalContext'));
        return new;
    end;
$$ language plpgsql;
-- Add a trigger that runs after completion of each INSERT/COPY and logs the
-- current memory usage.
create trigger check_update after insert on uk_price_paid
    for each statement execute function log_memory();
-- Memory leaks often happen on cache invalidation, so make sure the caches are
-- invalidated often and independently (at co-prime periods).
set timescaledb.max_open_chunks_per_insert = 2;
set timescaledb.max_cached_chunks_per_hypertable = 3;
-- Try increasingly larger data sets by concatenating the same file multiple
-- times.
\copy uk_price_paid from program 'bash -c "cat <(zcat < data/prices-10k-random-1.tsv.gz)"';
\copy uk_price_paid from program 'bash -c "cat <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz)"';
\copy uk_price_paid from program 'bash -c "cat <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz)"';
\copy uk_price_paid from program 'bash -c "cat <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz)"';
\copy uk_price_paid from program 'bash -c "cat <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz) <(zcat < data/prices-10k-random-1.tsv.gz)"';
select count(*) from portal_memory_log;
 count 
-------
     5
(1 row)

-- Check that the memory doesn't increase with file size by using linear
-- regression.
select * from portal_memory_log
where (
    select regr_slope(bytes, id - 1) / regr_intercept(bytes, id - 1)::float > 0.05
    from portal_memory_log
);
 id | bytes 
----+-------
(0 rows)
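-- A minimal sketch of the leak-detection idea used above, run against a
-- hypothetical in-line data set instead of portal_memory_log: for a series
-- that grows linearly with the input size, the fitted slope relative to the
-- intercept is 100/100 = 1.0, far above the 0.05 threshold, so the check
-- would flag it as a leak; a flat series gives a ratio near zero and passes.
select regr_slope(bytes, id) / regr_intercept(bytes, id)::float as growth_ratio
from (values (0, 100), (1, 200), (2, 300), (3, 400), (4, 500)) as t(id, bytes);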