Make the copy_memory_usage test less flaky

Increase the failure threshold.
This commit is contained in:
Alexander Kuzmenkov 2022-09-08 12:18:06 +03:00 committed by Alexander Kuzmenkov
parent d65cad94e3
commit 8e4dcddad6
2 changed files with 7 additions and 7 deletions

View File

@ -51,11 +51,11 @@ select count(*) from portal_memory_log;
(1 row)
-- We'll only compare the biggest runs, because the smaller ones have variance
--- due to new chunks being created and other unknown reasons. Allow 5% change of
+-- due to new chunks being created and other unknown reasons. Allow 10% change of
-- memory usage to account for some randomness.
select * from portal_memory_log where (
-select (max(bytes) - min(bytes)) / max(bytes)::float > 0.05
-from portal_memory_log where id >= 4
+select (max(bytes) - min(bytes)) / max(bytes)::float > 0.1
+from portal_memory_log where id >= 3
);
id | bytes
----+-------

View File

@ -51,9 +51,9 @@ set timescaledb.max_cached_chunks_per_hypertable = 3;
select count(*) from portal_memory_log;
-- We'll only compare the biggest runs, because the smaller ones have variance
--- due to new chunks being created and other unknown reasons. Allow 5% change of
+-- due to new chunks being created and other unknown reasons. Allow 10% change of
-- memory usage to account for some randomness.
select * from portal_memory_log where (
-select (max(bytes) - min(bytes)) / max(bytes)::float > 0.05
-from portal_memory_log where id >= 4
-);
+select (max(bytes) - min(bytes)) / max(bytes)::float > 0.1
+from portal_memory_log where id >= 3
+);