Merge pull request #6184 from sfc-gh-anoyes/anoyes/fix-asan-ctest

Fix ctest under ASAN
This commit is contained in:
Andrew Noyes 2022-01-04 10:40:39 -08:00 committed by GitHub
commit e4bbfe468e
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 79 additions and 25 deletions

View File

@@ -124,6 +124,8 @@ public:
sav->sendError(exc); sav->sendError(exc);
} }
void send(Never) { sendError(never_reply()); }
Future<T> getFuture() const { Future<T> getFuture() const {
sav->addFutureRef(); sav->addFutureRef();
return Future<T>(sav); return Future<T>(sav);

View File

@@ -30,6 +30,7 @@
#include "flow/flow.h" #include "flow/flow.h"
#include "flow/actorcompiler.h" // This must be the last #include. #include "flow/actorcompiler.h" // This must be the last #include.
// This actor is used by FlowTransport to serialize the response to a ReplyPromise across the network
ACTOR template <class T> ACTOR template <class T>
void networkSender(Future<T> input, Endpoint endpoint) { void networkSender(Future<T> input, Endpoint endpoint) {
try { try {
@@ -37,6 +38,9 @@ void networkSender(Future<T> input, Endpoint endpoint) {
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(value), endpoint, false); FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(value), endpoint, false);
} catch (Error& err) { } catch (Error& err) {
// if (err.code() == error_code_broken_promise) return; // if (err.code() == error_code_broken_promise) return;
if (err.code() == error_code_never_reply) {
return;
}
ASSERT(err.code() != error_code_actor_cancelled); ASSERT(err.code() != error_code_actor_cancelled);
FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(err), endpoint, false); FlowTransport::transport().sendUnreliable(SerializeSource<ErrorOr<EnsureTable<T>>>(err), endpoint, false);
} }

View File

@@ -89,6 +89,7 @@ ERROR( broken_promise, 1100, "Broken promise" )
ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" ) ERROR( operation_cancelled, 1101, "Asynchronous operation cancelled" )
ERROR( future_released, 1102, "Future has been released" ) ERROR( future_released, 1102, "Future has been released" )
ERROR( connection_leaked, 1103, "Connection object leaked" ) ERROR( connection_leaked, 1103, "Connection object leaked" )
ERROR( never_reply, 1104, "Never reply to the request" )
ERROR( recruitment_failed, 1200, "Recruitment of a server failed" ) // Be careful, catching this will delete the data of a storage server or tlog permanently ERROR( recruitment_failed, 1200, "Recruitment of a server failed" ) // Be careful, catching this will delete the data of a storage server or tlog permanently
ERROR( move_to_removed_server, 1201, "Attempt to move keys to a storage server that was removed" ) ERROR( move_to_removed_server, 1201, "Attempt to move keys to a storage server that was removed" )

View File

@@ -1,30 +1,34 @@
#!/usr/bin/env python3 #!/usr/bin/env python3
import glob
import os import os
import shutil import shutil
import subprocess import subprocess
import sys import sys
import socket
from local_cluster import LocalCluster from local_cluster import LocalCluster
from argparse import ArgumentParser, RawDescriptionHelpFormatter from argparse import ArgumentParser, RawDescriptionHelpFormatter
from random import choice from random import choice
from pathlib import Path from pathlib import Path
class TempCluster: class TempCluster:
def __init__(self, build_dir: str, process_number: int = 1, port: str = None): def __init__(self, build_dir: str, process_number: int = 1, port: str = None):
self.build_dir = Path(build_dir).resolve() self.build_dir = Path(build_dir).resolve()
assert self.build_dir.exists(), "{} does not exist".format(build_dir) assert self.build_dir.exists(), "{} does not exist".format(build_dir)
assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir) assert self.build_dir.is_dir(), "{} is not a directory".format(build_dir)
tmp_dir = self.build_dir.joinpath( tmp_dir = self.build_dir.joinpath(
'tmp', "tmp",
''.join(choice(LocalCluster.valid_letters_for_secret) for i in range(16))) "".join(choice(LocalCluster.valid_letters_for_secret) for i in range(16)),
)
tmp_dir.mkdir(parents=True) tmp_dir.mkdir(parents=True)
self.cluster = LocalCluster(tmp_dir, self.cluster = LocalCluster(
self.build_dir.joinpath('bin', 'fdbserver'), tmp_dir,
self.build_dir.joinpath('bin', 'fdbmonitor'), self.build_dir.joinpath("bin", "fdbserver"),
self.build_dir.joinpath('bin', 'fdbcli'), self.build_dir.joinpath("bin", "fdbmonitor"),
process_number, self.build_dir.joinpath("bin", "fdbcli"),
port = port) process_number,
port=port,
)
self.log = self.cluster.log self.log = self.cluster.log
self.etc = self.cluster.etc self.etc = self.cluster.etc
self.data = self.cluster.data self.data = self.cluster.data
@@ -40,13 +44,14 @@ class TempCluster:
shutil.rmtree(self.tmp_dir) shutil.rmtree(self.tmp_dir)
def close(self): def close(self):
self.cluster.__exit__(None,None,None) self.cluster.__exit__(None, None, None)
shutil.rmtree(self.tmp_dir) shutil.rmtree(self.tmp_dir)
if __name__ == '__main__': if __name__ == "__main__":
parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter, parser = ArgumentParser(
description=""" formatter_class=RawDescriptionHelpFormatter,
description="""
This script automatically configures a temporary local cluster on the machine This script automatically configures a temporary local cluster on the machine
and then calls a command while this cluster is running. As soon as the command and then calls a command while this cluster is running. As soon as the command
returns, the configured cluster is killed and all generated data is deleted. returns, the configured cluster is killed and all generated data is deleted.
@@ -61,30 +66,72 @@ if __name__ == '__main__':
- All occurrences of @ETC_DIR@ will be replaced with the path to the configuration directory. - All occurrences of @ETC_DIR@ will be replaced with the path to the configuration directory.
The environment variable FDB_CLUSTER_FILE is set to the generated cluster for the command if it is not set already. The environment variable FDB_CLUSTER_FILE is set to the generated cluster for the command if it is not set already.
""") """,
parser.add_argument('--build-dir', '-b', metavar='BUILD_DIRECTORY', help='FDB build directory', required=True) )
parser.add_argument('cmd', metavar="COMMAND", nargs="+", help="The command to run") parser.add_argument(
parser.add_argument('--process-number', '-p', help="Number of fdb processes running", type=int, default=1) "--build-dir",
"-b",
metavar="BUILD_DIRECTORY",
help="FDB build directory",
required=True,
)
parser.add_argument("cmd", metavar="COMMAND", nargs="+", help="The command to run")
parser.add_argument(
"--process-number",
"-p",
help="Number of fdb processes running",
type=int,
default=1,
)
args = parser.parse_args() args = parser.parse_args()
errcode = 1 errcode = 1
with TempCluster(args.build_dir, args.process_number) as cluster: with TempCluster(args.build_dir, args.process_number) as cluster:
print("log-dir: {}".format(cluster.log)) print("log-dir: {}".format(cluster.log))
print("etc-dir: {}".format(cluster.etc)) print("etc-dir: {}".format(cluster.etc))
print("data-dir: {}".format(cluster.data)) print("data-dir: {}".format(cluster.data))
print("cluster-file: {}".format(cluster.etc.joinpath('fdb.cluster'))) print("cluster-file: {}".format(cluster.etc.joinpath("fdb.cluster")))
cmd_args = [] cmd_args = []
for cmd in args.cmd: for cmd in args.cmd:
if cmd == '@CLUSTER_FILE@': if cmd == "@CLUSTER_FILE@":
cmd_args.append(str(cluster.etc.joinpath('fdb.cluster'))) cmd_args.append(str(cluster.etc.joinpath("fdb.cluster")))
elif cmd == '@DATA_DIR@': elif cmd == "@DATA_DIR@":
cmd_args.append(str(cluster.data)) cmd_args.append(str(cluster.data))
elif cmd == '@LOG_DIR@': elif cmd == "@LOG_DIR@":
cmd_args.append(str(cluster.log)) cmd_args.append(str(cluster.log))
elif cmd == '@ETC_DIR@': elif cmd == "@ETC_DIR@":
cmd_args.append(str(cluster.etc)) cmd_args.append(str(cluster.etc))
else: else:
cmd_args.append(cmd) cmd_args.append(cmd)
env = dict(**os.environ) env = dict(**os.environ)
env['FDB_CLUSTER_FILE'] = env.get('FDB_CLUSTER_FILE', cluster.etc.joinpath('fdb.cluster')) env["FDB_CLUSTER_FILE"] = env.get(
errcode = subprocess.run(cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env).returncode "FDB_CLUSTER_FILE", cluster.etc.joinpath("fdb.cluster")
)
errcode = subprocess.run(
cmd_args, stdout=sys.stdout, stderr=sys.stderr, env=env
).returncode
sev40s = (
subprocess.getoutput(
"grep -r 'Severity=\"40\"' {}".format(cluster.log.as_posix())
)
.rstrip()
.splitlines()
)
for line in sev40s:
# When running ASAN we expect to see this message. Boost coroutine should be using the correct asan annotations so that it shouldn't produce any false positives.
if line.endswith(
"WARNING: ASan doesn't fully support makecontext/swapcontext functions and may produce false positives in some cases!"
):
continue
print(">>>>>>>>>>>>>>>>>>>> Found severity 40 events - the test fails")
errcode = 1
break
if errcode:
for log_file in glob.glob(os.path.join(cluster.log, "*")):
print(">>>>>>>>>>>>>>>>>>>> Contents of {}:".format(log_file))
with open(log_file, "r") as f:
print(f.read())
sys.exit(errcode) sys.exit(errcode)