Mirror of https://git.proxmox.com/git/mirror_frr (synced 2025-08-13 22:57:45 +00:00)
tests: munet: update to version 0.14.9
Topotest relevant changes:
- add support for `timeout` arg to `cmd_*()`
- handle invalid regexp in CLI commands
- fix long interface name support

Full munet changelog:

munet: 0.14.9: add support for `timeout` arg to `cmd_*()`
munet: 0.14.8: cleanup the cleanup (kill) on launch options
munet: 0.14.7: allow multiple extra commands for shell console init
munet: 0.14.6:
  - qemu: gather gcda files where munet can find them
  - handle invalid regexp in CLI commands
munet: 0.14.5:
  - (podman) pull missing images for containers
  - fix long interface name support
  - add another router example
munet: 0.14.4: mutest: add color to PASS/FAIL indicators on tty consoles
munet: 0.14.3: Add hostnet node that runs its commands in the host network namespace.
munet: 0.14.2:
  - always fail mutest tests on bad json inputs
  - improve ssh-remote for the common use-case of connecting to host-connected devices
  - fix ready-cmd for python v3.11+
munet: 0.14.1: Improved host interface support.

Signed-off-by: Christian Hopps <chopps@labn.net>
parent fbc795a047
commit a465000035
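The topotest-facing highlight is the new `timeout` argument accepted by the `cmd_*()` helpers. Below is a minimal usage sketch, not part of this commit: the node object `r1` and the command string are hypothetical, but the `timeout=` keyword and the `subprocess.TimeoutExpired` behavior follow the diff (the sync path forwards the timeout to `Popen.communicate()`, the async path wraps `communicate()` in `asyncio.wait_for()` and re-raises `subprocess.TimeoutExpired`).

```python
import subprocess

def show_links_with_timeout(r1):
    """Hedged sketch: r1 is assumed to be any munet node exposing cmd_*()."""
    try:
        # New in munet 0.14.9: cmd_*() forwards `timeout` to the underlying
        # Popen.communicate() (sync) or asyncio.wait_for() (async) call.
        return r1.cmd_raises("ip -o link show", timeout=10)
    except subprocess.TimeoutExpired:
        # Both the sync and async paths surface timeouts this way.
        return None
```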
@ -19,6 +19,7 @@ from . import parser
|
||||
from .args import add_launch_args
|
||||
from .base import get_event_loop
|
||||
from .cleanup import cleanup_previous
|
||||
from .cleanup import is_running_in_rundir
|
||||
from .compat import PytestConfig
|
||||
|
||||
|
||||
@ -139,10 +140,11 @@ def main(*args):
|
||||
eap = ap.add_argument_group(title="Uncommon", description="uncommonly used options")
|
||||
eap.add_argument("--log-config", help="logging config file (yaml, toml, json, ...)")
|
||||
eap.add_argument(
|
||||
"--no-kill",
|
||||
"--kill",
|
||||
action="store_true",
|
||||
help="Do not kill previous running processes",
|
||||
help="Kill previous running processes using same rundir and exit",
|
||||
)
|
||||
eap.add_argument("--no-kill", action="store_true", help=argparse.SUPPRESS)
|
||||
eap.add_argument(
|
||||
"--no-cli", action="store_true", help="Do not run the interactive CLI"
|
||||
)
|
||||
@ -157,7 +159,18 @@ def main(*args):
|
||||
sys.exit(0)
|
||||
|
||||
rundir = args.rundir if args.rundir else "/tmp/munet"
|
||||
rundir = os.path.abspath(rundir)
|
||||
args.rundir = rundir
|
||||
|
||||
if args.kill:
|
||||
logging.info("Killing any previous run using rundir: {rundir}")
|
||||
cleanup_previous(args.rundir)
|
||||
elif is_running_in_rundir(args.rundir):
|
||||
logging.fatal(
|
||||
"Munet processes using rundir: %s, use `--kill` to cleanup first", rundir
|
||||
)
|
||||
return 1
|
||||
|
||||
if args.cleanup:
|
||||
if os.path.exists(rundir):
|
||||
if not os.path.exists(f"{rundir}/config.json"):
|
||||
@ -169,6 +182,10 @@ def main(*args):
|
||||
sys.exit(1)
|
||||
else:
|
||||
subprocess.run(["/usr/bin/rm", "-rf", rundir], check=True)
|
||||
|
||||
if args.kill:
|
||||
return 0
|
||||
|
||||
subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)
|
||||
os.environ["MUNET_RUNDIR"] = rundir
|
||||
|
||||
@ -183,9 +200,6 @@ def main(*args):
|
||||
logger.critical("No nodes defined in config file")
|
||||
return 1
|
||||
|
||||
if not args.no_kill:
|
||||
cleanup_previous()
|
||||
|
||||
loop = None
|
||||
status = 4
|
||||
try:
|
||||
|
@ -469,6 +469,8 @@ class Commander: # pylint: disable=R0904
|
||||
env = {**(kwargs["env"] if "env" in kwargs else os.environ)}
|
||||
if "MUNET_NODENAME" not in env:
|
||||
env["MUNET_NODENAME"] = self.name
|
||||
if "MUNET_PID" not in env and "MUNET_PID" in os.environ:
|
||||
env["MUNET_PID"] = os.environ["MUNET_PID"]
|
||||
kwargs["env"] = env
|
||||
|
||||
defaults.update(kwargs)
|
||||
@ -780,8 +782,14 @@ class Commander: # pylint: disable=R0904
|
||||
|
||||
ps1 = re.escape(ps1)
|
||||
ps2 = re.escape(ps2)
|
||||
|
||||
extra = "PAGER=cat; export PAGER; TERM=dumb; unset HISTFILE; set +o emacs +o vi"
|
||||
extra = [
|
||||
"TERM=dumb",
|
||||
"set +o emacs",
|
||||
"set +o vi",
|
||||
"unset HISTFILE",
|
||||
"PAGER=cat",
|
||||
"export PAGER",
|
||||
]
|
||||
pchg = "PS1='{0}' PS2='{1}' PROMPT_COMMAND=''\n".format(ps1p, ps2p)
|
||||
p.send(pchg)
|
||||
return ShellWrapper(p, ps1, ps2, extra_init_cmd=extra, will_echo=will_echo)
|
||||
@ -934,15 +942,25 @@ class Commander: # pylint: disable=R0904
|
||||
|
||||
def _cmd_status(self, cmds, raises=False, warn=True, stdin=None, **kwargs):
|
||||
"""Execute a command."""
|
||||
timeout = None
|
||||
if "timeout" in kwargs:
|
||||
timeout = kwargs["timeout"]
|
||||
del kwargs["timeout"]
|
||||
|
||||
pinput, stdin = Commander._cmd_status_input(stdin)
|
||||
p, actual_cmd = self._popen("cmd_status", cmds, stdin=stdin, **kwargs)
|
||||
o, e = p.communicate(pinput)
|
||||
o, e = p.communicate(pinput, timeout=timeout)
|
||||
return self._cmd_status_finish(p, cmds, actual_cmd, o, e, raises, warn)
|
||||
|
||||
async def _async_cmd_status(
|
||||
self, cmds, raises=False, warn=True, stdin=None, text=None, **kwargs
|
||||
):
|
||||
"""Execute a command."""
|
||||
timeout = None
|
||||
if "timeout" in kwargs:
|
||||
timeout = kwargs["timeout"]
|
||||
del kwargs["timeout"]
|
||||
|
||||
pinput, stdin = Commander._cmd_status_input(stdin)
|
||||
p, actual_cmd = await self._async_popen(
|
||||
"async_cmd_status", cmds, stdin=stdin, **kwargs
|
||||
@ -955,7 +973,12 @@ class Commander: # pylint: disable=R0904
|
||||
|
||||
if encoding is not None and isinstance(pinput, str):
|
||||
pinput = pinput.encode(encoding)
|
||||
o, e = await p.communicate(pinput)
|
||||
try:
|
||||
o, e = await asyncio.wait_for(p.communicate(), timeout=timeout)
|
||||
except (TimeoutError, asyncio.TimeoutError) as error:
|
||||
raise subprocess.TimeoutExpired(
|
||||
cmd=actual_cmd, timeout=timeout, output=None, stderr=None
|
||||
) from error
|
||||
if encoding is not None:
|
||||
o = o.decode(encoding) if o is not None else o
|
||||
e = e.decode(encoding) if e is not None else e
|
||||
@ -1220,7 +1243,13 @@ class Commander: # pylint: disable=R0904
|
||||
if self.is_vm and self.use_ssh and not ns_only: # pylint: disable=E1101
|
||||
if isinstance(cmd, str):
|
||||
cmd = shlex.split(cmd)
|
||||
cmd = ["/usr/bin/env", f"MUNET_NODENAME={self.name}"] + cmd
|
||||
cmd = [
|
||||
"/usr/bin/env",
|
||||
f"MUNET_NODENAME={self.name}",
|
||||
]
|
||||
if "MUNET_PID" in os.environ:
|
||||
cmd.append(f"MUNET_PID={os.environ.get('MUNET_PID')}")
|
||||
cmd += cmd
|
||||
|
||||
# get the ssh cmd
|
||||
cmd = self._get_pre_cmd(False, True, ns_only=ns_only) + [shlex.join(cmd)]
|
||||
@ -1240,6 +1269,8 @@ class Commander: # pylint: disable=R0904
|
||||
envvars = f"MUNET_NODENAME={self.name} NODENAME={self.name}"
|
||||
if hasattr(self, "rundir"):
|
||||
envvars += f" RUNDIR={self.rundir}"
|
||||
if "MUNET_PID" in os.environ:
|
||||
envvars += f" MUNET_PID={os.environ.get('MUNET_PID')}"
|
||||
if hasattr(self.unet, "config_dirname") and self.unet.config_dirname:
|
||||
envvars += f" CONFIGDIR={self.unet.config_dirname}"
|
||||
elif "CONFIGDIR" in os.environ:
|
||||
@ -2520,7 +2551,7 @@ class Bridge(SharedNamespace, InterfaceMixin):
|
||||
|
||||
self.logger.debug("Bridge: Creating")
|
||||
|
||||
assert len(self.name) <= 16 # Make sure fits in IFNAMSIZE
|
||||
# assert len(self.name) <= 16 # Make sure fits in IFNAMSIZE
|
||||
self.cmd_raises(f"ip link delete {name} || true")
|
||||
self.cmd_raises(f"ip link add {name} type bridge")
|
||||
if self.mtu:
|
||||
@ -2644,10 +2675,6 @@ class BaseMunet(LinuxNamespace):
|
||||
|
||||
self.cfgopt = munet_config.ConfigOptionsProxy(pytestconfig)
|
||||
|
||||
super().__init__(
|
||||
name, mount=True, net=isolated, uts=isolated, pid=pid, unet=None, **kwargs
|
||||
)
|
||||
|
||||
# This allows us to cleanup any leftover running munet's
|
||||
if "MUNET_PID" in os.environ:
|
||||
if os.environ["MUNET_PID"] != str(our_pid):
|
||||
@ -2658,6 +2685,10 @@ class BaseMunet(LinuxNamespace):
|
||||
)
|
||||
os.environ["MUNET_PID"] = str(our_pid)
|
||||
|
||||
super().__init__(
|
||||
name, mount=True, net=isolated, uts=isolated, pid=pid, unet=None, **kwargs
|
||||
)
|
||||
|
||||
# this is for testing purposes do not use
|
||||
if not BaseMunet.g_unet:
|
||||
BaseMunet.g_unet = self
|
||||
@ -2765,7 +2796,7 @@ class BaseMunet(LinuxNamespace):
|
||||
self.logger.error('"%s" len %s > 16', nsif1, len(nsif1))
|
||||
elif len(nsif2) > 16:
|
||||
self.logger.error('"%s" len %s > 16', nsif2, len(nsif2))
|
||||
assert len(nsif1) <= 16 and len(nsif2) <= 16 # Make sure fits in IFNAMSIZE
|
||||
assert len(nsif1) < 16 and len(nsif2) < 16 # Make sure fits in IFNAMSIZE
|
||||
|
||||
self.logger.debug("%s: Creating veth pair for link %s", self, lname)
|
||||
|
||||
@ -2993,8 +3024,11 @@ if True: # pylint: disable=using-constant-test
|
||||
self._expectf = self.child.expect
|
||||
|
||||
if extra_init_cmd:
|
||||
self.expect_prompt()
|
||||
self.child.sendline(extra_init_cmd)
|
||||
if isinstance(extra_init_cmd, str):
|
||||
extra_init_cmd = [extra_init_cmd]
|
||||
for ecmd in extra_init_cmd:
|
||||
self.expect_prompt()
|
||||
self.child.sendline(ecmd)
|
||||
self.expect_prompt()
|
||||
|
||||
def expect_prompt(self, timeout=-1):
|
||||
|
@ -59,25 +59,33 @@ def _get_our_pids():
|
||||
return {}
|
||||
|
||||
|
||||
def _get_other_pids():
|
||||
piddict = get_pids_with_env("MUNET_PID")
|
||||
unet_pids = {d["MUNET_PID"] for d in piddict.values()}
|
||||
def _get_other_pids(rundir):
|
||||
if rundir:
|
||||
# get only munet pids using the given rundir
|
||||
piddict = get_pids_with_env("MUNET_RUNDIR", str(rundir))
|
||||
else:
|
||||
# Get all munet pids
|
||||
piddict = get_pids_with_env("MUNET_PID")
|
||||
unet_pids = {d["MUNET_PID"] for d in piddict.values() if "MUNET_PID" in d}
|
||||
pids_by_upid = {p: set() for p in unet_pids}
|
||||
for pid, envdict in piddict.items():
|
||||
if "MUNET_PID" not in envdict:
|
||||
continue
|
||||
unet_pid = envdict["MUNET_PID"]
|
||||
pids_by_upid[unet_pid].add(pid)
|
||||
# Filter out any child pid sets whos munet pid is still running
|
||||
return {x: y for x, y in pids_by_upid.items() if x not in y}
|
||||
|
||||
|
||||
def _get_pids_by_upid(ours):
|
||||
def _get_pids_by_upid(ours, rundir):
|
||||
if ours:
|
||||
assert rundir is None
|
||||
return _get_our_pids()
|
||||
return _get_other_pids()
|
||||
return _get_other_pids(rundir)
|
||||
|
||||
|
||||
def _cleanup_pids(ours):
|
||||
pids_by_upid = _get_pids_by_upid(ours).items()
|
||||
def _cleanup_pids(ours, rundir):
|
||||
pids_by_upid = _get_pids_by_upid(ours, rundir).items()
|
||||
if not pids_by_upid:
|
||||
return
|
||||
|
||||
@ -94,7 +102,7 @@ def _cleanup_pids(ours):
|
||||
# return
|
||||
# time.sleep(1)
|
||||
|
||||
pids_by_upid = _get_pids_by_upid(ours).items()
|
||||
pids_by_upid = _get_pids_by_upid(ours, rundir).items()
|
||||
_kill_piddict(pids_by_upid, signal.SIGKILL)
|
||||
|
||||
|
||||
@ -103,12 +111,16 @@ def cleanup_current():
|
||||
|
||||
Currently this only scans for old processes.
|
||||
"""
|
||||
_cleanup_pids(True)
|
||||
_cleanup_pids(True, None)
|
||||
|
||||
|
||||
def cleanup_previous():
|
||||
def cleanup_previous(rundir=None):
|
||||
"""Attempt to cleanup preview runs.
|
||||
|
||||
Currently this only scans for old processes.
|
||||
"""
|
||||
_cleanup_pids(False)
|
||||
_cleanup_pids(False, rundir)
|
||||
|
||||
|
||||
def is_running_in_rundir(rundir):
|
||||
return bool(get_pids_with_env("MUNET_RUNDIR", str(rundir)))
|
||||
|
@ -106,9 +106,13 @@ def is_host_regex(restr):
|
||||
|
||||
|
||||
def get_host_regex(restr):
|
||||
if len(restr) < 3 or restr[0] != "/" or restr[-1] != "/":
|
||||
try:
|
||||
if len(restr) < 3 or restr[0] != "/" or restr[-1] != "/":
|
||||
return None
|
||||
return re.compile(restr[1:-1])
|
||||
except re.error:
|
||||
logging.error("Invalid regex")
|
||||
return None
|
||||
return re.compile(restr[1:-1])
|
||||
|
||||
|
||||
def host_in(restr, names):
|
||||
@ -126,8 +130,8 @@ def expand_host(restr, names):
|
||||
hosts = []
|
||||
regexp = get_host_regex(restr)
|
||||
if not regexp:
|
||||
assert restr in names
|
||||
hosts.append(restr)
|
||||
if restr in names:
|
||||
hosts.append(restr)
|
||||
else:
|
||||
for name in names:
|
||||
if regexp.fullmatch(name):
|
||||
|
@ -1,5 +1,8 @@
|
||||
version: 1
|
||||
formatters:
|
||||
result_color:
|
||||
class: munet.mulog.ResultColorFormatter
|
||||
format: '%(levelname)5s: %(message)s'
|
||||
brief:
|
||||
format: '%(levelname)5s: %(message)s'
|
||||
operfmt:
|
||||
@ -22,7 +25,7 @@ handlers:
|
||||
info_console:
|
||||
level: INFO
|
||||
class: logging.StreamHandler
|
||||
formatter: brief
|
||||
formatter: result_color
|
||||
stream: ext://sys.stderr
|
||||
oper_console:
|
||||
level: DEBUG
|
||||
|
@ -89,14 +89,14 @@ def main(*args):
|
||||
ecmd = "/usr/bin/nsenter"
|
||||
eargs = [ecmd]
|
||||
|
||||
# start mucmd same way base process is started
|
||||
#start mucmd same way base process is started
|
||||
eargs.append(f"--mount=/proc/{pid}/ns/mnt")
|
||||
eargs.append(f"--net=/proc/{pid}/ns/net")
|
||||
eargs.append(f"--pid=/proc/{pid}/ns/pid_for_children")
|
||||
eargs.append(f"--uts=/proc/{pid}/ns/uts")
|
||||
eargs.append(f"--wd={rundir}")
|
||||
eargs += args.shellcmd
|
||||
# print("Using ", eargs)
|
||||
#print("Using ", eargs)
|
||||
return os.execvpe(ecmd, eargs, {**env, **envcfg})
|
||||
|
||||
|
||||
|
@ -12,6 +12,9 @@ import logging
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
do_color = True
|
||||
|
||||
|
||||
class MultiFileHandler(logging.FileHandler):
|
||||
"""A logging handler that logs to new files based on the logger name.
|
||||
|
||||
@ -118,5 +121,28 @@ class ColorFormatter(logging.Formatter):
|
||||
super().__init__(fmt, datefmt, style, **kwargs)
|
||||
|
||||
def format(self, record):
|
||||
if not do_color:
|
||||
return super().format(record)
|
||||
formatter = self.formatters.get(record.levelno)
|
||||
return formatter.format(record)
|
||||
|
||||
|
||||
class ResultColorFormatter(logging.Formatter):
|
||||
"""A formatter that colorizes PASS/FAIL strings based on level."""
|
||||
|
||||
green = "\x1b[32m"
|
||||
red = "\x1b[31m"
|
||||
reset = "\x1b[0m"
|
||||
|
||||
def format(self, record):
|
||||
s = super().format(record)
|
||||
if not do_color:
|
||||
return s
|
||||
idx = s.find("FAIL")
|
||||
if idx >= 0 and record.levelno > logging.INFO:
|
||||
s = s[:idx] + self.red + "FAIL" + self.reset + s[idx + 4 :]
|
||||
elif record.levelno == logging.INFO:
|
||||
idx = s.find("PASS")
|
||||
if idx >= 0:
|
||||
s = s[:idx] + self.green + "PASS" + self.reset + s[idx + 4 :]
|
||||
return s
|
||||
|
@ -93,6 +93,9 @@
|
||||
"image": {
|
||||
"type": "string"
|
||||
},
|
||||
"hostnet": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"server": {
|
||||
"type": "string"
|
||||
},
|
||||
@ -383,6 +386,9 @@
|
||||
},
|
||||
"ipv6": {
|
||||
"type": "string"
|
||||
},
|
||||
"external": {
|
||||
"type": "boolean"
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -422,6 +428,9 @@
|
||||
"image": {
|
||||
"type": "string"
|
||||
},
|
||||
"hostnet": {
|
||||
"type": "boolean"
|
||||
},
|
||||
"server": {
|
||||
"type": "string"
|
||||
},
|
||||
|
@ -20,6 +20,7 @@ from copy import deepcopy
|
||||
from pathlib import Path
|
||||
from typing import Union
|
||||
|
||||
from munet import mulog
|
||||
from munet import parser
|
||||
from munet.args import add_testing_args
|
||||
from munet.base import Bridge
|
||||
@ -380,8 +381,10 @@ async def run_tests(args):
|
||||
for result in results:
|
||||
test_name, passed, failed, e = result
|
||||
tnum += 1
|
||||
s = "FAIL" if failed or e else "PASS"
|
||||
reslog.info(" %s %s:%s", s, tnum, test_name)
|
||||
if failed or e:
|
||||
reslog.warning(" FAIL %s:%s", tnum, test_name)
|
||||
else:
|
||||
reslog.info(" PASS %s:%s", tnum, test_name)
|
||||
|
||||
reslog.info("-" * 70)
|
||||
reslog.info(
|
||||
@ -447,8 +450,9 @@ def main():
|
||||
sys.exit(0)
|
||||
|
||||
rundir = args.rundir if args.rundir else "/tmp/mutest"
|
||||
args.rundir = Path(rundir)
|
||||
os.environ["MUNET_RUNDIR"] = rundir
|
||||
rundir = Path(rundir).absolute()
|
||||
args.rundir = rundir
|
||||
os.environ["MUNET_RUNDIR"] = str(rundir)
|
||||
subprocess.run(f"mkdir -p {rundir} && chmod 755 {rundir}", check=True, shell=True)
|
||||
|
||||
config = parser.setup_logging(args, config_base="logconf-mutest")
|
||||
@ -459,6 +463,9 @@ def main():
|
||||
fconfig.get("format"), fconfig.get("datefmt")
|
||||
)
|
||||
|
||||
if not hasattr(sys.stderr, "isatty") or not sys.stderr.isatty():
|
||||
mulog.do_color = False
|
||||
|
||||
loop = None
|
||||
status = 4
|
||||
try:
|
||||
|
@ -544,7 +544,9 @@ class TestCase:
|
||||
"""
|
||||
js = self._command_json(target, cmd)
|
||||
if js is None:
|
||||
return expect_fail, {}
|
||||
# Always fail on bad json, even if user expected failure
|
||||
# return expect_fail, {}
|
||||
return False, {}
|
||||
|
||||
try:
|
||||
# Convert to string to validate the input is valid JSON
|
||||
@ -556,7 +558,9 @@ class TestCase:
|
||||
self.olog.warning(
|
||||
"JSON load failed. Check match value is in JSON format: %s", error
|
||||
)
|
||||
return expect_fail, {}
|
||||
# Always fail on bad json, even if user expected failure
|
||||
# return expect_fail, {}
|
||||
return False, {}
|
||||
|
||||
if exact_match:
|
||||
deep_diff = json_cmp(expect, js)
|
||||
|
@ -28,8 +28,10 @@ from . import cli
|
||||
from .base import BaseMunet
|
||||
from .base import Bridge
|
||||
from .base import Commander
|
||||
from .base import InterfaceMixin
|
||||
from .base import LinuxNamespace
|
||||
from .base import MunetError
|
||||
from .base import SharedNamespace
|
||||
from .base import Timeout
|
||||
from .base import _async_get_exec_path
|
||||
from .base import _get_exec_path
|
||||
@ -132,6 +134,22 @@ def convert_ranges_to_bitmask(ranges):
|
||||
return bitmask
|
||||
|
||||
|
||||
class ExternalNetwork(SharedNamespace, InterfaceMixin):
|
||||
"""A network external to munet."""
|
||||
|
||||
def __init__(self, name=None, unet=None, logger=None, mtu=None, config=None):
|
||||
"""Create an external network."""
|
||||
del logger # avoid linter
|
||||
del mtu # avoid linter
|
||||
# Do we want to use os.getpid() rather than unet.pid?
|
||||
super().__init__(name, pid=unet.pid, nsflags=unet.nsflags, unet=unet)
|
||||
self.config = config if config else {}
|
||||
|
||||
async def _async_delete(self):
|
||||
self.logger.debug("%s: deleting", self)
|
||||
await super()._async_delete()
|
||||
|
||||
|
||||
class L2Bridge(Bridge):
|
||||
"""A linux bridge with no IP network address."""
|
||||
|
||||
@ -555,17 +573,38 @@ class NodeMixin:
|
||||
await super()._async_delete()
|
||||
|
||||
|
||||
class HostnetNode(NodeMixin, LinuxNamespace):
|
||||
"""A node for running commands in the host network namespace."""
|
||||
|
||||
def __init__(self, name, pid=True, **kwargs):
|
||||
if "net" in kwargs:
|
||||
del kwargs["net"]
|
||||
super().__init__(name, pid=pid, net=False, **kwargs)
|
||||
|
||||
self.logger.debug("%s: creating", self)
|
||||
|
||||
self.mgmt_ip = None
|
||||
self.mgmt_ip6 = None
|
||||
self.set_ns_cwd(self.rundir)
|
||||
|
||||
super().pytest_hook_open_shell()
|
||||
self.logger.info("%s: created", self)
|
||||
|
||||
def get_ifname(self, netname): # pylint: disable=useless-return
|
||||
del netname
|
||||
return None
|
||||
|
||||
async def _async_delete(self):
|
||||
self.logger.debug("%s: deleting", self)
|
||||
await super()._async_delete()
|
||||
|
||||
|
||||
class SSHRemote(NodeMixin, Commander):
|
||||
"""SSHRemote a node representing an ssh connection to something."""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
name,
|
||||
server,
|
||||
port=22,
|
||||
user=None,
|
||||
password=None,
|
||||
idfile=None,
|
||||
**kwargs,
|
||||
):
|
||||
super().__init__(name, **kwargs)
|
||||
@ -580,32 +619,33 @@ class SSHRemote(NodeMixin, Commander):
|
||||
self.mgmt_ip = None
|
||||
self.mgmt_ip6 = None
|
||||
|
||||
self.port = port
|
||||
|
||||
if user:
|
||||
self.user = user
|
||||
elif "SUDO_USER" in os.environ:
|
||||
self.user = os.environ["SUDO_USER"]
|
||||
else:
|
||||
self.server = self.config["server"]
|
||||
self.port = int(self.config.get("server-port", 22))
|
||||
self.sudo_user = os.environ.get("SUDO_USER")
|
||||
self.user = self.config.get("ssh-user")
|
||||
if not self.user:
|
||||
self.user = self.sudo_user
|
||||
if not self.user:
|
||||
self.user = getpass.getuser()
|
||||
self.password = password
|
||||
self.idfile = idfile
|
||||
|
||||
self.server = f"{self.user}@{server}"
|
||||
self.password = self.config.get("ssh-password")
|
||||
self.idfile = self.config.get("ssh-identity-file")
|
||||
self.use_host_network = None
|
||||
|
||||
# Setup our base `pre-cmd` values
|
||||
#
|
||||
# We maybe should add environment variable transfer here in particular
|
||||
# MUNET_NODENAME. The problem is the user has to explicitly approve
|
||||
# of SendEnv variables.
|
||||
self.__base_cmd = [
|
||||
get_exec_path_host("sudo"),
|
||||
"-E",
|
||||
f"-u{self.user}",
|
||||
get_exec_path_host("ssh"),
|
||||
]
|
||||
if port != 22:
|
||||
self.__base_cmd.append(f"-p{port}")
|
||||
self.__base_cmd = []
|
||||
if self.idfile and self.sudo_user:
|
||||
self.__base_cmd += [
|
||||
get_exec_path_host("sudo"),
|
||||
"-E",
|
||||
f"-u{self.sudo_user}",
|
||||
]
|
||||
self.__base_cmd.append(get_exec_path_host("ssh"))
|
||||
if self.port != 22:
|
||||
self.__base_cmd.append(f"-p{self.port}")
|
||||
self.__base_cmd.append("-q")
|
||||
self.__base_cmd.append("-oStrictHostKeyChecking=no")
|
||||
self.__base_cmd.append("-oUserKnownHostsFile=/dev/null")
|
||||
@ -615,15 +655,34 @@ class SSHRemote(NodeMixin, Commander):
|
||||
# self.__base_cmd.append("-oSendVar='TEST'")
|
||||
self.__base_cmd_pty = list(self.__base_cmd)
|
||||
self.__base_cmd_pty.append("-t")
|
||||
self.__base_cmd.append(self.server)
|
||||
self.__base_cmd_pty.append(self.server)
|
||||
server_str = f"{self.user}@{self.server}"
|
||||
self.__base_cmd.append(server_str)
|
||||
self.__base_cmd_pty.append(server_str)
|
||||
# self.set_pre_cmd(pre_cmd, pre_cmd_tty)
|
||||
|
||||
self.logger.info("%s: created", self)
|
||||
|
||||
def _get_pre_cmd(self, use_str, use_pty, ns_only=False, **kwargs):
|
||||
pre_cmd = []
|
||||
if self.unet:
|
||||
# None on first use, set after
|
||||
if self.use_host_network is None:
|
||||
# We have networks now so try and ping the server in the namespace
|
||||
if not self.unet:
|
||||
self.use_host_network = True
|
||||
else:
|
||||
rc, _, _ = self.unet.cmd_status(f"ping -w1 -c1 {self.server}")
|
||||
if rc:
|
||||
self.use_host_network = True
|
||||
else:
|
||||
self.use_host_network = False
|
||||
|
||||
if self.use_host_network:
|
||||
self.logger.debug("Using host namespace for ssh connection")
|
||||
else:
|
||||
self.logger.debug("Using munet namespace for ssh connection")
|
||||
|
||||
if self.use_host_network:
|
||||
pre_cmd = []
|
||||
else:
|
||||
pre_cmd = self.unet._get_pre_cmd(False, use_pty, ns_only=False, **kwargs)
|
||||
if ns_only:
|
||||
return pre_cmd
|
||||
@ -979,17 +1038,16 @@ ff02::2\tip6-allrouters
|
||||
)
|
||||
self.unet.rootcmd.cmd_status(f"ip link set {dname} name {hname}")
|
||||
|
||||
rc, o, _ = self.unet.rootcmd.cmd_status("ip -o link show")
|
||||
m = re.search(rf"\d+:\s+{re.escape(hname)}:.*", o)
|
||||
if m:
|
||||
self.unet.rootcmd.cmd_nostatus(f"ip link set {hname} down ")
|
||||
self.unet.rootcmd.cmd_raises(f"ip link set {hname} netns {self.pid}")
|
||||
# Make sure the interface is there.
|
||||
self.unet.rootcmd.cmd_raises(f"ip -o link show {hname}")
|
||||
self.unet.rootcmd.cmd_nostatus(f"ip link set {hname} down ")
|
||||
self.unet.rootcmd.cmd_raises(f"ip link set {hname} netns {self.pid}")
|
||||
|
||||
# Wait for interface to show up in namespace
|
||||
for retry in range(0, 10):
|
||||
rc, o, _ = self.cmd_status(f"ip -o link show {hname}")
|
||||
if not rc:
|
||||
if re.search(rf"\d+: {re.escape(hname)}:.*", o):
|
||||
break
|
||||
break
|
||||
if retry > 0:
|
||||
await asyncio.sleep(1)
|
||||
self.cmd_raises(f"ip link set {hname} name {lname}")
|
||||
@ -1001,12 +1059,11 @@ ff02::2\tip6-allrouters
|
||||
lname = self.host_intfs[hname]
|
||||
self.cmd_raises(f"ip link set {lname} down")
|
||||
self.cmd_raises(f"ip link set {lname} name {hname}")
|
||||
self.cmd_status(f"ip link set netns 1 dev {hname}")
|
||||
# The above is failing sometimes and not sure why
|
||||
# logging.error(
|
||||
# "XXX after setns %s",
|
||||
# self.unet.rootcmd.cmd_nostatus(f"ip link show {hname}"),
|
||||
# )
|
||||
# We need to NOT run this command in the new pid namespace so that pid 1 is the
|
||||
# root init process and so the interface gets returned to the root namespace
|
||||
self.unet.rootcmd.cmd_raises(
|
||||
f"nsenter -t {self.pid} -n ip link set netns 1 dev {hname}"
|
||||
)
|
||||
del self.host_intfs[hname]
|
||||
|
||||
async def add_phy_intf(self, devaddr, lname):
|
||||
@ -1917,7 +1974,11 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
|
||||
# InterfaceMixin override
|
||||
# We need a name unique in the shared namespace.
|
||||
def get_ns_ifname(self, ifname):
|
||||
return self.name + ifname
|
||||
ifname = self.name + ifname
|
||||
ifname = re.sub("gigabitethernet", "GE", ifname, flags=re.I)
|
||||
if len(ifname) >= 16:
|
||||
ifname = ifname[0:7] + ifname[-8:]
|
||||
return ifname
|
||||
|
||||
async def add_host_intf(self, hname, lname, mtu=None):
|
||||
# L3QemuVM needs it's own add_host_intf for macvtap, We need to create the tap
|
||||
@ -2093,16 +2154,22 @@ class L3QemuVM(L3NodeMixin, LinuxNamespace):
|
||||
)
|
||||
con.cmd_raises(rf"rm -rf {tmpdir}")
|
||||
|
||||
self.logger.info("Saved coverage data in VM at %s", dest)
|
||||
self.logger.debug("Saved coverage data in VM at %s", dest)
|
||||
ldest = os.path.join(self.rundir, "gcov-data.tgz")
|
||||
if self.use_ssh:
|
||||
self.cmd_raises(["/bin/cat", dest], stdout=open(ldest, "wb"))
|
||||
self.logger.info("Saved coverage data on host at %s", ldest)
|
||||
self.logger.debug("Saved coverage data on host at %s", ldest)
|
||||
else:
|
||||
output = con.cmd_raises(rf"base64 {dest}")
|
||||
with open(ldest, "wb") as f:
|
||||
f.write(base64.b64decode(output))
|
||||
self.logger.info("Saved coverage data on host at %s", ldest)
|
||||
self.logger.debug("Saved coverage data on host at %s", ldest)
|
||||
self.logger.info("Extracting coverage for %s into %s", self.name, ldest)
|
||||
|
||||
# We need to place the gcda files where munet expects to find them
|
||||
gcdadir = Path(os.environ["GCOV_PREFIX"]) / self.name
|
||||
self.unet.cmd_raises_nsonly(f"mkdir -p {gcdadir}")
|
||||
self.unet.cmd_raises_nsonly(f"tar -C {gcdadir} -xzf {ldest}")
|
||||
|
||||
async def _opencons(
|
||||
self,
|
||||
@ -2878,7 +2945,9 @@ ff02::2\tip6-allrouters
|
||||
else:
|
||||
node2.set_lan_addr(node1, c2)
|
||||
|
||||
if "physical" not in c1 and not node1.is_vm:
|
||||
if isinstance(node1, ExternalNetwork):
|
||||
pass
|
||||
elif "physical" not in c1 and not node1.is_vm:
|
||||
node1.set_intf_constraints(if1, **c1)
|
||||
if "physical" not in c2 and not node2.is_vm:
|
||||
node2.set_intf_constraints(if2, **c2)
|
||||
@ -2891,14 +2960,8 @@ ff02::2\tip6-allrouters
|
||||
cls = L3QemuVM
|
||||
elif config and config.get("server"):
|
||||
cls = SSHRemote
|
||||
kwargs["server"] = config["server"]
|
||||
kwargs["port"] = int(config.get("server-port", 22))
|
||||
if "ssh-identity-file" in config:
|
||||
kwargs["idfile"] = config.get("ssh-identity-file")
|
||||
if "ssh-user" in config:
|
||||
kwargs["user"] = config.get("ssh-user")
|
||||
if "ssh-password" in config:
|
||||
kwargs["password"] = config.get("ssh-password")
|
||||
elif config and config.get("hostnet"):
|
||||
cls = HostnetNode
|
||||
else:
|
||||
cls = L3NamespaceNode
|
||||
return super().add_host(name, cls=cls, config=config, **kwargs)
|
||||
@ -2908,7 +2971,12 @@ ff02::2\tip6-allrouters
|
||||
if config is None:
|
||||
config = {}
|
||||
|
||||
cls = L3Bridge if config.get("ip") else L2Bridge
|
||||
if config.get("external"):
|
||||
cls = ExternalNetwork
|
||||
elif config.get("ip"):
|
||||
cls = L3Bridge
|
||||
else:
|
||||
cls = L2Bridge
|
||||
mtu = kwargs.get("mtu", config.get("mtu"))
|
||||
return super().add_switch(name, cls=cls, config=config, mtu=mtu, **kwargs)
|
||||
|
||||
@ -2947,7 +3015,7 @@ ff02::2\tip6-allrouters
|
||||
bdir = Path(os.environ["GCOV_BUILD_DIR"])
|
||||
gcdadir = Path(os.environ["GCOV_PREFIX"])
|
||||
|
||||
# Create GCNO symlinks
|
||||
# Create .gcno symlinks if they don't already exist, for kernel they will
|
||||
self.logger.info("Creating .gcno symlinks from '%s' to '%s'", gcdadir, bdir)
|
||||
commander.cmd_raises(
|
||||
f'cd "{gcdadir}"; bdir="{bdir}"'
|
||||
@ -2955,9 +3023,11 @@ ff02::2\tip6-allrouters
|
||||
for f in $(find . -name '*.gcda'); do
|
||||
f=${f#./};
|
||||
f=${f%.gcda}.gcno;
|
||||
ln -fs $bdir/$f $f;
|
||||
touch -h -r $bdir/$f $f;
|
||||
echo $f;
|
||||
if [ ! -h "$f" ]; then
|
||||
ln -fs $bdir/$f $f;
|
||||
touch -h -r $bdir/$f $f;
|
||||
echo $f;
|
||||
fi;
|
||||
done"""
|
||||
)
|
||||
|
||||
@ -2977,10 +3047,30 @@ done"""
|
||||
# f"\nCOVERAGE-SUMMARY-START\n{output}\nCOVERAGE-SUMMARY-END\n"
|
||||
# )
|
||||
|
||||
async def load_images(self, images):
|
||||
tasks = []
|
||||
for image in images:
|
||||
logging.debug("Checking for image %s", image)
|
||||
rc, _, _ = self.rootcmd.cmd_status(
|
||||
f"podman image inspect {image}", warn=False
|
||||
)
|
||||
if not rc:
|
||||
continue
|
||||
logging.info("Pulling missing image %s", image)
|
||||
aw = self.rootcmd.async_cmd_raises(f"podman pull {image}")
|
||||
tasks.append(asyncio.create_task(aw))
|
||||
if not tasks:
|
||||
return
|
||||
_, pending = await asyncio.wait(tasks, timeout=600)
|
||||
assert not pending, "Failed to pull container images"
|
||||
|
||||
async def run(self):
|
||||
tasks = []
|
||||
|
||||
hosts = self.hosts.values()
|
||||
|
||||
images = {x.container_image for x in hosts if hasattr(x, "container_image")}
|
||||
await self.load_images(images)
|
||||
|
||||
launch_nodes = [x for x in hosts if hasattr(x, "launch")]
|
||||
launch_nodes = [x for x in launch_nodes if x.config.get("qemu")]
|
||||
run_nodes = [x for x in hosts if x.has_run_cmd()]
|
||||
@ -3049,10 +3139,10 @@ done"""
|
||||
await asyncio.sleep(0.25)
|
||||
logging.debug("%s is ready!", x)
|
||||
|
||||
tasks = [asyncio.create_task(wait_until_ready(x)) for x in ready_nodes]
|
||||
|
||||
logging.debug("Waiting for ready on nodes: %s", ready_nodes)
|
||||
_, pending = await asyncio.wait(
|
||||
[wait_until_ready(x) for x in ready_nodes], timeout=30
|
||||
)
|
||||
_, pending = await asyncio.wait(tasks, timeout=30)
|
||||
if pending:
|
||||
logging.warning("Timeout waiting for ready: %s", pending)
|
||||
for nr in pending:
|
||||
|
@ -25,7 +25,6 @@ from ..base import BaseMunet
|
||||
from ..base import Bridge
|
||||
from ..base import get_event_loop
|
||||
from ..cleanup import cleanup_current
|
||||
from ..cleanup import cleanup_previous
|
||||
from ..native import L3NodeMixin
|
||||
from ..parser import async_build_topology
|
||||
from ..parser import get_config
|
||||
@ -130,9 +129,12 @@ def session_autouse():
|
||||
else:
|
||||
is_worker = True
|
||||
|
||||
if not is_worker:
|
||||
# This is unfriendly to multi-instance
|
||||
cleanup_previous()
|
||||
# We dont want to kill all munet and we don't have the rundir here yet
|
||||
# This was more useful back when we used to leave processes around a lot
|
||||
# more.
|
||||
# if not is_worker:
|
||||
# # This is unfriendly to multi-instance
|
||||
# cleanup_previous()
|
||||
|
||||
# We never pop as we want to keep logging
|
||||
_push_log_handler("session", "/tmp/unet-test/pytest-session.log")
|
||||
@ -150,8 +152,9 @@ def session_autouse():
|
||||
|
||||
@pytest.fixture(autouse=True, scope="module")
|
||||
def module_autouse(request):
|
||||
root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
|
||||
logpath = get_test_logdir(request.node.nodeid, True)
|
||||
logpath = os.path.join("/tmp/unet-test", logpath, "pytest-exec.log")
|
||||
logpath = os.path.join(root_path, logpath, "pytest-exec.log")
|
||||
with log_handler("module", logpath):
|
||||
sdir = os.path.dirname(os.path.realpath(request.fspath))
|
||||
with chdir(sdir, "module autouse fixture"):
|
||||
@ -174,7 +177,8 @@ def event_loop():
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def rundir_module():
|
||||
d = os.path.join("/tmp/unet-test", get_test_logdir(module=True))
|
||||
root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
|
||||
d = os.path.join(root_path, get_test_logdir(module=True))
|
||||
logging.debug("conftest: test module rundir %s", d)
|
||||
return d
|
||||
|
||||
@ -375,7 +379,8 @@ async def astepf(pytestconfig):
|
||||
|
||||
@pytest.fixture(scope="function")
|
||||
def rundir():
|
||||
d = os.path.join("/tmp/unet-test", get_test_logdir(module=False))
|
||||
root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
|
||||
d = os.path.join(root_path, get_test_logdir(module=False))
|
||||
logging.debug("conftest: test function rundir %s", d)
|
||||
return d
|
||||
|
||||
@ -383,9 +388,8 @@ def rundir():
|
||||
# Configure logging
|
||||
@pytest.hookimpl(hookwrapper=True, tryfirst=True)
|
||||
def pytest_runtest_setup(item):
|
||||
d = os.path.join(
|
||||
"/tmp/unet-test", get_test_logdir(nodeid=item.nodeid, module=False)
|
||||
)
|
||||
root_path = os.environ.get("MUNET_RUNDIR", "/tmp/unet-test")
|
||||
d = os.path.join(root_path, get_test_logdir(nodeid=item.nodeid, module=False))
|
||||
config = item.config
|
||||
logging_plugin = config.pluginmanager.get_plugin("logging-plugin")
|
||||
filename = Path(d, "pytest-exec.log")
|
||||
|
@ -15,7 +15,6 @@ from pathlib import Path
|
||||
|
||||
class MatchFoundError(Exception):
|
||||
"""An error raised when a match is not found."""
|
||||
|
||||
def __init__(self, watchlog, match):
|
||||
self.watchlog = watchlog
|
||||
self.match = match
|
||||
|