From aa2549ca160b740ed00a0ee3aa8a3952f70a317f Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Fri, 19 May 2023 12:37:22 +0200
Subject: [PATCH 001/384] pyproject.toml: move pysnmp -> pysnmp-lextudio, drop
 unnecessary pysnmp-mibs

The original author of pysnmp passed away and the lextudio folks took
over maintenance [1]. A request to take over the pysnmp PyPI project is
pending [2]. Let's move to the maintained fork sooner rather than
later.

While at it, drop the pysnmp-mibs dependency altogether, because it is
no longer required for pysnmp>=4.3 [3].

[1] https://github.com/etingof/pysnmp/issues/429
[2] https://github.com/pypi/support/issues/2420
[3] https://github.com/lextudio/pysnmp-mibs/blob/master/README.md

Signed-off-by: Bastian Krause
---
 pyproject.toml | 11 +++--------
 1 file changed, 3 insertions(+), 8 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index fe06d30f2..1e1195694 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -63,10 +63,7 @@ pyvisa = [
     "pyvisa>=1.11.3",
     "PyVISA-py>=0.5.2",
 ]
-snmp = [
-    "pysnmp>=4.4.12",
-    "pysnmp-mibs>=0.1.6",
-]
+snmp = ["pysnmp-lextudio>=4.4.12"]
 vxi11 = ["python-vxi11>=0.9"]
 xena = ["xenavalkyrie>=3.0.1"]
 deb = [
@@ -77,8 +74,7 @@ deb = [
     "onewire>=0.2",
 
     # labgrid[snmp]
-    "pysnmp>=4.4.12",
-    "pysnmp-mibs>=0.1.6",
+    "pysnmp-lextudio>=4.4.12",
 ]
 dev = [
     # references to other optional dependency groups
@@ -111,8 +107,7 @@ dev = [
     "PyVISA-py>=0.5.2",
 
     # labgrid[snmp]
-    "pysnmp>=4.4.12",
-    "pysnmp-mibs>=0.1.6",
+    "pysnmp-lextudio>=4.4.12",
 
     # labgrid[vxi11]
     "python-vxi11>=0.9",

From a5371195cae391e53ca4819a629bc1ab7585dec7 Mon Sep 17 00:00:00 2001
From: Nicolas Labriet
Date: Fri, 26 May 2023 08:27:37 +0200
Subject: [PATCH 002/384] ubootstrategy: add support for initial state

The initial state can be forced so that unnecessary actions are not
performed.
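As a usage sketch, assuming a target whose environment config binds a
UBootStrategy, a test that already knows the board is in the shell can
skip the usual transitions:

  strategy = target.get_driver("UBootStrategy")
  strategy.force("shell")  # activates the shell driver directly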
Signed-off-by: Nicolas Labriet
---
 labgrid/strategy/ubootstrategy.py | 13 +++++++++++++
 1 file changed, 13 insertions(+)

diff --git a/labgrid/strategy/ubootstrategy.py b/labgrid/strategy/ubootstrategy.py
index d985a1af6..7befb819a 100644
--- a/labgrid/strategy/ubootstrategy.py
+++ b/labgrid/strategy/ubootstrategy.py
@@ -57,3 +57,16 @@ def transition(self, status):
         else:
             raise StrategyError(f"no transition found from {self.status} to {status}")
         self.status = status
+
+    def force(self, status):
+        if not isinstance(status, Status):
+            status = Status[status]
+        if status == Status.off:
+            self.target.activate(self.power)
+        elif status == Status.uboot:
+            self.target.activate(self.uboot)
+        elif status == Status.shell:
+            self.target.activate(self.shell)
+        else:
+            raise StrategyError("can not force state {}".format(status))
+        self.status = status

From edf087d24ebb8a8e09a2db79a844fbed01cf5690 Mon Sep 17 00:00:00 2001
From: Mischa Zihler
Date: Wed, 31 May 2023 11:44:10 +0200
Subject: [PATCH 003/384] Add vendor and model id of STLink V2 to USBDebugger
 resource

Signed-off-by: Mischa Zihler
---
 labgrid/resource/udev.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py
index 3a8f3b745..3d865c388 100644
--- a/labgrid/resource/udev.py
+++ b/labgrid/resource/udev.py
@@ -687,6 +687,7 @@ def filter_match(self, device):
         match = (device.properties.get('ID_VENDOR_ID'),
                  device.properties.get('ID_MODEL_ID'))
         if match not in [("0403", "6010"),  # FT2232C/D/H Dual UART/FIFO IC
+                         ("0483", "374b"),  # STLINK-V3
                          ("0483", "374f"),  # STLINK-V3
                          ("15ba", "0003"),  # Olimex ARM-USB-OCD
                          ("15ba", "002b"),  # Olimex ARM-USB-OCD-H

From 029e60becdca51650574e6769ab4d1a9d36b2893 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 6 Jun 2023 15:48:57 +0200
Subject: [PATCH 004/384] udev/resource: fix mux device race condition leading
 to outdated control/disk paths

When a udev event is retrieved from the UdevManager's queue, it is
matched against each resource. If a match is found, the resource's
device is updated (on "add"/"change"/"move" events) or removed (on
"unbind"/"remove" events). Normally the `avail` attribute is set
afterwards, but not for the USBSDMuxDevice and USBSDWireDevice: those
devices prevent their USBResource super class from setting the `avail`
attribute. Instead, the devices are available depending on the
availability of their disk and control paths.

Then the device's `update()` method is called, which is a no-op for
those devices. On `poll()`, their disk and control paths are set to
None if their device is None. Otherwise, these paths are set if the
device is not already available.

In case of a USB reset (or fast replug) of a big USB tree, loads of
udev events need to be processed. Cases were observed where `poll()`
did not run between processing of a "remove" and an "add" event of the
same device. This leads to a race condition, following the description
above: The USBSDMuxDevice's or USBSDWireDevice's underlying device is
set to None on "remove" and then set to the new device again on "add".
The `avail` attribute is not updated as intended. After that, the
`poll()` method runs: the device is set and the `avail` attribute
evaluates to True because the disk and control paths are still set from
the time before the reset/replug. If this situation happens, these
paths stay invalid until the next reset/replug or restart of the
labgrid exporter.
These invalid paths might point to non-existent /dev/sg* and /dev/sd*
devices, rendering interaction with the Mux and its SD card impossible.
Or, even worse, the paths point to valid /dev/sg* and /dev/sd* devices
of another connected Mux, as observed here (note the different USB
devnums):

$ labgrid-client -vv resources rlab/usb-2-p3/NetworkUSBSDMuxDevice
Exporter 'rlab':
  Group 'usb-2-p3' (rlab/usb-2-p3/*):
    Resource 'USBSDMuxDevice' (rlab/usb-2-p3/NetworkUSBSDMuxDevice[/USBSDMuxDevice]):
      {'acquired': 'board1',
       'avail': True,
       'cls': 'NetworkUSBSDMuxDevice',
       'params': {'busnum': 2,
                  'control_path': '/dev/sg6',
                  'devnum': 42,
                  'extra': {'proxy': '[...]', 'proxy_required': False},
                  'host': 'rlab',
                  'model_id': 16449,
                  'path': '/dev/sdg',
                  'vendor_id': 1060}}

$ labgrid-client -vv r rlab/usb-1-p4/NetworkUSBSDMuxDevice
Exporter 'rlab':
  Group 'usb-1-p4' (rlabC-srv/c-usb-1-p4/*):
    Resource 'USBSDMuxDevice' (rlab/usb-1-p4/NetworkUSBSDMuxDevice[/USBSDMuxDevice]):
      {'acquired': 'board2',
       'avail': True,
       'cls': 'NetworkUSBSDMuxDevice',
       'params': {'busnum': 2,
                  'control_path': '/dev/sg6',
                  'devnum': 43,
                  'extra': {'proxy': '[...]', 'proxy_required': False},
                  'host': 'rlab',
                  'model_id': 16449,
                  'path': '/dev/sdg',
                  'vendor_id': 1060}}

Fix this race condition by implementing the disk and control path reset
in the USBSDMuxDevice's or USBSDWireDevice's update() method, in case
the underlying device is gone. The update() method is called during the
udev event processing, so it no longer matters when the poll() method
is called.

Fixes: 27ff02fc ("udev: correct availability for USBSDMuxDevice")
Fixes: 9324d324 ("Add sdwire driver")
Signed-off-by: Bastian Krause
---
 labgrid/resource/udev.py | 30 +++++++++++++++++-------------
 1 file changed, 17 insertions(+), 13 deletions(-)

diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py
index 3d865c388..42e4f303e 100644
--- a/labgrid/resource/udev.py
+++ b/labgrid/resource/udev.py
@@ -467,15 +467,17 @@ def avail(self, prop):
     # paths are available.
     def poll(self):
         super().poll()
+        if self.device is not None and not self.avail:
+            for child in self.device.parent.children:
+                if child.subsystem == 'block' and child.device_type == 'disk':
+                    self.disk_path = child.device_node
+            self.control_serial = self.device.properties.get('ID_SERIAL_SHORT')
+
+    def update(self):
+        super().update()
         if self.device is None:
             self.disk_path = None
             self.control_serial = None
-        else:
-            if not self.avail:
-                for child in self.device.parent.children:
-                    if child.subsystem == 'block' and child.device_type == 'disk':
-                        self.disk_path = child.device_node
-                self.control_serial = self.device.properties.get('ID_SERIAL_SHORT')
 
     @property
     def path(self):
@@ -510,16 +512,18 @@ def avail(self, prop):
     # paths are available.
def poll(self): super().poll() + if self.device is not None and not self.avail: + for child in self.device.children: + if child.subsystem == 'block' and child.device_type == 'disk': + self.disk_path = child.device_node + elif child.subsystem == 'scsi_generic': + self.control_path = child.device_node + + def update(self): + super().update() if self.device is None: self.control_path = None self.disk_path = None - else: - if not self.avail: - for child in self.device.children: - if child.subsystem == 'block' and child.device_type == 'disk': - self.disk_path = child.device_node - elif child.subsystem == 'scsi_generic': - self.control_path = child.device_node @property def path(self): From 5b221f61af5e71b3904623da1600352890b8321a Mon Sep 17 00:00:00 2001 From: Nicolas Labriet Date: Thu, 1 Jun 2023 16:51:28 +0200 Subject: [PATCH 005/384] sync-places: add named match support Signed-off-by: Nicolas Labriet --- contrib/sync-places.py | 35 +++++++++++++++++++++++++++-------- 1 file changed, 27 insertions(+), 8 deletions(-) diff --git a/contrib/sync-places.py b/contrib/sync-places.py index 2406bd4f9..3bad6b365 100755 --- a/contrib/sync-places.py +++ b/contrib/sync-places.py @@ -62,14 +62,20 @@ async def do_sync(session, args): changed = True for name in config["places"]: - matches = config["places"][name].get("matches", []) + matches = [] + for m in config["places"][name].get("matches", []): + if isinstance(m, dict): + match = list(m.keys())[0] + matches.append((match, m[match])) + else: + matches.append((m, None)) + seen_matches = set() remove_matches = set() place_tags = {} if name in seen_places: place = session.places[name] - for m in place.matches: - m = repr(m) + for m in [(repr(x), x.rename) for x in place.matches]: if m in matches: seen_matches.add(m) else: @@ -77,19 +83,28 @@ async def do_sync(session, args): place_tags = place.tags for m in remove_matches: - print(f"Deleting match '{m}' for place {name}") + match, rename = m + if rename: + print(f"Deleting named match '{match} -> {rename}' for place {name}") + else: + print(f"Deleting match '{match}' for place {name}") if not args.dry_run: await session.call( - "org.labgrid.coordinator.del_place_match", name, m + "org.labgrid.coordinator.del_place_match", name, match, rename ) changed = True for m in matches: if not m in seen_matches: - print(f"Adding match '{m}' for place {name}") + match, rename = m + if rename: + print(f"Adding named match '{match} -> {rename}' for place {name}") + else: + print(f"Adding match '{match}' for place {name}") + if not args.dry_run: await session.call( - "org.labgrid.coordinator.add_place_match", name, m + "org.labgrid.coordinator.add_place_match", name, match, rename ) changed = True @@ -120,7 +135,10 @@ async def do_dump(session, args): config = {"places": {}} for name, place in session.places.items(): config["places"][name] = { - "matches": [repr(m) for m in place.matches], + "matches": [ + {repr(m): m.rename} if m.rename else repr(m) + for m in place.matches + ], "tags": {k: v for k, v in place.tags.items()}, } @@ -139,6 +157,7 @@ async def do_dump(session, args): my-place1: # Replace with your place matches: # A list of match patterns. Replace with your match patterns - "*/my-place1/*" + - "exporter/my-place1/resource": name # named matches supported tags: # A dictionary of key/value tags. 
Replace with your tags
      board: awesomesauce
      bar: baz

From df4f6a9187ecfc6881d196f6fba764de8346e3c9 Mon Sep 17 00:00:00 2001
From: Joshua Watt
Date: Mon, 5 Jun 2023 17:03:35 -0500
Subject: [PATCH 006/384] qemudriver: Handle Virtio GL change in 6.1.0

Starting with version 6.1.0, the QEMU command line argument to enable
virtio with VirGL support was changed from "-vga virtio" to "-device
virtio-vga-gl". To correctly handle this, scrape the QEMU version
number from the "-version" command output and use it to pass the
correct arguments.

Signed-off-by: Joshua Watt
---
 labgrid/driver/qemudriver.py | 22 ++++++++++++++++++++--
 tests/test_qemudriver.py     | 12 +++++++++---
 2 files changed, 29 insertions(+), 5 deletions(-)

diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py
index 84cfef2d7..80ce1cb73 100644
--- a/labgrid/driver/qemudriver.py
+++ b/labgrid/driver/qemudriver.py
@@ -6,6 +6,7 @@
 import shutil
 import socket
 import subprocess
+import re
 import tempfile
 import time
 
@@ -108,6 +109,17 @@ def _atexit(self):
             self._child.kill()
             self._child.communicate(timeout=1)
 
+    def get_qemu_version(self, qemu_bin):
+        p = subprocess.run([qemu_bin, "-version"], stdout=subprocess.PIPE, encoding="utf-8")
+        if p.returncode != 0:
+            raise ExecutionError(f"Unable to get QEMU version. QEMU exited with: {p.returncode}")
+
+        m = re.search(r'(?P<major>\d+)\.(?P<minor>\d+)\.(?P<micro>\d+)', p.stdout.splitlines()[0])
+        if m is None:
+            raise ExecutionError(f"Unable to find QEMU version in: {p.stdout.splitlines()[0]}")
+
+        return (int(m.group('major')), int(m.group('minor')), int(m.group('micro')))
+
     def on_activate(self):
         self._tempdir = tempfile.mkdtemp(prefix="labgrid-qemu-tmp-")
         sockpath = f"{self._tempdir}/serialrw"
@@ -121,6 +133,8 @@ def on_activate(self):
                 "QEMU Binary Path not configured in tools configuration key")
         self._cmd = [qemu_bin]
 
+        self._qemu_version = self.get_qemu_version(qemu_bin)
+
         boot_args = []
 
         if self.kernel is not None:
@@ -190,8 +204,12 @@ def on_activate(self):
             self._cmd.append("-display")
             self._cmd.append("none")
         elif self.display == "egl-headless":
-            self._cmd.append("-vga")
-            self._cmd.append("virtio")
+            if self._qemu_version >= (6, 1, 0):
+                self._cmd.append("-device")
+                self._cmd.append("virtio-vga-gl")
+            else:
+                self._cmd.append("-vga")
+                self._cmd.append("virtio")
             self._cmd.append("-display")
             self._cmd.append("egl-headless")
         else:
diff --git a/tests/test_qemudriver.py b/tests/test_qemudriver.py
index d94032ca9..bfbe08edb 100644
--- a/tests/test_qemudriver.py
+++ b/tests/test_qemudriver.py
@@ -60,14 +60,20 @@ def qemu_mock(mocker):
     socket_mock = mocker.patch('socket.socket')
     socket_mock.return_value.accept.return_value = mocker.MagicMock(), ''
 
+@pytest.fixture
+def qemu_version_mock(mocker):
+    run_mock = mocker.patch('subprocess.run')
+    run_mock.return_value.returncode = 0
+    run_mock.return_value.stdout = "QEMU emulator version 4.2.1"
+
 def test_qemu_instance(qemu_target, qemu_driver):
     assert (isinstance(qemu_driver, QEMUDriver))
 
-def test_qemu_activate_deactivate(qemu_target, qemu_driver):
+def test_qemu_activate_deactivate(qemu_target, qemu_driver, qemu_version_mock):
     qemu_target.activate(qemu_driver)
     qemu_target.deactivate(qemu_driver)
 
-def test_qemu_on_off(qemu_target, qemu_driver, qemu_mock):
+def test_qemu_on_off(qemu_target, qemu_driver, qemu_mock, qemu_version_mock):
     qemu_target.activate(qemu_driver)
 
     qemu_driver.on()
@@ -75,7 +81,7 @@ def test_qemu_on_off(qemu_target, qemu_driver, qemu_mock):
 
     qemu_target.deactivate(qemu_driver)
 
-def test_qemu_read_write(qemu_target, qemu_driver, qemu_mock):
+def
test_qemu_read_write(qemu_target, qemu_driver, qemu_mock, qemu_version_mock): qemu_target.activate(qemu_driver) qemu_driver.on() From df781cf0eba50f6b30e662b2cc0dd8981727d6db Mon Sep 17 00:00:00 2001 From: Jan Vermaete Date: Fri, 9 Jun 2023 15:59:51 +0200 Subject: [PATCH 007/384] doc: do not open if already closed -> open Signed-off-by: Jan Vermaete --- labgrid/driver/serialdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/serialdriver.py b/labgrid/driver/serialdriver.py index ea770aebc..1c172c8f3 100644 --- a/labgrid/driver/serialdriver.py +++ b/labgrid/driver/serialdriver.py @@ -99,7 +99,7 @@ def _write(self, data: bytes): return self.serial.write(data) def open(self): - """Opens the serialport, does nothing if it is already closed""" + """Opens the serialport, does nothing if it is already open""" if not self.status: try: self.serial.open() From 5430d86c6f0f3a5b2a92cca65266a6fb27fa4eca Mon Sep 17 00:00:00 2001 From: Jan Vermaete Date: Fri, 9 Jun 2023 16:00:46 +0200 Subject: [PATCH 008/384] doc: do not open if already closed -> open Signed-off-by: Jan Vermaete --- labgrid/driver/externalconsoledriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/externalconsoledriver.py b/labgrid/driver/externalconsoledriver.py index a46241f28..6ba18b669 100644 --- a/labgrid/driver/externalconsoledriver.py +++ b/labgrid/driver/externalconsoledriver.py @@ -30,7 +30,7 @@ def __attrs_post_init__(self): self._child = None def open(self): - """Starts the subprocess, does nothing if it is already closed""" + """Starts the subprocess, does nothing if it is already open""" if self.status: return cmd = shlex.split(self.cmd) From cd1b0425ea2fed4fc2734cb7600e526caf3f5894 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Fri, 9 Jun 2023 09:25:33 -0500 Subject: [PATCH 009/384] remote: Expose tags in RemotePlace Exposes the place tags in the RemotePlace. 
This allows persistent data to be stored in the tags and retrieved by
tests, for example a unique MAC address that needs to be programmed to
a device after flashing.

Signed-off-by: Joshua Watt
---
 labgrid/resource/remote.py | 3 +++
 tests/test_crossbar.py     | 6 +++++-
 2 files changed, 8 insertions(+), 1 deletion(-)

diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py
index 211f0763c..b83d6c3cc 100644
--- a/labgrid/resource/remote.py
+++ b/labgrid/resource/remote.py
@@ -1,3 +1,4 @@
+import copy
 import logging
 import os
 import attr
@@ -62,6 +63,7 @@ def on_resource_added(self, resource):
             expanded.append(new)
         self.logger.debug("expanded remote resources for place %s: %s", remote_place.name, expanded)
         remote_place.avail = True
+        remote_place.tags = copy.deepcopy(place.tags)
 
     def poll(self):
         import asyncio
@@ -97,6 +99,7 @@ class RemotePlace(ManagedResource):
 
     def __attrs_post_init__(self):
         self.timeout = 10.0
+        self.tags = {}
         super().__attrs_post_init__()
 
 @attr.s(eq=False)
diff --git a/tests/test_crossbar.py b/tests/test_crossbar.py
index 1ea657d9b..0f1a477bd 100644
--- a/tests/test_crossbar.py
+++ b/tests/test_crossbar.py
@@ -286,12 +286,16 @@ def test_remoteplace_target(place_acquire, tmpdir):
     t = e.get_target("test1")
     t.await_resources(t.resources)
 
+    remote_place = t.get_resource("RemotePlace")
+    assert remote_place.tags == {"board": "bar"}
+
 def test_remoteplace_target_without_env(request, place_acquire):
     from labgrid import Target
     from labgrid.resource import RemotePlace
 
     t = Target(request.node.name)
-    RemotePlace(t, name="test")
+    remote_place = RemotePlace(t, name="test")
+    assert remote_place.tags == {"board": "bar"}
 
 def test_resource_conflict(place_acquire, tmpdir):
     with pexpect.spawn('python -m labgrid.remote.client -p test2 create') as spawn:

From c0016b0035e0fd3e2ce1db5b35a92152e741e934 Mon Sep 17 00:00:00 2001
From: "Rahbek, Nikolaj (EXT) (SGRE OF TE TD LC SLC CMI)"
Date: Tue, 13 Jun 2023 10:16:02 +0200
Subject: [PATCH 010/384] fix sftp option for scp

The option to use sftp was added to the shared ssh_prefix list again on
every call of SSHDriver::put() and SSHDriver::get(), causing them to
work only once. Also adds the sftp option to SSHDriver::scp().

Signed-off-by: Nikolaj Rahbek
---
 CHANGES.rst                 |  2 ++
 labgrid/driver/sshdriver.py | 20 ++++++++++++--------
 2 files changed, 14 insertions(+), 8 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index c4fe8bc1e..a10bcddab 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -23,6 +23,8 @@ Bug fixes in 23.1
   ERROR, INFO and similar log notifiers.
 - Fix named SSH lookups in conjunction with an environment file in
   labgrid-client.
+- Fix sftp option issue in SSH driver that caused sftp to only work once per
+  test run.
Breaking changes in 23.1 ~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index a54fe2042..3ad6fafa5 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -328,6 +328,10 @@ def scp(self, *, src, dst): "-o", f"ControlPath={self.control.replace('%', '%%')}", src, dst, ] + + if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): + complete_cmd.insert(1, "-s") + self.logger.info("Running command: %s", complete_cmd) sub = subprocess.Popen( complete_cmd, @@ -415,18 +419,18 @@ def _scp_supports_explicit_sftp_mode(self): @Driver.check_active @step(args=['filename', 'remotepath']) def put(self, filename, remotepath=''): - ssh_prefix = self.ssh_prefix - if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): - ssh_prefix.append("-s") transfer_cmd = [ "scp", - *ssh_prefix, + *self.ssh_prefix, "-P", str(self.networkservice.port), "-r", filename, f"{self.networkservice.username}@{self.networkservice.address}:{remotepath}" ] + if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): + transfer_cmd.insert(1, "-s") + try: sub = subprocess.call( transfer_cmd @@ -443,18 +447,18 @@ def put(self, filename, remotepath=''): @Driver.check_active @step(args=['filename', 'destination']) def get(self, filename, destination="."): - ssh_prefix = self.ssh_prefix - if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): - ssh_prefix.append("-s") transfer_cmd = [ "scp", - *ssh_prefix, + *self.ssh_prefix, "-P", str(self.networkservice.port), "-r", f"{self.networkservice.username}@{self.networkservice.address}:{filename}", destination ] + if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): + transfer_cmd.insert(1, "-s") + try: sub = subprocess.call( transfer_cmd From e98050da6ef763ec665734e0a00ff2e98724a671 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Thu, 15 Jun 2023 07:54:09 +0200 Subject: [PATCH 011/384] pytestplugin/hooks: factor out log configuration No functional changes, a following commit will ensure that logging is only configured when the logging plugin is available. 
Signed-off-by: Rouven Czerwinski --- labgrid/pytestplugin/hooks.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py index f51a150c8..10752ea00 100644 --- a/labgrid/pytestplugin/hooks.py +++ b/labgrid/pytestplugin/hooks.py @@ -36,28 +36,31 @@ def set_cli_log_level(level): set_cli_log_level(logging.INFO) -@pytest.hookimpl(trylast=True) -def pytest_configure(config): - StepLogger.start() - config.add_cleanup(StepLogger.stop) - - logging_plugin = config.pluginmanager.getplugin('logging-plugin') - logging_plugin.log_cli_handler.formatter.add_color_level(logging.CONSOLE, "blue") - logging_plugin.log_cli_handler.setFormatter(StepFormatter( +def configure_pytest_logging(config, plugin): + plugin.log_cli_handler.formatter.add_color_level(logging.CONSOLE, "blue") + plugin.log_cli_handler.setFormatter(StepFormatter( color=config.option.lg_colored_steps, - parent=logging_plugin.log_cli_handler.formatter, + parent=plugin.log_cli_handler.formatter, )) - logging_plugin.log_file_handler.setFormatter(StepFormatter( - parent=logging_plugin.log_file_handler.formatter, + plugin.log_file_handler.setFormatter(StepFormatter( + parent=plugin.log_file_handler.formatter, )) # Might be the same formatter instance, so get a reference for both before # changing either - report_formatter = logging_plugin.report_handler.formatter - caplog_formatter = logging_plugin.caplog_handler.formatter + report_formatter = plugin.report_handler.formatter + caplog_formatter = plugin.caplog_handler.formatter + + plugin.report_handler.setFormatter(StepFormatter(parent=report_formatter)) + plugin.report_handler.setFormatter(StepFormatter(parent=caplog_formatter)) - logging_plugin.report_handler.setFormatter(StepFormatter(parent=report_formatter)) - logging_plugin.report_handler.setFormatter(StepFormatter(parent=caplog_formatter)) +@pytest.hookimpl(trylast=True) +def pytest_configure(config): + StepLogger.start() + config.add_cleanup(StepLogger.stop) + + logging_plugin = config.pluginmanager.getplugin('logging-plugin') + configure_pytest_logging(config, logging_plugin) config.addinivalue_line("markers", "lg_feature: marker for labgrid feature flags") From 1946d0b1b941bc7121c1186a286809570131defc Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Thu, 15 Jun 2023 07:55:08 +0200 Subject: [PATCH 012/384] pytestplugin/hooks: check logging plugin Check if the logging plugin is available and only configure logging if it is. Fixes an internal pytest error during loading when the logging plugin is explicitly disabled via "-p no:logging". 
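As an example (the environment file and test path are placeholders),
an invocation like

  pytest -p no:logging --lg-env shell-example.yaml tests/

used to crash during plugin loading and now runs normally.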
Signed-off-by: Rouven Czerwinski
---
 labgrid/pytestplugin/hooks.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py
index 10752ea00..9130b4753 100644
--- a/labgrid/pytestplugin/hooks.py
+++ b/labgrid/pytestplugin/hooks.py
@@ -60,7 +60,8 @@ def pytest_configure(config):
     config.add_cleanup(StepLogger.stop)
 
     logging_plugin = config.pluginmanager.getplugin('logging-plugin')
-    configure_pytest_logging(config, logging_plugin)
+    if logging_plugin:
+        configure_pytest_logging(config, logging_plugin)
 
     config.addinivalue_line("markers",
                             "lg_feature: marker for labgrid feature flags")

From b7f2cc72b078602fe711ccddd0b1805597b723ae Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Thu, 15 Jun 2023 07:57:01 +0200
Subject: [PATCH 013/384] pytestplugin/hooks: catch getoption ValueError

In case the logging plugin is not loaded, the option "log_cli_level" is
never registered and getoption fails. Catch the exception and return
from the function since there is nothing for us to configure in this
case.

Fixes the following traceback:

Traceback (most recent call last):
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 1542, in getoption
    val = getattr(self.option, name)
AttributeError: 'Namespace' object has no attribute 'log_cli_level'

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/home/phoenix/work/ptx/labgrid/venv/bin/pytest", line 8, in <module>
    sys.exit(console_main())
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 190, in console_main
    code = main()
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 167, in main
    ret: Union[ExitCode, int] = config.hook.pytest_cmdline_main(
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/pluggy/_hooks.py", line 265, in __call__
    return self._hookexec(self.name, self.get_hookimpls(), kwargs, firstresult)
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/pluggy/_manager.py", line 80, in _hookexec
    return self._inner_hookexec(hook_name, methods, kwargs, firstresult)
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/pluggy/_callers.py", line 60, in _multicall
    return outcome.get_result()
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/pluggy/_result.py", line 60, in get_result
    raise ex[1].with_traceback(ex[2])
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/pluggy/_callers.py", line 39, in _multicall
    res = hook_impl.function(*args)
  File "/home/phoenix/work/ptx/labgrid/labgrid/pytestplugin/hooks.py", line 32, in pytest_cmdline_main
    set_cli_log_level(logging.DEBUG)
  File "/home/phoenix/work/ptx/labgrid/labgrid/pytestplugin/hooks.py", line 16, in set_cli_log_level
    current_level = config.getoption("log_cli_level") or config.getini("log_cli_level")
  File "/home/phoenix/work/ptx/labgrid/venv/lib/python3.10/site-packages/_pytest/config/__init__.py", line 1553, in getoption
    raise ValueError(f"no option named {name!r}") from e
ValueError: no option named 'log_cli_level'

Signed-off-by: Rouven Czerwinski
---
 labgrid/pytestplugin/hooks.py | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py
index 9130b4753..9b8b6708a 100644
--- a/labgrid/pytestplugin/hooks.py
+++ b/labgrid/pytestplugin/hooks.py
@@ -13,7 +13,10
@@ def pytest_cmdline_main(config): def set_cli_log_level(level): nonlocal config - current_level = config.getoption("log_cli_level") or config.getini("log_cli_level") + try: + current_level = config.getoption("log_cli_level") or config.getini("log_cli_level") + except ValueError: + return print(f"current_level: {current_level}") if isinstance(current_level, str): From 534f846ba03d74fe3edeeea65d7ff6b169338d3c Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 16 Jun 2023 08:57:44 +0200 Subject: [PATCH 014/384] tests/test_fixtures: add no logging test Add a test which uses the pytestplugin with -p no:logging set. Signed-off-by: Rouven Czerwinski --- tests/test_fixtures.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py index 25a9e04a9..a1cd7600c 100644 --- a/tests/test_fixtures.py +++ b/tests/test_fixtures.py @@ -41,6 +41,11 @@ def test_env_fixture(short_env, short_test): spawn.close() assert spawn.exitstatus == 0 +def test_env_fixture_no_logging(short_env, short_test): + with pexpect.spawn(f'pytest -p no:logging --lg-env {short_env} {short_test}') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before def test_env_old_fixture(short_env, short_test): with pexpect.spawn(f'pytest --env-config {short_env} {short_test}') as spawn: From b68d4eacfe6dd5869161ae07ad0e0611d1d85adf Mon Sep 17 00:00:00 2001 From: Andreas Martinsson Date: Mon, 12 Jun 2023 16:36:58 +0200 Subject: [PATCH 015/384] resource/udev: add SEGGER J-Link vendor and model IDs to USBDebugger resource Signed-off-by: Andreas Martinsson Signed-off-by: Jan Luebbe --- labgrid/resource/udev.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index 42e4f303e..eaaed1a58 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -697,6 +697,8 @@ def filter_match(self, device): ("15ba", "002b"), # Olimex ARM-USB-OCD-H ("15ba", "0004"), # Olimex ARM-USB-TINY ("15ba", "002a"), # Olimex ARM-USB-TINY-H + ("1366", "0101"), # SEGGER J-Link PLUS + ("1366", "1015"), # SEGGER J-Link ]: return False From a6ac483c8020cc3c87f40b95140d2e916a9e31ac Mon Sep 17 00:00:00 2001 From: James Puderer Date: Tue, 6 Jun 2023 17:46:59 -0600 Subject: [PATCH 016/384] util/managedfile: fix stat command for MacOS Signed-off-by: James Puderer Signed-off-by: Jan Luebbe --- labgrid/util/managedfile.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/labgrid/util/managedfile.py b/labgrid/util/managedfile.py index 9bf095de8..563b8c706 100644 --- a/labgrid/util/managedfile.py +++ b/labgrid/util/managedfile.py @@ -2,6 +2,7 @@ import logging import os import subprocess +import platform import attr @@ -88,8 +89,15 @@ def _on_nfs(self, conn): self._on_nfs_cached = False fmt = "inode=%i,size=%s,modified=%Y" - local = subprocess.run(["stat", "--format", fmt, self.local_path], - stdout=subprocess.PIPE) + # The stat command is very different on MacOs + if platform.system() == 'Darwin': + darwin_fmt = "inode=%i,size=%z,modified=%m" + local = subprocess.run(["stat", "-f", darwin_fmt, self.local_path], + stdout=subprocess.PIPE) + else: + local = subprocess.run(["stat", "--format", fmt, self.local_path], + stdout=subprocess.PIPE) + if local.returncode != 0: self.logger.debug("local: stat: unsuccessful error code %d", local.returncode) return False From 1bf663eb96f791b8983d1ea587f8ed11fc7078d6 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 16 Jun 2023 14:17:29 +0200 Subject: 
[PATCH 017/384] util/managedfile: import platform module only when
 needed

This avoids the overhead on startup.

Signed-off-by: Jan Luebbe
---
 labgrid/util/managedfile.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/labgrid/util/managedfile.py b/labgrid/util/managedfile.py
index 563b8c706..21ecf7655 100644
--- a/labgrid/util/managedfile.py
+++ b/labgrid/util/managedfile.py
@@ -2,7 +2,7 @@
 import logging
 import os
 import subprocess
-import platform
+from importlib import import_module
 
 import attr
 
@@ -90,6 +90,7 @@ def _on_nfs(self, conn):
 
         fmt = "inode=%i,size=%s,modified=%Y"
         # The stat command is very different on MacOs
+        platform = import_module('platform')
         if platform.system() == 'Darwin':
             darwin_fmt = "inode=%i,size=%z,modified=%m"
             local = subprocess.run(["stat", "-f", darwin_fmt, self.local_path],

From ac560620ba4c4d4ab276a551ae623c2707af1b5a Mon Sep 17 00:00:00 2001
From: Ederson de Souza
Date: Fri, 21 Apr 2023 13:50:22 -0700
Subject: [PATCH 018/384] remote/client: ensure driver is created only for
 specified resource

When using the `power` command, one can use `-n` to choose to which
resource on a target the command will apply. However, when looping over
resources to find a compatible driver, it ignores the named resource.
This will cause an exception if another resource is chosen due to
another driver being selected in the loop.

This patch fixes it by ignoring resources whose names do not match the
resource given on the command line.

Signed-off-by: Ederson de Souza
---
 labgrid/remote/client.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py
index 61f6eade6..f7a48377e 100755
--- a/labgrid/remote/client.py
+++ b/labgrid/remote/client.py
@@ -732,6 +732,8 @@ def power(self):
             drv = target.get_driver("PowerProtocol", name=name)
         except NoDriverFoundError:
             for resource in target.resources:
+                if name and resource.name != name:
+                    continue
                 if isinstance(resource, NetworkPowerPort):
                     drv = self._get_driver_or_new(target, "NetworkPowerDriver", name=name)
                 elif isinstance(resource, NetworkUSBPowerPort):

From 6425df99a2de31563d96fa33f2ba65a2d669c84d Mon Sep 17 00:00:00 2001
From: Chen Peng1
Date: Fri, 15 Jul 2022 20:40:08 +0800
Subject: [PATCH 019/384] driver/powerdriver: ykushpower: support remote
 control via ssh

A new NetworkYKUSHPowerPort is created to represent a remote ykush hub.
In order to control it via ssh, YKUSHPowerDriver now uses the
`ykushcmd` command instead of the pykush python module - it must be
installed on the host where the ykush is connected. One can get it from
https://github.com/Yepkit/ykush

A YKUSHPowerPortExport is also created to allow automatically exporting
the remote resource from a local one.

Signed-off-by: Chen Peng1
Signed-off-by: Ederson de Souza
---
 doc/configuration.rst              |  6 +++-
 labgrid/driver/powerdriver.py      | 48 +++++++++++++++++++++++-------
 labgrid/remote/exporter.py         | 20 +++++++++++++
 labgrid/resource/__init__.py       |  2 +-
 labgrid/resource/ykushpowerport.py | 14 ++++++++-
 5 files changed, 77 insertions(+), 13 deletions(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index f859dbec8..1a1ecba3e 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -270,7 +270,7 @@ A YKUSHPowerPort describes a YEPKIT YKUSH USB (HID) switchable USB hub.
 
 The example describes port 1 on the YKUSH USB hub with the serial
 "YK12345".
-(use "pykush -l" to get your serial...)
+(use "ykushcmd -l" to get your serial...)
Arguments: - serial (str): serial number of the YKUSH hub @@ -279,6 +279,10 @@ Arguments: Used by: - `YKUSHPowerDriver`_ +NetworkYKUSHPowerPort ++++++++++++++++++++++ +A NetworkYKUSHPowerPort describes a `YKUSHPowerPort`_ available on a remote computer. + USBPowerPort ++++++++++++ A USBPowerPort describes a generic switchable USB hub as supported by diff --git a/labgrid/driver/powerdriver.py b/labgrid/driver/powerdriver.py index 1e0befc85..4227c1de2 100644 --- a/labgrid/driver/powerdriver.py +++ b/labgrid/driver/powerdriver.py @@ -271,27 +271,37 @@ def get(self): @attr.s(eq=False) class YKUSHPowerDriver(Driver, PowerResetMixin, PowerProtocol): """YKUSHPowerDriver - Driver using a YEPKIT YKUSH switchable USB hub - to control a target's power - https://www.yepkit.com/products/ykush""" - bindings = {"port": "YKUSHPowerPort", } + to control a target's power - https://www.yepkit.com/products/ykush. + Uses ykushcmd tool to control the hub.""" + bindings = {"port": {"YKUSHPowerPort", "NetworkYKUSHPowerPort"} } delay = attr.ib(default=2.0, validator=attr.validators.instance_of(float)) - def __attrs_post_init__(self): super().__attrs_post_init__() - # uses the YKUSH pykush interface from here: - # https://github.com/Yepkit/pykush - self.pykush_mod = import_module('pykush.pykush') - self.pykush = self.pykush_mod.YKUSH(serial=self.port.serial) + if self.target.env: + self.tool = self.target.env.config.get_tool('ykushcmd') or 'ykushcmd' + else: + self.tool = 'ykushcmd' @Driver.check_active @step() def on(self): - self.pykush.set_port_state(self.port.index, self.pykush_mod.YKUSH_PORT_STATE_UP) + cmd = [ + self.tool, + "-s", f"{self.port.serial}", + "-u", f"{self.port.index}" + ] + processwrapper.check_output(self.port.command_prefix + cmd) @Driver.check_active @step() def off(self): - self.pykush.set_port_state(self.port.index, self.pykush_mod.YKUSH_PORT_STATE_DOWN) + cmd = [ + self.tool, + "-s", f"{self.port.serial}", + "-d", f"{self.port.index}" + ] + processwrapper.check_output(self.port.command_prefix + cmd) @Driver.check_active @step() @@ -302,7 +312,25 @@ def cycle(self): @Driver.check_active def get(self): - return self.pykush.get_port_state(self.port.index) + cmd = [ + self.tool, + "-s", f"{self.port.serial}", + "-g", f"{self.port.index}" + ] + res = processwrapper.check_output(self.port.command_prefix + cmd) + res = res.decode("utf-8") + + # the example of ykushcmd -g like below: + # cmd: ykushcmd -g 1 + # output: Downstream port 1 is ON/OFF + check_str = "Downstream port {port} is {status}" + if check_str.format(port=self.port.index, status="ON") in res: + return True + if check_str.format(port=self.port.index, status="OFF") in res: + return False + + raise ExecutionError(f"Could not find port string in ykushcmd output: {res}") + @target_factory.reg_driver @attr.s(eq=False) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index f00320686..a30b1afe1 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -695,6 +695,26 @@ def _get_params(self): exports["AndroidNetFastboot"] = AndroidNetFastbootExport +@attr.s(eq=False) +class YKUSHPowerPortExport(ResourceExport): + """ResourceExport for YKUSHPowerPort devices""" + + def __attrs_post_init__(self): + super().__attrs_post_init__() + local_cls_name = self.cls + self.data['cls'] = f"Network{local_cls_name}" + from ..resource import ykushpowerport + local_cls = getattr(ykushpowerport, local_cls_name) + self.local = local_cls(target=None, name=None, **self.local_params) + + def _get_params(self): + return { + 
"host": self.host, + **self.local_params + } + +exports["YKUSHPowerPort"] = YKUSHPowerPortExport + class ExporterSession(ApplicationSession): def onConnect(self): """Set up internal datastructures on successful connection: diff --git a/labgrid/resource/__init__.py b/labgrid/resource/__init__.py index 93516345c..1820cad5d 100644 --- a/labgrid/resource/__init__.py +++ b/labgrid/resource/__init__.py @@ -13,7 +13,7 @@ from .udev import USBPowerPort from .udev import SiSPMPowerPort from .common import Resource, ResourceManager, ManagedResource -from .ykushpowerport import YKUSHPowerPort +from .ykushpowerport import YKUSHPowerPort, NetworkYKUSHPowerPort from .xenamanager import XenaManager from .flashrom import Flashrom, NetworkFlashrom from .docker import DockerManager, DockerDaemon, DockerConstants diff --git a/labgrid/resource/ykushpowerport.py b/labgrid/resource/ykushpowerport.py index 81cc30245..b7a967816 100644 --- a/labgrid/resource/ykushpowerport.py +++ b/labgrid/resource/ykushpowerport.py @@ -1,7 +1,7 @@ import attr from ..factory import target_factory -from .common import Resource +from .common import NetworkResource, Resource @target_factory.reg_resource @@ -15,3 +15,15 @@ class YKUSHPowerPort(Resource): serial = attr.ib(validator=attr.validators.instance_of(str)) index = attr.ib(validator=attr.validators.instance_of(int), converter=int) + +@target_factory.reg_resource +@attr.s(eq=False) +class NetworkYKUSHPowerPort(NetworkResource): + """"This resource describes a remote YEPKIT YKUSH switchable USB hub. + + Args: + serial (str): serial of the YKUSH device + index (int): port index""" + serial = attr.ib(validator=attr.validators.instance_of(str)) + index = attr.ib(validator=attr.validators.instance_of(int), + converter=int) From d6920fdb2acd51526e90ed94d8b6ca0f0136664d Mon Sep 17 00:00:00 2001 From: Ederson de Souza Date: Fri, 21 Apr 2023 14:26:11 -0700 Subject: [PATCH 020/384] remote/client: support YKUSHPowerPort resources on `power` command So that targets which expose YKUSHPowerPort instances can be controlled via the command line client. 
Signed-off-by: Ederson de Souza --- labgrid/remote/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index f7a48377e..4e3bd2d6f 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -725,7 +725,7 @@ def power(self): target = self._get_target(place) from ..resource.power import NetworkPowerPort, PDUDaemonPort from ..resource.remote import NetworkUSBPowerPort, NetworkSiSPMPowerPort - from ..resource import TasmotaPowerPort + from ..resource import TasmotaPowerPort, NetworkYKUSHPowerPort drv = None try: @@ -744,6 +744,8 @@ def power(self): drv = self._get_driver_or_new(target, "PDUDaemonDriver", name=name) elif isinstance(resource, TasmotaPowerPort): drv = self._get_driver_or_new(target, "TasmotaPowerDriver", name=name) + elif isinstance(resource, NetworkYKUSHPowerPort): + drv = self._get_driver_or_new(target, "YKUSHPowerDriver", name=name) if drv: break From 71b8505b3384a96cc9ecd34a9b3cbbe0fb73d054 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Fri, 16 Jun 2023 20:16:55 +0200 Subject: [PATCH 021/384] qemudriver: refactor on_activate to use local variables This introduces no functional change, but makes it easier to split the function into two in a follow-up commit: - one function to just compute the base command line - one function to actually store the command line into the object along with QMP-specific options Signed-off-by: Ahmad Fatoum --- labgrid/driver/qemudriver.py | 98 ++++++++++++++++++------------------ 1 file changed, 50 insertions(+), 48 deletions(-) diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py index 80ce1cb73..3bf46aa0c 100644 --- a/labgrid/driver/qemudriver.py +++ b/labgrid/driver/qemudriver.py @@ -131,15 +131,15 @@ def on_activate(self): if qemu_bin is None: raise KeyError( "QEMU Binary Path not configured in tools configuration key") - self._cmd = [qemu_bin] + cmd = [qemu_bin] - self._qemu_version = self.get_qemu_version(qemu_bin) + qemu_version = self.get_qemu_version(qemu_bin) boot_args = [] if self.kernel is not None: - self._cmd.append("-kernel") - self._cmd.append( + cmd.append("-kernel") + cmd.append( self.target.env.config.get_image_path(self.kernel)) if self.disk is not None: disk_path = self.target.env.config.get_image_path(self.disk) @@ -147,18 +147,18 @@ def on_activate(self): if disk_path.endswith(".qcow2"): disk_format = "qcow2" if self.machine == "vexpress-a9": - self._cmd.append("-drive") - self._cmd.append( + cmd.append("-drive") + cmd.append( f"if=sd,format={disk_format},file={disk_path},id=mmc0") boot_args.append("root=/dev/mmcblk0p1 rootfstype=ext4 rootwait") elif self.machine == "q35": - self._cmd.append("-drive") - self._cmd.append( + cmd.append("-drive") + cmd.append( f"if=virtio,format={disk_format},file={disk_path}") boot_args.append("root=/dev/vda rootwait") elif self.machine == "pc": - self._cmd.append("-drive") - self._cmd.append( + cmd.append("-drive") + cmd.append( f"if=virtio,format={disk_format},file={disk_path}") boot_args.append("root=/dev/vda rootwait") else: @@ -166,69 +166,71 @@ def on_activate(self): f"QEMU disk image support not implemented for machine '{self.machine}'" ) if self.rootfs is not None: - self._cmd.append("-fsdev") - self._cmd.append( + cmd.append("-fsdev") + cmd.append( f"local,id=rootfs,security_model=none,path={self.target.env.config.get_path(self.rootfs)}") # pylint: disable=line-too-long - self._cmd.append("-device") - self._cmd.append( + cmd.append("-device") + cmd.append( 
"virtio-9p-device,fsdev=rootfs,mount_tag=/dev/root") boot_args.append("root=/dev/root rootfstype=9p rootflags=trans=virtio") if self.dtb is not None: - self._cmd.append("-dtb") - self._cmd.append(self.target.env.config.get_image_path(self.dtb)) + cmd.append("-dtb") + cmd.append(self.target.env.config.get_image_path(self.dtb)) if self.flash is not None: - self._cmd.append("-drive") - self._cmd.append( + cmd.append("-drive") + cmd.append( f"if=pflash,format=raw,file={self.target.env.config.get_image_path(self.flash)},id=nor0") # pylint: disable=line-too-long if self.bios is not None: - self._cmd.append("-bios") - self._cmd.append( + cmd.append("-bios") + cmd.append( self.target.env.config.get_image_path(self.bios)) if "-append" in shlex.split(self.extra_args): raise ExecutionError("-append in extra_args not allowed, use boot_args instead") - self._cmd.extend(shlex.split(self.extra_args)) - self._cmd.append("-S") - self._cmd.append("-qmp") - self._cmd.append("stdio") - self._cmd.append("-machine") - self._cmd.append(self.machine) - self._cmd.append("-cpu") - self._cmd.append(self.cpu) - self._cmd.append("-m") - self._cmd.append(self.memory) + cmd.extend(shlex.split(self.extra_args)) + cmd.append("-S") + cmd.append("-qmp") + cmd.append("stdio") + cmd.append("-machine") + cmd.append(self.machine) + cmd.append("-cpu") + cmd.append(self.cpu) + cmd.append("-m") + cmd.append(self.memory) if self.display == "none": - self._cmd.append("-nographic") + cmd.append("-nographic") elif self.display == "fb-headless": - self._cmd.append("-display") - self._cmd.append("none") + cmd.append("-display") + cmd.append("none") elif self.display == "egl-headless": - if self._qemu_version >= (6, 1, 0): - self._cmd.append("-device") - self._cmd.append("virtio-vga-gl") + if qemu_version >= (6, 1, 0): + cmd.append("-device") + cmd.append("virtio-vga-gl") else: - self._cmd.append("-vga") - self._cmd.append("virtio") - self._cmd.append("-display") - self._cmd.append("egl-headless") + cmd.append("-vga") + cmd.append("virtio") + cmd.append("-display") + cmd.append("egl-headless") else: raise ExecutionError(f"Unknown display '{self.display}'") - self._cmd.append("-chardev") - self._cmd.append(f"socket,id=serialsocket,path={sockpath}") - self._cmd.append("-serial") - self._cmd.append("chardev:serialsocket") + cmd.append("-chardev") + cmd.append(f"socket,id=serialsocket,path={sockpath}") + cmd.append("-serial") + cmd.append("chardev:serialsocket") if self.nic: - self._cmd.append("-nic") - self._cmd.append(self.nic) + cmd.append("-nic") + cmd.append(self.nic) if self.boot_args is not None: boot_args.append(self.boot_args) if self.kernel is not None and boot_args: - self._cmd.append("-append") - self._cmd.append(" ".join(boot_args)) + cmd.append("-append") + cmd.append(" ".join(boot_args)) + + self._cmd = cmd def on_deactivate(self): if self.status: From 758cfaf30e9a1d08576300653ecec66a369bed6c Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Fri, 16 Jun 2023 20:18:12 +0200 Subject: [PATCH 022/384] qemudriver: collect QMP specific options at the end Next commit will factor command line computation out of on_activate to make it available for use with an interactively started Qemu. Prepare for this by collecting everything that is not applicable to non-QMP usage at the end of the function. No functional change intended. 
Signed-off-by: Ahmad Fatoum
---
 labgrid/driver/qemudriver.py | 17 +++++++++--------
 1 file changed, 9 insertions(+), 8 deletions(-)

diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py
index 3bf46aa0c..3677f3fd7 100644
--- a/labgrid/driver/qemudriver.py
+++ b/labgrid/driver/qemudriver.py
@@ -189,9 +189,6 @@ def on_activate(self):
             raise ExecutionError("-append in extra_args not allowed, use boot_args instead")
         cmd.extend(shlex.split(self.extra_args))
 
-        cmd.append("-S")
-        cmd.append("-qmp")
-        cmd.append("stdio")
         cmd.append("-machine")
         cmd.append(self.machine)
         cmd.append("-cpu")
@@ -215,11 +212,6 @@ def on_activate(self):
         else:
             raise ExecutionError(f"Unknown display '{self.display}'")
 
-        cmd.append("-chardev")
-        cmd.append(f"socket,id=serialsocket,path={sockpath}")
-        cmd.append("-serial")
-        cmd.append("chardev:serialsocket")
-
         if self.nic:
             cmd.append("-nic")
             cmd.append(self.nic)
@@ -232,6 +224,15 @@ def on_activate(self):
 
         self._cmd = cmd
 
+        self._cmd.append("-S")
+        self._cmd.append("-qmp")
+        self._cmd.append("stdio")
+
+        self._cmd.append("-chardev")
+        self._cmd.append(f"socket,id=serialsocket,path={sockpath}")
+        self._cmd.append("-serial")
+        self._cmd.append("chardev:serialsocket")
+
     def on_deactivate(self):
         if self.status:
             self.off()

From 318e7788ceb7c40b336450630fe0faabbe95e82f Mon Sep 17 00:00:00 2001
From: Ahmad Fatoum
Date: Fri, 16 Jun 2023 20:54:14 +0200
Subject: [PATCH 023/384] qemudriver: export get_qemu_base_args method

For debugging, it can be useful to start an interactive session for the
user to access the DUT console. This is possible with real targets
through labgrid-client -s STATE console, but no equivalent exists yet
for targets using QEMUDriver.

Resolving that may be a bigger undertaking, so for now, let's provide a
method, get_qemu_base_args(), that returns the list of arguments sans
QMP parts. Users can then initialize labgrid as usual and call the
function to get the command line and start Qemu for interactive use
without having to duplicate the labgrid environment parsing as in [1].

[1]: https://github.com/barebox/barebox/blob/v2023.05.0/test/emulate.pl#L226

Signed-off-by: Ahmad Fatoum
---
 labgrid/driver/qemudriver.py | 23 ++++++++++++++++-------
 1 file changed, 16 insertions(+), 7 deletions(-)

diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py
index 3677f3fd7..6d7daf02b 100644
--- a/labgrid/driver/qemudriver.py
+++ b/labgrid/driver/qemudriver.py
@@ -120,12 +120,12 @@ def get_qemu_version(self, qemu_bin):
 
         return (int(m.group('major')), int(m.group('minor')), int(m.group('micro')))
 
-    def on_activate(self):
-        self._tempdir = tempfile.mkdtemp(prefix="labgrid-qemu-tmp-")
-        sockpath = f"{self._tempdir}/serialrw"
-        self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
-        self._socket.bind(sockpath)
-        self._socket.listen(0)
+    def get_qemu_base_args(self):
+        """Returns the base command line used for Qemu without the options
+        related to QMP.
These options can be used to start an interactive + Qemu manually for debugging tests + """ + cmd = [] qemu_bin = self.target.env.config.get_tool(self.qemu_bin) if qemu_bin is None: @@ -222,7 +222,16 @@ def on_activate(self): cmd.append("-append") cmd.append(" ".join(boot_args)) - self._cmd = cmd + return cmd + + def on_activate(self): + self._tempdir = tempfile.mkdtemp(prefix="labgrid-qemu-tmp-") + sockpath = f"{self._tempdir}/serialrw" + self._socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + self._socket.bind(sockpath) + self._socket.listen(0) + + self._cmd = self.get_qemu_base_args() self._cmd.append("-S") self._cmd.append("-qmp") From 054b845303699e3d1c8cdc6f5bd6c1f635f58dc6 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 22 Jun 2023 12:11:52 +0200 Subject: [PATCH 024/384] CHANGES: add hint regarding deprecated ticket authentication Signed-off-by: Jan Luebbe --- CHANGES.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index a10bcddab..433a7a2c9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -32,6 +32,9 @@ Breaking changes in 23.1 `coordinator container `_ or install it into a separate local venv as desribed in the `documentation `_. + If you see ``WARNING: Ticket authentication is deprecated. Please update your + coordinator.`` on the client when running an updated coordinator, your + coordinator configuration may set ``ticket`` instead of ``anonymous`` auth. - The `StepReporter` API has been changed. To start step reporting, you must now call ``StepReporter.start()`` instead of ``StepReporter()`` - Logging output when running pytest is no longer sent to stderr by default, From 886e9915994caac5c354963bf48f11d1e4cc553b Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 22 Jun 2023 12:27:23 +0200 Subject: [PATCH 025/384] CHANGES: fix typo Signed-off-by: Jan Luebbe --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index 433a7a2c9..a4a030b59 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -30,7 +30,7 @@ Breaking changes in 23.1 ~~~~~~~~~~~~~~~~~~~~~~~~~ - The Debian package (``debian/``) no longer contains crossbar. Use the `coordinator container `_ or - install it into a separate local venv as desribed in the + install it into a separate local venv as described in the `documentation `_. If you see ``WARNING: Ticket authentication is deprecated. Please update your coordinator.`` on the client when running an updated coordinator, your From 166cb2930c56c63d3808c331cbd5efb73fde825e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 22 Jun 2023 13:02:03 +0200 Subject: [PATCH 026/384] man/labgrid-device-config: add ykushcmd The YKUSHPowerDriver uses the ykushcmd binary since [1]. [1] 6425df99 ("driver/powerdriver: ykushpower: support remote control via ssh") Signed-off-by: Bastian Krause --- man/labgrid-device-config.5 | 4 ++++ man/labgrid-device-config.rst | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/man/labgrid-device-config.5 b/man/labgrid-device-config.5 index 444bea847..1f0c6e729 100644 --- a/man/labgrid-device-config.5 +++ b/man/labgrid-device-config.5 @@ -161,6 +161,10 @@ See: \fI\%https://github.com/linux\-automation/usbsdmux\fP .B \fBuuu\-loader\fP Path to the uuu\-loader binary, used by the UUUDriver. See: \fI\%https://github.com/nxp\-imx/mfgtools\fP +.TP +.B \fBykushcmd\fP +Path to the ykushcmd binary, used by the YKUSHPowerDriver. +See: \fI\%https://github.com/Yepkit/ykush\fP .UNINDENT .sp The QEMUDriver expects a custom key set via its \fBqemu_bin\fP argument. 
diff --git a/man/labgrid-device-config.rst b/man/labgrid-device-config.rst
index a83d0bc62..3298f5c47 100644
--- a/man/labgrid-device-config.rst
+++ b/man/labgrid-device-config.rst
@@ -159,6 +159,10 @@ TOOLS KEYS
   Path to the uuu-loader binary, used by the UUUDriver.
   See: https://github.com/nxp-imx/mfgtools
 
+``ykushcmd``
+  Path to the ykushcmd binary, used by the YKUSHPowerDriver.
+  See: https://github.com/Yepkit/ykush
+
 The QEMUDriver expects a custom key set via its ``qemu_bin`` argument.
 See https://www.qemu.org/

From 13486c7f136523167ceb2add8a856f3ef9019e8a Mon Sep 17 00:00:00 2001
From: Jan Remmet
Date: Thu, 22 Jun 2023 13:50:53 +0200
Subject: [PATCH 027/384] pytest.stash instead of protected member

Since pytest 7.0, the stash is the suggested way for a plugin to store
private information.

Signed-off-by: Jan Remmet
---
 labgrid/pytestplugin/fixtures.py | 3 ++-
 labgrid/pytestplugin/hooks.py    | 7 +++++--
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/labgrid/pytestplugin/fixtures.py b/labgrid/pytestplugin/fixtures.py
index 441531349..1bd8d2f2b 100644
--- a/labgrid/pytestplugin/fixtures.py
+++ b/labgrid/pytestplugin/fixtures.py
@@ -7,6 +7,7 @@
 from ..resource.remote import RemotePlace
 from ..util.ssh import sshmanager
 from ..logging import DEFAULT_FORMAT
+from .hooks import LABGRID_ENV_KEY
 
 # pylint: disable=redefined-outer-name
 
@@ -60,7 +61,7 @@ def env(request, record_testsuite_property):
     """Return the environment configured in the supplied configuration file.
     It contains the targets contained in the configuration file.
     """
-    env = request.config._labgrid_env
+    env = request.config.stash[LABGRID_ENV_KEY]
 
     if not env:
         pytest.skip("missing environment config (use --lg-env)")
diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py
index 9b8b6708a..a701e5ccb 100644
--- a/labgrid/pytestplugin/hooks.py
+++ b/labgrid/pytestplugin/hooks.py
@@ -8,6 +8,9 @@
 from ..util.helper import processwrapper
 from ..logging import StepFormatter, StepLogger
 
+LABGRID_ENV_KEY = pytest.StashKey[Environment]()
+
+
 @pytest.hookimpl(tryfirst=True)
 def pytest_cmdline_main(config):
     def set_cli_log_level(level):
@@ -89,7 +92,7 @@ def pytest_configure(config):
         env = Environment(config_file=lg_env)
         if lg_coordinator is not None:
             env.config.set_option('crossbar_url', lg_coordinator)
-    config._labgrid_env = env
+    config.stash[LABGRID_ENV_KEY] = env
 
     processwrapper.enable_logging()
 
@@ -97,7 +100,7 @@ def pytest_configure(config):
 def pytest_collection_modifyitems(config, items):
     """This function matches function feature flags with those found in the
     environment and disables the item if no match is found"""
-    env = config._labgrid_env
+    env = config.stash[LABGRID_ENV_KEY]
 
     if not env:
         return

From 84f6dc8dc7abd14b52d381b96e337bd21d83c2a5 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 22 Jun 2023 13:20:14 +0200
Subject: [PATCH 028/384] remote/authenticator: drop obsolete ticket
 authenticator

Now that #1079 and #1136 are merged, the coordinator no longer supports
ticket authentication. Drop the corresponding authenticator class and
do not advertise ticket auth anymore.

Exporters and clients might still talk to older coordinator versions,
so leave ticket authentication enabled for these components.
Signed-off-by: Bastian Krause --- labgrid/remote/authenticator.py | 20 -------------------- labgrid/remote/coordinator.py | 5 +---- pyproject.toml | 3 --- 3 files changed, 1 insertion(+), 27 deletions(-) delete mode 100644 labgrid/remote/authenticator.py diff --git a/labgrid/remote/authenticator.py b/labgrid/remote/authenticator.py deleted file mode 100644 index a31321947..000000000 --- a/labgrid/remote/authenticator.py +++ /dev/null @@ -1,20 +0,0 @@ -import logging -from pprint import pprint -from twisted.internet.defer import inlineCallbacks -from autobahn.twisted.wamp import ApplicationSession - - -class AuthenticatorSession(ApplicationSession): - @inlineCallbacks - def onJoin(self, details): - def authenticate(realm, authid, details): # pylint: disable=unused-argument - logging.warning("%s still uses deprecated ticket authentication. Please update.", authid) - pprint(details) - principal = {'role': 'public', 'extra': {}} - return principal - - import warnings - warnings.warn("Ticket authentication is deprecated. Please switch to anonymous authentication once all your exporters/clients support it: .crossbar/config-anonymous.yaml", - DeprecationWarning) - - yield self.register(authenticate, 'org.labgrid.authenticate') diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 6fc230302..cfd86ea0f 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -133,14 +133,11 @@ async def onConnect(self): enable_tcp_nodelay(self) self.join( self.config.realm, - authmethods=["anonymous", "ticket"], + authmethods=["anonymous"], authid="coordinator", authextra={"authid": "coordinator"}, ) - def onChallenge(self, challenge): - return "dummy-ticket" - @locked async def onJoin(self, details): await self.subscribe(self.on_session_join, 'wamp.session.on_join') diff --git a/pyproject.toml b/pyproject.toml index 1e1195694..42eed529a 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -157,9 +157,6 @@ testpaths = [ ] addopts = "-p no:labgrid" -[tool.pylint.MASTER] -ignore-paths = ["labgrid/remote/authenticator.py"] - [tool.pylint.imports] ignored-modules = ["gi"] From 94400ccdd8d11934c090a2d2780a79f7577defb9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 22 Jun 2023 15:37:33 +0200 Subject: [PATCH 029/384] CHANGES: document dropped ticket authentication in v23.1 Signed-off-by: Bastian Krause --- CHANGES.rst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index a10bcddab..36e8caf19 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -28,6 +28,9 @@ Bug fixes in 23.1 Breaking changes in 23.1 ~~~~~~~~~~~~~~~~~~~~~~~~~ +- Support for the legacy ticket authentication was dropped: If the coordinator + logs ModuleNotFoundError on startup, switch the crossbar config to anonymous + authentication (see ``.crossbar/config-anonymous.yaml`` for an example). - The Debian package (``debian/``) no longer contains crossbar. Use the `coordinator container `_ or install it into a separate local venv as desribed in the From 8bc7858805c96cf05fb6028e571ac3bda260fc07 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 23 Jun 2023 15:19:20 +0200 Subject: [PATCH 030/384] driver/shelldriver: fix status attribute setting Since `_check_prompt()` sets the correct `_status` attribute anyway when a login prompt is detected, this only is a problem for cases without login (either because no login is required or it happened before). `get_status()` returns 0 instead of 1 then until `run()` is executed the first time. 
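For illustration, the problematic assignment created a new public
attribute instead of updating the private one consulted elsewhere:

  self.status = 1   # creates a new attribute on the driver instance;
                    # _check_prompt() and get_status() use self._status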
Fix this by setting the correct attribute.

Signed-off-by: Bastian Krause
---
 labgrid/driver/shelldriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py
index e9c38d725..78953f54c 100644
--- a/labgrid/driver/shelldriver.py
+++ b/labgrid/driver/shelldriver.py
@@ -143,7 +143,7 @@ def _await_login(self):
             else:
                 # we got a prompt. no need for any further action to
                 # activate this driver.
-                self.status = 1
+                self._status = 1
                 break

         elif index == 1:

From 6f7bf6614ffd38cbd9049239596b103d93cf1268 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Fri, 23 Jun 2023 16:14:16 +0200
Subject: [PATCH 031/384] driver/bareboxdriver: silence barebox in
 _await_prompt() and unsilence on boot()

Log messages such as..

  eth0: 1000Mbps full duplex link detected

..are logged asynchronously. This confuses the BareboxDriver.

To fix that, set the log level to "emerg" right after a prompt is
detected and recover the previous log level right before booting.

To make the tests run successfully, mock the _run() method before
driver activation to act as if `echo $global.loglevel` was run.

Signed-off-by: Bastian Krause
---
 CHANGES.rst                     |  2 ++
 labgrid/driver/bareboxdriver.py | 17 +++++++++++++++++
 tests/test_bareboxdriver.py     | 10 ++++++++--
 3 files changed, 27 insertions(+), 2 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index a10bcddab..9a4fb26a1 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -56,6 +56,8 @@ Breaking changes in 23.1
   slightly. ``-vv`` is now an alias for ``--log-cli-level=INFO`` (effectively
   unchanged), ``-vvv`` is an alias for ``--log-cli-level=CONSOLE``, and
   ``-vvvv`` is an alias for ``--log-cli-level=DEBUG``.
+- The `BareboxDriver` now remembers the log level, sets it to ``0`` on initial
+  activation/reset and recovers it on ``boot()``.
Known issues in 23.1 ~~~~~~~~~~~~~~~~~~~~ diff --git a/labgrid/driver/bareboxdriver.py b/labgrid/driver/bareboxdriver.py index a35f899d7..144080c98 100644 --- a/labgrid/driver/bareboxdriver.py +++ b/labgrid/driver/bareboxdriver.py @@ -42,6 +42,8 @@ def __attrs_post_init__(self): super().__attrs_post_init__() self.logger = logging.getLogger(f"{self}:{self.target}") self._status = 0 + # barebox' default log level, used as fallback if no log level can be saved + self.saved_log_level = 7 def on_activate(self): """Activate the BareboxDriver @@ -186,6 +188,18 @@ def _await_prompt(self): self._check_prompt() + # remember barebox' log level - we don't expect to be interrupted here + # by pollers because no hardware interaction is triggered by echo, so + # it should be safe to use the usual shell wrapper via _run() + stdout, _, exitcode = self._run("echo $global.loglevel") + [saved_log_level] = stdout + if exitcode == 0 and saved_log_level.isnumeric(): + self.saved_log_level = saved_log_level + + # silence barebox, the driver can get confused by asynchronous messages + # logged to the console otherwise + self._run("global.loglevel=0") + @Driver.check_active def await_boot(self): """Wait for the initial Linux version string to verify we successfully @@ -199,6 +213,9 @@ def boot(self, name: str): Args: name (str): name of the entry to boot""" + # recover saved log level + self._run(f"global.loglevel={self.saved_log_level}") + if name: self.console.sendline(f"boot -v {name}") else: diff --git a/tests/test_bareboxdriver.py b/tests/test_bareboxdriver.py index 3670665dc..9842a17c8 100644 --- a/tests/test_bareboxdriver.py +++ b/tests/test_bareboxdriver.py @@ -19,7 +19,10 @@ def test_create(self): def test_barebox_run(self, target_with_fakeconsole, mocker): t = target_with_fakeconsole d = BareboxDriver(t, "barebox") - d = t.get_driver(BareboxDriver) + d = t.get_driver(BareboxDriver, activate=False) + # mock for d._run('echo $global.loglevel') + d._run = mocker.MagicMock(return_value=(['7'], [], 0)) + t.activate(d) d._run = mocker.MagicMock(return_value=(['success'], [], 0)) res = d.run_check("test") assert res == ['success'] @@ -29,7 +32,10 @@ def test_barebox_run(self, target_with_fakeconsole, mocker): def test_barebox_run_error(self, target_with_fakeconsole, mocker): t = target_with_fakeconsole d = BareboxDriver(t, "barebox") - d = t.get_driver(BareboxDriver) + d = t.get_driver(BareboxDriver, activate=False) + # mock for d._run('echo $global.loglevel') + d._run = mocker.MagicMock(return_value=(['7'], [], 0)) + t.activate(d) d._run = mocker.MagicMock(return_value=(['error'], [], 1)) with pytest.raises(ExecutionError): res = d.run_check("test") From 79d71ab9235c12caf24740374162ac606a2473ab Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 23 Jun 2023 15:51:02 +0200 Subject: [PATCH 032/384] remote/coordinator: fix resetting places to an empty set Fix the typo, so the actually used attribute is reset. 
Signed-off-by: Bastian Krause --- labgrid/remote/coordinator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 6fc230302..8fcc5dbe6 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -314,7 +314,7 @@ async def save(self): def load(self): try: - self.place = {} + self.places = {} with open('places.yaml', 'r') as f: self.places = yaml.load(f.read()) for placename, config in self.places.items(): From d03aaae3a2446a418750dd9010cbf6761dac3db2 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 22 Jun 2023 15:42:35 +0200 Subject: [PATCH 033/384] dockerfiles: bump base image bullseye -> bookworm Signed-off-by: Bastian Krause --- dockerfiles/Dockerfile | 16 ++++++++-------- dockerfiles/staging/dut/Dockerfile | 2 +- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index f598ff4b5..695453840 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -1,4 +1,4 @@ -FROM debian:bullseye-slim AS labgrid-base +FROM debian:bookworm-slim AS labgrid-base LABEL maintainer="eha@deif.com" @@ -9,7 +9,7 @@ COPY ./ /opt/labgrid/ RUN set -e ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential libsnappy-dev ;\ - pip3 install -U pip;\ + pip3 install --break-system-packages -U pip;\ apt clean ;\ rm -rf /var/lib/apt/lists/* ;\ git clone https://github.com/vishnubob/wait-for-it.git opt/wait-for-it && cd opt/wait-for-it && git reset --hard 54d1f0bfeb6557adf8a3204455389d0901652242 @@ -22,8 +22,8 @@ ARG VERSION RUN set -e ;\ cd /opt/labgrid ;\ - pip3 install yq ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --no-cache-dir . ;\ + pip3 install --break-system-packages yq ;\ + SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends microcom openssh-client rsync jq qemu-system qemu-utils ;\ apt clean ;\ @@ -41,10 +41,10 @@ ENV CROSSBAR_DIR=/opt/crossbar RUN set -e ;\ cd /opt/labgrid ;\ - pip3 install virtualenv ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --no-cache-dir . ;\ + pip3 install --break-system-packages virtualenv ;\ + SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ virtualenv -p python3 crossbar-venv ;\ - crossbar-venv/bin/pip3 install -r crossbar-requirements.txt ;\ + crossbar-venv/bin/pip3 install --break-system-packages -r crossbar-requirements.txt ;\ sed -i "s#^ executable: .*\$# executable: python3#" .crossbar/config-anonymous.yaml VOLUME /opt/crossbar @@ -63,7 +63,7 @@ COPY dockerfiles/exporter/entrypoint.sh /entrypoint.sh RUN set -e ;\ cd /opt/labgrid ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --no-cache-dir . ;\ + SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . 
;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends ser2net ;\ apt clean ;\ diff --git a/dockerfiles/staging/dut/Dockerfile b/dockerfiles/staging/dut/Dockerfile index ec76c4d3d..f2c26aba7 100644 --- a/dockerfiles/staging/dut/Dockerfile +++ b/dockerfiles/staging/dut/Dockerfile @@ -1,4 +1,4 @@ -FROM debian:bullseye-slim +FROM debian:bookworm-slim MAINTAINER "Kasper Revsbech" From 4e8c906c90d54d7cac9b0044aed16345a419ee20 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 3 Jul 2023 18:01:03 +0200 Subject: [PATCH 034/384] github/workflows: add scheduled test jobs for stable branch Signed-off-by: Bastian Krause --- .github/workflows/scheduled-unit-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index 4d915d387..4f68a79e5 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -11,7 +11,7 @@ jobs: fail-fast: false matrix: python-version: ['3.8', '3.9', '3.10', '3.11'] - branch: ['master'] + branch: ['master', 'stable-23.0'] uses: ./.github/workflows/reusable-unit-tests.yml with: python-version: ${{ matrix.python-version }} @@ -21,7 +21,7 @@ jobs: strategy: fail-fast: false matrix: - branch: ['master'] + branch: ['master', 'stable-23.0'] uses: ./.github/workflows/reusable-unit-tests-docker.yml with: branch: ${{ matrix.branch }} From 3825a39bc4cfbe6406e5270567029ca711c5d0dd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 5 Jul 2023 14:38:53 +0200 Subject: [PATCH 035/384] github/workflows: do not build scheduled unit tests for stable-23.0 This partly reverts commit 4e8c906c. It only makes sense to add stable branches to the scheduled builds once they use the same setup as master. Otherwise CI breaks [1]. The setup changed in #1079 which introduced separating labgrid/crossbar venvs and moved the crossbar dependency to crossbar-requirements.txt. That means we cannot add the stable branch to the scheduled CI tests until the stable branch includes this setup, i.e. starting with stable-23.1. The setup has no impact on the docker tests, so leave them enabled for stable-23.0. 
[1] https://github.com/labgrid-project/labgrid/actions/runs/5462091372

Signed-off-by: Bastian Krause
---
 .github/workflows/scheduled-unit-tests.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml
index 4f68a79e5..faadf9cc7 100644
--- a/.github/workflows/scheduled-unit-tests.yml
+++ b/.github/workflows/scheduled-unit-tests.yml
@@ -11,7 +11,7 @@ jobs:
       fail-fast: false
       matrix:
         python-version: ['3.8', '3.9', '3.10', '3.11']
-        branch: ['master', 'stable-23.0']
+        branch: ['master']
     uses: ./.github/workflows/reusable-unit-tests.yml
     with:
       python-version: ${{ matrix.python-version }}

From 4d48dd5aaa020849f8337865f9532c1461bb9649 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 6 Jul 2023 15:27:41 +0200
Subject: [PATCH 036/384] stepreporter: fix class name in DeprecationWarning

Fixes: a3df4e66 ("stepreporter: deprecate it")
Signed-off-by: Bastian Krause
---
 labgrid/stepreporter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/stepreporter.py b/labgrid/stepreporter.py
index fc9d9f7a1..e88c51c99 100644
--- a/labgrid/stepreporter.py
+++ b/labgrid/stepreporter.py
@@ -19,7 +19,7 @@ def start(cls):
         from warnings import warn
         warn(
             """
-            StepLogger is deprecated, use the StepLogger and basicConfig from labgrid.logging
+            StepReporter is deprecated, use the StepLogger and basicConfig from labgrid.logging
             instead which integrates with the python logging infrastructure.
             """,
             DeprecationWarning,

From 22028070518e5ac799856c93a7bf1faa2826b94d Mon Sep 17 00:00:00 2001
From: Ahmad Fatoum
Date: Thu, 6 Jul 2023 16:31:07 +0200
Subject: [PATCH 037/384] util/managedfile: resolve symlinks before comparing
 NFS paths

ManagedFile has an optimization to detect if the "local" file is
available over NFS at the remote side. In that case, rsync is skipped
and get_remote_path will just return the NFS path.

This is done by comparing stat(1) output of the absolute path on local
and remote host. If the absolute path contains symlinks, this
comparison may fail if the remote host doesn't contain the exact same
symlinks.

Let's avoid this issue by canonicalizing the local path first.

Fixes: d42ac82b7a70 ("util/managedfile: heuristically detect NFS to avoid copy")
Signed-off-by: Ahmad Fatoum
---
 CHANGES.rst                 | 2 ++
 labgrid/util/managedfile.py | 2 +-
 tests/test_flashscript.py   | 3 ++-
 3 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 9a4fb26a1..bf02b5251 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -25,6 +25,8 @@ Bug fixes in 23.1
   labgrid-client.
 - Fix sftp option issue in SSH driver that caused sftp to only work once per
   test run.
+- ManagedFile NFS detection heuristic now does symlink resolution on the
+  local host.
Breaking changes in 23.1 ~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/labgrid/util/managedfile.py b/labgrid/util/managedfile.py index 21ecf7655..2e06e8fef 100644 --- a/labgrid/util/managedfile.py +++ b/labgrid/util/managedfile.py @@ -31,7 +31,7 @@ class ManagedFile: """ local_path = attr.ib( validator=attr.validators.instance_of(str), - converter=lambda x: os.path.abspath(str(x)) + converter=lambda x: os.path.realpath(str(x)) ) resource = attr.ib( validator=attr.validators.instance_of(Resource), diff --git a/tests/test_flashscript.py b/tests/test_flashscript.py index f8b158534..d64642a87 100644 --- a/tests/test_flashscript.py +++ b/tests/test_flashscript.py @@ -2,6 +2,7 @@ import subprocess import tempfile import attr +import os from pathlib import Path from labgrid.driver.flashscriptdriver import FlashScriptDriver from labgrid.resource.common import ManagedResource @@ -69,4 +70,4 @@ def test_argument_device_expansion(target, resource, driver): def test_argument_file_expansion(target, driver): value = capture_argument_expansion(driver, "file.local_path") - assert value == "/bin/sh" + assert os.path.samefile(value, "/bin/sh") From 1a6d435c94f88cf74cb09daae43b3450ec3382b3 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 11 Jul 2023 15:50:27 +0200 Subject: [PATCH 038/384] doc/configuration: drop "optional" from driver arguments with default value Arguments with a default value are obviously optional. So drop "optional" to keep them aligned with the rest of the driver/resource documentation. Signed-off-by: Bastian Krause --- doc/configuration.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 1a1ecba3e..858ebd32c 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -98,7 +98,7 @@ Arguments: - port (str): tty the instrument is connected to, e.g. '/dev/ttyUSB0' - address (int): slave address on the modbus, e.g. 16 - speed (int, default=115200): baud rate of the serial port - - timeout (float, default=0.25): optional, timeout in seconds + - timeout (float, default=0.25): timeout in seconds Used by: - `ModbusRTUDriver`_ @@ -2390,7 +2390,7 @@ Arguments: - rootfs (str): optional, reference to the paths key for use as the virtio-9p filesystem - dtb (str): optional, reference to the image key for the device tree - bios (str): optional, reference to the image key for the bios image - - display (str, default="none"): optional, display output to enable; must be one of: + - display (str, default="none"): display output to enable; must be one of: - none: Do not create a display device - fb-headless: Create a headless framebuffer device - egl-headless: Create a headless GPU-backed graphics card. Requires host support From fbe118d169349fc378d327168dbf955ac2c6205b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 11 Jul 2023 15:52:50 +0200 Subject: [PATCH 039/384] doc/configuration: add empty lines around each list level reStructuredText requires an empty line above and below each level of list indentation. This fixes the HTML output of the QEMUDriver arguments. 
Fixes: 54037f56 ("qemudriver: Add display support")
Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 858ebd32c..0cf81d8e2 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -2391,9 +2391,11 @@ Arguments:
   - dtb (str): optional, reference to the image key for the device tree
   - bios (str): optional, reference to the image key for the bios image
   - display (str, default="none"): display output to enable; must be one of:
+
     - none: Do not create a display device
     - fb-headless: Create a headless framebuffer device
     - egl-headless: Create a headless GPU-backed graphics card. Requires host support
+
   - nic (str): optional, configuration string to pass to QEMU to create a network interface

 The QEMUDriver also requires the specification of:

From e921a4ab2421f1bb66f2bbe44c534c80e556d18c Mon Sep 17 00:00:00 2001
From: Chris Fiege
Date: Thu, 20 Apr 2023 15:26:59 +0200
Subject: [PATCH 040/384] driver/sigrok: Add sigrok driver for DMMs

The SigrokDmmDriver wraps around a single channel DMM controlled by
sigrok.
It has been tested with Unit-T UT61C and UT61B devices but probably
also works with other single channel DMMs.

This driver binds to a SigrokUSBDevice.
Make sure to select the correct driver for your DMM there.

Example usage:
> resources:
>   - SigrokUSBDevice:
>       driver: uni-t-ut61c
>       match:
>         'ID_PATH': pci-0000:07:00.4-usb-0:2:1.0
> drivers:
>   - SigrokDmmDriver: {}

Args:
    bindings (dict): driver to use with sigrok

Signed-off-by: Chris Fiege
Co-authored-by: Bastian Krause <16608704+Bastian-Krause@users.noreply.github.com>
---
 doc/configuration.rst          |  34 ++++++++++
 labgrid/driver/__init__.py     |   2 +-
 labgrid/driver/sigrokdriver.py | 113 +++++++++++++++++++++++++++++++++
 3 files changed, 148 insertions(+), 1 deletion(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index f859dbec8..3bd0f22c9 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -2444,6 +2444,40 @@ Arguments:
   - max_current (float): optional, maximum allowed current for protection against
     accidental damage (in ampere)

+SigrokDmmDriver
+~~~~~~~~~~~~~~~
+The `SigrokDmmDriver` uses a `SigrokDevice` resource to record samples from a
+digital multimeter (DMM) and provides them during test runs.
+
+It is known to work with Unit-T `UT61B` and `UT61C` devices but should also
+work with other DMMs supported by *sigrok*.
+
+Binds to:
+  sigrok:
+    - `SigrokUSBDevice`_
+    - `SigrokUSBSerialDevice`_
+    - `NetworkSigrokUSBDevice`_
+    - NetworkSigrokUSBSerialDevice
+
+Implements:
+  - None yet
+
+Arguments:
+  - None
+
+Sampling can be started by calling `capture(samples, timeout=None)`.
+It sets up sampling and returns immediately.
+The default timeout has been chosen to work with Unit-T `UT61B`.
+Other devices may require a different timeout setting.
+
+Samples can be obtained using `stop()`.
+`stop()` will block until either *sigrok* terminates or `timeout` is reached.
+This method returns a `(unit, samples)` tuple:
+`unit` is the physical unit reported by the DMM;
+`samples` is an iterable of samples.
+
+This driver relies on buffering of the subprocess call.
+Reading a few samples will very likely work - but obtaining a lot of samples
+may stall.
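+
+A minimal usage sketch (the stimulus step in the middle is a placeholder)::
+
+    dmm = target.get_driver("SigrokDmmDriver")
+    dmm.capture(10)
+    # ...exercise the device under test while sampling runs...
+    unit, samples = dmm.stop()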
+
 USBSDMuxDriver
 ~~~~~~~~~~~~~~
 The :any:`USBSDMuxDriver` uses a USBSDMuxDevice resource to control a
diff --git a/labgrid/driver/__init__.py b/labgrid/driver/__init__.py
index 471eb0078..d26ad68f4 100644
--- a/labgrid/driver/__init__.py
+++ b/labgrid/driver/__init__.py
@@ -23,7 +23,7 @@
 from .qemudriver import QEMUDriver
 from .modbusdriver import ModbusCoilDriver
 from .modbusrtudriver import ModbusRTUDriver
-from .sigrokdriver import SigrokDriver, SigrokPowerDriver
+from .sigrokdriver import SigrokDriver, SigrokPowerDriver, SigrokDmmDriver
 from .usbstoragedriver import USBStorageDriver, NetworkUSBStorageDriver, Mode
 from .resetdriver import DigitalOutputResetDriver
 from .gpiodriver import GpioDigitalOutputDriver
diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index ded2edc82..9ee762531 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -23,6 +23,7 @@
 from .common import Driver, check_file
 from .exception import ExecutionError
 from .powerdriver import PowerResetMixin
+from ..util import Timeout


 @attr.s(eq=False)
@@ -329,3 +330,115 @@ def measure(self):
         if len(res) != 2:
             raise ExecutionError(f"Cannot parse --show output {out}")
         return res
+
+@target_factory.reg_driver
+@attr.s(eq=False)
+class SigrokDmmDriver(SigrokCommon):
+    """
+    This driver wraps around a single channel DMM controlled by sigrok.
+    It has been tested with Unit-T UT61C and UT61B devices but probably also
+    works with other single channel DMMs.
+
+    This driver binds to a SigrokUSBDevice.
+    Make sure to select the correct driver for your DMM there.
+
+    Example usage:
+    > resources:
+    >   - SigrokUSBDevice:
+    >       driver: uni-t-ut61c
+    >       match:
+    >         'ID_PATH': pci-0000:07:00.4-usb-0:2:1.0
+    > drivers:
+    >   - SigrokDmmDriver: {}
+
+    Args:
+        bindings (dict): driver to use with sigrok
+    """
+
+    bindings = {
+        "sigrok": {SigrokUSBSerialDevice, NetworkSigrokUSBSerialDevice, SigrokUSBDevice, NetworkSigrokUSBDevice},
+    }
+
+    @Driver.check_active
+    @step(result=True)
+    def capture(self, samples, timeout=None):
+        """
+        Starts to read samples from the DMM.
+        This method returns once sampling has been started. Sampling continues
+        in the background.
+
+        Note: We use subprocess.PIPE to buffer the samples.
+        When this buffer is too small for the number of samples requested,
+        sampling may stall.
+
+        Args:
+            samples: Number of samples to obtain
+            timeout: Timeout after which sampling should be stopped.
+                     If None: timeout[s] = samples * 1s + 5s
+                     If int: Timeout in [s]
+
+        Raises:
+            RuntimeError() if a capture is already running.
+        """
+        if self._running:
+            raise RuntimeError("capture is already running")
+
+        if not timeout:
+            timeout = samples + 5.0
+
+        args = f"-O csv --samples {samples}".split(" ")
+        self._call_with_driver(*args)
+        self._timeout = Timeout(timeout)
+        self._running = True
+
+    @Driver.check_active
+    @step(result=True)
+    def stop(self):
+        """
+        Waits for sigrok to complete and returns all samples obtained
+        afterwards.
+        This function blocks until either sigrok has terminated or the timeout
+        has been reached.
+
+        Returns:
+            (unit_spec, [sample, ...])
+
+        Raises:
+            RuntimeError() if capture has not been started
+        """
+        if not self._running:
+            raise RuntimeError("no capture started yet")
+        while not self._timeout.expired:
+            if self._process.poll() is not None:
+                # process has finished. no need to wait for the timeout
+                break
+            time.sleep(0.1)
+        else:
+            # process did not finish in time
+            self.log.info("sigrok-cli did not finish in time, increase timeout?")
+            self._process.kill()
+
+        res = []
+        unit = ""
+        for line in self._process.stdout.readlines():
+            line = line.strip()
+            if b";" in line:
+                # discard header information
+                continue
+            if not unit:
+                # the first line after the header contains the unit information
+                unit = line.decode()
+            else:
+                # all other lines are actual values
+                res.append(float(line))
+        _, stderr = self._process.communicate()
+        self.log.debug("stderr: %s", stderr)

+        self._running = False
+        return unit, res
+
+    def on_activate(self):
+        # This driver does not use self._tmpdir from SigrokCommon.
+        # Overriding this function to inhibit the temp-dir creation.
+        pass
+
+    def on_deactivate(self):
+        # This driver does not use self._tmpdir from SigrokCommon.
+        # Overriding this function to inhibit the temp-dir creation.
+        pass

From d8279ddc1572832e9a7a8c9b397c583408f236f8 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Leonard=20G=C3=B6hrs?=
Date: Mon, 26 Jun 2023 15:55:41 +0200
Subject: [PATCH 041/384] httpdigitaloutput: support generic digital outputs
 via HTTP
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Add support and documentation for a generic digital output driver that
works via HTTP.

The driver allows setting a URL to PUT/POST/PATCH a new output status
to and a URL to GET the current status from.
The message body to send when asserting/deasserting the output can be
specified independently.

The status returned by a GET request can either be matched exactly
against the same body used to set the state or more fuzzily using
regular expressions.

Signed-off-by: Leonard Göhrs
---
 doc/configuration.rst               | 54 ++++++++++++++++
 labgrid/driver/__init__.py          |  1 +
 labgrid/driver/httpdigitaloutput.py | 71 +++++++++++++++++++++
 labgrid/remote/client.py            |  4 +-
 labgrid/resource/__init__.py        |  1 +
 labgrid/resource/httpdigitalout.py  | 34 ++++++++++
 tests/test_httpdigitalout.py        | 97 +++++++++++++++++++++++++++++
 7 files changed, 261 insertions(+), 1 deletion(-)
 create mode 100644 labgrid/driver/httpdigitaloutput.py
 create mode 100644 labgrid/resource/httpdigitalout.py
 create mode 100644 tests/test_httpdigitalout.py

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 1a1ecba3e..0b0193750 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -483,6 +483,39 @@ Arguments:
 Used by:
   - `HIDRelayDriver`_

+HttpDigitalOutput
++++++++++++++++++
+A ``HttpDigitalOutput`` resource describes a generic digital output that can be
+controlled via HTTP.
+
+.. code-block:: yaml
+
+   HttpDigitalOutput:
+      url: http://host.example/some/endpoint
+      body_asserted: "On"
+      body_deasserted: "Off"
+
+The example assumes a simple scenario where the same URL is used for PUT
+requests that set the output state and GET requests to get the current state.
+It also assumes that the returned state matches either "On" or "Off" exactly.
+
+The `HttpDigitalOutputDriver`_ also supports more advanced use cases where the
+current state is fetched from another URL and is interpreted using regular
+expressions.
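+
+A hypothetical configuration for such a case (host name, endpoints and
+response bodies are placeholders) could look like:
+
+.. code-block:: yaml
+
+   HttpDigitalOutput:
+      url: http://host.example/set
+      body_asserted: "On"
+      body_deasserted: "Off"
+      url_get: http://host.example/get
+      body_get_asserted: 'relay is (on|ON)'
+      body_get_deasserted: 'relay is (off|OFF)'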
+ +Arguments: + - url (str): URL to use for setting a new state + - body_asserted (str): Request body to send to assert the output + - body_deasserted (str): Request body to send to de-assert the output + - method (str, default="PUT"): HTTP method to set a new state + + - url_get (str): URL to use instead of ``url`` for getting the state + - body_get_asserted (str): Regular Expression that matches an asserted response body + - body_get_deasserted (str): Regular Expression that matches a de-asserted response body + +Used by: + - `HttpDigitalOutputDriver`_ + NetworkHIDRelay +++++++++++++++ A NetworkHIDRelay describes an `HIDRelay`_ exported over the network. @@ -2763,6 +2796,27 @@ Implements: Arguments: - None + +HttpDigitalOutputDriver +~~~~~~~~~~~~~~~~~~~~~~~ +A HttpDigitalOutputDriver binds to a `HttpDigitalOutput` to set and get a +digital output state via HTTP. + +Binds to: + http: + - `HttpDigitalOutput`_ + +.. code-block:: yaml + + HttpDigitalOutputDriver: {} + +Implements: + - :any:`DigitalOutputProtocol` + +Arguments: + - None + + PyVISADriver ~~~~~~~~~~~~ The PyVISADriver uses a PyVISADevice resource to control test equipment manageable by PyVISA. diff --git a/labgrid/driver/__init__.py b/labgrid/driver/__init__.py index 471eb0078..cd18cc47b 100644 --- a/labgrid/driver/__init__.py +++ b/labgrid/driver/__init__.py @@ -46,3 +46,4 @@ from .usbtmcdriver import USBTMCDriver from .deditecrelaisdriver import DeditecRelaisDriver from .dediprogflashdriver import DediprogFlashDriver +from .httpdigitaloutput import HttpDigitalOutputDriver diff --git a/labgrid/driver/httpdigitaloutput.py b/labgrid/driver/httpdigitaloutput.py new file mode 100644 index 000000000..32a387aac --- /dev/null +++ b/labgrid/driver/httpdigitaloutput.py @@ -0,0 +1,71 @@ +import re +from importlib import import_module + +import attr + +from ..factory import target_factory +from ..protocol import DigitalOutputProtocol +from ..step import step +from ..util.proxy import proxymanager +from .common import Driver +from .exception import ExecutionError + + +@target_factory.reg_driver +@attr.s(eq=False) +class HttpDigitalOutputDriver(Driver, DigitalOutputProtocol): + bindings = { "http": "HttpDigitalOutput" } + + def __attrs_post_init__(self): + super().__attrs_post_init__() + self._requests = import_module("requests") + + def on_activate(self): + self._url_set = proxymanager.get_url( + self.http.url, + default_port=(443 if self.http.url.startswith("https") else 80), + ) + + if self.http.url_get: + self._url_get = proxymanager.get_url( + self.http.url_get, + default_port=(443 if self.http.url_get.startswith("https") else 80), + ) + + else: + self._url_get = self._url_set + + @Driver.check_active + @step(args=["status"]) + def set(self, status): + method = self.http.method or "PUT" + body = self.http.body_asserted if status else self.http.body_deasserted + + res = self._requests.request(method, self._url_set, data=body) + res.raise_for_status() + + @Driver.check_active + @step(result=["True"]) + def get(self): + res = self._requests.get(self._url_get) + res.raise_for_status() + + # Check if the response body matches an asserted state + if self.http.body_get_asserted: + if re.fullmatch(self.http.body_get_asserted, res.text) is not None: + return True + + elif res.text == self.http.body_asserted: + return True + + # Check if the response body matches a de-asserted state + if self.http.body_get_deasserted: + if re.fullmatch(self.http.body_get_deasserted, res.text) is not None: + return False + + elif res.text == 
self.http.body_deasserted: + return False + + raise ExecutionError( + f'response does not match asserted or deasserted state: "{res.text}"' + ) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index a1f8d9825..3fe534e9a 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -762,7 +762,7 @@ def digital_io(self): action = self.args.action name = self.args.name target = self._get_target(place) - from ..resource import ModbusTCPCoil, OneWirePIO + from ..resource import ModbusTCPCoil, OneWirePIO, HttpDigitalOutput from ..resource.remote import (NetworkDeditecRelais8, NetworkSysfsGPIO, NetworkLXAIOBusPIO, NetworkHIDRelay) @@ -775,6 +775,8 @@ def digital_io(self): drv = self._get_driver_or_new(target, "ModbusCoilDriver", name=name) elif isinstance(resource, OneWirePIO): drv = self._get_driver_or_new(target, "OneWirePIODriver", name=name) + elif isinstance(resource, HttpDigitalOutput): + drv = self._get_driver_or_new(target, "HttpDigitalOutputDriver", name=name) elif isinstance(resource, NetworkDeditecRelais8): drv = self._get_driver_or_new(target, "DeditecRelaisDriver", name=name) elif isinstance(resource, NetworkSysfsGPIO): diff --git a/labgrid/resource/__init__.py b/labgrid/resource/__init__.py index 1820cad5d..77672b2a5 100644 --- a/labgrid/resource/__init__.py +++ b/labgrid/resource/__init__.py @@ -23,3 +23,4 @@ from .mqtt import TasmotaPowerPort from .httpvideostream import HTTPVideoStream from .dediprogflasher import DediprogFlasher, NetworkDediprogFlasher +from .httpdigitalout import HttpDigitalOutput diff --git a/labgrid/resource/httpdigitalout.py b/labgrid/resource/httpdigitalout.py new file mode 100644 index 000000000..6bcaa4074 --- /dev/null +++ b/labgrid/resource/httpdigitalout.py @@ -0,0 +1,34 @@ +import attr + +from ..factory import target_factory +from .common import Resource + + +@target_factory.reg_resource +@attr.s(eq=False) +class HttpDigitalOutput(Resource): + """This resource describes a generic HTTP-controlled output pin. 
+ + Args: + url (str): URL to use for setting a new state + body_asserted (str): Request body to send to assert the output + body_deasserted (str): Request body to send to de-assert the output + method (str): HTTP method to use instead of PUT (the default) to set a new state + + url_get (str): URL to use for getting the state + body_get_asserted (str): Regular Expression that matches an asserted response body + body_get_deasserted (str): Regular Expression that matches a de-asserted response body + """ + + url = attr.ib(validator=attr.validators.instance_of(str)) + body_asserted = attr.ib(validator=attr.validators.instance_of(str)) + body_deasserted = attr.ib(validator=attr.validators.instance_of(str)) + method = attr.ib(default="PUT", validator=attr.validators.instance_of(str)) + + url_get = attr.ib(default="", validator=attr.validators.instance_of(str)) + body_get_asserted = attr.ib( + default="", validator=attr.validators.instance_of(str) + ) + body_get_deasserted = attr.ib( + default="", validator=attr.validators.instance_of(str) + ) diff --git a/tests/test_httpdigitalout.py b/tests/test_httpdigitalout.py new file mode 100644 index 000000000..9b2d20bcb --- /dev/null +++ b/tests/test_httpdigitalout.py @@ -0,0 +1,97 @@ +import pytest +import requests + +from labgrid.driver import HttpDigitalOutputDriver +from labgrid.resource import HttpDigitalOutput + + +@pytest.fixture(scope="function") +def mock_server(mocker): + state = '"Unknown"' + + def request(method, url, data=None): + nonlocal state + state = data + return mocker.MagicMock() + + def get(url): + r = mocker.MagicMock() + r.text = state + return r + + mock_request = mocker.patch("requests.request") + mock_request.side_effect = request + mock_get = mocker.patch("requests.get") + mock_get.side_effect = get + + return (mock_request, mock_get) + + +def _make_http_driver(target, with_tls, with_regex, separate_get, match_error): + scheme = "https" if with_tls else "http" + url = f"{scheme}://host.example/set" + url_get = f"{scheme}://host.example/get" if separate_get else "" + + body_get_asserted = ".*n.*" if with_regex else "" + body_get_deasserted = ".*ff.*" if with_regex else "" + + if match_error: + body_get_asserted = "--- DOES NOT MATCH ---" + body_get_deasserted = "--- DOES NOT MATCH EITHER ---" + + dig_out_res = HttpDigitalOutput( + target, + name=None, + url=url, + body_asserted='"On"', + body_deasserted='"Off"', + method="PUT", + url_get=url_get, + body_get_asserted=body_get_asserted, + body_get_deasserted=body_get_deasserted, + ) + + http_driver = HttpDigitalOutputDriver(target, name=None) + target.activate(http_driver) + + return http_driver + + +@pytest.mark.parametrize( + "asserted,with_tls,with_regex,separate_get", + [ + (False, False, False, False), + (True, False, False, False), + (True, True, False, False), + (True, False, True, False), + (False, False, True, False), + (True, False, False, True), + (True, True, True, True), + ], +) +def test_set_get(asserted, with_tls, with_regex, separate_get, target, mock_server): + http_driver = _make_http_driver(target, with_tls, with_regex, separate_get, False) + mock_request, mock_get = mock_server + + data = '"On"' if asserted else '"Off"' + scheme = "https" if with_tls else "http" + port = 443 if with_tls else 80 + get_endpoint = "get" if separate_get else "set" + + set_url = f"{scheme}://host.example:{port}/set" + get_url = f"{scheme}://host.example:{port}/{get_endpoint}" + + http_driver.set(asserted) + mock_request.assert_called_once_with("PUT", set_url, data=data) + + assert 
http_driver.get() == asserted
+    mock_get.assert_called_once_with(get_url)
+
+
+def test_match_exception(target, mock_server):
+    http_driver = _make_http_driver(target, False, False, False, True)
+    mock_request, mock_get = mock_server
+
+    http_driver.set(True)
+    with pytest.raises(Exception):
+        http_driver.get()

From c4fb24b326a53484789c310643b4cf780fc83897 Mon Sep 17 00:00:00 2001
From: Jan Remmet
Date: Thu, 20 Jul 2023 10:11:01 +0200
Subject: [PATCH 042/384] pyproject: remove Python 3.7 from tox envlist

tox fails because we require python >= 3.8 since commit:
6297725 ("drop support for Python 3.7")

Signed-off-by: Jan Remmet
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 1e1195694..5f3f6f8b0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -215,7 +215,7 @@ signature-mutators = ["labgrid.step.step"]
 [tool.tox]
 legacy_tox_ini = """
 [tox]
-envlist = py37, py38, py39, py310, py311
+envlist = py38, py39, py310, py311
 isolated_build = true

 [testenv]

From 352191f898256c00684dc90ae674d0c2ea2f9a00 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 20 Jul 2023 14:26:59 +0200
Subject: [PATCH 043/384] tests/test_modbusrtudriver: mock serial instead of
 relying on port=None

Since minimalmodbus 2.1.0, minimalmodbus.Instrument() [1] no longer
accepts port=None. Accepting this was never documented. The idea was
that port=None would be passed to serial.Serial(port=None), which
explicitly allows this [2].

Now that this is no longer allowed, stick to the original
port="/dev/tty0" in the test and mock serial.Serial, so it is not
actually opened.

[1] https://minimalmodbus.readthedocs.io/en/stable/apiminimalmodbus.html#minimalmodbus.Instrument
[2] https://pyserial.readthedocs.io/en/latest/pyserial_api.html#serial.Serial

Signed-off-by: Bastian Krause
---
 tests/test_modbusrtudriver.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/tests/test_modbusrtudriver.py b/tests/test_modbusrtudriver.py
index a92004d5e..a25b6e5fc 100644
--- a/tests/test_modbusrtudriver.py
+++ b/tests/test_modbusrtudriver.py
@@ -24,12 +24,11 @@ def test_resource_with_non_default_argument(target):
 def test_driver(target, mocker):
     pytest.importorskip("minimalmodbus")

+    mocker.patch('serial.Serial')
+
     ModbusRTU(target, name=None, port="/dev/tty0", address=10)
     driver = ModbusRTUDriver(target, name=None)

-    # Ensure pyserial will not try to open the port
-    driver.resource.port = None
-
     target.activate(driver)

     assert driver.instrument.serial.baudrate == 115200

From 44fd0f91ffbbd66d199c64b816185fabb95ee561 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 20 Jul 2023 15:35:50 +0200
Subject: [PATCH 044/384] driver/bareboxdriver: run commands with saved log
 level

[1] introduced lowering barebox' log level to 0 (emerg) right after the
barebox prompt has been detected and recovering the log level right
before booting.

Tests may rely on log output, though. To enable capturing log messages
a command may emit, set the log level to the initially remembered level
before the command is executed and reset it to 0 (emerg) after the exit
code was emitted.

barebox' poller should not run between the echoes, so this should be
safe. No asynchronous prints are expected to happen during this time.
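With a saved log level of 7, the wrapped command sequence now looks
roughly like this (marker shortened for readability):

  echo -o /cmd 'ls'; echo MARKER; global.loglevel=7; sh /cmd; echo MARKER $?; global.loglevel=0;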
[1] #1221, 6f7bf661 ("driver/bareboxdriver: silence barebox in _await_prompt() and unsilence on boot()") Signed-off-by: Bastian Krause --- CHANGES.rst | 3 ++- labgrid/driver/bareboxdriver.py | 17 ++++++++++++----- 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 9a4fb26a1..e62ac01a9 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -57,7 +57,8 @@ Breaking changes in 23.1 unchanged), ``-vvv`` is an alias for ``--log-cli-level=CONSOLE``, and ``-vvvv`` is an alias for ``--log-cli-level=DEBUG``. - The `BareboxDriver` now remembers the log level, sets it to ``0`` on initial - activation/reset and recovers it on ``boot()``. + activation/reset and recovers it on ``boot()``. During + ``run()``/``run_check()`` the initially detected log level is used. Known issues in 23.1 ~~~~~~~~~~~~~~~~~~~~ diff --git a/labgrid/driver/bareboxdriver.py b/labgrid/driver/bareboxdriver.py index 144080c98..910f25300 100644 --- a/labgrid/driver/bareboxdriver.py +++ b/labgrid/driver/bareboxdriver.py @@ -65,7 +65,7 @@ def on_deactivate(self): def run(self, cmd: str, *, timeout: int = 30): return self._run(cmd, timeout=timeout) - def _run(self, cmd: str, *, timeout: int = 30, codec: str = "utf-8", decodeerrors: str = "strict"): # pylint: disable=unused-argument,line-too-long + def _run(self, cmd: str, *, timeout: int = 30, adjust_log_level: bool = True, codec: str = "utf-8", decodeerrors: str = "strict"): # pylint: disable=unused-argument,line-too-long """ Runs the specified command on the shell and returns the output. @@ -80,7 +80,14 @@ def _run(self, cmd: str, *, timeout: int = 30, codec: str = "utf-8", decodeerror marker = gen_marker() # hide marker from expect hidden_marker = f'"{marker[:4]}""{marker[4:]}"' - cmp_command = f'''echo -o /cmd {shlex.quote(cmd)}; echo {hidden_marker}; sh /cmd; echo {hidden_marker} $?;''' # pylint: disable=line-too-long + # generate command with marker and log level adjustment + cmp_command = f'echo -o /cmd {shlex.quote(cmd)}; echo {hidden_marker};' + if self.saved_log_level and adjust_log_level: + cmp_command += f' global.loglevel={self.saved_log_level};' + cmp_command += f' sh /cmd; echo {hidden_marker} $?;' + if self.saved_log_level and adjust_log_level: + cmp_command += ' global.loglevel=0;' + if self._status == 1: self.console.sendline(cmp_command) _, _, match, _ = self.console.expect( @@ -191,14 +198,14 @@ def _await_prompt(self): # remember barebox' log level - we don't expect to be interrupted here # by pollers because no hardware interaction is triggered by echo, so # it should be safe to use the usual shell wrapper via _run() - stdout, _, exitcode = self._run("echo $global.loglevel") + stdout, _, exitcode = self._run("echo $global.loglevel", adjust_log_level=False) [saved_log_level] = stdout if exitcode == 0 and saved_log_level.isnumeric(): self.saved_log_level = saved_log_level # silence barebox, the driver can get confused by asynchronous messages # logged to the console otherwise - self._run("global.loglevel=0") + self._run("global.loglevel=0", adjust_log_level=False) @Driver.check_active def await_boot(self): @@ -214,7 +221,7 @@ def boot(self, name: str): Args: name (str): name of the entry to boot""" # recover saved log level - self._run(f"global.loglevel={self.saved_log_level}") + self._run(f"global.loglevel={self.saved_log_level}", adjust_log_level=False) if name: self.console.sendline(f"boot -v {name}") From 24ff7b61194a781d3f64a35fd5345857227acb73 Mon Sep 17 00:00:00 2001 From: Jan Remmet Date: Wed, 19 Jul 2023 14:33:59 +0200 
Subject: [PATCH 045/384] pyproject: use PyYAML>=6.0.1

With the release of Cython3 the build of PyYAML 5.4.1 is broken [1].
There seems to be no plan to fix the 5.4.x branch [2].

Tox tests and a real world example run fine with 6.0.1.

[1] https://github.com/yaml/pyyaml/issues/724
[2] https://github.com/yaml/pyyaml/issues/728

Signed-off-by: Jan Remmet
---
 pyproject.toml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pyproject.toml b/pyproject.toml
index 5f3f6f8b0..4b1981da3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -40,7 +40,7 @@ dependencies = [
     "pytest>=7.0.0",
     "pyudev>=0.22.0",
     "pyusb>=1.2.1",
-    "PyYAML>=5.4.1",
+    "PyYAML>=6.0.1",
     "requests>=2.26.0",
     "xmodem>=0.4.6",
 ]

From 8ff47428030cd4996c3a853a1fbdcc7c8a64d1ba Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 26 Jul 2023 12:58:02 +0200
Subject: [PATCH 046/384] driver/provider: rename NFSPProviderDriver ->
 NFSProviderDriver

The typo has been in the class name since its introduction. The
documentation contains the correct name "NFSProviderDriver" already.

Signed-off-by: Bastian Krause
---
 labgrid/driver/__init__.py | 2 +-
 labgrid/driver/provider.py | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/labgrid/driver/__init__.py b/labgrid/driver/__init__.py
index e16d0529a..4cda6be5f 100644
--- a/labgrid/driver/__init__.py
+++ b/labgrid/driver/__init__.py
@@ -40,7 +40,7 @@
 from .usbvideodriver import USBVideoDriver
 from .httpvideodriver import HTTPVideoDriver
 from .networkinterfacedriver import NetworkInterfaceDriver
-from .provider import HTTPProviderDriver, NFSPProviderDriver, TFTPProviderDriver
+from .provider import HTTPProviderDriver, NFSProviderDriver, TFTPProviderDriver
 from .mqtt import TasmotaPowerDriver
 from .manualswitchdriver import ManualSwitchDriver
 from .usbtmcdriver import USBTMCDriver
diff --git a/labgrid/driver/provider.py b/labgrid/driver/provider.py
index 4a264ce2d..3761424d0 100644
--- a/labgrid/driver/provider.py
+++ b/labgrid/driver/provider.py
@@ -43,7 +43,7 @@ class TFTPProviderDriver(BaseProviderDriver):

 @target_factory.reg_driver
 @attr.s(eq=False)
-class NFSPProviderDriver(BaseProviderDriver):
+class NFSProviderDriver(BaseProviderDriver):
     bindings = {
         "provider": {"NFSProvider", "RemoteNFSProvider"},
     }

From b2e9fd27dd0945cacba820fbc56e2c21bc09d8ea Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 27 Jul 2023 18:21:15 +0200
Subject: [PATCH 047/384] doc/configuration: use correct class name for
 NetworkFlashrom

Fixes: 36900559 ("flashrom: Add support for flashing ROM's by the flashrom utility")
Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index aa8881a25..f8ae6ebd0 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -1041,7 +1041,7 @@ Below an example where the local spidev is used.
 Used by:
   - `FlashromDriver`_

-NetworkFlashRom
+NetworkFlashrom
 ~~~~~~~~~~~~~~~
 A NetworkFlashrom describes a `Flashrom`_ available on a remote computer.
From d96eb68b39487129b342e5fd4f32a78ffa8044d3 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jul 2023 18:23:08 +0200 Subject: [PATCH 048/384] doc/configuration: document SmallUBootDriver's boot_secret_nolf attribute Signed-off-by: Bastian Krause --- doc/configuration.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index f8ae6ebd0..553418d03 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1654,6 +1654,7 @@ Implements: Arguments: - boot_expression (str, default="U-Boot 20\\d+"): regex to match the U-Boot start string - boot_secret (str, default="a"): secret used to unlock prompt + - boot_secret_nolf (bool, default=False): send boot_secret without new line - login_timeout (int, default=60): timeout for password/login prompt detection - for other arguments, see `UBootDriver`_ From 7316e255072340251f79ed08708859e8455dce18 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 13 Jul 2023 17:36:13 +0200 Subject: [PATCH 049/384] resource: import NFSProvider, HTTPProvider This allows the user to import these resources from labgrid.resource directly. Signed-off-by: Bastian Krause --- labgrid/resource/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/resource/__init__.py b/labgrid/resource/__init__.py index 77672b2a5..69bb424df 100644 --- a/labgrid/resource/__init__.py +++ b/labgrid/resource/__init__.py @@ -19,7 +19,7 @@ from .docker import DockerManager, DockerDaemon, DockerConstants from .lxaiobus import LXAIOBusPIO from .pyvisa import PyVISADevice -from .provider import TFTPProvider +from .provider import TFTPProvider, NFSProvider, HTTPProvider from .mqtt import TasmotaPowerPort from .httpvideostream import HTTPVideoStream from .dediprogflasher import DediprogFlasher, NetworkDediprogFlasher From e8dbe63a0f9661a843f03fe46f23e9cabc3e6be9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jul 2023 20:29:30 +0200 Subject: [PATCH 050/384] driver/provider: fix BaseProviderDriver with local resources The TFTPProvider, HTTPProvider and NFSProvider don't have a host property unlike the remote variants. Fix the `get_export_vars()` method by distinguishing local and remote resources: use the provider's host attribute for remote resources, otherwise return "localhost". 
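For illustration, with a locally configured provider such as

  TFTPProvider:
    internal: "/srv/tftp/testboard/"
    external: "testboard/"

the exported variables now contain LG__TFTP_HOST=localhost in addition
to the internal/external paths (see the new test_export_tftp_provider
test below).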
Signed-off-by: Bastian Krause --- labgrid/resource/provider.py | 4 ++++ tests/test_export.py | 14 +++++++++++++- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/labgrid/resource/provider.py b/labgrid/resource/provider.py index c498a1d69..fb9a9153b 100644 --- a/labgrid/resource/provider.py +++ b/labgrid/resource/provider.py @@ -9,6 +9,10 @@ class BaseProvider(Resource): internal = attr.ib(validator=attr.validators.instance_of(str)) external = attr.ib(validator=attr.validators.instance_of(str)) + def __attrs_post_init__(self): + self.host = "localhost" + super().__attrs_post_init__() + @target_factory.reg_resource @attr.s(eq=False) diff --git a/tests/test_export.py b/tests/test_export.py index 132115956..f977322a8 100644 --- a/tests/test_export.py +++ b/tests/test_export.py @@ -1,6 +1,6 @@ import pytest -from labgrid.resource import Resource, NetworkSerialPort +from labgrid.resource import Resource, NetworkSerialPort, TFTPProvider from labgrid.resource.remote import RemoteNetworkInterface, RemoteTFTPProvider from labgrid.driver import Driver, SerialDriver, NetworkInterfaceDriver, TFTPProviderDriver from labgrid.strategy import Strategy @@ -88,6 +88,18 @@ def test_export_remote_network_interface(target): } +def test_export_tftp_provider(target): + TFTPProvider(target, None, internal='/srv/tftp/testboard/', external='testboard/') + TFTPProviderDriver(target, "tftp") + + exported = target.export() + assert exported == { + 'LG__TFTP_HOST': 'localhost', + 'LG__TFTP_INTERNAL': '/srv/tftp/testboard/', + 'LG__TFTP_EXTERNAL': 'testboard/', + } + + def test_export_remote_tftp_provider(target): RemoteTFTPProvider(target, None, host='testhost', internal='/srv/tftp/testboard/', external='testboard/') TFTPProviderDriver(target, "tftp") From 1a53a26f911739148882ba037edddb4f87c01319 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jul 2023 20:53:12 +0200 Subject: [PATCH 051/384] driver/provider: drop BaseProviderDriver.__attrs_post_init__() There is no need to overwrite this method to only call the same method on super(). This is the default. Signed-off-by: Bastian Krause --- labgrid/driver/provider.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/labgrid/driver/provider.py b/labgrid/driver/provider.py index 3761424d0..060b4f15f 100644 --- a/labgrid/driver/provider.py +++ b/labgrid/driver/provider.py @@ -10,9 +10,6 @@ @attr.s(eq=False) class BaseProviderDriver(Driver): - def __attrs_post_init__(self): - super().__attrs_post_init__() - @Driver.check_bound def get_export_vars(self): return { From 14a258eabb92b0e1db9016195045eb74a5a8f13f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 26 Jul 2023 13:43:43 +0200 Subject: [PATCH 052/384] util/managedfile: expose user cache path This path will be used by the NFSProviderDriver in a future commit. 
Signed-off-by: Bastian Krause --- labgrid/util/managedfile.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/labgrid/util/managedfile.py b/labgrid/util/managedfile.py index 21ecf7655..a2c0b501d 100644 --- a/labgrid/util/managedfile.py +++ b/labgrid/util/managedfile.py @@ -60,7 +60,7 @@ def sync_to_resource(self, symlink=None): self.logger.info("File %s is accessible on %s, skipping copy", self.local_path, host) self.rpath = os.path.dirname(self.local_path) + "/" else: - self.rpath = f"/var/cache/labgrid/{get_user()}/{self.get_hash()}/" + self.rpath = f"{self.get_user_cache_path()}/{self.get_hash()}/" self.logger.info("Synchronizing %s to %s", self.local_path, host) conn.run_check(f"mkdir -p {self.rpath}") conn.put_file( @@ -150,3 +150,6 @@ def get_hash(self): self.hash = hasher.hexdigest() return self.hash + + def get_user_cache_path(self): + return f"/var/cache/labgrid/{get_user()}" From a092969560628390d5d265e1e423ac55fca4359a Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 26 Jul 2023 13:59:57 +0200 Subject: [PATCH 053/384] driver/provider: adjust NFSProviderDriver to NFS mounting use cases During the introduction of the provider drivers, it was assumed all provider drivers work the same way: by relying on the `internal`/`external` attributes of their corresponding provider resource and symlinking the required file (either from /var/cache/labgrid/$USER/$HASH/file or the given path, in case it's locally available) to the `internal` path. For the target, the file is now expected to be available below `external` as returned by `stage()`. This, however, does not work for NFS: all NFS servers known to me don't touch the symlinks, meaning in our case that they point to a location not exported via NFS. Exporting and mounting these destinations to the same location is impractical at best or does not work at all. On top of that, the `internal`/`external` lingo does not fit the NFS mount use case. To improve this situation, introduce a custom `stage()` method for the NFSProviderDriver: assume that /var/cache/labgrid is exported via NFS, always copy the file to be staged there (even if it would be locally available) and return an NFSFile object containing the information required to mount and access the NFS share: host, export and the file path relative to the export. Signed-off-by: Bastian Krause --- CHANGES.rst | 3 +++ doc/configuration.rst | 34 +++++++++++++++++++++++++++------- labgrid/driver/provider.py | 29 ++++++++++++++++++++++++++++- 3 files changed, 58 insertions(+), 8 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index e62ac01a9..58a6e473f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -59,6 +59,9 @@ Breaking changes in 23.1 - The `BareboxDriver` now remembers the log level, sets it to ``0`` on initial activation/reset and recovers it on ``boot()``. During ``run()``/``run_check()`` the initially detected log level is used. +- The `NFSProviderDriver` now returns mount and path information on ``stage()`` + instead of the path to be used on the target. The previous return value did + not fit the NFS mount use case. Known issues in 23.1 ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/configuration.rst b/doc/configuration.rst index aa8881a25..bccc2265c 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1165,10 +1165,13 @@ This is useful for software installation in the bootloader (via TFTP) or downloading update artifacts under Linux (via HTTP). 
They are used with the ManagedFile helper, which ensures that the file is -available on the server and then creates a symlink from the internal directory -to the uploaded file. +available on the server. For HTTP and TFTP, a symlink from the internal +directory to the uploaded file is created. The path for the target is generated by replacing the internal prefix with the external prefix. +For NFS, it is assumed that ``/var/cache/labgrid`` is exported. +The information required for mounting and accessing staged files are returned, +see below. For now, the TFTP/NFS/HTTP server needs to be configured before using it from labgrid. @@ -2344,11 +2347,10 @@ Arguments: - None .. _TFTPProviderDriver: -.. _NFSProviderDriver: .. _HTTPProviderDriver: -TFTPProviderDriver / NFSProviderDriver / HTTPProviderDriver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +TFTPProviderDriver / HTTPProviderDriver +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ These drivers control their corresponding Provider resources, either locally or remotely. @@ -2356,8 +2358,6 @@ Binds to: provider: - `TFTPProvider`_ - `RemoteTFTPProvider`_ - - `NFSProvider`_ - - `RemoteNFSProvider`_ - `HTTPProvider`_ - `RemoteHTTPProvider`_ @@ -2371,6 +2371,26 @@ Arguments: The driver can be used in test cases by calling the `stage()` function, which returns the path to be used by the target. +NFSProviderDriver +~~~~~~~~~~~~~~~~~ +An NFSProviderDriver controls an `NFSProvider` resource. + +Binds to: + provider: + - `NFSProvider`_ + - `RemoteNFSProvider`_ + +.. code-block:: yaml + + NFSProviderDriver: {} + +Arguments: + - None + +The driver can be used in test cases by calling the `stage()` function, which +returns an NFSFile object with ``host``, ``export`` and ``relative_file_path`` +attributes. + QEMUDriver ~~~~~~~~~~ The QEMUDriver allows the usage of a QEMU instance as a target. 
It requires diff --git a/labgrid/driver/provider.py b/labgrid/driver/provider.py index 060b4f15f..9c8fda70f 100644 --- a/labgrid/driver/provider.py +++ b/labgrid/driver/provider.py @@ -38,13 +38,40 @@ class TFTPProviderDriver(BaseProviderDriver): } +@attr.s +class NFSFile: + host = attr.ib(validator=attr.validators.instance_of(str)) + export = attr.ib(validator=attr.validators.instance_of(str)) + relative_file_path = attr.ib(validator=attr.validators.instance_of(str)) + + @target_factory.reg_driver @attr.s(eq=False) -class NFSProviderDriver(BaseProviderDriver): +class NFSProviderDriver(Driver): bindings = { "provider": {"NFSProvider", "RemoteNFSProvider"}, } + @Driver.check_bound + def get_export_vars(self): + return { + "host": self.provider.host, + } + + @Driver.check_active + @step(args=['filename'], result=True) + def stage(self, filename): + # always copy the file to the user cache path: + # locally available files might not be NFS-exported + mf = ManagedFile(filename, self.provider, detect_nfs=False) + mf.sync_to_resource() + mf.get_remote_path() + + # assuming /var/cache/labgrid is NFS-exported, return required information for mounting and + # file access + relative_file_path = os.path.join(mf.get_hash(), os.path.basename(mf.local_path)) + return NFSFile(self.provider.host, mf.get_user_cache_path(), relative_file_path) + @target_factory.reg_driver @attr.s(eq=False) From 1a66ea505127e3be5c5a816a051c45c01ce188f4 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 26 Jul 2023 14:00:14 +0200 Subject: [PATCH 054/384] resource: do not let (Remote)NFSProvider inherit from (Remote)BaseProvider Now that the NFSProviderDriver no longer relies on the `internal`/`external` attributes of the (Remote)NFSProvider resource, let the NFSProvider inherit from Resource directly, so specifying these obsolete attributes is no longer required. The same applies to the RemoteNFSProvider which now inherits from NetworkResource directly. Signed-off-by: Bastian Krause --- CHANGES.rst | 3 +++ doc/configuration.rst | 36 ++++++++++++++++++++++++++---------- labgrid/resource/provider.py | 6 ++++-- labgrid/resource/remote.py | 2 +- 4 files changed, 34 insertions(+), 13 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 58a6e473f..ca3e79eb2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -62,6 +62,9 @@ Breaking changes in 23.1 - The `NFSProviderDriver` now returns mount and path information on ``stage()`` instead of the path to be used on the target. The previous return value did not fit the NFS mount use case. +- The `NFSProvider` and `RemoteNFSProvider` resources no longer expect the + ``internal`` and ``external`` arguments as they do not fit the NFS mount use + case. Known issues in 23.1 ~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/configuration.rst b/doc/configuration.rst index bccc2265c..9d2635cb4 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1177,11 +1177,10 @@ For now, the TFTP/NFS/HTTP server needs to be configured before using it from labgrid. .. _TFTPProvider: -.. _NFSProvider: .. _HTTPProvider: -TFTPProvider / NFSProvider / HTTPProvider -+++++++++++++++++++++++++++++++++++++++++ +TFTPProvider / HTTPProvider ++++++++++++++++++++++++++++ .. code-block:: yaml @@ -1199,23 +1198,40 @@ Arguments: Used by: - `TFTPProviderDriver`_ - - `NFSProviderDriver`_ - `HTTPProviderDriver`_ +NFSProvider ++++++++++++ + +.. code-block:: yaml + + NFSProvider: {} + +Arguments: + - None + +Used by: + - `NFSProviderDriver`_ + .. _RemoteTFTPProvider: -.. _RemoteNFSProvider: ..
_RemoteHTTPProvider: -RemoteTFTPProvider / RemoteNFSProvider / RemoteHTTPProvider -+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ -These describe a `TFTPProvider`_, `NFSProvider`_ or `HTTPProvider`_ resource -available on a remote computer +RemoteTFTPProvider / RemoteHTTPProvider ++++++++++++++++++++++++++++++++++++++++ +These describe a `TFTPProvider`_ or `HTTPProvider`_ resource available on a +remote computer. Used by: - `TFTPProviderDriver`_ - - `NFSProviderDriver`_ - `HTTPProviderDriver`_ +RemoteNFSProvider ++++++++++++++++++ +An `NFSProvider`_ resource available on a remote computer. + +Used by: + - `NFSProviderDriver`_ + RemotePlace ~~~~~~~~~~~ A RemotePlace describes a set of resources attached to a labgrid remote place. diff --git a/labgrid/resource/provider.py b/labgrid/resource/provider.py index fb9a9153b..3b8f723ab 100644 --- a/labgrid/resource/provider.py +++ b/labgrid/resource/provider.py @@ -22,8 +22,10 @@ class TFTPProvider(BaseProvider): @target_factory.reg_resource @attr.s(eq=False) -class NFSProvider(BaseProvider): - pass +class NFSProvider(Resource): + def __attrs_post_init__(self): + self.host = "localhost" + super().__attrs_post_init__() @target_factory.reg_resource diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index b83d6c3cc..bca849bf0 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -403,7 +403,7 @@ class RemoteTFTPProvider(RemoteBaseProvider): @target_factory.reg_resource @attr.s(eq=False) -class RemoteNFSProvider(RemoteBaseProvider): +class RemoteNFSProvider(NetworkResource): pass From c3452735cfb8703534e1bbe6b19349c6bd06d8d4 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 26 Jul 2023 14:29:43 +0200 Subject: [PATCH 055/384] doc/configuration: document remote provider arguments and provide examples Signed-off-by: Bastian Krause --- doc/configuration.rst | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index 9d2635cb4..48576a589 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1221,6 +1221,23 @@ RemoteTFTPProvider / RemoteHTTPProvider These describe a `TFTPProvider`_ or `HTTPProvider`_ resource available on a remote computer. +.. code-block:: yaml + + RemoteTFTPProvider: + host: "tftphost" + internal: "/srv/tftp/board-23/" + external: "board-23/" + + RemoteHTTPProvider: + host: "httphost" + internal: "/srv/www/board-23/" + external: "http://192.168.1.1/board-23/" + +Arguments: + - host (str): hostname of the remote host + - internal (str): path prefix to the HTTP/TFTP root directory on ``host`` + - external (str): corresponding path prefix for use by the target + Used by: - `TFTPProviderDriver`_ - `HTTPProviderDriver`_ @@ -1229,6 +1246,14 @@ RemoteNFSProvider +++++++++++++++++ An `NFSProvider`_ resource available on a remote computer. +.. code-block:: yaml + + RemoteNFSProvider: + host: "nfshost" + +Arguments: + - host (str): hostname of the remote host + Used by: - `NFSProviderDriver`_ From 4228d2a8d7324f9b20ddf0753437665681e54701 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jul 2023 18:24:19 +0200 Subject: [PATCH 056/384] resource/udev: warn on initializing USBNetworkInterface with ifname Similar to USBSerialPort's `port` attribute, USBNetworkInterface's `ifname` attribute is overwritten by udev. Add a warning to make that clear.
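For illustration, such an interface would instead be selected via udev matching; a hypothetical example (the ID_PATH value is made up):

    USBNetworkInterface:
      match:
        ID_PATH: pci-0000:00:14.0-usb-0:2:1.0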
Signed-off-by: Bastian Krause --- labgrid/resource/udev.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index eaaed1a58..5e53f256c 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -346,6 +346,11 @@ class USBNetworkInterface(USBResource, NetworkInterface): def __attrs_post_init__(self): self.match['SUBSYSTEM'] = 'net' self.match['@SUBSYSTEM'] = 'usb' + if self.ifname: + warnings.warn( + "USBNetworkInterface: The ifname attribute will be overwritten by udev.\n" + "Please use udev matching as described in http://labgrid.readthedocs.io/en/latest/configuration.html#udev-matching" # pylint: disable=line-too-long + ) super().__attrs_post_init__() def update(self): From 57061b949a952151c4f468ee94cd25911bca21c8 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jul 2023 18:25:58 +0200 Subject: [PATCH 057/384] resource: import missing resources This allows users to import resources from labgrid.resource directly. Signed-off-by: Bastian Krause --- labgrid/resource/__init__.py | 36 +++++++++++++++++++++++++++++------- 1 file changed, 29 insertions(+), 7 deletions(-) diff --git a/labgrid/resource/__init__.py b/labgrid/resource/__init__.py index 77672b2a5..a51f011c9 100644 --- a/labgrid/resource/__init__.py +++ b/labgrid/resource/__init__.py @@ -1,17 +1,37 @@ -from .base import SerialPort, NetworkInterface, EthernetPort +from .base import SerialPort, NetworkInterface, EthernetPort, SysfsGPIO from .ethernetport import SNMPEthernetPort from .serialport import RawSerialPort, NetworkSerialPort from .modbus import ModbusTCPCoil from .modbusrtu import ModbusRTU from .networkservice import NetworkService from .onewireport import OneWirePIO -from .power import NetworkPowerPort +from .power import NetworkPowerPort, PDUDaemonPort from .remote import RemotePlace -from .udev import USBSerialPort -from .udev import USBSDMuxDevice -from .udev import USBSDWireDevice -from .udev import USBPowerPort -from .udev import SiSPMPowerPort +from .udev import ( + AlteraUSBBlaster, + AndroidUSBFastboot, + DFUDevice, + DeditecRelais8, + HIDRelay, + IMXUSBLoader, + LXAUSBMux, + MXSUSBLoader, + RKUSBLoader, + SiSPMPowerPort, + SigrokUSBDevice, + SigrokUSBSerialDevice, + USBAudioInput, + USBDebugger, + USBFlashableDevice, + USBMassStorage, + USBNetworkInterface, + USBPowerPort, + USBSDMuxDevice, + USBSDWireDevice, + USBSerialPort, + USBTMC, + USBVideo, +) from .common import Resource, ResourceManager, ManagedResource from .ykushpowerport import YKUSHPowerPort, NetworkYKUSHPowerPort from .xenamanager import XenaManager @@ -24,3 +44,5 @@ from .httpvideostream import HTTPVideoStream from .dediprogflasher import DediprogFlasher, NetworkDediprogFlasher from .httpdigitalout import HttpDigitalOutput +from .sigrok import SigrokDevice +from .fastboot import AndroidNetFastboot From 05234007d33eecbbc07ec85f4314707d9be03ecf Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 7 Aug 2023 12:14:59 +0200 Subject: [PATCH 058/384] dockerfiles/dut: fix --chown option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The docker docs contain [1] these COPY forms: COPY [--chown=<user>:<group>] [--chmod=<perms>] <src>... <dest> COPY [--chown=<user>:<group>] [--chmod=<perms>] ["<src>",... "<dest>"] Arguments within [ ] are optional. The square brackets are not meant to be part of the actual instruction. The docker docs [1] mention that "[e]ach <src> may contain wildcards and matching will be done using Go’s filepath.Match rules".
Go’s filepath.Match [2] interprets terms in square brackets as a character class. "[--chown=root:root]" must have been interpreted as a valid character class until recently. A couple of days ago, building the "dut" service started failing: Building dut #0 building with "default" instance using docker driver #1 [internal] load build definition from Dockerfile #1 transferring dockerfile: 924B done #1 DONE 0.0s #2 [internal] load .dockerignore #2 transferring context: 2B done #2 DONE 0.0s #3 [internal] load metadata for docker.io/library/debian:bookworm-slim #3 DONE 0.1s #4 [1/4] FROM docker.io/library/debian:bookworm-slim@sha256:89468107e4c2b9fdea2f15fc582bf92c25aa4296a661ca0202f7ea2f4fc3f48c #4 CACHED #5 [internal] load build context #5 transferring context: 56B done #5 ERROR: error from sender: invalid includepatterns: []: syntax error in pattern Fix this by dropping the accidental square brackets around the --chown option. [1] https://docs.docker.com/engine/reference/builder/#copy [2] https://pkg.go.dev/path/filepath#Match Signed-off-by: Bastian Krause --- dockerfiles/staging/dut/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/staging/dut/Dockerfile b/dockerfiles/staging/dut/Dockerfile index f2c26aba7..cd1a71dc9 100644 --- a/dockerfiles/staging/dut/Dockerfile +++ b/dockerfiles/staging/dut/Dockerfile @@ -15,7 +15,7 @@ RUN set -e ;\ sed 's@session\s*required\s*pam_loginuid.so@session optional pam_loginuid.so@g' -i /etc/pam.d/sshd # SSH login fix. Otherwise user is kicked off after login -COPY [--chown=root:root] ./authorized_keys /root/.ssh/authorized_keys +COPY --chown=root:root ./authorized_keys /root/.ssh/authorized_keys # As sshd scrubs ENV variables if they are set by the ENV varibale ensure to put the into /etc/profile as shown below ENV NOTVISIBLE "in users profile" From 3517a22a42448ef6aab61b5a50ea4043cf5ca7c6 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 8 Aug 2023 12:45:39 +0200 Subject: [PATCH 059/384] pyproject.toml: require Sphinx>=2.0.0 for doc feature readthedocs installs Python package dependencies like this: pip install --upgrade --no-cache-dir pip setuptools pip install --upgrade --no-cache-dir pillow mock==1.0.1 alabaster>=0.7,<0.8,!=0.7.5 commonmark==0.9.1 recommonmark==0.5.0 sphinx<2 sphinx-rtd-theme<0.5 readthedocs-sphinx-ext<2.3 jinja2<3.1.0 pip install --upgrade --upgrade-strategy only-if-needed --no-cache-dir .[doc] Until recently --upgrade-strategy was specified as `eager`, but now `only-if-needed` [1]. This results in Sphinx 1.8.6 being used. This becomes an issue since Sphinx is not required directly by labgrid, but via sphinx_rtd_theme which loosely requires `sphinx >=1.6,<8` [2]. 
This results in this readthedocs build error: Exception occurred: File "[...]/labgrid/venv-rtd-debugging/lib/python3.11/site-packages/sphinx/ext/autodoc/__init__.py", line 82, in members_option return [x.strip() for x in arg.split(',')] ^^^^^^^^^ AttributeError: 'bool' object has no attribute 'split' We could make this work with Sphinx<2.0.0 [3]: --- a/doc/conf.py +++ b/doc/conf.py @@ -179,7 +179,7 @@ texinfo_documents = [ autodoc_member_order = 'bysource' autodoc_default_options = { - 'special-members': True, + 'special-members': None, } autodoc_mock_imports = ['onewire', 'txaio', But even with this, other errors and several warnings occur: WARNING: Sphinx 1.x is deprecated with sphinx_rtd_theme, update to Sphinx 2.x or greater WARNING: 'html4_writer' is deprecated with sphinx_rtd_theme [...]/labgrid/venv-rtd-debugging/lib/python3.11/site-packages/sphinx/util/nodes.py:94: FutureWarning: The iterable returned by Node.traverse() will become an iterator instead of a list in Docutils > 0.16. for classifier in reversed(node.parent.traverse(nodes.classifier)): [...]/labgrid/doc/getting_started.rst:393: WARNING: Error in "code-block" directive: 1 argument(s) required, 0 supplied. Exception occurred: File "[...]/labgrid/venv-rtd-debugging/lib/python3.11/site-packages/sphinx/ext/napoleon/docstring.py", line 123, in __init__ elif isinstance(obj, collections.Callable): # type: ignore ^^^^^^^^^^^^^^^^^^^^ AttributeError: module 'collections' has no attribute 'Callable' It's not worth the effort to keep this backwards compatible, so simply require Sphinx>=2.0.0 in labgrid directly. [1] https://github.com/readthedocs/readthedocs.org/pull/10560 [2] https://github.com/readthedocs/sphinx_rtd_theme/blob/b5833585b25358be94918f13c50530e3e9237e7e/setup.cfg [3] https://github.com/sphinx-doc/sphinx/issues/5459 Signed-off-by: Bastian Krause --- pyproject.toml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 4b1981da3..577bce655 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -51,7 +51,10 @@ dynamic = ["version"] # via setuptools_scm "Bug Tracker" = "https://github.com/labgrid-project/labgrid/issues" [project.optional-dependencies] -doc = ["sphinx_rtd_theme>=1.0.0"] +doc = [ + "sphinx_rtd_theme>=1.0.0", + "Sphinx>=2.0.0", +] docker = ["docker>=5.0.2"] graph = ["graphviz>=0.17.0"] kasa = ["python-kasa>=0.4.0"] @@ -80,6 +83,7 @@ dev = [ # references to other optional dependency groups # labgrid[doc] "sphinx_rtd_theme>=1.0.0", + "Sphinx>=2.0.0", # labgrid[docker] "docker>=5.0.2", From 5eff5394630286cb514a2832b858136323485193 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Mon, 28 Aug 2023 08:27:53 -0600 Subject: [PATCH 060/384] shelldriver: Fix xmodem xmodem was failing because it would consume the trailing newline character between the `echo` for the marker and the command output.
Fix this by suppressing the newline when echoing the marker. Signed-off-by: Joshua Watt --- labgrid/driver/shelldriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py index 78953f54c..6689e2568 100644 --- a/labgrid/driver/shelldriver.py +++ b/labgrid/driver/shelldriver.py @@ -311,7 +311,7 @@ def _start_xmodem_transfer(self, cmd): """ marker = gen_marker() - marked_cmd = f"echo '{marker[:4]}''{marker[4:]}'; {cmd}" + marked_cmd = f"echo -n '{marker[:4]}''{marker[4:]}'; {cmd}" self.console.sendline(marked_cmd) self.console.expect(marker, timeout=30) From 9e4fdc65b5dd8c34d43f8fe416cdd29b46333e63 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 5 Sep 2023 13:47:33 +0200 Subject: [PATCH 061/384] remote/client: do not print deleted resource's class on monitor On delete, resource is an empty dictionary: ERROR labgrid.remote.: ClientSession.onUserError(): "While firing > subscribed under 8031289985454517." Traceback (most recent call last): File "/usr/ptx-venvs/labgrid/lib/python3.11/site-packages/txaio/aio.py", line 487, in done res = f.result() ^^^^^^^^^^ File "/usr/ptx-venvs/labgrid/lib/python3.11/site-packages/labgrid/remote/client.py", line 126, in on_resource_changed print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} deleted") ~~~~~~~~^^^^^^^ KeyError: 'cls' This can be reproduced easily by restarting an exporter while running `labgrid-client monitor`. We could publish the class, but that would only work for new exporters. For now, simply replace the class name with "???". Fixes: f9d4ccd7 ("remote/client: print full resource identifier on monitor") Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 3fe534e9a..6ef073942 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -123,7 +123,7 @@ async def on_resource_changed(self, exporter, group_name, resource_name, resourc for k, v_old, v_new in diff_dict(flat_dict(old), flat_dict(resource)): print(f" {k}: {v_old} -> {v_new}") else: - print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} deleted") + print(f"Resource {exporter}/{group_name}/???/{resource_name} deleted") async def on_place_changed(self, name, config): if not config: From f9ca0243660f01567ac33a654ef5c54aff3683c0 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Thu, 7 Sep 2023 08:36:05 -0600 Subject: [PATCH 062/384] ssh: Prevent timeout from deadlock Using Popen.wait() on a process that has output sent to a pipe can potentially deadlock if the process produces enough output to fill the pipe, since it will stall and never terminate waiting for the pipe to have more space. Instead, use Popen.communicate() with the timeout parameter. This will consume all output until EOF (preventing the process from stalling due to a full pipe), and then check the return code. In the event of a timeout error, Popen.communicate() doesn't lose any data, so it's safe to call it again after the Popen.kill() in the exception handler.
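For reference, a minimal standalone sketch of this pattern (the command is made up; this is not the actual labgrid code):

    import subprocess

    proc = subprocess.Popen(["sleep", "60"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    try:
        stdout, stderr = proc.communicate(timeout=5)
    except subprocess.TimeoutExpired:
        proc.kill()
        # no data is lost on timeout: a second communicate() call drains the
        # remaining output and reaps the killed process
        stdout, stderr = proc.communicate()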
This likely was done this way because the timeout parameter was new in Python 3.3, but this shouldn't be a concern anymore. Signed-off-by: Joshua Watt --- labgrid/util/ssh.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/labgrid/util/ssh.py b/labgrid/util/ssh.py index 0f51bbce8..8df205510 100644 --- a/labgrid/util/ssh.py +++ b/labgrid/util/ssh.py @@ -453,8 +453,8 @@ def _start_own_master(self): ) try: - if self._master.wait(timeout=connect_timeout) != 0: - stdout, stderr = self._master.communicate() + stdout, stderr = self._master.communicate(timeout=connect_timeout) + if self._master.returncode != 0: raise ExecutionError( f"failed to connect to {self.host} with args {args}, returncode={self._master.returncode} {stdout},{stderr}" # pylint: disable=line-too-long ) From b8f059fb4277e3ca01684b7a71ac89974ff1cc36 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Thu, 7 Sep 2023 08:36:05 -0600 Subject: [PATCH 063/384] sshdriver: Prevent timeout from deadlock Using Popen.wait() on a process that has output sent to a pipe can potentially deadlock if the process produces enough output to fill the pipe, since it will stall and never terminate waiting for the pipe to have more space. Instead, use Popen.communicate() with the timeout parameter. This will consume all output until EOF (preventing the process from stalling due to a full pipe), and then check the return code. In the event of a timeout error, Popen.communicate() doesn't lose any data, so it's safe to call it again after the Popen.kill() in the exception handler. This likely was done this way because the timeout parameter was new in Python 3.3, but this shouldn't be a concern anymore. Signed-off-by: Joshua Watt --- labgrid/driver/sshdriver.py | 11 ++++++----- tests/test_sshdriver.py | 4 ++++ 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index 3ad6fafa5..fb63988d4 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -136,9 +136,8 @@ def _start_own_master_once(self, timeout): try: subprocess_timeout = timeout + 5 - return_value = self.process.wait(timeout=subprocess_timeout) - if return_value != 0: - stdout, _ = self.process.communicate(timeout=subprocess_timeout) + stdout, _ = self.process.communicate(timeout=subprocess_timeout) + if self.process.returncode != 0: stdout = stdout.split(b"\n") for line in stdout: self.logger.warning("ssh: %s", line.rstrip().decode(encoding="utf-8", errors="replace")) @@ -155,12 +154,14 @@ def _start_own_master_once(self, timeout): pass raise ExecutionError( - f"Failed to connect to {self.networkservice.address} with {' '.join(args)}: return code {return_value}", # pylint: disable=line-too-long + f"Failed to connect to {self.networkservice.address} with {' '.join(args)}: return code {self.process.returncode}", # pylint: disable=line-too-long stdout=stdout, ) except subprocess.TimeoutExpired: + self.process.kill() + stdout, _ = self.process.communicate() raise ExecutionError( - f"Subprocess timed out [{subprocess_timeout}s] while executing {args}", + f"Subprocess timed out [{subprocess_timeout}s] while executing {args}: {stdout}", ) finally: if self.networkservice.password and os.path.exists(pass_file): diff --git a/tests/test_sshdriver.py b/tests/test_sshdriver.py index 875570822..0f766dfc7 100644 --- a/tests/test_sshdriver.py +++ b/tests/test_sshdriver.py @@ -17,6 +17,8 @@ def ssh_driver_mocked_and_activated(target, mocker): instance_mock = mocker.MagicMock() popen.return_value =
instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) + instance_mock.communicate = mocker.MagicMock(return_value=(b"", b"")) + instance_mock.returncode = 0 SSHDriver(target, "ssh") s = target.get_driver("SSHDriver") return s @@ -35,6 +37,8 @@ def test_create(target, mocker): instance_mock = mocker.MagicMock() popen.return_value = instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) + instance_mock.communicate = mocker.MagicMock(return_value=(b"", b"")) + instance_mock.returncode = 0 s = SSHDriver(target, "ssh") assert isinstance(s, SSHDriver) From 639136bbe36c731714de79dbb9e6bce7ff310368 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Tue, 12 Sep 2023 10:49:44 +0200 Subject: [PATCH 064/384] tests: test different resources having a default This came up in an IRC discussion where the reporter said that this raised some issues for him. The core library already supports this; add a test to ensure the support stays. Signed-off-by: Rouven Czerwinski --- tests/test_target.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/test_target.py b/tests/test_target.py index 71d416368..3afd66a17 100644 --- a/tests/test_target.py +++ b/tests/test_target.py @@ -41,6 +41,23 @@ class A(Resource): with pytest.raises(NoResourceFoundError) as excinfo: target.get_resource(A, name="nosuchresource") +def test_get_resource_multiple_with_default(target): + class A(Resource): + pass + + class B(Resource): + pass + + a = A(target, "aresource") + adef = A(target, "default") + b = B(target, "bresource") + bdef = B(target, "default") + + assert target.get_resource(A) is adef + assert target.get_resource(B) is bdef + assert target.get_resource(A, name="aresource") is a + assert target.get_resource(B, name="bresource") is b + def test_get_driver(target): class A(Driver): pass From 968ca0711864471c4854ad82d3047062e435d8cd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Sun, 17 Sep 2023 14:12:05 +0200 Subject: [PATCH 065/384] target: use short version of class name in exceptions labgrid's exceptions are already hard to understand, so at least use the short-hand version of the class name in the error messages. Example: NoDriverFoundError: multiple drivers matching <class 'labgrid.protocol.consoleprotocol.ConsoleProtocol'> found in Target(name='main', env=Environment(config_file='env.yaml')) with the same priorities ..becomes..
NoDriverFoundError: multiple drivers matching ConsoleProtocol found in Target(name='main', env=Environment(config_file='env.yaml')) with the same priorities Signed-off-by: Bastian Krause --- labgrid/target.py | 16 ++++++++--------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/labgrid/target.py b/labgrid/target.py index 6892890d6..9ae2b3170 100644 --- a/labgrid/target.py +++ b/labgrid/target.py @@ -140,15 +140,15 @@ def get_resource(self, cls, *, name=None, wait_avail=True): name_msg = f" named '{name}'" if name else "" if other_names: raise NoResourceFoundError( - f"no {cls} resource{name_msg} found in {self}, matching resources with other names: {other_names}" # pylint: disable=line-too-long + f"no {cls.__name__} resource{name_msg} found in {self}, matching resources with other names: {other_names}" # pylint: disable=line-too-long ) raise NoResourceFoundError( - f"no {cls} resource{name_msg} found in {self}" + f"no {cls.__name__} resource{name_msg} found in {self}" ) elif len(found) > 1: raise NoResourceFoundError( - f"multiple resources matching {cls} found in {self}", found=found + f"multiple resources matching {cls.__name__} found in {self}", found=found ) if wait_avail: self.await_resources(found) @@ -178,12 +178,12 @@ def _get_driver(self, cls, *, name=None, resource=None, activate=True, active=Fa if other_names: raise NoDriverFoundError( "no {active}{cls} driver{name} found in {target}, matching resources with other names: {other_names}".format( # pylint: disable=line-too-long - active="active " if active else "", cls=cls, name=name_msg, target=self, - other_names=other_names) + active="active " if active else "", cls=cls.__name__, name=name_msg, + target=self, other_names=other_names) ) raise NoDriverFoundError( - f"no {'active ' if active else ''}{cls} driver{name_msg} found in {self}" + f"no {'active ' if active else ''}{cls.__name__} driver{name_msg} found in {self}" ) elif len(found) > 1: prio_last = -255 @@ -202,7 +202,7 @@ def _get_driver(self, cls, *, name=None, resource=None, activate=True, active=Fa else: raise NoDriverFoundError( "multiple {active}drivers matching {cls} found in {target} with the same priorities".format( # pylint: disable=line-too-long - active="active " if active else "", cls=cls, target=self) + active="active " if active else "", cls=cls.__name__, target=self) ) if activate: self.activate(found[0]) @@ -276,7 +276,7 @@ def __getitem__(self, key): cls = target_factory.class_from_string(cls) if not issubclass(cls, (Driver, abc.ABC)): # all Protocols derive from ABC raise NoDriverFoundError( - f"invalid driver class {cls}" + f"invalid driver class {cls.__name__}" ) return self.get_active_driver(cls, name=name) From ca2b88d7a4c9cfafaf84067d77565e73f045584a Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Sun, 17 Sep 2023 14:54:21 +0200 Subject: [PATCH 066/384] target: mention driver causing NoSupplierFoundErrors NoSupplierFoundError and its subclasses are hard to understand if the driver/strategy causing it is not mentioned: NoDriverFoundError: multiple drivers matching ConsoleProtocol found in Target(name='main', env=Environment(config_file='env.yaml')) with the same priorities Improve this by raising a more detailed exception from the original exception with this information included: NoDriverFoundError: binding MyStrategy failed: multiple drivers matching ConsoleProtocol found in Target(name='main', env=Environment(config_file='env.yaml')) with the same priorities Signed-off-by: Bastian Krause --- labgrid/target.py | 7 +++++-- 1 file
changed, 5 insertions(+), 2 deletions(-) diff --git a/labgrid/target.py b/labgrid/target.py index 9ae2b3170..c5184da3c 100644 --- a/labgrid/target.py +++ b/labgrid/target.py @@ -370,13 +370,16 @@ def bind_driver(self, client): except NoSupplierFoundError as e: errors.append(e) if not suppliers: + client_name = client.name or client.__class__.__name__ if optional: supplier = None elif len(errors) == 1: - raise errors[0] + err = errors[0] + err_cls = type(err) + raise err_cls(f"binding {client_name} failed: {err}") from err else: raise NoSupplierFoundError( - f"no supplier matching {requirements} found in {self} (errors: {errors})" + f"binding {client_name} failed: no supplier matching {requirements} found in {self} (errors: {errors})" ) elif len(suppliers) > 1: raise NoSupplierFoundError(f"conflicting suppliers matching {requirements} found in target {self}") # pylint: disable=line-too-long From 282e230f1e9c1530e8a80eaa8bf3a125377f93de Mon Sep 17 00:00:00 2001 From: Zach Malinowski Date: Wed, 30 Aug 2023 08:50:26 -0500 Subject: [PATCH 067/384] sshdriver: Add Port Forwarding to Unix Sockets This commit adds a function that will forward a port on the local host to a unix socket on the target. Signed-off-by: Zach Malinowski --- labgrid/driver/sshdriver.py | 28 ++++++++++++++++++++++++++++ tests/test_sshdriver.py | 20 ++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index 3ad6fafa5..2407bc57c 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -310,6 +310,34 @@ def forward_remote_port(self, remoteport, localport): with self._forward(forward): yield + @Driver.check_active + @contextlib.contextmanager + def forward_unix_socket(self, unixsocket, localport=None): + """Forward a unix socket on the target to a local port + + A context manager that keeps a unix socket forwarded to a local port as + long as the context remains valid. 
A connection made to the + returned local port on localhost will be forwarded to the unix + socket on the target device. + + usage: + with ssh.forward_unix_socket("/run/docker.sock") as localport: + # Use localhost:localport here to connect to the socket on the + # target + + returns: + localport + """ + if not self._check_keepalive(): + raise ExecutionError("Keepalive no longer running") + + if localport is None: + localport = get_free_port() + + forward = f"-L{localport:d}:{unixsocket:s}" + with self._forward(forward): + yield localport + @Driver.check_active @step(args=['src', 'dst']) def scp(self, *, src, dst): diff --git a/tests/test_sshdriver.py b/tests/test_sshdriver.py index 875570822..81a5a603c 100644 --- a/tests/test_sshdriver.py +++ b/tests/test_sshdriver.py @@ -172,3 +172,23 @@ def test_local_remote_forward(ssh_localhost, tmpdir): send_socket.send(test_string.encode('utf-8')) assert client_socket.recv(16).decode("utf-8") == test_string + + +@pytest.mark.sshusername +def test_unix_socket_forward(ssh_localhost, tmpdir): + p = tmpdir.join("console.sock") + test_string = "Hello World" + + with ssh_localhost.forward_unix_socket(str(p)) as localport: + with socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) as server_socket: + with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as send_socket: + server_socket.bind(str(p)) + server_socket.listen(1) + + send_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + send_socket.connect(("127.0.0.1", localport)) + + client_socket, address = server_socket.accept() + send_socket.send(test_string.encode("utf-8")) + + assert client_socket.recv(16).decode("utf-8") == test_string From 6f08b96a384045f56a9b6d6b2b97f0fc6cad8a4a Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 18 Sep 2023 12:18:12 +0200 Subject: [PATCH 068/384] github: workflows: update GitHub actions to latest versions Signed-off-by: Bastian Krause --- .github/workflows/build-and-release.yml | 2 +- .github/workflows/docker.yml | 4 ++-- .github/workflows/reusable-unit-tests-docker.yml | 2 +- .github/workflows/reusable-unit-tests.yml | 2 +- 4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml index 2075bfea8..2edeb4cf0 100644 --- a/.github/workflows/build-and-release.yml +++ b/.github/workflows/build-and-release.yml @@ -7,7 +7,7 @@ jobs: runs-on: ubuntu-22.04 continue-on-error: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 0 - name: Set up Python diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 1a2e2dfbe..a5d76d59b 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -10,13 +10,13 @@ jobs: docker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install system dependencies run: | sudo apt install -yq python3-pip python3 -m pip install setuptools_scm - name: Login to DockerHub - uses: docker/login-action@v2 + uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} diff --git a/.github/workflows/reusable-unit-tests-docker.yml b/.github/workflows/reusable-unit-tests-docker.yml index 063438aeb..af1a7a499 100644 --- a/.github/workflows/reusable-unit-tests-docker.yml +++ b/.github/workflows/reusable-unit-tests-docker.yml @@ -11,7 +11,7 @@ jobs: docker: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{
inputs.branch }} - name: Install system dependencies diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 18251a3f9..c79d7318e 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-22.04 continue-on-error: false steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: ref: ${{ inputs.branch }} - name: Set up Python ${{ inputs.python-version }} From d08a35c524b5a10e7ce91946f2548ab823ad8d14 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Thu, 7 Sep 2023 12:26:56 -0600 Subject: [PATCH 069/384] sshdriver: Log keepalive output While it is not normally expected for the keepalive process to produce any output, it can be helpful for debugging purposes to log any output it may have produced. As such, try harder to get output when terminating the process, and also report any output after it exits Signed-off-by: Joshua Watt --- labgrid/driver/sshdriver.py | 18 +++++++++++++----- 1 file changed, 13 insertions(+), 5 deletions(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index cbb545950..db3ff55ab 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -527,7 +527,8 @@ def _start_keepalive(self): args, stdin=subprocess.PIPE, stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + stderr=subprocess.STDOUT, + encoding="utf-8", ) self.logger.debug('Started keepalive for %s', self.networkservice.address) @@ -540,12 +541,19 @@ def _stop_keepalive(self): self.logger.debug('Stopping keepalive for %s', self.networkservice.address) + stdout = None try: - self._keepalive.communicate(timeout=60) + stdout, _ = self._keepalive.communicate(timeout=60) except subprocess.TimeoutExpired: self._keepalive.kill() - - try: - self._keepalive.wait(timeout=60) + try: + # Try again to get output + stdout, _ = self._keepalive.communicate(timeout=60) + except subprocess.TimeoutExpired: + self.logger.warning("ssh keepalive for %s timed out during termination", self.networkservice.address) finally: self._keepalive = None + + if stdout: + for line in stdout.splitlines(): + self.logger.warning("Keepalive %s: %s", self.networkservice.address, line) From e6fa794b23f1f63a4d33f87d3043c5381ba38ba8 Mon Sep 17 00:00:00 2001 From: Larry Shen Date: Sat, 30 Sep 2023 18:50:06 +0800 Subject: [PATCH 070/384] docker: fix wrong state of example Signed-off-by: Larry Shen --- examples/docker/conftest.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/docker/conftest.py b/examples/docker/conftest.py index d4ec68ada..6c407bf4e 100644 --- a/examples/docker/conftest.py +++ b/examples/docker/conftest.py @@ -3,7 +3,7 @@ @pytest.fixture(scope='session') def command(target): strategy = target.get_driver('DockerStrategy') - strategy.transition("shell") + strategy.transition("accessible") shell = target.get_driver('CommandProtocol') return shell From 4e028f0e2f2c7681fc09a21843178685473817bc Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 24 Aug 2023 17:28:02 +0200 Subject: [PATCH 071/384] pyproject.toml: depend on pylint rather than pytest-pylint labgrid's tests have never been linted, so there is no need to install pytest-pylint. Install pylint instead to allow linting of the labgrid module. Require at least version 3.0.0 of pylint which will be required for Python 3.12 anyway [1]. 
[1] https://github.com/pylint-dev/pylint/releases/tag/v3.0.0 Signed-off-by: Bastian Krause --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c97a626f8..2e80ccf46 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -122,7 +122,7 @@ dev = [ "pytest-dependency>=0.5.1", "pytest-isort>=2.0.0", "pytest-mock>=3.6.1", - "pytest-pylint>=0.18.0", + "pylint>=3.0.0", ] [project.scripts] From f1822477178a0eca07fde14868af78a56f3cdb38 Mon Sep 17 00:00:00 2001 From: Enrico Jorns Date: Fri, 13 Oct 2023 08:31:07 +0200 Subject: [PATCH 072/384] doc/configuration: note that explicit_sftp_mode also applies to scp() Signed-off-by: Enrico Jorns --- doc/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 553418d03..5b65b9744 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1571,7 +1571,7 @@ Arguments: stdout, and an empty list as second element. - connection_timeout (float, default=30.0): timeout when trying to establish connection to target. - - explicit_sftp_mode (bool, default=False): if set to True, `put()` and `get()` will + - explicit_sftp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will explicitly use the SFTP protocol for file transfers instead of scp's default protocol UBootDriver From 2c062e3ac6bb55308ddc9b2bdd3602b09f50f3fe Mon Sep 17 00:00:00 2001 From: Enrico Jorns Date: Thu, 12 Oct 2023 13:09:18 +0200 Subject: [PATCH 073/384] driver/sshdriver: store OpenSSH version in cached property We will use the version multiple times and should not call ssh each time since we do not expect the ssh version to suddenly change while running labgrid. Signed-off-by: Enrico Jorns --- labgrid/driver/sshdriver.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index db3ff55ab..b1ede6e9f 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -9,6 +9,7 @@ import subprocess import tempfile import time +from functools import cached_property import attr @@ -432,10 +433,14 @@ def get_status(self): """The SSHDriver is always connected, return 1""" return 1 - def _scp_supports_explicit_sftp_mode(self): + @cached_property + def _ssh_version(self): version = subprocess.run(["ssh", "-V"], capture_output=True, text=True) version = re.match(r"^OpenSSH_(\d+)\.(\d+)", version.stderr) - major, minor = map(int, version.groups()) + return tuple(map(int, version.groups())) + + def _scp_supports_explicit_sftp_mode(self): + major, minor = self._ssh_version # OpenSSH >= 8.6 supports explicitly using the SFTP protocol via -s if major == 8 and minor >= 6: From fbd0c8b6b42722ebb28372449758c11d4af05754 Mon Sep 17 00:00:00 2001 From: Enrico Jorns Date: Thu, 12 Oct 2023 12:48:48 +0200 Subject: [PATCH 074/384] driver/sshdriver: support selecting scp mode explicitly OpenSSH >= 9.0 defaults to the SFTP protocol. However, some older targets may only have scp installed. OpenSSH allows explicitly selecting scp by providing the -O flag. As already done for the transition to SFTP, add a dedicated SSHDriver attribute named 'explicit_scp_mode' which defaults to 'False'.
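For illustration, enabling the new attribute in an environment config might look like this (a sketch based on the documented SSHDriver arguments; surrounding target configuration omitted):

    SSHDriver:
      explicit_scp_mode: true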
Signed-off-by: Enrico Jorns --- doc/configuration.rst | 2 ++ labgrid/driver/sshdriver.py | 15 +++++++++++++++ 2 files changed, 17 insertions(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index 5b65b9744..d6ba6f7e0 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1573,6 +1573,8 @@ Arguments: target. - explicit_sftp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will explicitly use the SFTP protocol for file transfers instead of scp's default protocol + - explicit_scp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will + explicitly use the SCP protocol for file transfers instead of scp's default protocol UBootDriver ~~~~~~~~~~~ diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index b1ede6e9f..deb58dc07 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -35,6 +35,7 @@ class SSHDriver(CommandMixin, Driver, CommandProtocol, FileTransferProtocol): stderr_merge = attr.ib(default=False, validator=attr.validators.instance_of(bool)) connection_timeout = attr.ib(default=float(get_ssh_connect_timeout()), validator=attr.validators.instance_of(float)) explicit_sftp_mode = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + explicit_scp_mode = attr.ib(default=False, validator=attr.validators.instance_of(bool)) def __attrs_post_init__(self): super().__attrs_post_init__() @@ -361,6 +362,8 @@ def scp(self, *, src, dst): if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): complete_cmd.insert(1, "-s") + if self.explicit_scp_mode and self._scp_supports_explicit_scp_mode(): + complete_cmd.insert(1, "-O") self.logger.info("Running command: %s", complete_cmd) sub = subprocess.Popen( @@ -450,6 +453,14 @@ def _scp_supports_explicit_sftp_mode(self): return False raise Exception(f"OpenSSH version {major}.{minor} does not support explicit SFTP mode") + def _scp_supports_explicit_scp_mode(self): + major, minor = self._ssh_version + + # OpenSSH >= 9.0 defaults to the SFTP protocol + if major >= 9: + return True + raise Exception(f"OpenSSH version {major}.{minor} does not support explicit SCP mode") + @Driver.check_active @step(args=['filename', 'remotepath']) def put(self, filename, remotepath=''): @@ -464,6 +475,8 @@ def put(self, filename, remotepath=''): if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): transfer_cmd.insert(1, "-s") + if self.explicit_scp_mode and self._scp_supports_explicit_scp_mode(): + transfer_cmd.insert(1, "-O") try: sub = subprocess.call( @@ -492,6 +505,8 @@ def get(self, filename, destination="."): if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): transfer_cmd.insert(1, "-s") + if self.explicit_scp_mode and self._scp_supports_explicit_scp_mode(): + transfer_cmd.insert(1, "-O") try: sub = subprocess.call( From 6df298f79a8346cf25e2b6c2322eebd3dfd36ae3 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 13 Oct 2023 15:25:36 +0200 Subject: [PATCH 075/384] client: fix console --loop on microcom error In certain situations, e.g. if the serial port vanishes (as with on-board FTDI chips on power off), microcom returns with 1 and: Got EOF from port connection lost The loop should still continue, so do not raise an exception in this case. This uncovers another problem: if the corresponding place is released, ser2net is stopped from the exporter and the port is set to None. Check for this after updating the resource to prevent a crude AssertionError later due to an unavailable port.
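For context, the affected loop mode is invoked like this (place name hypothetical):

    labgrid-client -p example-place console --loop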
Fixes: cdebb1cc ("remote/client: return exit code for ssh/scp/rsync/telnet/video/audio/console") Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 6ef073942..4f97502c8 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -813,6 +813,10 @@ async def _console(self, place, target, timeout, *, logfile=None, loop=False, li # use zero timeout to prevent blocking sleeps target.await_resources([resource], timeout=0.0) + if not place.acquired: + print("place released") + return 255 + host, port = proxymanager.get_host_and_port(resource) # check for valid resources @@ -856,11 +860,14 @@ async def console(self, place, target): while True: res = await self._console(place, target, 10.0, logfile=self.args.logfile, loop=self.args.loop, listen_only=self.args.listenonly) - if res: - exc = InteractiveCommandError("microcom error") - exc.exitcode = res - raise exc + # place released + if res == 255: + break if not self.args.loop: + if res: + exc = InteractiveCommandError("microcom error") + exc.exitcode = res + raise exc break await asyncio.sleep(1.0) console.needs_target = True From 8aef83241a85c2e98288f0c6f0e1e235d8790b65 Mon Sep 17 00:00:00 2001 From: Andreas Martinsson Date: Thu, 19 Oct 2023 16:30:55 +0200 Subject: [PATCH 076/384] resources: udev: add additional SEGGER J-Link model IDs to USBDebugger resource Signed-off-by: Andreas Martinsson [bst: added prefix to commit message] Signed-off-by: Bastian Krause --- labgrid/resource/udev.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index 5e53f256c..819f5cbba 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -703,7 +703,9 @@ def filter_match(self, device): ("15ba", "0004"), # Olimex ARM-USB-TINY ("15ba", "002a"), # Olimex ARM-USB-TINY-H ("1366", "0101"), # SEGGER J-Link PLUS + ("1366", "0105"), # SEGGER J-Link ("1366", "1015"), # SEGGER J-Link + ("1366", "1051"), # SEGGER J-Link ]: return False From 79963616b31359dc39cc40d6441e722846fe545d Mon Sep 17 00:00:00 2001 From: Alexander Merkle Date: Tue, 17 Oct 2023 14:27:39 +0200 Subject: [PATCH 077/384] dockerfiles: fix has_podman routine Signed-off-by: Alexander Merkle [bst: dropped comment about unused routine from commit message] Signed-off-by: Bastian Krause --- dockerfiles/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/build.sh b/dockerfiles/build.sh index 3ab140f22..501667a17 100755 --- a/dockerfiles/build.sh +++ b/dockerfiles/build.sh @@ -22,7 +22,7 @@ has_docker() { } has_podman() { - command -v podman /dev/null 2>&1 + command -v podman >/dev/null 2>&1 } has_buildx() { From d425be47fccba0a5d5b75bd7c73d016b69aae13d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 20 Oct 2023 17:15:28 +0200 Subject: [PATCH 078/384] Revert "sshdriver: Prevent timeout from deadlock" This reverts commit b8f059fb4277e3ca01684b7a71ac89974ff1cc36. OpenSSH < 8.5 called with -f waits for the stderr pipe to be closed before forking. [1] fixes this behavior. The labgrid commit to be reverted calls communicate(), relying on a timely fork. Affected OpenSSH versions now run into a TimeoutExpired exception on communicate() leading to an ExecutionError being raised in labgrid. A wait() with timeout was used since the initial implementation [2]. 
We wanted to make sure that we don't depend on the state of the pipes, so use of wait() was intentional, as it directly covers the interesting cases: - immediate abort (due to a config error or similar) - normal startup (parent process exits after fork) - hang (via wait() timeout) Reverting the problematic commit avoids the complexity of having to maintain two different ways to start SSH as suggested in [3]. If timeouts still occur after the revert, we should implement this suggestion [4]. Discussion that led to this patch can be found in the PR introducing the problematic commit [5] as well as in [3]. The other commit [6] in the PR [5] has a similar approach for `labgrid.util.SSHConnection._start_own_master()`. It shouldn't be problematic though: The ssh process is not expected to fork as it is not called with -f. [1] OpenSSH: 396d32f3a ("upstream: There are lots of place where we want to redirect stdin,") [2] e4862fa4 ("SSHDriver: ControlMaster & Driver") [3] https://github.com/labgrid-project/labgrid/pull/1278 [4] https://github.com/labgrid-project/labgrid/pull/1265#issuecomment-1761670396 [5] https://github.com/labgrid-project/labgrid/pull/1265 [6] f9ca0243 ("ssh: Prevent timeout from deadlock") Signed-off-by: Bastian Krause --- labgrid/driver/sshdriver.py | 11 +++++------ tests/test_sshdriver.py | 4 ---- 2 files changed, 5 insertions(+), 10 deletions(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index deb58dc07..68e9c725a 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -138,8 +138,9 @@ def _start_own_master_once(self, timeout): try: subprocess_timeout = timeout + 5 - stdout, _ = self.process.communicate(timeout=subprocess_timeout) - if self.process.returncode != 0: + return_value = self.process.wait(timeout=subprocess_timeout) + if return_value != 0: + stdout, _ = self.process.communicate(timeout=subprocess_timeout) stdout = stdout.split(b"\n") for line in stdout: self.logger.warning("ssh: %s", line.rstrip().decode(encoding="utf-8", errors="replace")) @@ -156,14 +157,12 @@ def _start_own_master_once(self, timeout): pass raise ExecutionError( - f"Failed to connect to {self.networkservice.address} with {' '.join(args)}: return code {self.process.returncode}", # pylint: disable=line-too-long + f"Failed to connect to {self.networkservice.address} with {' '.join(args)}: return code {return_value}", # pylint: disable=line-too-long stdout=stdout, ) except subprocess.TimeoutExpired: - self.process.kill() - stdout, _ = self.process.communicate() raise ExecutionError( - f"Subprocess timed out [{subprocess_timeout}s] while executing {args}: {stdout}", + f"Subprocess timed out [{subprocess_timeout}s] while executing {args}", ) finally: if self.networkservice.password and os.path.exists(pass_file): diff --git a/tests/test_sshdriver.py b/tests/test_sshdriver.py index 1eb5518f1..81a5a603c 100644 --- a/tests/test_sshdriver.py +++ b/tests/test_sshdriver.py @@ -17,8 +17,6 @@ def ssh_driver_mocked_and_activated(target, mocker): instance_mock = mocker.MagicMock() popen.return_value = instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) - instance_mock.communicate = mocker.MagicMock(return_value=(b"", b"")) - instance_mock.returncode = 0 SSHDriver(target, "ssh") s = target.get_driver("SSHDriver") return s @@ -37,8 +35,6 @@ def test_create(target, mocker): instance_mock = mocker.MagicMock() popen.return_value = instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) - instance_mock.communicate =
mocker.MagicMock(return_value=(b"", b"")) - instance_mock.returncode = 0 s = SSHDriver(target, "ssh") assert isinstance(s, SSHDriver) From 1b9333964f3e25c510e17b94d887735432587996 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:16:20 +0200 Subject: [PATCH 079/384] driver/common: add logger to Driver base class Set the logger name to "<driver class>(<target name>)", with ":<driver name>" appended for named drivers. This should give us consistent logger naming. Signed-off-by: Bastian Krause --- labgrid/driver/common.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/labgrid/driver/common.py b/labgrid/driver/common.py index 4386cad9d..2adf3f893 100644 --- a/labgrid/driver/common.py +++ b/labgrid/driver/common.py @@ -1,3 +1,4 @@ +import logging import subprocess import attr @@ -25,6 +26,11 @@ def __attrs_post_init__(self): if self.target is None: raise BindingError("Drivers can only be created on a valid target") + logger_name = f"{self.__class__.__name__}({self.target.name})" + if self.name: + logger_name += f":{self.name}" + self.logger = logging.getLogger(logger_name) + def get_priority(self, protocol): """Retrieve the priority for a given protocol From a387ffefe41c1f42c5dcd54c0345b3648235dfaf Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:15:38 +0200 Subject: [PATCH 080/384] driver/bareboxdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/bareboxdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/bareboxdriver.py b/labgrid/driver/bareboxdriver.py index 910f25300..0aeb19d67 100644 --- a/labgrid/driver/bareboxdriver.py +++ b/labgrid/driver/bareboxdriver.py @@ -1,4 +1,3 @@ -import logging import shlex import attr @@ -40,7 +39,6 @@ class BareboxDriver(CommandMixin, Driver, CommandProtocol, LinuxBootProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") self._status = 0 # barebox' default log level, used as fallback if no log level can be saved self.saved_log_level = 7 From 0f4136fc6aec26809e1a277e1fdeae1454e33338 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:16:34 +0200 Subject: [PATCH 081/384] driver/dediprogflashdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/dediprogflashdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/dediprogflashdriver.py b/labgrid/driver/dediprogflashdriver.py index b2f091885..eb2de7d7c 100644 --- a/labgrid/driver/dediprogflashdriver.py +++ b/labgrid/driver/dediprogflashdriver.py @@ -1,5 +1,4 @@ import os.path -import logging import attr from ..resource import NetworkDediprogFlasher @@ -24,7 +23,6 @@ class DediprogFlashDriver(Driver): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f'{self}') if self.target.env: self.tool = self.target.env.config.get_tool('dpcmd') else: From aab15363b3eb4c4355e863c1779bdbfdc00c2bfd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:16:49 +0200 Subject: [PATCH 082/384] driver/dockerdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/dockerdriver.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/labgrid/driver/dockerdriver.py b/labgrid/driver/dockerdriver.py index b88f9df97..49e45f021 100644 --- a/labgrid/driver/dockerdriver.py +++ b/labgrid/driver/dockerdriver.py @@ -1,9 +1,6 @@ """ Class for connecting to a docker daemon running on the host machine.
""" - -import logging - import attr from labgrid.factory import target_factory @@ -62,7 +59,6 @@ class DockerDriver(PowerProtocol, Driver): attr.validators.instance_of(list))) def __attrs_post_init__(self): - self.logger = logging.getLogger(f"{self}({self.target})") super().__attrs_post_init__() self._client = None self._container = None From 6a21f770b0ae967c9f48ac1bf0b0283cc912d71d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:17:01 +0200 Subject: [PATCH 083/384] driver/externalconsoledriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/externalconsoledriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/externalconsoledriver.py b/labgrid/driver/externalconsoledriver.py index 6ba18b669..2ab16ada5 100644 --- a/labgrid/driver/externalconsoledriver.py +++ b/labgrid/driver/externalconsoledriver.py @@ -1,5 +1,4 @@ import fcntl -import logging import os import select import shlex @@ -25,7 +24,6 @@ class ExternalConsoleDriver(ConsoleExpectMixin, Driver, ConsoleProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}({self.target})") self.status = 0 self._child = None From 8ba83450d9cbbf175bd89bb56847d34af6dbd70e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:17:13 +0200 Subject: [PATCH 084/384] driver/fake: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/fake.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/fake.py b/labgrid/driver/fake.py index cf1a6598d..45eb1cb61 100644 --- a/labgrid/driver/fake.py +++ b/labgrid/driver/fake.py @@ -1,5 +1,4 @@ # pylint: disable=arguments-differ -import logging import re import attr @@ -18,7 +17,6 @@ class FakeConsoleDriver(ConsoleExpectMixin, Driver, ConsoleProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}({self.target})") self.rxq = [] self.txq = [] From e422edaa5913908757f65b17b8cea2dfa4619e73 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:17:35 +0200 Subject: [PATCH 085/384] driver/flashromdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/flashromdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/flashromdriver.py b/labgrid/driver/flashromdriver.py index 1f4c9dbab..452b74686 100644 --- a/labgrid/driver/flashromdriver.py +++ b/labgrid/driver/flashromdriver.py @@ -1,5 +1,4 @@ import os.path -import logging import attr from ..resource import NetworkFlashrom @@ -24,7 +23,6 @@ class FlashromDriver(Driver, BootstrapProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f'{self}') if self.target.env: self.tool = self.target.env.config.get_tool('flashrom') else: From 36a19d842dff3abae843b7baa955a1c01ff2f5eb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:17:49 +0200 Subject: [PATCH 086/384] driver/flashscriptdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/flashscriptdriver.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/labgrid/driver/flashscriptdriver.py b/labgrid/driver/flashscriptdriver.py index f2f8f4834..bf2941d86 100644 --- a/labgrid/driver/flashscriptdriver.py +++ b/labgrid/driver/flashscriptdriver.py @@ -1,4 +1,3 @@ -import logging import attr from ..factory import target_factory @@ -27,10 +26,6 @@ class FlashScriptDriver(Driver): 
validator=attr.validators.optional(attr.validators.instance_of(list)), ) - def __attrs_post_init__(self): - super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") - def on_activate(self): pass From 15195fc96a6fe500293e41030b2d76bd77492353 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:18:07 +0200 Subject: [PATCH 087/384] driver/openocddriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/openocddriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/openocddriver.py b/labgrid/driver/openocddriver.py index 566667764..4ce4cf3ca 100644 --- a/labgrid/driver/openocddriver.py +++ b/labgrid/driver/openocddriver.py @@ -1,4 +1,3 @@ -import logging from itertools import chain import attr @@ -47,7 +46,6 @@ class OpenOCDDriver(Driver, BootstrapProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") # FIXME make sure we always have an environment or config if self.target.env: From 42aa9c2ac0e6fa4f5f9edd344ac239e53664929d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:18:19 +0200 Subject: [PATCH 088/384] driver/qemudriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/qemudriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py index 6d7daf02b..ecb47c200 100644 --- a/labgrid/driver/qemudriver.py +++ b/labgrid/driver/qemudriver.py @@ -1,6 +1,5 @@ """The QEMUDriver implements a driver to use a QEMU target""" import atexit -import logging import select import shlex import shutil @@ -89,7 +88,6 @@ class QEMUDriver(ConsoleExpectMixin, Driver, PowerProtocol, ConsoleProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:") self.status = 0 self.txdelay = None self._child = None From 4667eed07fff7e9236fc670a12461c5442017609 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:18:33 +0200 Subject: [PATCH 089/384] driver/quartushpsdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/quartushpsdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/quartushpsdriver.py b/labgrid/driver/quartushpsdriver.py index 2c15793eb..d8cc0ab2e 100644 --- a/labgrid/driver/quartushpsdriver.py +++ b/labgrid/driver/quartushpsdriver.py @@ -1,7 +1,6 @@ import subprocess import re import time -import logging import attr @@ -28,7 +27,6 @@ class QuartusHPSDriver(Driver): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}({self.target})") # FIXME make sure we always have an environment or config if self.target.env: From c776c6122cc24aa670e52698015bfc250c8a953d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:18:44 +0200 Subject: [PATCH 090/384] driver/serialdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/serialdriver.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/labgrid/driver/serialdriver.py b/labgrid/driver/serialdriver.py index 1c172c8f3..884522622 100644 --- a/labgrid/driver/serialdriver.py +++ b/labgrid/driver/serialdriver.py @@ -1,5 +1,3 @@ -import logging - import attr from pexpect import TIMEOUT import serial @@ -26,7 +24,6 @@ class SerialDriver(ConsoleExpectMixin, Driver, ConsoleProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - 
self.logger = logging.getLogger(f"{self}({self.target})") if isinstance(self.port, SerialPort): self.serial = serial.Serial() else: From f2aba57c2d3ffa8fb05e1d4681bf61ff49d2295f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:18:56 +0200 Subject: [PATCH 091/384] driver/shelldriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/shelldriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py index 6689e2568..682d7189b 100644 --- a/labgrid/driver/shelldriver.py +++ b/labgrid/driver/shelldriver.py @@ -2,7 +2,6 @@ """The ShellDriver provides the CommandProtocol, ConsoleProtocol and InfoProtocol on top of a SerialPort.""" import io -import logging import re import shlex import ipaddress @@ -58,7 +57,6 @@ class ShellDriver(CommandMixin, Driver, CommandProtocol, FileTransferProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") self._status = 0 self._xmodem_cached_rx_cmd = "" From 1b99825bf206a917dc03721e71af723f8fc122fd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:19:29 +0200 Subject: [PATCH 092/384] driver/sigrokdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/sigrokdriver.py | 26 ++++++++++++-------------- 1 file changed, 12 insertions(+), 14 deletions(-) diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py index 9ee762531..a9bc61f1c 100644 --- a/labgrid/driver/sigrokdriver.py +++ b/labgrid/driver/sigrokdriver.py @@ -1,4 +1,3 @@ -import logging import os.path import re import subprocess @@ -37,7 +36,6 @@ def __attrs_post_init__(self): ) or 'sigrok-cli' else: self.tool = 'sigrok-cli' - self.log = logging.getLogger("SigrokDriver") self._running = False def _create_tmpdir(self): @@ -46,26 +44,26 @@ def _create_tmpdir(self): command = self.sigrok.command_prefix + [ 'mkdir', '-p', self._tmpdir ] - self.log.debug("Tmpdir command: %s", command) + self.logger.debug("Tmpdir command: %s", command) subprocess.call( command, stdin=subprocess.DEVNULL, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL ) - self.log.debug("Created tmpdir: %s", self._tmpdir) + self.logger.debug("Created tmpdir: %s", self._tmpdir) self._local_tmpdir = tempfile.mkdtemp(prefix="labgrid-sigrok-") - self.log.debug("Created local tmpdir: %s", self._local_tmpdir) + self.logger.debug("Created local tmpdir: %s", self._local_tmpdir) else: self._tmpdir = tempfile.mkdtemp(prefix="labgrid-sigrok-") - self.log.debug("created tmpdir: %s", self._tmpdir) + self.logger.debug("created tmpdir: %s", self._tmpdir) def _delete_tmpdir(self): if isinstance(self.sigrok, NetworkSigrokUSBDevice): command = self.sigrok.command_prefix + [ 'rm', '-r', self._tmpdir ] - self.log.debug("Tmpdir command: %s", command) + self.logger.debug("Tmpdir command: %s", command) subprocess.call( command, stdin=subprocess.DEVNULL, @@ -98,7 +96,7 @@ def _get_sigrok_prefix(self): @step(title='call', args=['args']) def _call_with_driver(self, *args): combined = self._get_sigrok_prefix() + list(args) - self.log.debug("Combined command: %s", " ".join(combined)) + self.logger.debug("Combined command: %s", " ".join(combined)) self._process = subprocess.Popen( combined, stdout=subprocess.PIPE, @@ -113,7 +111,7 @@ def _call(self, *args): if self.sigrok.channels: combined += ["-C", self.sigrok.channels] combined += list(args) - self.log.debug("Combined command: %s", combined) + 
self.logger.debug("Combined command: %s", combined) self._process = subprocess.Popen( combined, stdout=subprocess.PIPE, @@ -138,7 +136,7 @@ class SigrokDriver(SigrokCommon): def capture(self, filename, samplerate="200k"): self._filename = filename self._basename = os.path.basename(self._filename) - self.log.debug( + self.logger.debug( "Saving to: %s with basename: %s", self._filename, self._basename ) cmd = [ @@ -166,7 +164,7 @@ def stop(self): self._process.send_signal(signal.SIGINT) stdout, stderr = self._process.communicate() self._process.wait() - self.log.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) + self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) # Convert from .sr to .csv cmd = [ @@ -177,7 +175,7 @@ def stop(self): self._call(*cmd) self._process.wait() stdout, stderr = self._process.communicate() - self.log.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) + self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) if isinstance(self.sigrok, NetworkSigrokUSBDevice): subprocess.call([ 'scp', f'{self.sigrok.host}:{os.path.join(self._tmpdir, self._basename)}', @@ -411,7 +409,7 @@ def stop(self): time.sleep(0.1) else: # process did not finish in time - self.log.info("sigrok-cli did not finish in time, increase timeout?") + self.logger.info("sigrok-cli did not finish in time, increase timeout?") self._process.kill() res = [] @@ -428,7 +426,7 @@ def stop(self): # all other lines are actual values res.append(float(line)) _, stderr = self._process.communicate() - self.log.debug("stderr: %s", stderr) + self.logger.debug("stderr: %s", stderr) self._running = False return unit, res From 2d5fb2d7e96d11b99c8a7b3242f8b8384e2bda61 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:19:42 +0200 Subject: [PATCH 093/384] driver/sshdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/sshdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index 68e9c725a..ee07e6a13 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -1,6 +1,5 @@ """The SSHDriver uses SSH as a transport to implement CommandProtocol and FileTransferProtocol""" import contextlib -import logging import os import re import stat @@ -39,7 +38,6 @@ class SSHDriver(CommandMixin, Driver, CommandProtocol, FileTransferProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}({self.target})") self._keepalive = None def on_activate(self): From 0e34328b59d6c2ae1f146138bb94324b6abd80e8 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:19:53 +0200 Subject: [PATCH 094/384] driver/ubootdriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/ubootdriver.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/labgrid/driver/ubootdriver.py b/labgrid/driver/ubootdriver.py index d721f598c..cc1cd0d4b 100644 --- a/labgrid/driver/ubootdriver.py +++ b/labgrid/driver/ubootdriver.py @@ -1,6 +1,4 @@ """The U-Boot Module contains the UBootDriver""" -import logging - import attr from pexpect import TIMEOUT @@ -48,7 +46,6 @@ class UBootDriver(CommandMixin, Driver, CommandProtocol, LinuxBootProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") self._status = 0 if self.boot_expression: From 932b9918d54953139d619f5078c550dc1ebc3cff Mon Sep 17 00:00:00 2001 From: 
Bastian Krause Date: Fri, 28 Jul 2023 13:20:05 +0200 Subject: [PATCH 095/384] driver/usbaudiodriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/usbaudiodriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/usbaudiodriver.py b/labgrid/driver/usbaudiodriver.py index 01b0f926e..997317668 100644 --- a/labgrid/driver/usbaudiodriver.py +++ b/labgrid/driver/usbaudiodriver.py @@ -1,4 +1,3 @@ -import logging import subprocess import attr @@ -28,7 +27,6 @@ class USBAudioInputDriver(Driver): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}") self._prepared = False def _get_pipeline(self): From 57e5e08765d02d72e20eca8a728c5abc64ee22eb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:20:21 +0200 Subject: [PATCH 096/384] driver/usbstoragedriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/usbstoragedriver.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/labgrid/driver/usbstoragedriver.py b/labgrid/driver/usbstoragedriver.py index 902fd343a..5bc42856a 100644 --- a/labgrid/driver/usbstoragedriver.py +++ b/labgrid/driver/usbstoragedriver.py @@ -1,5 +1,4 @@ import enum -import logging import os import time import subprocess @@ -42,10 +41,6 @@ class USBStorageDriver(Driver): validator=attr.validators.optional(attr.validators.instance_of(str)) ) - def __attrs_post_init__(self): - super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}:{self.target}") - def on_activate(self): pass From e41f684317c56d41b8ef5b9dfe0dbee3986ea3b7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:20:32 +0200 Subject: [PATCH 097/384] driver/usbvideodriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/usbvideodriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/usbvideodriver.py b/labgrid/driver/usbvideodriver.py index b87df4267..67cf6f87f 100644 --- a/labgrid/driver/usbvideodriver.py +++ b/labgrid/driver/usbvideodriver.py @@ -1,4 +1,3 @@ -import logging import subprocess import attr @@ -18,7 +17,6 @@ class USBVideoDriver(Driver, VideoProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}") self._prepared = False def get_qualities(self): From 7edac8344cb6d58ebb73ef48e66b9deb96c33198 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 13:20:47 +0200 Subject: [PATCH 098/384] driver/xenadriver: use logger from Driver base class Signed-off-by: Bastian Krause --- labgrid/driver/xenadriver.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/labgrid/driver/xenadriver.py b/labgrid/driver/xenadriver.py index c3993f983..e4139da89 100644 --- a/labgrid/driver/xenadriver.py +++ b/labgrid/driver/xenadriver.py @@ -1,4 +1,3 @@ -import logging from importlib import import_module import attr @@ -17,11 +16,10 @@ def __attrs_post_init__(self): super().__attrs_post_init__() self._xena_app = import_module('xenavalkyrie.xena_app') self._tgn_utils = import_module('trafficgenerator.tgn_utils') - self._logger = logging.getLogger(f"{self}") self._xm = None def on_activate(self): - self._xm = self._xena_app.init_xena(self._tgn_utils.ApiType.socket, self._logger, 'labgrid') + self._xm = self._xena_app.init_xena(self._tgn_utils.ApiType.socket, self.logger, 'labgrid') self._xm.session.add_chassis(self.xena_manager.hostname) def on_deactivate(self): From 
96bbe6128a9e9ca337219af294f54f2d3dde771d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:23:22 +0200 Subject: [PATCH 099/384] resource/common: add logger to Resource/ResourceManager Set the logger names to "<class name>[(<target name>)][:<name>]". This should give us consistent logger naming. Signed-off-by: Bastian Krause --- labgrid/resource/common.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/labgrid/resource/common.py b/labgrid/resource/common.py index 1314a9e8c..d2b190750 100644 --- a/labgrid/resource/common.py +++ b/labgrid/resource/common.py @@ -1,3 +1,4 @@ +import logging import shlex from typing import Dict, Type, List import attr @@ -26,6 +27,13 @@ def __attrs_post_init__(self): super().__attrs_post_init__() self._parent = None + logger_name = self.__class__.__name__ + if self.target: + logger_name += f"({self.target.name})" + if self.name: + logger_name += f":{self.name}" + self.logger = logging.getLogger(logger_name) + @property def command_prefix(self): return [] @@ -118,6 +126,7 @@ def get(cls) -> 'ResourceManager': def __attrs_post_init__(self): self.resources: List[ManagedResource] = [] + self.logger = logging.getLogger(str(self)) def _add_resource(self, resource: 'ManagedResource'): self.resources.append(resource) From 7600343626ac003fe36a86b9c977aabb22b3ad4d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:24:21 +0200 Subject: [PATCH 100/384] resource/docker: use logger from Resource(Manager) Signed-off-by: Bastian Krause --- labgrid/resource/docker.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/labgrid/resource/docker.py b/labgrid/resource/docker.py index c6f37294a..626e4f803 100644 --- a/labgrid/resource/docker.py +++ b/labgrid/resource/docker.py @@ -4,7 +4,6 @@ in the specification (e.g. yaml) of DockerDriver. """ -import logging import socket import attr @@ -34,7 +33,6 @@ class DockerManager(ResourceManager): def __attrs_post_init__(self): super().__attrs_post_init__() - self.log = logging.getLogger('DockerManager') self._client = dict() self._docker_daemons_cleaned = list() @@ -71,7 +69,7 @@ def _container_cleanup(self, docker_client): for container in container_list: if (container['Labels'][DockerConstants.DOCKER_LG_CLEANUP_LABEL] == DockerConstants.DOCKER_LG_CLEANUP_TYPE_AUTO): - self.log.info("Deleting container %s", container['Names'][0]) + self.logger.info("Deleting container %s", container['Names'][0]) docker_client.api.remove_container(container['Id'], force=True) self._docker_daemons_cleaned.append(docker_client.api.base_url) @@ -90,7 +88,6 @@ class DockerDaemon(ManagedResource): def __attrs_post_init__(self): super().__attrs_post_init__() self._nw_services = dict() - self.log = logging.getLogger('DockerContainer') self.timeout = 5.0 self.avail = True @@ -132,7 +129,7 @@ class and the resource is identified as available.
if nw_service.address == "": container = docker_client.api.containers( filters={"name": "/" + container_name}) - self.log.debug("Containers found %s", container) + self.logger.debug("Containers found %s", container) if container: nw_service.address = find_dict( d=container[0]['NetworkSettings'], From 71c5589c773704f2307f28b10c28b6a7c79ac3f7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:25:27 +0200 Subject: [PATCH 101/384] resource/ethernetport: use logger from ResourceManager Signed-off-by: Bastian Krause --- labgrid/resource/ethernetport.py | 1 - 1 file changed, 1 deletion(-) diff --git a/labgrid/resource/ethernetport.py b/labgrid/resource/ethernetport.py index d66fcd799..0020f26ba 100644 --- a/labgrid/resource/ethernetport.py +++ b/labgrid/resource/ethernetport.py @@ -183,7 +183,6 @@ class EthernetPortManager(ResourceManager): """The EthernetPortManager periodically polls the switch for new updates.""" def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}") self.loop = None self.poll_tasks = [] self.switches = {} From be3cc7e99379d5f69c22cc1727bdc95b4ed0acce Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:25:56 +0200 Subject: [PATCH 102/384] resource/lxaiobus: use logger from ResourceManager Signed-off-by: Bastian Krause --- labgrid/resource/lxaiobus.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/labgrid/resource/lxaiobus.py b/labgrid/resource/lxaiobus.py index 4367e5927..1645346eb 100644 --- a/labgrid/resource/lxaiobus.py +++ b/labgrid/resource/lxaiobus.py @@ -1,4 +1,3 @@ -import logging from time import monotonic from importlib import import_module @@ -14,8 +13,6 @@ def __attrs_post_init__(self): super().__attrs_post_init__() self._requests = import_module('requests') - self.log = logging.getLogger('LXAIOBusNodeManager') - self._last = 0.0 def _get_nodes(self, host): @@ -25,7 +22,7 @@ def _get_nodes(self, host): j = r.json() return j["result"] except self._requests.exceptions.ConnectionError: - self.log.exception("failed to connect to host %s", host) + self.logger.exception("failed to connect to host %s", host) return [] def poll(self): From 26dffe45082a3ac46f930fc408d96c7d8416a069 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:26:08 +0200 Subject: [PATCH 103/384] resource/mqtt: use logger from ResourceManager Signed-off-by: Bastian Krause --- labgrid/resource/mqtt.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/labgrid/resource/mqtt.py b/labgrid/resource/mqtt.py index 98be9b722..b5991537b 100644 --- a/labgrid/resource/mqtt.py +++ b/labgrid/resource/mqtt.py @@ -1,4 +1,3 @@ -import logging import threading from time import monotonic @@ -16,10 +15,6 @@ class MQTTManager(ResourceManager): _topic_lock = attr.ib(default=threading.Lock()) _last = attr.ib(default=0.0, validator=attr.validators.instance_of(float)) - def __attrs_post_init__(self): - super().__attrs_post_init__() - self.log = logging.getLogger('MQTTManager') - def _create_mqtt_connection(self, host): import paho.mqtt.client as mqtt client = mqtt.Client() From ee2cfbe4f05de14947637277e8fa3e04979eb144 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:26:29 +0200 Subject: [PATCH 104/384] resource/remote: use logger from ResourceManager Signed-off-by: Bastian Krause --- labgrid/resource/remote.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index bca849bf0..ad116382d 100644 --- 
a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -1,5 +1,4 @@ import copy -import logging import os import attr @@ -11,7 +10,6 @@ class RemotePlaceManager(ResourceManager): def __attrs_post_init__(self): super().__attrs_post_init__() - self.logger = logging.getLogger(f"{self}") self.url = None self.realm = None self.loop = None From 3d156a12bad994bc1fab6f130e6dbf686e538f45 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:26:48 +0200 Subject: [PATCH 105/384] resource/udev: use logger from ResourceManager Signed-off-by: Bastian Krause --- labgrid/resource/udev.py | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index 819f5cbba..600bb1a62 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -1,4 +1,3 @@ -import logging import os import queue import warnings @@ -19,7 +18,6 @@ def __attrs_post_init__(self): super().__attrs_post_init__() self.queue = queue.Queue() - self.log = logging.getLogger('UdevManager') self._pyudev = import_module('pyudev') self._context = self._pyudev.Context() self._monitor = self._pyudev.Monitor.from_netlink(self._context) @@ -32,7 +30,7 @@ def on_resource_added(self, resource): devices.match_subsystem(resource.match['SUBSYSTEM']) for device in devices: if resource.try_match(device): - self.log.debug(" matched successfully against %s", resource.device) + self.logger.debug(" matched successfully against %s", resource.device) def _insert_into_queue(self, device): self.queue.put(device) @@ -44,10 +42,10 @@ def poll(self): device = self.queue.get(False) except queue.Empty: break - self.log.debug("%s: %s", device.action, device) + self.logger.debug("%s: %s", device.action, device) for resource in self.resources: if resource.try_match(device): - self.log.debug(" matched successfully") + self.logger.debug(" matched successfully") @attr.s(eq=False) class USBResource(ManagedResource): @@ -59,7 +57,6 @@ class USBResource(ManagedResource): def __attrs_post_init__(self): self.timeout = 5.0 - self.log = logging.getLogger('USBResource') self.match.setdefault('SUBSYSTEM', 'usb') super().__attrs_post_init__() @@ -134,7 +131,7 @@ def match_ancestors(key, value): if self.device.sys_path != device.sys_path: return False - self.log.debug(" found match: %s", self) + self.logger.debug(" found match: %s", self) if self.suggest and device.action in [None, 'add']: self.device = device From 2a095c28681511c6cda9900aea2ea62d807e9089 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 28 Jul 2023 14:35:12 +0200 Subject: [PATCH 106/384] util/qmp: drop colon from logger name Logger names should not contain any formatting. Signed-off-by: Bastian Krause --- labgrid/util/qmp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/util/qmp.py b/labgrid/util/qmp.py index 8af3b7c01..1eabce2c4 100644 --- a/labgrid/util/qmp.py +++ b/labgrid/util/qmp.py @@ -9,7 +9,7 @@ class QMPMonitor: monitor_in = attr.ib() def __attrs_post_init__(self): - self.logger = logging.getLogger(f"{self}:") + self.logger = logging.getLogger(f"{self}") self._negotiate_capabilities() def _negotiate_capabilities(self): From 6fb3fa2506e32a3b00b7f6ac0cdab61b91a4451b Mon Sep 17 00:00:00 2001 From: Andreas Naumann Date: Mon, 23 Oct 2023 16:41:38 +0200 Subject: [PATCH 107/384] remote/exporter: Add option to default to fqdn for hostname In distributed setups, the coordinator may point to exporters/places in different subnets. 
In order for clients to resolve resources behind the exporter, they need to know its FQDN instead of just the hostname. Using an option to change the default avoids conflicts for configs which rely on the hostname only. Signed-off-by: Andreas Naumann --- labgrid/remote/exporter.py | 8 +++++++- man/labgrid-exporter.1 | 8 ++++++++ man/labgrid-exporter.rst | 8 ++++++++ 3 files changed, 23 insertions(+), 1 deletion(-) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index a30b1afe1..288ea3a62 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -916,6 +916,12 @@ def main(): default=None, help='hostname (or IP) published for accessing resources (defaults to the system hostname)' ) + parser.add_argument( + '--fqdn', + action='store_true', + default=False, + help='Use fully qualified domain name as default for hostname' + ) parser.add_argument( '-d', '--debug', @@ -943,7 +949,7 @@ def main(): extra = { 'name': args.name or gethostname(), - 'hostname': args.hostname or gethostname(), + 'hostname': args.hostname or (getfqdn() if args.fqdn else gethostname()), 'resources': args.resources, 'isolated': args.isolated } diff --git a/man/labgrid-exporter.1 b/man/labgrid-exporter.1 index 20fe4b522..4ee130e8b 100644 --- a/man/labgrid-exporter.1 +++ b/man/labgrid-exporter.1 @@ -59,6 +59,9 @@ the public name of the exporter .B \-\-hostname hostname (or IP) published for accessing resources .TP +.B \-\-fqdn +use fully qualified domain name as default for hostname +.TP .B \-d\fP,\fB \-\-debug enable debug mode .UNINDENT @@ -83,6 +86,11 @@ exporter needs to provide a host name to set the exported value of the \(dqhost\ key. If the system hostname is not resolvable via DNS, this option can be used to override this default with another name (or an IP address). +.SS \-\-fqdn +.sp +In some networks the fully qualified domain name may be needed to reach resources +on an exporter. This option changes the default to fqdn when no \-\-hostname is +explicitly set. .SH CONFIGURATION .sp The exporter uses a YAML configuration file which defines groups of related diff --git a/man/labgrid-exporter.rst b/man/labgrid-exporter.rst index 8c9bda9dc..27e902beb 100644 --- a/man/labgrid-exporter.rst +++ b/man/labgrid-exporter.rst @@ -46,6 +46,8 @@ OPTIONS the public name of the exporter --hostname hostname (or IP) published for accessing resources +--fqdn + use fully qualified domain name as default for hostname -d, --debug enable debug mode @@ -73,6 +75,12 @@ key. If the system hostname is not resolvable via DNS, this option can be used to override this default with another name (or an IP address). +--fqdn +~~~~~~ +In some networks the fully qualified domain name may be needed to reach resources +on an exporter. This option changes the default to fqdn when no --hostname is +explicitly set. + CONFIGURATION ------------- The exporter uses a YAML configuration file which defines groups of related From 8f661ae4435b0897031d313499d7b10460afee1c Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 20 Nov 2023 16:16:06 +0100 Subject: [PATCH 108/384] tests: sshdriver: fix ValueError on cleanup Since d08a35c5, stdout of the ssh keepalive process is logged. The return value of the communicate method is used to achieve this.
This leads to ValueErrors during testing: Traceback (most recent call last): File "/home/runner/work/labgrid/labgrid/labgrid/target.py", line 516, in _atexit_cleanup self.cleanup() File "/home/runner/work/labgrid/labgrid/labgrid/target.py", line 562, in cleanup self.deactivate_all_drivers() File "/home/runner/work/labgrid/labgrid/labgrid/target.py", line 512, in deactivate_all_drivers self.deactivate(drv) File "/home/runner/work/labgrid/labgrid/labgrid/target.py", line 505, in deactivate client.on_deactivate() File "/home/runner/work/labgrid/labgrid/labgrid/driver/sshdriver.py", line 65, in on_deactivate self._stop_keepalive() File "/home/runner/work/labgrid/labgrid/labgrid/driver/sshdriver.py", line 565, in _stop_keepalive stdout, _ = self._keepalive.communicate(timeout=60) ^^^^^^^^^ ValueError: not enough values to unpack (expected 2, got 0) This happens also during CI testing, but is not fatal, since it happens during cleanup. To prevent this, mock the communicate method. This has also been done in b8f059fb which has been reverted since (d425be47). Fixes: d08a35c5 ("sshdriver: Log keepalive output") Signed-off-by: Bastian Krause --- tests/test_sshdriver.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tests/test_sshdriver.py b/tests/test_sshdriver.py index 81a5a603c..47a22fc11 100644 --- a/tests/test_sshdriver.py +++ b/tests/test_sshdriver.py @@ -17,6 +17,7 @@ def ssh_driver_mocked_and_activated(target, mocker): instance_mock = mocker.MagicMock() popen.return_value = instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) + instance_mock.communicate = mocker.MagicMock(return_value=(b"", b"")) SSHDriver(target, "ssh") s = target.get_driver("SSHDriver") return s @@ -35,6 +36,7 @@ def test_create(target, mocker): instance_mock = mocker.MagicMock() popen.return_value = instance_mock instance_mock.wait = mocker.MagicMock(return_value=0) + instance_mock.communicate = mocker.MagicMock(return_value=(b"", b"")) s = SSHDriver(target, "ssh") assert isinstance(s, SSHDriver) From 4b2bc30f66527ab72ac27080f7fa535fd0172e4e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 20 Nov 2023 17:55:27 +0100 Subject: [PATCH 109/384] tests/test_sshdriver: test deactivation Signed-off-by: Bastian Krause --- tests/test_sshdriver.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_sshdriver.py b/tests/test_sshdriver.py index 47a22fc11..c11cdabd6 100644 --- a/tests/test_sshdriver.py +++ b/tests/test_sshdriver.py @@ -40,21 +40,23 @@ def test_create(target, mocker): s = SSHDriver(target, "ssh") assert isinstance(s, SSHDriver) -def test_run_check(ssh_driver_mocked_and_activated, mocker): +def test_run_check(target, ssh_driver_mocked_and_activated, mocker): s = ssh_driver_mocked_and_activated s._run = mocker.MagicMock(return_value=(['success'], [], 0)) res = s.run_check("test") assert res == ['success'] res = s.run("test") assert res == (['success'], [], 0) + target.deactivate(s) -def test_run_check_raise(ssh_driver_mocked_and_activated, mocker): +def test_run_check_raise(target, ssh_driver_mocked_and_activated, mocker): s = ssh_driver_mocked_and_activated s._run = mocker.MagicMock(return_value=(['error'], [], 1)) with pytest.raises(ExecutionError): res = s.run_check("test") res = s.run("test") assert res == (['error'], [], 1) + target.deactivate(s) @pytest.fixture(scope='function') def ssh_localhost(target, pytestconfig): From bff18ed4a5cd8b7633a157d1ecf148036dd2eb28 Mon Sep 17 00:00:00 2001 From: Jens Kleintje Date: Fri, 15 Sep 2023 11:38:11 +0200 Subject: 
[PATCH 110/384] driver/power: add support for Robot Electronics ETH008 use HTTP-GET API defined at https://www.robot-electronics.co.uk/htm/eth008tech.htm Signed-off-by: Jens Kleintje --- doc/configuration.rst | 3 +++ labgrid/driver/power/eth008.py | 41 ++++++++++++++++++++++++++++++++++ 2 files changed, 44 insertions(+) create mode 100644 labgrid/driver/power/eth008.py diff --git a/doc/configuration.rst b/doc/configuration.rst index 6afab1b01..83214bc6a 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -174,6 +174,9 @@ Currently available are: interface, this module deliberately uses the standard password '1' and is not compatible with a different password. +``eth008`` + Controls a Robot-Electronics eth008 via a simple HTTP API. + ``gude`` Controls a Gude PDU via a simple HTTP API. diff --git a/labgrid/driver/power/eth008.py b/labgrid/driver/power/eth008.py new file mode 100644 index 000000000..6a4450953 --- /dev/null +++ b/labgrid/driver/power/eth008.py @@ -0,0 +1,41 @@ +""" +This driver implements a power port for the robot electronics 8 relay +outputs board. + +Driver has been tested with: +* ETH008 - 8 relay outputs +""" + +import requests +from ..exception import ExecutionError + +PORT = 80 + +def power_set(host, port, index, value): + index = int(index) + assert 1 <= index <= 8 + # access the web interface... + value_str = "A" if value else "I" + response = requests.get( + f"http://{host}:{port}/io.cgi?DO{value_str}{index}" + ) + response.raise_for_status() + + # Check, that the port is in the desired state + state = get_state(response, index) + if state != value: + raise ExecutionError(f"failed to set port {index} to status {value}") + +def power_get(host, port, index): + index = int(index) + assert 1 <= index <= 8 + # get the contents of the main page + response = requests.get(f"http://{host}:{port}/io.cgi?relay") + + response.raise_for_status() + state = get_state(response, index) + return state + +def get_state(request, index): + value = request.text.split()[1][index-1] + return bool(int(value)) From 5a8acba7479646059c90baceb17bbb5b9c5f8b2b Mon Sep 17 00:00:00 2001 From: Jens Kleintje Date: Thu, 28 Sep 2023 10:15:48 +0200 Subject: [PATCH 111/384] tests/test_powerdriver: add eth008 import Signed-off-by: Jens Kleintje --- tests/test_powerdriver.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/test_powerdriver.py b/tests/test_powerdriver.py index 3c7b3dbab..7afcdfb33 100644 --- a/tests/test_powerdriver.py +++ b/tests/test_powerdriver.py @@ -244,6 +244,7 @@ def test_import_backends(self): import labgrid.driver.power.apc import labgrid.driver.power.digipower import labgrid.driver.power.digitalloggers_http + import labgrid.driver.power.eth008 import labgrid.driver.power.gude import labgrid.driver.power.gude24 import labgrid.driver.power.netio From 5864c001ef523deaef7f74ce5f23e0329653e82b Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Fri, 17 Nov 2023 12:14:02 -0700 Subject: [PATCH 112/384] qemudriver: Add support for additional disk options Adds support to pass additional disk options to QEMU when creating the drive. This can be helpful for example to pass different "cache" options that may affect performance. 
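For illustration, a hypothetical environment config using the new key could look like this (a sketch only; all values are placeholders, the usual required QEMUDriver arguments are shown just for context):

  targets:
    main:
      drivers:
        QEMUDriver:
          qemu_bin: qemu-x86_64
          machine: q35
          cpu: core2duo
          memory: 1G
          disk: rootfs
          disk_opts: "cache=unsafe"
  tools:
    qemu-x86_64: /usr/bin/qemu-system-x86_64
  images:
    rootfs: ../images/rootfs.qcow2

The given string is appended, comma-separated, to the generated -drive argument.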
Signed-off-by: Joshua Watt --- CHANGES.rst | 2 ++ doc/configuration.rst | 1 + labgrid/driver/qemudriver.py | 13 ++++++++++--- 3 files changed, 13 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 03a09c2f6..45ee0dea2 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,6 +10,8 @@ New Features in 23.1 - A new log level called ``CONSOLE`` has been added between the default ``INFO`` and ``DEBUG`` levels. This level will show all reads and writes made to the serial console during testing. +- The `QEMUDriver` now has an additional ``disk_opts`` property which can be + used to pass additional options for the disk directly to QEMU Bug fixes in 23.1 ~~~~~~~~~~~~~~~~~ diff --git a/doc/configuration.rst b/doc/configuration.rst index 6afab1b01..577b78810 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -2483,6 +2483,7 @@ Arguments: - boot_args (str): optional, additional kernel boot argument - kernel (str): optional, reference to the images key for the kernel - disk (str): optional, reference to the images key for the disk image + - disk_opts (str): optional, additional QEMU disk options - flash (str): optional, reference to the images key for the flash image - rootfs (str): optional, reference to the paths key for use as the virtio-9p filesystem - dtb (str): optional, reference to the image key for the device tree diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py index 6d7daf02b..2c8d2dafb 100644 --- a/labgrid/driver/qemudriver.py +++ b/labgrid/driver/qemudriver.py @@ -40,6 +40,7 @@ class QEMUDriver(ConsoleExpectMixin, Driver, PowerProtocol, ConsoleProtocol): boot_args (str): optional, additional kernel boot argument kernel (str): optional, reference to the images key for the kernel disk (str): optional, reference to the images key for the disk image + disk_opts (str): optional, additional QEMU disk options flash (str): optional, reference to the images key for the flash image rootfs (str): optional, reference to the paths key for use as the virtio-9p filesystem dtb (str): optional, reference to the image key for the device tree @@ -64,6 +65,9 @@ class QEMUDriver(ConsoleExpectMixin, Driver, PowerProtocol, ConsoleProtocol): disk = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + disk_opts = attr.ib( + default=None, + validator=attr.validators.optional(attr.validators.instance_of(str))) rootfs = attr.ib( default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) @@ -146,20 +150,23 @@ def get_qemu_base_args(self): disk_format = "raw" if disk_path.endswith(".qcow2"): disk_format = "qcow2" + disk_opts = "" + if self.disk_opts: + disk_opts = f",{self.disk_opts}" if self.machine == "vexpress-a9": cmd.append("-drive") cmd.append( - f"if=sd,format={disk_format},file={disk_path},id=mmc0") + f"if=sd,format={disk_format},file={disk_path},id=mmc0{disk_opts}") boot_args.append("root=/dev/mmcblk0p1 rootfstype=ext4 rootwait") elif self.machine == "q35": cmd.append("-drive") cmd.append( - f"if=virtio,format={disk_format},file={disk_path}") + f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") boot_args.append("root=/dev/vda rootwait") elif self.machine == "pc": cmd.append("-drive") cmd.append( - f"if=virtio,format={disk_format},file={disk_path}") + f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") boot_args.append("root=/dev/vda rootwait") else: raise NotImplementedError( From 2c475e6497f8f32c4f2b30c2a2a0cd7deafbd17c Mon Sep 17 00:00:00 2001 From: Bastian Krause 
Date: Mon, 27 Nov 2023 17:47:14 +0100 Subject: [PATCH 113/384] github/workflows/docker: upgrade setuptools before installing setuptools_scm Fixes warnings such as: /home/runner/.local/lib/python3.10/site-packages/setuptools_scm/_integration/setuptools.py:30: RuntimeWarning: ERROR: setuptools==59.6.0 is used in combination with setuptools_scm>=8.x Your build configuration is incomplete and previously worked by accident! setuptools_scm requires setuptools>=61 Suggested workaround if applicable: - migrating from the deprecated setup_requires mechanism to pep517/518 and using a pyproject.toml to declare build dependencies which are reliably pre-installed before running the build tools Signed-off-by: Bastian Krause --- .github/workflows/docker.yml | 3 ++- .github/workflows/reusable-unit-tests-docker.yml | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index a5d76d59b..89cf99bf8 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -14,7 +14,8 @@ jobs: - name: Install system dependencies run: | sudo apt install -yq python3-pip - python3 -m pip install setuptools_scm + pip install --upgrade setuptools + pip install setuptools_scm - name: Login to DockerHub uses: docker/login-action@v3 with: diff --git a/.github/workflows/reusable-unit-tests-docker.yml b/.github/workflows/reusable-unit-tests-docker.yml index af1a7a499..dc1b58a2d 100644 --- a/.github/workflows/reusable-unit-tests-docker.yml +++ b/.github/workflows/reusable-unit-tests-docker.yml @@ -17,7 +17,8 @@ jobs: - name: Install system dependencies run: | sudo apt install -yq python3-pip - python3 -m pip install setuptools_scm + pip install --upgrade setuptools + pip install setuptools_scm - name: Build docker images run: | ./dockerfiles/build.sh From 61d5704c6783ea7aec4f580b0f9d997b859a18f9 Mon Sep 17 00:00:00 2001 From: Johann Wiens Date: Wed, 29 Nov 2023 10:50:45 +0100 Subject: [PATCH 114/384] driver/qemudriver: add support for virt machine Signed-off-by: Johann Wiens --- labgrid/driver/qemudriver.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py index 2c8d2dafb..a0ab8bbb7 100644 --- a/labgrid/driver/qemudriver.py +++ b/labgrid/driver/qemudriver.py @@ -168,6 +168,11 @@ def get_qemu_base_args(self): cmd.append( f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") boot_args.append("root=/dev/vda rootwait") + elif self.machine == "virt": + cmd.append("-drive") + cmd.append( + f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") + boot_args.append("root=/dev/vda rootwait") else: raise NotImplementedError( f"QEMU disk image support not implemented for machine '{self.machine}'" From ccbccf3526324bf03d36fa1b30811f672e2cbf46 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 13 Dec 2023 13:03:03 +0100 Subject: [PATCH 115/384] tests/test_crossbar: mark tests incompatible with python3.12+ crossbar is not compatible with Python 3.12, yet: [1] and [2] are not part of a release. A release will probably take some more time [3][4]. Since this is the last remaining issue before we can advertise Python 3.12 support, let's mark these tests as XFAIL for now. 
[1] https://github.com/crossbario/crossbar/pull/2091 [2] https://github.com/crossbario/crossbar/pull/2093 [3] https://github.com/crossbario/crossbar/pull/2093#issuecomment-1853007773 [4] https://github.com/crossbario/crossbar/pull/2091#issuecomment-1850126501 Signed-off-by: Bastian Krause --- tests/test_crossbar.py | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/tests/test_crossbar.py b/tests/test_crossbar.py index 0f1a477bd..a1db0eeeb 100644 --- a/tests/test_crossbar.py +++ b/tests/test_crossbar.py @@ -1,5 +1,6 @@ import os import re +import sys import time import pytest @@ -21,6 +22,7 @@ def resume_tree(pid): for child in main.children(recursive=True): child.resume() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_startup(crossbar): pass @@ -69,6 +71,7 @@ def test_connect_error(): spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_connect_timeout(crossbar): suspend_tree(crossbar.pid) try: @@ -81,6 +84,7 @@ def test_connect_timeout(crossbar): resume_tree(crossbar.pid) pass +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_show(place): with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: spawn.expect("Place 'test':") @@ -88,6 +92,7 @@ def test_place_show(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_alias(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-alias foo') as spawn: spawn.expect(pexpect.EOF) @@ -99,6 +104,7 @@ def test_place_alias(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_comment(place): with pexpect.spawn('python -m labgrid.remote.client -p test set-comment my comment') as spawn: spawn.expect(pexpect.EOF) @@ -112,6 +118,7 @@ def test_place_comment(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "e1/g1/r1" "e2/g2/*"') as spawn: spawn.expect(pexpect.EOF) @@ -130,6 +137,7 @@ def test_place_match(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match_duplicates(place): # first given match should succeed, second should be skipped matches = ( @@ -150,6 +158,7 @@ def test_place_match_duplicates(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire(place): with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: spawn.expect(pexpect.EOF) @@ -167,6 +176,7 @@ def test_place_acquire(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def 
test_place_acquire_enforce(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match does/not/exist') as spawn: spawn.expect(pexpect.EOF) @@ -190,6 +200,7 @@ def test_place_acquire_enforce(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_broken(place, exporter): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "*/Broken/*"') as spawn: spawn.expect(pexpect.EOF) @@ -209,6 +220,7 @@ def test_place_acquire_broken(place, exporter): print(spawn.before.decode()) assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_release_from(monkeypatch, place, exporter): user = "test-user" host = "test-host" @@ -255,6 +267,7 @@ def test_place_release_from(monkeypatch, place, exporter): before = spawn.before.decode("utf-8").strip() assert user not in before and not host in before, before +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_add_no_name(crossbar): with pexpect.spawn('python -m labgrid.remote.client create') as spawn: spawn.expect("missing place name") @@ -262,6 +275,7 @@ def test_place_add_no_name(crossbar): spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_del_no_name(crossbar): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: spawn.expect("deletes require an exact place name") @@ -269,6 +283,7 @@ def test_place_del_no_name(crossbar): spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target(place_acquire, tmpdir): from labgrid.environment import Environment p = tmpdir.join("config.yaml") @@ -289,6 +304,7 @@ def test_remoteplace_target(place_acquire, tmpdir): remote_place = t.get_resource("RemotePlace") assert remote_place.tags == {"board": "bar"} +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target_without_env(request, place_acquire): from labgrid import Target from labgrid.resource import RemotePlace @@ -297,6 +313,7 @@ def test_remoteplace_target_without_env(request, place_acquire): remote_place = RemotePlace(t, name="test") assert remote_place.tags == {"board": "bar"} +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_resource_conflict(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test2 create') as spawn: spawn.expect(pexpect.EOF) @@ -318,6 +335,7 @@ def test_resource_conflict(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client reserve --shell board=bar name=test') as spawn: spawn.expect(pexpect.EOF) @@ -395,6 +413,7 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() 
+@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_exporter_timeout(place, exporter): with pexpect.spawn('python -m labgrid.remote.client resources') as spawn: spawn.expect(pexpect.EOF) @@ -432,6 +451,7 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation_custom_config(place, exporter, tmpdir): p = tmpdir.join("config.yaml") p.write( @@ -469,6 +489,7 @@ def test_reservation_custom_config(place, exporter, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() +@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_same_name_resources(place, exporter, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test add-named-match "testhost/Many/NetworkService" "samename"') as spawn: spawn.expect(pexpect.EOF) From 4f23236467384e5dcd4627987f4a6734ca4a1f09 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 13 Dec 2023 13:03:30 +0100 Subject: [PATCH 116/384] github/workflows: test Python 3.12 crossbar is not compatible with Python 3.12, yet: [1] and [2] are not part of a release. A release will probably take some more time [3][4]. We shouldn't advertise Python 3.12 support until that happens. But we can make sure that everything else keeps working fine with Python 3.12 by running the test suite. A previous commit marked the failing crossbar tests as XFAIL for 3.12. [1] https://github.com/crossbario/crossbar/pull/2091 [2] https://github.com/crossbario/crossbar/pull/2093 [3] https://github.com/crossbario/crossbar/pull/2093#issuecomment-1853007773 [4] https://github.com/crossbario/crossbar/pull/2091#issuecomment-1850126501 Signed-off-by: Bastian Krause --- .github/workflows/push-pr-unit-tests.yml | 2 +- .github/workflows/scheduled-unit-tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/push-pr-unit-tests.yml b/.github/workflows/push-pr-unit-tests.yml index 8477cd113..4eb35620d 100644 --- a/.github/workflows/push-pr-unit-tests.yml +++ b/.github/workflows/push-pr-unit-tests.yml @@ -8,7 +8,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] uses: ./.github/workflows/reusable-unit-tests.yml with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index faadf9cc7..096e7aee6 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11'] + python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] branch: ['master'] uses: ./.github/workflows/reusable-unit-tests.yml with: From 7033b3b49f3e507dc4e6998b1940d5998ff39f33 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 15 Dec 2023 11:37:21 +0100 Subject: [PATCH 117/384] driver: qemudriver: fold equivalent disk arguments Fold the "q35", "virt" and "pc" machines into a single if clause. They all use the same definition anyway. 
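After the fold, all three machines end up with identical options, e.g. with an illustrative qcow2 disk image:

  -drive if=virtio,format=qcow2,file=rootfs.qcow2

plus "root=/dev/vda rootwait" appended to the kernel command line.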
Signed-off-by: Rouven Czerwinski --- labgrid/driver/qemudriver.py | 12 +----------- 1 file changed, 1 insertion(+), 11 deletions(-) diff --git a/labgrid/driver/qemudriver.py b/labgrid/driver/qemudriver.py index a0ab8bbb7..4d8a8a77d 100644 --- a/labgrid/driver/qemudriver.py +++ b/labgrid/driver/qemudriver.py @@ -158,17 +158,7 @@ def get_qemu_base_args(self): cmd.append( f"if=sd,format={disk_format},file={disk_path},id=mmc0{disk_opts}") boot_args.append("root=/dev/mmcblk0p1 rootfstype=ext4 rootwait") - elif self.machine == "q35": - cmd.append("-drive") - cmd.append( - f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") - boot_args.append("root=/dev/vda rootwait") - elif self.machine == "pc": - cmd.append("-drive") - cmd.append( - f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") - boot_args.append("root=/dev/vda rootwait") - elif self.machine == "virt": + elif self.machine in ["pc", "q35", "virt"]: cmd.append("-drive") cmd.append( f"if=virtio,format={disk_format},file={disk_path}{disk_opts}") boot_args.append("root=/dev/vda rootwait") From a712e64f1aef9f2053907bc77985aa65aafcf6d8 Mon Sep 17 00:00:00 2001 From: Roland Hieber Date: Thu, 14 Dec 2023 14:38:36 +0100 Subject: [PATCH 118/384] shelldriver: allow login with empty password Make 'password' default to None, so when it is not set, no password is entered, but the user can set "" (empty string) to support boards which prompt for a password but accept a plain enter. Fixes #441 Signed-off-by: Roland Hieber [r.czerwinski@pengutronix.de: fix commit message, use better None check] Signed-off-by: Rouven Czerwinski --- CHANGES.rst | 1 + doc/configuration.rst | 3 ++- labgrid/driver/shelldriver.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 45ee0dea2..159cfaa0d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -29,6 +29,7 @@ Bug fixes in 23.1 test run. - ManagedFile NFS detection heuristic now does symlink resolution on the local host. +- The password for the ShellDriver can now be an empty string. Breaking changes in 23.1 ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/configuration.rst b/doc/configuration.rst index 577b78810..79f51d8a2 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1558,7 +1558,8 @@ Arguments: - prompt (regex): shell prompt to match after logging in - login_prompt (regex): match for the login prompt - username (str): username to use during login - - password (str): optional, password to use during login + - password (str): optional, password to use during login. + Can be an empty string.
- keyfile (str): optional, keyfile to upload after login, making the `SSHDriver`_ usable - login_timeout (int, default=60): timeout for login prompt detection in diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py index 6689e2568..490a353b7 100644 --- a/labgrid/driver/shelldriver.py +++ b/labgrid/driver/shelldriver.py @@ -48,7 +48,7 @@ class ShellDriver(CommandMixin, Driver, CommandProtocol, FileTransferProtocol): prompt = attr.ib(validator=attr.validators.instance_of(str)) login_prompt = attr.ib(validator=attr.validators.instance_of(str)) username = attr.ib(validator=attr.validators.instance_of(str)) - password = attr.ib(default="", validator=attr.validators.instance_of(str)) + password = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) keyfile = attr.ib(default="", validator=attr.validators.instance_of(str)) login_timeout = attr.ib(default=60, validator=attr.validators.instance_of(int)) console_ready = attr.ib(default="", validator=attr.validators.instance_of(str)) @@ -152,7 +152,7 @@ def _await_login(self): did_login = True elif index == 2: - if self.password: + if self.password is not None: self.console.sendline(self.password) else: raise Exception("Password entry needed but no password set") From 12b272eefb8465c2d9afcca376fd82cf432ce6a7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 31 Jul 2023 13:13:31 +0200 Subject: [PATCH 119/384] driver/sigrokdriver: drop redundant process.wait() calls In both cases, `process.communicate()` already waits for the process to terminate, so we can drop the redundant `process.wait()` calls. Signed-off-by: Bastian Krause --- labgrid/driver/sigrokdriver.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py index a9bc61f1c..b82fa6b80 100644 --- a/labgrid/driver/sigrokdriver.py +++ b/labgrid/driver/sigrokdriver.py @@ -163,7 +163,6 @@ def stop(self): self._process.send_signal(signal.SIGINT) stdout, stderr = self._process.communicate() - self._process.wait() self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) # Convert from .sr to .csv @@ -173,7 +172,6 @@ def stop(self): os.path.join(self._tmpdir, csv_filename) ] self._call(*cmd) - self._process.wait() stdout, stderr = self._process.communicate() self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) if isinstance(self.sigrok, NetworkSigrokUSBDevice): From cf626b5b1a0e7172933829410a3300ee24e32ec6 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 31 Jul 2023 13:17:31 +0200 Subject: [PATCH 120/384] driver/sigrokdriver: log stdout/stderr separately, drop formatting from log messages stdout/stderr from the subprocess are logged as bytes, there is no benefit in logging them with line breaks and in a single message. 
Signed-off-by: Bastian Krause --- labgrid/driver/sigrokdriver.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py index b82fa6b80..874a33302 100644 --- a/labgrid/driver/sigrokdriver.py +++ b/labgrid/driver/sigrokdriver.py @@ -163,7 +163,8 @@ def stop(self): self._process.send_signal(signal.SIGINT) stdout, stderr = self._process.communicate() - self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) + self.logger.debug("stdout: %s", stdout) + self.logger.debug("stderr: %s", stderr) # Convert from .sr to .csv cmd = [ @@ -173,7 +174,8 @@ def stop(self): ] self._call(*cmd) stdout, stderr = self._process.communicate() - self.logger.debug("stdout:\n %s\n ----- \n stderr:\n %s", stdout, stderr) + self.logger.debug("stdout: %s", stdout) + self.logger.debug("stderr: %s", stderr) if isinstance(self.sigrok, NetworkSigrokUSBDevice): subprocess.call([ 'scp', f'{self.sigrok.host}:{os.path.join(self._tmpdir, self._basename)}', From 0d8b82be390fe87ce1b706d04a8526d3e05acd5c Mon Sep 17 00:00:00 2001 From: Nicolas Labriet Date: Tue, 12 Dec 2023 14:16:46 +0100 Subject: [PATCH 121/384] driver/sshdriver: Add credential overriding NetworkService's Depending on the software provisioned on a place, the credentials may be different. The SSHDriver's credentials override the credentials from the NetworkService. Signed-off-by: Nicolas Labriet Signed-off-by: Bastian Krause --- doc/configuration.rst | 2 ++ labgrid/driver/sshdriver.py | 28 +++++++++++++++++++--------- 2 files changed, 21 insertions(+), 9 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 577b78810..a01b28a13 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1619,6 +1619,8 @@ Arguments: explicitly use the SFTP protocol for file transfers instead of scp's default protocol - explicit_scp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will explicitly use the SCP protocol for file transfers instead of scp's default protocol + - username (str, default=username from `NetworkService`): username used by SSH + - password (str, default=password from `NetworkService`): password used by SSH UBootDriver ~~~~~~~~~~~ diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index 68e9c725a..d11162cb7 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -36,12 +36,22 @@ class SSHDriver(CommandMixin, Driver, CommandProtocol, FileTransferProtocol): connection_timeout = attr.ib(default=float(get_ssh_connect_timeout()), validator=attr.validators.instance_of(float)) explicit_sftp_mode = attr.ib(default=False, validator=attr.validators.instance_of(bool)) explicit_scp_mode = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + username = attr.ib(default="", validator=attr.validators.instance_of(str)) + password = attr.ib(default="", validator=attr.validators.instance_of(str)) def __attrs_post_init__(self): super().__attrs_post_init__() self.logger = logging.getLogger(f"{self}({self.target})") self._keepalive = None + def _get_username(self): + """Get the username from this class or from NetworkService""" + return self.username or self.networkservice.username + + def _get_password(self): + """Get the password from this class or from NetworkService""" + return self.password or self.networkservice.password + def on_activate(self): self.ssh_prefix = ["-o", "LogLevel=ERROR"] if self.keyfile: @@ -49,7 +59,7 @@ def on_activate(self): if self.target.env: 
keyfile_path = self.target.env.config.resolve_path(self.keyfile) self.ssh_prefix += ["-i", keyfile_path ] - if not self.networkservice.password: + if not self._get_password(): self.ssh_prefix += ["-o", "PasswordAuthentication=no"] self.control = self._start_own_master() @@ -101,7 +111,7 @@ def _start_own_master_once(self, timeout): "-o", "ControlPersist=300", "-o", "UserKnownHostsFile=/dev/null", "-o", "StrictHostKeyChecking=no", "-o", "ServerAliveInterval=15", "-MN", "-S", control.replace('%', '%%'), "-p", - str(self.networkservice.port), "-l", self.networkservice.username, + str(self.networkservice.port), "-l", self._get_username(), self.networkservice.address] # proxy via the exporter if we have an ifname suffix @@ -121,14 +131,14 @@ def _start_own_master_once(self, timeout): env = os.environ.copy() pass_file = '' - if self.networkservice.password: + if self._get_password(): fd, pass_file = tempfile.mkstemp() os.fchmod(fd, stat.S_IRWXU) #with openssh>=8.4 SSH_ASKPASS_REQUIRE can be used to force SSH_ASK_PASS #openssh<8.4 requires the DISPLAY var and a detached process with start_new_session=True env = {'SSH_ASKPASS': pass_file, 'DISPLAY':'', 'SSH_ASKPASS_REQUIRE':'force'} with open(fd, 'w') as f: - f.write("#!/bin/sh\necho " + shlex.quote(self.networkservice.password)) + f.write("#!/bin/sh\necho " + shlex.quote(self._get_password())) self.process = subprocess.Popen(args, env=env, stdout=subprocess.PIPE, @@ -165,7 +175,7 @@ def _start_own_master_once(self, timeout): f"Subprocess timed out [{subprocess_timeout}s] while executing {args}", ) finally: - if self.networkservice.password and os.path.exists(pass_file): + if self._get_password() and os.path.exists(pass_file): os.remove(pass_file) if not os.path.exists(control): @@ -196,7 +206,7 @@ def _run(self, cmd, codec="utf-8", decodeerrors="strict", timeout=None): raise ExecutionError("Keepalive no longer running") complete_cmd = ["ssh", "-x", *self.ssh_prefix, - "-p", str(self.networkservice.port), "-l", self.networkservice.username, + "-p", str(self.networkservice.port), "-l", self._get_username(), self.networkservice.address ] + cmd.split(" ") self.logger.debug("Sending command: %s", complete_cmd) @@ -469,7 +479,7 @@ def put(self, filename, remotepath=''): "-P", str(self.networkservice.port), "-r", filename, - f"{self.networkservice.username}@{self.networkservice.address}:{remotepath}" + f"{self._get_username()}@{self.networkservice.address}:{remotepath}" ] if self.explicit_sftp_mode and self._scp_supports_explicit_sftp_mode(): @@ -498,7 +508,7 @@ def get(self, filename, destination="."): *self.ssh_prefix, "-P", str(self.networkservice.port), "-r", - f"{self.networkservice.username}@{self.networkservice.address}:{filename}", + f"{self._get_username()}@{self.networkservice.address}:{filename}", destination ] @@ -522,7 +532,7 @@ def get(self, filename, destination="."): def _cleanup_own_master(self): """Exit the controlmaster and delete the tmpdir""" - complete_cmd = f"ssh -x -o ControlPath={self.control.replace('%', '%%')} -O exit -p {self.networkservice.port} -l {self.networkservice.username} {self.networkservice.address}".split(' ') # pylint: disable=line-too-long + complete_cmd = f"ssh -x -o ControlPath={self.control.replace('%', '%%')} -O exit -p {self.networkservice.port} -l {self._get_username()} {self.networkservice.address}".split(' ') # pylint: disable=line-too-long res = subprocess.call( complete_cmd, stdin=subprocess.DEVNULL, From 016d8143fcf8fe80e7502dc75262c12720b4aac8 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: 
Fri, 15 Dec 2023 16:10:50 +0100
Subject: [PATCH 122/384] examples: add sigrok example

Add a simple sigrok example that captures on one channel on an fx2lafw
device.

Signed-off-by: Rouven Czerwinski
---
 examples/sigrok/env.yaml |  8 ++++++++
 examples/sigrok/main.py  | 23 +++++++++++++++++++++++
 2 files changed, 31 insertions(+)
 create mode 100644 examples/sigrok/env.yaml
 create mode 100755 examples/sigrok/main.py

diff --git a/examples/sigrok/env.yaml b/examples/sigrok/env.yaml
new file mode 100644
index 000000000..0a046b9ee
--- /dev/null
+++ b/examples/sigrok/env.yaml
@@ -0,0 +1,8 @@
+targets:
+  main:
+    resources:
+      SigrokDevice:
+        driver: fx2lafw
+        channels: D1
+    drivers:
+      SigrokDriver: {}
diff --git a/examples/sigrok/main.py b/examples/sigrok/main.py
new file mode 100755
index 000000000..a213232d6
--- /dev/null
+++ b/examples/sigrok/main.py
@@ -0,0 +1,23 @@
+#!/usr/bin/env python3
+"""Capture Channel D1 of an fx2lafw device for 1 second."""
+
+import sys
+import time
+import logging
+
+from labgrid import Environment
+
+# enable debug logging
+logging.basicConfig(
+    level=logging.DEBUG,
+    format='%(levelname)7s: %(message)s',
+    stream=sys.stderr,
+)
+
+env = Environment(sys.argv[1])
+target = env.get_target('main')
+
+sigrok = target.get_driver("SigrokDriver")
+sigrok.capture("test.cap")
+time.sleep(1)
+sigrok.stop()

From 9d4cd4cb759dc72ae34014f3b44500904060db98 Mon Sep 17 00:00:00 2001
From: Sebastian Goscik
Date: Tue, 12 Dec 2023 11:43:58 +0000
Subject: [PATCH 123/384] driver/modbusdriver: Fix modbus error parsing

As per the changelog:
https://github.com/sourceperl/pyModbusTCP/blob/7bc2efcf08b596be9401c2cc4831811edea405df/CHANGES#L16
pymodbusTCP 2.x.x changed some parameters to properties instead of
methods.

Signed-off-by: Sebastian Goscik
---
 labgrid/driver/modbusdriver.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/labgrid/driver/modbusdriver.py b/labgrid/driver/modbusdriver.py
index 844306278..5bddc2437 100644
--- a/labgrid/driver/modbusdriver.py
+++ b/labgrid/driver/modbusdriver.py
@@ -28,9 +28,9 @@ def on_deactivate(self):
         self.client = None
 
     def _handle_error(self, action):
-        error_code = self.client.last_error()
+        error_code = self.client.last_error
         if error_code == self._consts.MB_EXCEPT_ERR:
-            exc = self.client.last_except()
+            exc = self.client.last_except
             if exc not in [self._consts.EXP_ACKNOWLEDGE, self._consts.EXP_NONE]:
                 raise ExecutionError(
                     f'Could not {action} coil (code={error_code}/exception={exc})')

From e5967f20ff9b3b035c9dc1bca6b3a5c708444ccf Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 19 Dec 2023 10:39:39 +0100
Subject: [PATCH 124/384] pyproject.toml: increase minimum pyModbusTCP version

With pyModbusTCP 0.2.0, the API of the ModbusClient class changed. Bump
the minimum dependency, so future commits can use the new API.
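Roughly, the API difference looks like this (a sketch based on the
pyModbusTCP changelog; host and arguments are placeholders):

    from pyModbusTCP.client import ModbusClient

    client = ModbusClient(host="192.0.2.1", auto_open=True)

    # pyModbusTCP < 0.2.0: accessors were methods
    #   error_code = client.last_error()
    #   exc = client.last_except()

    # pyModbusTCP >= 0.2.0: they are properties
    error_code = client.last_error
    exc = client.last_except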
Signed-off-by: Bastian Krause --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 2e80ccf46..ca83ffd34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -58,7 +58,7 @@ doc = [ docker = ["docker>=5.0.2"] graph = ["graphviz>=0.17.0"] kasa = ["python-kasa>=0.4.0"] -modbus = ["pyModbusTCP>=0.1.10"] +modbus = ["pyModbusTCP>=0.2.0"] modbusrtu = ["minimalmodbus>=1.0.2"] mqtt = ["paho-mqtt>=1.5.1"] onewire = ["onewire>=0.2"] From 90cf941308e8fefccf093c9cd975f0e3328355e4 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Thu, 20 Apr 2023 13:50:29 +0200 Subject: [PATCH 125/384] helpers: add labgrid-raw-interface helper Wrapper script to be deployed on machines whose network interfaces should be controllable via the RawNetworkInterfaceDriver. A /etc/labgrid/helpers.yaml can deny access to network interfaces. Intended to be used via sudo. Signed-off-by: Rouven Czerwinski Signed-off-by: Bastian Krause --- debian/labgrid.install | 1 + helpers/labgrid-raw-interface | 96 +++++++++++++++++++++++++++++++++++ 2 files changed, 97 insertions(+) create mode 100755 helpers/labgrid-raw-interface diff --git a/debian/labgrid.install b/debian/labgrid.install index 87e162fa8..cfea6dd96 100755 --- a/debian/labgrid.install +++ b/debian/labgrid.install @@ -5,4 +5,5 @@ debian/labgrid-exporter /usr/bin debian/labgrid-pytest /usr/bin debian/labgrid-suggest /usr/bin helpers/labgrid-bound-connect /usr/sbin +helpers/labgrid-raw-interface /usr/sbin contrib/completion/labgrid-client.bash => /usr/share/bash-completion/completions/labgrid-client diff --git a/helpers/labgrid-raw-interface b/helpers/labgrid-raw-interface new file mode 100755 index 000000000..ad54dcf10 --- /dev/null +++ b/helpers/labgrid-raw-interface @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 +# +# Wrapper script to be deployed on machines whose network interfaces should be +# controllable via the RawNetworkInterfaceDriver. A /etc/labgrid/helpers.yaml +# can deny access to network interfaces. See below. +# +# This is intended to be used via sudo. 
For example, add via visudo:
+# %developers ALL = NOPASSWD: /usr/sbin/labgrid-raw-interface
+
+import argparse
+import os
+import sys
+
+import yaml
+
+
+def get_denylist():
+    denylist_file = "/etc/labgrid/helpers.yaml"
+    try:
+        with open(denylist_file) as stream:
+            data = yaml.load(stream, Loader=yaml.SafeLoader)
+    except (PermissionError, FileNotFoundError, AttributeError) as e:
+        raise Exception(f"No configuration file ({denylist_file}), inaccessible or invalid yaml") from e
+
+    denylist = data.get("raw-interface", {}).get("denied-interfaces", [])
+
+    if not isinstance(denylist, list):
+        raise Exception("No explicit denied-interfaces or not a list, please check your configuration")
+
+    denylist.append("lo")
+
+    return denylist
+
+
+def main(program, ifname, count):
+    if not ifname:
+        raise ValueError("Empty interface name.")
+    if any((c == "/" or c.isspace()) for c in ifname):
+        raise ValueError(f"Interface name '{ifname}' contains invalid characters.")
+    if len(ifname) > 16:
+        raise ValueError(f"Interface name '{ifname}' is too long.")
+
+    denylist = get_denylist()
+
+    if ifname in denylist:
+        raise ValueError(f"Interface name '{ifname}' is denied in denylist.")
+
+    programs = ["tcpreplay", "tcpdump"]
+    if program not in programs:
+        raise ValueError(f"Invalid program {program} called with wrapper, valid programs are: {programs}")
+
+    args = [
+        program,
+    ]
+
+    if program == "tcpreplay":
+        args.append(f"--intf1={ifname}")
+        args.append('-')
+
+    if program == "tcpdump":
+        args.append("-n")
+        args.append(f"--interface={ifname}")
+        args.append("-w")
+        args.append('-')
+
+        if count:
+            args.append("-c")
+            args.append(str(count))
+
+    try:
+        os.execvp(args[0], args)
+    except FileNotFoundError as e:
+        raise RuntimeError(f"Missing {program} binary") from e
+
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '-d',
+        '--debug',
+        action='store_true',
+        default=False,
+        help="enable debug mode"
+    )
+    parser.add_argument('program', type=str, help='program to run, either tcpreplay or tcpdump')
+    parser.add_argument('interface', type=str, help='interface name')
+    parser.add_argument('count', nargs="?", type=int, default=None, help='amount of frames to capture while recording')
+    args = parser.parse_args()
+    try:
+        main(args.program, args.interface, args.count)
+    except Exception as e: # pylint: disable=broad-except
+        if args.debug:
+            import traceback
+            traceback.print_exc(file=sys.stderr)
+        print(f"ERROR: {e}", file=sys.stderr)
+        exit(1)

From 9c57dd8855fcb2efd58ca842563fbc193b2116aa Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Fri, 5 Jan 2024 14:31:09 +0100
Subject: [PATCH 126/384] man: extend copyright period to 2024

Signed-off-by: Bastian Krause
---
 man/labgrid-device-config.5   | 2 +-
 man/labgrid-device-config.rst | 2 +-
 man/labgrid-exporter.1        | 2 +-
 man/labgrid-exporter.rst      | 2 +-
 man/labgrid-pytest.7          | 2 +-
 man/labgrid-pytest.rst        | 2 +-
 man/labgrid-suggest.1         | 2 +-
 man/labgrid-suggest.rst       | 2 +-
 8 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/man/labgrid-device-config.5 b/man/labgrid-device-config.5
index 1f0c6e729..13943611c 100644
--- a/man/labgrid-device-config.5
+++ b/man/labgrid-device-config.5
@@ -266,7 +266,7 @@ Rouven Czerwinski
 Organization: Labgrid-Project
 .SH COPYRIGHT
-Copyright (C) 2016-2023 Pengutronix. This library is free software;
+Copyright (C) 2016-2024 Pengutronix.
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-device-config.rst b/man/labgrid-device-config.rst index 3298f5c47..216a657bd 100644 --- a/man/labgrid-device-config.rst +++ b/man/labgrid-device-config.rst @@ -9,7 +9,7 @@ labgrid test configuration files :Author: Rouven Czerwinski :organization: Labgrid-Project :Date: 2017-04-15 -:Copyright: Copyright (C) 2016-2023 Pengutronix. This library is free software; +:Copyright: Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-exporter.1 b/man/labgrid-exporter.1 index 4ee130e8b..faf836daa 100644 --- a/man/labgrid-exporter.1 +++ b/man/labgrid-exporter.1 @@ -141,7 +141,7 @@ Rouven Czerwinski Organization: Labgrid-Project .SH COPYRIGHT -Copyright (C) 2016-2023 Pengutronix. This library is free software; +Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-exporter.rst b/man/labgrid-exporter.rst index 27e902beb..f43754ca1 100644 --- a/man/labgrid-exporter.rst +++ b/man/labgrid-exporter.rst @@ -9,7 +9,7 @@ labgrid-exporter interface to control boards :Author: Rouven Czerwinski :organization: Labgrid-Project :Date: 2017-04-15 -:Copyright: Copyright (C) 2016-2023 Pengutronix. This library is free software; +:Copyright: Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-pytest.7 b/man/labgrid-pytest.7 index ad775495c..f23eae478 100644 --- a/man/labgrid-pytest.7 +++ b/man/labgrid-pytest.7 @@ -64,7 +64,7 @@ Rouven Czerwinski Organization: Labgrid-Project .SH COPYRIGHT -Copyright (C) 2016-2023 Pengutronix. This library is free software; +Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-pytest.rst b/man/labgrid-pytest.rst index cd5b715fe..78fccada3 100644 --- a/man/labgrid-pytest.rst +++ b/man/labgrid-pytest.rst @@ -8,7 +8,7 @@ labgrid-pytest labgrid integration for pytest :Author: Rouven Czerwinski :organization: Labgrid-Project :Date: 2017-04-15 -:Copyright: Copyright (C) 2016-2023 Pengutronix. This library is free software; +:Copyright: Copyright (C) 2016-2024 Pengutronix. 
This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-suggest.1 b/man/labgrid-suggest.1 index 52c7b1f3c..51efbae5e 100644 --- a/man/labgrid-suggest.1 +++ b/man/labgrid-suggest.1 @@ -88,7 +88,7 @@ USBSerialPort: Organization: Labgrid-Project .SH COPYRIGHT -Copyright (C) 2016-2023 Pengutronix. This library is free software; +Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) diff --git a/man/labgrid-suggest.rst b/man/labgrid-suggest.rst index c3e6dcbb2..d46a677af 100644 --- a/man/labgrid-suggest.rst +++ b/man/labgrid-suggest.rst @@ -8,7 +8,7 @@ labgrid-suggest generator for YAML config files :organization: Labgrid-Project :Date: 2021-05-20 -:Copyright: Copyright (C) 2016-2023 Pengutronix. This library is free software; +:Copyright: Copyright (C) 2016-2024 Pengutronix. This library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) From c792c2b80dca3b60f6c3a603a9e84150f4c5dc4b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 5 Jan 2024 14:31:20 +0100 Subject: [PATCH 127/384] doc: extend copyright period to 2024 Signed-off-by: Bastian Krause --- doc/conf.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 5061f4333..b1f470dc7 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -57,7 +57,7 @@ # General information about the project. 
project = 'labgrid' -copyright = '2016-2023 Pengutronix, Jan Luebbe and Rouven Czerwinski' +copyright = '2016-2024 Pengutronix, Jan Luebbe and Rouven Czerwinski' author = 'Jan Luebbe, Rouven Czerwinski' # The version info for the project you're documenting, acts as replacement for From 64e32258b048b16621eb9f7395803f615b953f1b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 5 Jan 2024 14:31:48 +0100 Subject: [PATCH 128/384] debian: extend copyright period to 2024 Signed-off-by: Bastian Krause --- debian/copyright | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/debian/copyright b/debian/copyright index e921d88cb..1e176ff96 100644 --- a/debian/copyright +++ b/debian/copyright @@ -3,12 +3,12 @@ Upstream-Name: labgrid Source: https://github.com/labgrid-project/labgrid Files: * -Copyright: Copyright (C) 2016-2023 Pengutronix, Jan Luebbe - Copyright (C) 2016-2023 Pengutronix, Rouven Czerwinski +Copyright: Copyright (C) 2016-2024 Pengutronix, Jan Luebbe + Copyright (C) 2016-2024 Pengutronix, Rouven Czerwinski License: LGPL-2.1+ Files: man/* -Copyright: Copyright (C) 2016-2023 Pengutronix +Copyright: Copyright (C) 2016-2024 Pengutronix License: LGPL-2.1+ License: LGPL-2.1+ From 6fb70ea123b7039c914d6e1351cc632dcd7f2c90 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 5 Jan 2024 14:34:05 +0100 Subject: [PATCH 129/384] CHANGES/debian: name next release 24.0 Signed-off-by: Bastian Krause --- CHANGES.rst | 10 +++++----- debian/changelog | 4 ++-- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index f0238a462..1ff2cb236 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,7 +1,7 @@ -Release 23.1 (unreleased) +Release 24.0 (unreleased) ------------------------------------ -New Features in 23.1 +New Features in 24.0 ~~~~~~~~~~~~~~~~~~~~ - When invoking tests with pytest, the ``--log-(cli|file)-(level|format)`` command line arguments and their corresponding pytest.ini configure options @@ -13,7 +13,7 @@ New Features in 23.1 - The `QEMUDriver` now has an additional ``disk_opts`` property which can be used to pass additional options for the disk directly to QEMU -Bug fixes in 23.1 +Bug fixes in 24.0 ~~~~~~~~~~~~~~~~~ - The pypi release now uses the labgrid pyserial fork in the form of the pyserial-labgrid package. This fixes installation with newer versions @@ -31,7 +31,7 @@ Bug fixes in 23.1 local host. - The password for the ShellDriver can now be an empty string. -Breaking changes in 23.1 +Breaking changes in 24.0 ~~~~~~~~~~~~~~~~~~~~~~~~~ - Support for the legacy ticket authentication was dropped: If the coordinator logs ModuleNotFoundError on startup, switch the crossbar config to anonymous @@ -77,7 +77,7 @@ Breaking changes in 23.1 ``internal`` and ``external`` arguments as they do not fit the NFS mount use case. 
-Known issues in 23.1 +Known issues in 24.0 ~~~~~~~~~~~~~~~~~~~~ diff --git a/debian/changelog b/debian/changelog index 4b9ee3cb4..47e263732 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,8 +1,8 @@ -labgrid (23.1.0) UNRELEASED; urgency=low +labgrid (24.0.0) UNRELEASED; urgency=low * See https://github.com/labgrid-project/labgrid/blob/master/CHANGES.rst - -- Bastian Krause Wed, 26 Apr 2023 16:06:25 +0200 + -- Bastian Krause Wed, 01 Jan 2024 14:06:25 +0100 labgrid (23.0.0) UNRELEASED; urgency=low From f7cb04c35fb7867b07009fa3255f638a61b3c1d3 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 5 Jan 2024 14:35:22 +0100 Subject: [PATCH 130/384] CHANGES: fix underlines Signed-off-by: Bastian Krause --- CHANGES.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1ff2cb236..d6807a7b0 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,5 @@ Release 24.0 (unreleased) ------------------------------------- +------------------------- New Features in 24.0 ~~~~~~~~~~~~~~~~~~~~ @@ -32,7 +32,7 @@ Bug fixes in 24.0 - The password for the ShellDriver can now be an empty string. Breaking changes in 24.0 -~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~ - Support for the legacy ticket authentication was dropped: If the coordinator logs ModuleNotFoundError on startup, switch the crossbar config to anonymous authentication (see ``.crossbar/config-anonymous.yaml`` for an example). @@ -188,7 +188,7 @@ Bug fixes in 23.0 evaluation. Breaking changes in 23.0 -~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~ - ``Config``'s ``get_option()``/``get_target_option()`` convert non-string options no longer to strings. - `UBootDriver`'s ``boot_expression`` attribute is deprecated, it will no @@ -268,7 +268,7 @@ Breaking changes in 0.4.0 - ``EthernetInterface`` has been renamed to ``NetworkInterface``. Known issues in 0.4.0 -~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~ - Some client commands return 0 even if the command failed. - Currently empty passwords are not well supported by the ShellDriver @@ -365,7 +365,7 @@ Breaking changes in 0.3.0 reasons. Known issues in 0.3.0 -~~~~~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~ - There are several reports of ``sshpass`` used within the SSHDriver not working in call cases or only on the first connection. - Some client commands return 0 even if the command failed. From c604bc8209a605cdff8eff97b21bfdde26d76579 Mon Sep 17 00:00:00 2001 From: Joshua Watt Date: Mon, 8 Jan 2024 12:29:41 -0700 Subject: [PATCH 131/384] contrib/coordinator-statsd: Close sessions Ensures that the statsd service closes sessions after querying the coordinator. 
Signed-off-by: Joshua Watt
---
 contrib/coordinator-statsd.py | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)

diff --git a/contrib/coordinator-statsd.py b/contrib/coordinator-statsd.py
index 1d1ec5c44..cdf6c6c73 100755
--- a/contrib/coordinator-statsd.py
+++ b/contrib/coordinator-statsd.py
@@ -181,13 +181,15 @@ def main():
                 os.environ.get("LG_CROSSBAR_REALM", "realm1"),
                 extra,
             )
-
-            session.loop.run_until_complete(
-                asyncio.gather(
-                    report_places(session, args.tags, gauges),
-                    report_reservations(session, args.tags, gauges),
+            try:
+                session.loop.run_until_complete(
+                    asyncio.gather(
+                        report_places(session, args.tags, gauges),
+                        report_reservations(session, args.tags, gauges),
+                    )
                 )
-            )
+            finally:
+                session.leave()
         except labgrid.remote.client.Error as e:
             print(f"Error communicating with labgrid: {e}")
             continue

From 9aa2684b903d6a3d5e17bc70d5d23144e4121062 Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Wed, 10 Jan 2024 11:51:13 +0100
Subject: [PATCH 132/384] labgrid/driver/power: add poe-mib backend

The PoE MiB backend uses SNMP to enable and disable PoE on a switch
port. Compatibility was tested on a Cisco CBS350.

Signed-off-by: Rouven Czerwinski
---
 doc/configuration.rst           |  3 +++
 labgrid/driver/power/poe_mib.py | 27 +++++++++++++++++++++++++++
 tests/test_powerdriver.py       |  4 ++++
 3 files changed, 34 insertions(+)
 create mode 100644 labgrid/driver/power/poe_mib.py

diff --git a/doc/configuration.rst b/doc/configuration.rst
index f05b0544d..764e0b63f 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -233,6 +233,9 @@ Currently available are:
     Controls TP-Link power strips via
     `python-kasa `_.
 
+``poe_mib``
+    Controls PoE switches using the PoE SNMP administration MiBs.
+
 Used by:
   - `NetworkPowerDriver`_
 
diff --git a/labgrid/driver/power/poe_mib.py b/labgrid/driver/power/poe_mib.py
new file mode 100644
index 000000000..12b159e36
--- /dev/null
+++ b/labgrid/driver/power/poe_mib.py
@@ -0,0 +1,27 @@
+""" tested with Cisco CBS350, should be compatible with switches implementing the PoE administration MiB"""
+
+from ..exception import ExecutionError
+from ...util.snmp import SimpleSNMP
+
+OID = "1.3.6.1.2.1.105.1.1.1.3.1"
+
+def power_set(host, port, index, value):
+    _snmp = SimpleSNMP(host, 'private', port=port)
+    outlet_control_oid = "{}.{}".format(OID, index)
+
+    oid_value = "1" if value else "2"
+
+    _snmp.set(outlet_control_oid, oid_value)
+
+def power_get(host, port, index):
+    _snmp = SimpleSNMP(host, 'private', port=port)
+    output_status_oid = "{}.{}".format(OID, index)
+
+    value = _snmp.get(output_status_oid)
+
+    if value == 1: # On
+        return True
+    if value == 2: # Off
+        return False
+
+    raise ExecutionError("failed to get SNMP value")
diff --git a/tests/test_powerdriver.py b/tests/test_powerdriver.py
index 7afcdfb33..37d6c2af7 100644
--- a/tests/test_powerdriver.py
+++ b/tests/test_powerdriver.py
@@ -265,3 +265,7 @@ def test_import_backend_tplink(self):
     def test_import_backend_siglent(self):
         pytest.importorskip("vxi11")
         import labgrid.driver.power.siglent
+
+    def test_import_backend_poe_mib(self):
+        pytest.importorskip("pysnmp")
+        import labgrid.driver.power.poe_mib

From 0cf3fe364373d30ff0d178b21d8f94dc498f230e Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Fri, 9 Sep 2022 13:48:05 +0200
Subject: [PATCH 133/384] driver: add RawNetworkInterfaceDriver

This driver allows "raw" control of a network interface (such as
Ethernet or WiFi).
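A rough usage sketch against the driver API added below (environment
file, target name and file names are invented for the example):

    from labgrid import Environment

    env = Environment("env.yaml")
    target = env.get_target("main")
    netdrv = target.get_driver("RawNetworkInterfaceDriver")

    # record 10 frames from the bound interface into a local pcap
    with netdrv.record("capture.pcap", count=10):
        pass  # trigger the traffic of interest here

    # replay a pcap onto the same interface, waiting up to 30s for
    # tcpreplay to terminate
    with netdrv.replay("capture.pcap", timeout=30):
        pass

    print(netdrv.get_statistics()["address"])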
Signed-off-by: Bastian Krause Signed-off-by: Rouven Czerwinski --- doc/configuration.rst | 42 +++++ labgrid/driver/__init__.py | 1 + labgrid/driver/rawnetworkinterfacedriver.py | 187 ++++++++++++++++++++ 3 files changed, 230 insertions(+) create mode 100644 labgrid/driver/rawnetworkinterfacedriver.py diff --git a/doc/configuration.rst b/doc/configuration.rst index eabd2e905..b28545141 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -2955,6 +2955,48 @@ It supports: - connection sharing (DHCP server with NAT) - listing DHCP leases (if the client has sufficient permissions) +Binds to: + iface: + - `NetworkInterface`_ + - `USBNetworkInterface`_ + - `RemoteNetworkInterface`_ + +Implements: + - None yet + +Arguments: + - None + +RawNetworkInterfaceDriver +~~~~~~~~~~~~~~~~~~~~~~~~~ +This driver allows "raw" control of a network interface (such as Ethernet or +WiFi). + +The labgrid-raw-interface helper (``helpers/labgrid-raw-interface``) needs to +be installed in the PATH and usable via sudo without password. +A configuration file ``/etc/labgrid/helpers.yaml`` must be installed on hosts +exporting network interfaces for the RawNetworkInterfaceDriver, e.g.: + +.. code-block:: yaml + + raw-interface: + denied-interfaces: + - eth1 + +It supports: +- recording traffic +- replaying traffic +- basic statistic collection + +For now, the RawNetworkInterfaceDriver leaves pre-configuration of the exported +network interface to the user, including: +- disabling DHCP +- disabling IPv6 Duplicate Address Detection (DAD) by SLAAC (Stateless +Address Autoconfiguration) and Neighbor Discovery +- disabling Generic Receive Offload (GRO) + +This might change in the future. + Binds to: iface: - `NetworkInterface`_ diff --git a/labgrid/driver/__init__.py b/labgrid/driver/__init__.py index 4cda6be5f..721256bbf 100644 --- a/labgrid/driver/__init__.py +++ b/labgrid/driver/__init__.py @@ -41,6 +41,7 @@ from .httpvideodriver import HTTPVideoDriver from .networkinterfacedriver import NetworkInterfaceDriver from .provider import HTTPProviderDriver, NFSProviderDriver, TFTPProviderDriver +from .rawnetworkinterfacedriver import RawNetworkInterfaceDriver from .mqtt import TasmotaPowerDriver from .manualswitchdriver import ManualSwitchDriver from .usbtmcdriver import USBTMCDriver diff --git a/labgrid/driver/rawnetworkinterfacedriver.py b/labgrid/driver/rawnetworkinterfacedriver.py new file mode 100644 index 000000000..3be80960f --- /dev/null +++ b/labgrid/driver/rawnetworkinterfacedriver.py @@ -0,0 +1,187 @@ +# pylint: disable=no-member +import contextlib +import json +import subprocess + +import attr + +from .common import Driver +from ..factory import target_factory +from ..step import step +from ..util.helper import processwrapper +from ..util.managedfile import ManagedFile +from ..resource.common import NetworkResource + + +@target_factory.reg_driver +@attr.s(eq=False) +class RawNetworkInterfaceDriver(Driver): + bindings = { + "iface": {"NetworkInterface", "RemoteNetworkInterface", "USBNetworkInterface"}, + } + + def __attrs_post_init__(self): + super().__attrs_post_init__() + self._record_handle = None + self._replay_handle = None + + def _wrap_command(self, args): + wrapper = ["sudo", "labgrid-raw-interface"] + + if self.iface.command_prefix: + # add ssh prefix, convert command passed via ssh (including wrapper) to single argument + return self.iface.command_prefix + [" ".join(wrapper + args)] + else: + # keep wrapper and args as-is + return wrapper + args + + def _stop(self, proc, *, timeout=None): + 
assert proc is not None
+
+        try:
+            _, err = proc.communicate(timeout=timeout)
+        except subprocess.TimeoutExpired:
+            proc.terminate()
+            _, err = proc.communicate()
+            raise
+
+        if proc.returncode:
+            raise subprocess.CalledProcessError(
+                returncode=proc.returncode,
+                cmd=proc.args,
+                stderr=err,
+            )
+
+    @Driver.check_active
+    @step(args=["filename", "count"])
+    def start_record(self, filename, *, count=None):
+        """
+        Starts tcpdump on bound network interface resource.
+
+        Args:
+            filename (str): name of a file to record to
+            count (int): optional, exit after receiving this many packets
+        Returns:
+            Popen object of tcpdump process
+        """
+        assert self._record_handle is None
+
+        cmd = ["tcpdump", self.iface.ifname]
+        if count is not None:
+            cmd.append(str(count))
+        cmd = self._wrap_command(cmd)
+        with open(filename, "wb") as outdata:
+            self._record_handle = subprocess.Popen(cmd, stdout=outdata, stderr=subprocess.PIPE)
+        return self._record_handle
+
+    @Driver.check_active
+    @step(args=["timeout"])
+    def stop_record(self, *, timeout=None):
+        """
+        Stops previously started tcpdump on bound network interface resource.
+
+        Args:
+            timeout (int): optional, maximum number of seconds to wait for the tcpdump process to
+                terminate
+        """
+        try:
+            self._stop(self._record_handle, timeout=timeout)
+        finally:
+            self._record_handle = None
+
+    @contextlib.contextmanager
+    def record(self, filename, *, count=None, timeout=None):
+        """
+        Context manager to start/stop tcpdump on bound network interface resource.
+
+        Either count or timeout must be specified.
+
+        Args:
+            filename (str): name of a file to record to
+            count (int): optional, exit after receiving this many packets
+            timeout (int): optional, maximum number of seconds to wait for the tcpdump process to
+                terminate
+        """
+        assert count or timeout
+
+        try:
+            yield self.start_record(filename, count=count)
+        finally:
+            self.stop_record(timeout=timeout)
+
+    @Driver.check_active
+    @step(args=["filename"])
+    def start_replay(self, filename):
+        """
+        Starts tcpreplay on bound network interface resource.
+
+        Args:
+            filename (str): name of a file to replay from
+        Returns:
+            Popen object of tcpreplay process
+        """
+        assert self._replay_handle is None
+
+        if isinstance(self.iface, NetworkResource):
+            mf = ManagedFile(filename, self.iface)
+            mf.sync_to_resource()
+            cmd = self._wrap_command([f"tcpreplay {self.iface.ifname} < {mf.get_remote_path()}"])
+            self._replay_handle = subprocess.Popen(cmd, stderr=subprocess.PIPE)
+        else:
+            cmd = self._wrap_command(["tcpreplay", self.iface.ifname])
+            with open(filename, "rb") as indata:
+                self._replay_handle = subprocess.Popen(cmd, stdin=indata)
+
+        return self._replay_handle
+
+    @Driver.check_active
+    @step(args=["timeout"])
+    def stop_replay(self, *, timeout=None):
+        """
+        Stops previously started tcpreplay on bound network interface resource.
+
+        Args:
+            timeout (int): optional, maximum number of seconds to wait for the tcpreplay process to
+                terminate
+        """
+        try:
+            self._stop(self._replay_handle, timeout=timeout)
+        finally:
+            self._replay_handle = None
+
+    @contextlib.contextmanager
+    def replay(self, filename, *, timeout=None):
+        """
+        Context manager to start/stop tcpreplay on bound network interface resource.
+ + Args: + filename (str): name of a file to replay from + timeout (int): optional, maximum number of seconds to wait for the tcpreplay process to + terminate + """ + try: + yield self.start_replay(filename) + finally: + self.stop_replay(timeout=timeout) + + @Driver.check_active + @step() + def get_statistics(self): + """ + Returns basic interface statistics of bound network interface resource. + """ + cmd = self.iface.command_prefix + [ + "ip", + "--json", + "-stats", "-stats", + "link", "show", + self.iface.ifname] + output = processwrapper.check_output(cmd) + return json.loads(output)[0] + + @Driver.check_active + def get_address(self): + """ + Returns the MAC address of the bound network interface resource. + """ + return self.get_statistics()["address"] From e3ee7f6bdc26237c87b26a3e1a0cef2ea9be5e85 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 26 Oct 2022 15:54:13 +0200 Subject: [PATCH 134/384] examples: add network test using RawNetworkDriver Generates an Ethernet frame via scapy using pcap, copies pcap to DUT, replays pcap on interface, records frame locally (or on exporter, adjust env.yaml accordingly), and compares both. Signed-off-by: Bastian Krause --- examples/network-test/env.yaml | 14 ++++++ examples/network-test/pkg-replay-record.py | 55 ++++++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 examples/network-test/env.yaml create mode 100755 examples/network-test/pkg-replay-record.py diff --git a/examples/network-test/env.yaml b/examples/network-test/env.yaml new file mode 100644 index 000000000..1f8b9b10f --- /dev/null +++ b/examples/network-test/env.yaml @@ -0,0 +1,14 @@ +targets: + main: + resources: + NetworkService: + address: 192.168.1.5 + username: root + NetworkInterface: + ifname: enp2s0f3 + drivers: + SSHDriver: {} + RawNetworkInterfaceDriver: {} + options: + local_iface_to_dut_iface: + enp2s0f3: uplink diff --git a/examples/network-test/pkg-replay-record.py b/examples/network-test/pkg-replay-record.py new file mode 100755 index 000000000..65cb51702 --- /dev/null +++ b/examples/network-test/pkg-replay-record.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# Generates an Ethernet frame via scapy using pcap, copies pcap to DUT, replays pcap on interface, +# records frame locally (or on exporter, adjust env.yaml accordingly), and compares both. 
+
+import logging
+import os
+from tempfile import NamedTemporaryFile, TemporaryDirectory
+
+from scapy.all import Ether, Raw, rdpcap, wrpcap, conf
+
+from labgrid import Environment
+from labgrid.logging import basicConfig, StepLogger
+
+def generate_frame():
+    frame = [Ether(dst="11:22:33:44:55:66", src="66:55:44:33:22:11", type=0x9000)]
+    padding = "\x00" * (conf.min_pkt_size - len(frame))
+    frame = frame[0] / Raw(load=padding)
+    return frame
+
+
+basicConfig(level=logging.INFO)
+StepLogger.start()
+env = Environment("env.yaml")
+target = env.get_target()
+
+netdrv = target.get_driver("RawNetworkInterfaceDriver")
+ssh = target.get_driver("SSHDriver")
+
+# get DUT interface
+exporter_iface = netdrv.iface.ifname
+dut_iface = env.config.get_target_option(target.name, "local_iface_to_dut_iface")[exporter_iface]
+
+# generate test frame
+generated_frame = generate_frame()
+
+# write pcap, copy to DUT
+remote_pcap = "/tmp/pcap"
+with NamedTemporaryFile() as pcap:
+    wrpcap(pcap.name, generated_frame)
+    ssh.put(pcap.name, remote_pcap)
+
+# copy recorded pcap from DUT, compare with generated frame
+with TemporaryDirectory() as tempdir:
+    # start record on exporter
+    tempf = os.path.join(tempdir, "record.pcap")
+    with netdrv.record(tempf, count=1) as record:
+        # replay pcap on DUT
+        ssh.run_check(f"ip link set {dut_iface} up")
+        ssh.run_check(f"tcpreplay -i {dut_iface} {remote_pcap}")
+
+    remote_frame = rdpcap(tempf)
+    assert remote_frame[0] == generated_frame[0]
+
+print("statistics", netdrv.get_statistics())
+print("address", netdrv.get_address())

From a6d1a749f3f8862395eda5bf891ae37c350e3199 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 17 Jan 2024 11:13:48 +0100
Subject: [PATCH 135/384] crossbar: enable auto fragmentation of outgoing
 WebSocket messages

Larger labgrid remote infrastructure setups with a big number of
exported resources are limited by the maximum WebSocket payload size of
1M in autobahn [1].

Future patches in labgrid will increase the maximum payload size from
1M to 10M to allow larger setups. In order to be able to send these
payloads, enable auto fragmentation of outgoing WebSocket messages into
multiple WebSocket frames [2] in the crossbar configuration. Use 65536
bytes, which is the value set in the ApplicationRunner [3].
[1] https://github.com/crossbario/autobahn-python/blob/359f868f9db410586cf01c071220994d8d7f165a/autobahn/asyncio/wamp.py#L223
[2] https://github.com/crossbario/crossbar/blob/master/docs/WebSocket-Options.rst
[3] https://github.com/crossbario/autobahn-python/blob/359f868f9db410586cf01c071220994d8d7f165a/autobahn/asyncio/wamp.py#L224

Signed-off-by: Bastian Krause
---
 .crossbar/config-anonymous.yaml | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/.crossbar/config-anonymous.yaml b/.crossbar/config-anonymous.yaml
index b12e8d70d..8771a5aa1 100644
--- a/.crossbar/config-anonymous.yaml
+++ b/.crossbar/config-anonymous.yaml
@@ -28,6 +28,8 @@ workers:
           directory: ../web
         ws:
           type: websocket
+          options:
+            auto_fragment_size: 65536
           auth:
             anonymous:
               type: static

From 2c8cc737f13064f879dbd9e742502c30a96b864d Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 16 Jan 2024 23:12:03 +0100
Subject: [PATCH 136/384] remote/common: add monkey patch function for
 maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions

Larger labgrid remote infrastructure setups with a big number of
exported resources are limited by the maximum WebSocket payload size of
1M in autobahn [1]: When calling `get_resources()` in this situation,
labgrid-client simply hangs. In crossbar's log errors occur:

  tried to send WebSocket message with size 1068476 exceeding payload limit of 1048576 octets

There is no easy way of setting this WebSocket option, since it's set
in `ApplicationRunner.run()` [2], which would need to be duplicated into
labgrid's source code in its entirety. For an upstream approach, see [3]
which was reverted without comment in [4].

So increase the maximum payload size from 1M to 10M by monkey patching
maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions().
In order to be able to actually send these payload sizes, a previous
patch already enabled auto fragmentation of outgoing WebSocket messages
into multiple WebSocket frames in crossbar's configuration.

[1] https://github.com/crossbario/autobahn-python/blob/359f868f9db410586cf01c071220994d8d7f165a/autobahn/asyncio/wamp.py#L223
[2] https://github.com/crossbario/autobahn-python/blob/359f868f9db410586cf01c071220994d8d7f165a/autobahn/asyncio/wamp.py#L90
[3] https://github.com/crossbario/autobahn-python/pull/912
[4] https://github.com/crossbario/autobahn-python/pull/921

Signed-off-by: Bastian Krause
---
 doc/conf.py              |  1 +
 labgrid/remote/common.py | 34 ++++++++++++++++++++++++++++++++++
 2 files changed, 35 insertions(+)

diff --git a/doc/conf.py b/doc/conf.py
index b1f470dc7..139f530f0 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -186,6 +186,7 @@
     'autobahn',
     'autobahn.asyncio',
     'autobahn.asyncio.wamp',
+    'autobahn.asyncio.websocket',
     'autobahn.wamp',
     'autobahn.wamp.types',
     'autobahn.twisted',
diff --git a/labgrid/remote/common.py b/labgrid/remote/common.py
index e3a7023c5..142455eb2 100644
--- a/labgrid/remote/common.py
+++ b/labgrid/remote/common.py
@@ -18,6 +18,7 @@
     'ReservationState',
     'Reservation',
     'enable_tcp_nodelay',
+    'monkey_patch_max_msg_payload_size_ws_option',
 ]
 
 TAG_KEY = re.compile(r"[a-z][a-z0-9_]+")
@@ -312,3 +313,36 @@ def enable_tcp_nodelay(session):
     """
     s = session._transport.transport.get_extra_info('socket')
     s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)
+
+
+def monkey_patch_max_msg_payload_size_ws_option():
+    """
+    The default maxMessagePayloadSize in autobahn is 1M. For larger setups with a big number of
+    exported resources, this becomes the limiting factor.
+
+    Increase maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions() by monkey
+    patching it, so autobahn.asyncio.wamp.ApplicationRunner effectively sets the increased value.
+
+    This function must be called before ApplicationRunner is instantiated.
+    """
+    from autobahn.asyncio.websocket import WampWebSocketClientFactory
+
+    original_method = WampWebSocketClientFactory.setProtocolOptions
+
+    def set_protocol_options(*args, **kwargs):
+        new_max_message_payload_size = 10485760
+
+        # maxMessagePayloadSize given as positional arg
+        args = list(args)
+        try:
+            args[9] = max((args[9], new_max_message_payload_size))
+        except IndexError:
+            pass
+
+        # maxMessagePayloadSize given as kwarg
+        kwarg_name = "maxMessagePayloadSize"
+        if kwarg_name in kwargs and kwargs[kwarg_name] is not None:
+            kwargs[kwarg_name] = max((kwargs[kwarg_name], new_max_message_payload_size))
+
+        return original_method(*args, **kwargs)
+
+    WampWebSocketClientFactory.setProtocolOptions = set_protocol_options

From 6bd531a32af02f44ff15db29f37801eeed47abf8 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 16 Jan 2024 23:13:10 +0100
Subject: [PATCH 137/384] remote/coordinator: apply maxMessagePayloadSize
 monkey patch for WampWebSocketClientFactory.setProtocolOptions()

Signed-off-by: Bastian Krause
---
 labgrid/remote/coordinator.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py
index f8572ddce..29b45d83b 100644
--- a/labgrid/remote/coordinator.py
+++ b/labgrid/remote/coordinator.py
@@ -19,6 +19,9 @@
 from ..util import atomic_replace, yaml
 
 
+monkey_patch_max_msg_payload_size_ws_option()
+
+
 class Action(Enum):
     ADD = 0
     DEL = 1

From 7930f12222054b13ebd330d9c05f244ddef46270 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 16 Jan 2024 23:13:21 +0100
Subject: [PATCH 138/384] remote/exporter: apply maxMessagePayloadSize monkey
 patch for WampWebSocketClientFactory.setProtocolOptions()

Signed-off-by: Bastian Krause
---
 labgrid/remote/exporter.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py
index 288ea3a62..3bf2de171 100755
--- a/labgrid/remote/exporter.py
+++ b/labgrid/remote/exporter.py
@@ -18,10 +18,12 @@
 from autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession
 
 from .config import ResourceConfig
-from .common import ResourceEntry, enable_tcp_nodelay
+from .common import ResourceEntry, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option
 from ..util import get_free_port, labgrid_version
 
 
+monkey_patch_max_msg_payload_size_ws_option()
+
 __version__ = labgrid_version()
 exports: Dict[str, Type[ResourceEntry]] = {}
 reexec = False

From 54b15185832575f50ca1aa1b7abfb65abcf4a632 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 16 Jan 2024 23:12:56 +0100
Subject: [PATCH 139/384] remote/client: apply maxMessagePayloadSize monkey
 patch for WampWebSocketClientFactory.setProtocolOptions()

Signed-off-by: Bastian Krause
---
 labgrid/remote/client.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py
index 4f97502c8..235238187 100755
--- a/labgrid/remote/client.py
+++ b/labgrid/remote/client.py
@@ -23,7 +23,7 @@
 from autobahn.asyncio.wamp import ApplicationSession
 
 from .common import (ResourceEntry, ResourceMatch, Place, Reservation, ReservationState, TAG_KEY,
-                     TAG_VAL, enable_tcp_nodelay)
+                     TAG_VAL, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option)
 from ..
import Environment, Target, target_factory from ..exceptions import NoDriverFoundError, NoResourceFoundError, InvalidConfigError from ..resource.remote import RemotePlaceManager, RemotePlace @@ -34,6 +34,7 @@ from ..logging import basicConfig, StepLogger txaio.config.loop = asyncio.get_event_loop() # pylint: disable=no-member +monkey_patch_max_msg_payload_size_ws_option() class Error(Exception): From e82c0d1b8207c43011f7fc01fd95aad588676d46 Mon Sep 17 00:00:00 2001 From: Roland Hieber Date: Thu, 18 Jan 2024 11:16:28 +0100 Subject: [PATCH 140/384] CHANGES.rst: mention logging config for StepReporter This is consistent with commit a3df4e6698880ab1f340 (2023-04-27, Rouven Czerwinski: "stepreporter: deprecate it"). Signed-off-by: Roland Hieber --- CHANGES.rst | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index d6807a7b0..0c7968c4d 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -44,7 +44,8 @@ Breaking changes in 24.0 coordinator.`` on the client when running an updated coordinator, your coordinator configuration may set ``ticket`` instead of ``anonymous`` auth. - The `StepReporter` API has been changed. To start step reporting, you must - now call ``StepReporter.start()`` instead of ``StepReporter()`` + now call ``StepReporter.start()`` instead of ``StepReporter()``, and set up + logging via ``labgrid.logging.basicConfig()``. - Logging output when running pytest is no longer sent to stderr by default, since this is both chatty and also unnecessary with the improved logging flexibility. It it recommended to use the ``--log-cli-level=INFO`` command From ecffb08d8165adef14877193a8f6dbb377e68731 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Ulrich=20=C3=96lmann?= Date: Wed, 24 Jan 2024 06:31:20 +0100 Subject: [PATCH 141/384] doc/configuration: harmonize with related sections MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adjust the section "TFTPProviderDriver / HTTPProviderDriver" to what is found in sections "TFTPProvider / HTTPProvider" & "RemoteTFTPProvider / RemoteHTTP- Provider" and enumerate the HTTPProviderDriver in the code-block, too. Signed-off-by: Ulrich Ölmann --- doc/configuration.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index 56b581825..fa1324a63 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -2418,6 +2418,8 @@ Binds to: TFTPProviderDriver: {} + HTTPProviderDriver: {} + Arguments: - None From c81081b4c72f9e458a71ffc19cc59442ed4d262b Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Wed, 31 Jan 2024 12:09:25 +0100 Subject: [PATCH 142/384] remote/exporter: hostname option is ignored Following the addition of the fqdn option, the hostname is ignored. Hostname is set with gethostname() regardless of args.hostname value because the condition is incorrect. Fix this and get the hostname option working again. 
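The underlying pitfall, reduced to a sketch (values invented):
conditional expressions bind less tightly than "or", so the original
line grouped as (args.hostname or getfqdn()) if args.fqdn else
gethostname():

    hostname = "exporter-1"   # stands in for args.hostname
    fqdn = False              # stands in for args.fqdn

    broken = hostname or "host.example.org" if fqdn else "host"
    fixed = hostname or ("host.example.org" if fqdn else "host")

    assert broken == "host"        # explicit hostname is dropped
    assert fixed == "exporter-1"   # explicit hostname wins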
Cc: Andreas Naumann
Signed-off-by: Jerome Brunet
---
 labgrid/remote/exporter.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py
index 288ea3a62..b0e744cce 100755
--- a/labgrid/remote/exporter.py
+++ b/labgrid/remote/exporter.py
@@ -949,7 +949,7 @@ def main():
 
     extra = {
         'name': args.name or gethostname(),
-        'hostname': args.hostname or getfqdn() if args.fqdn else gethostname(),
+        'hostname': args.hostname or (getfqdn() if args.fqdn else gethostname()),
         'resources': args.resources,
         'isolated': args.isolated
     }

From e6c16622d098c675abb4c76213928cc51bf2a251 Mon Sep 17 00:00:00 2001
From: Liam Beguin
Date: Mon, 4 Dec 2023 15:00:58 -0500
Subject: [PATCH 143/384] contrib: sync-places: convert tags into strings

When setting tags from the command line, key, value pairs are always
strings. Make sure this is the same when reading a YAML file.

It can easily be done in the YAML file itself, but this makes the
syntax a bit lighter.

Signed-off-by: Liam Beguin
---
 contrib/sync-places.py | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/contrib/sync-places.py b/contrib/sync-places.py
index 3bad6b365..d3d74f86d 100755
--- a/contrib/sync-places.py
+++ b/contrib/sync-places.py
@@ -109,6 +109,11 @@ async def do_sync(session, args):
                 changed = True
 
             tags = config["places"][name].get("tags", {}).copy()
+            for k, v in tags.items():
+                if not isinstance(k, str) or not isinstance(v, str):
+                    del(tags[k])
+                    tags[str(k)] = str(v)
+
             if place_tags != tags:
                 print(
                     "Setting tags for place %s to %s"

From f99c44b247851e789efa897b43ff5525f878798e Mon Sep 17 00:00:00 2001
From: Liam Beguin
Date: Fri, 15 Dec 2023 14:13:18 -0500
Subject: [PATCH 144/384] contrib: add helper to graph current configuration

A visual representation can help make sense of a large configuration
quickly. As such, add a command line helper to generate a graphviz
graph of the resources and places in a given environment.

By default, this graph will be served on
http://localhost:8800/labgrid/graph

Signed-off-by: Liam Beguin
---
 contrib/README.rst              |  41 ++++++++
 contrib/labgrid-webapp          | 160 ++++++++++++++++++++++++++++
 contrib/requirements-webapp.txt |   3 +
 3 files changed, 204 insertions(+)
 create mode 100644 contrib/README.rst
 create mode 100755 contrib/labgrid-webapp
 create mode 100644 contrib/requirements-webapp.txt

diff --git a/contrib/README.rst b/contrib/README.rst
new file mode 100644
index 000000000..30e77c078
--- /dev/null
+++ b/contrib/README.rst
@@ -0,0 +1,41 @@
+labgrid-webapp
+==============
+
+labgrid-webapp implements a browser interface to access some of labgrid's
+information.
+
+Quick Start
+-----------
+
+.. code-block:: bash
+
+    $ cd labgrid/
+    $ source venv/bin/activate
+    venv $ pip install -r contrib/requirements-webapp.txt
+    venv $ ./contrib/labgrid-webapp --help
+    usage: labgrid-webapp [-h] [--crossbar URL] [--port PORT] [--proxy PROXY]
+
+    Labgrid webapp
+
+    options:
+      -h, --help            show this help message and exit
+      --crossbar URL, -x URL
+                            Crossbar websocket URL (default: ws://127.0.0.1:20408/ws)
+      --port PORT           Port to serve on
+      --proxy PROXY, -P PROXY
+
+    venv $ ./contrib/labgrid-webapp
+    INFO: Available routes:
+    INFO:  - /labgrid/graph
+    INFO: Started server process [2378028]
+    INFO: Waiting for application startup.
+    INFO: Application startup complete.
+    INFO: Uvicorn running on http://0.0.0.0:8800 (Press CTRL+C to quit)
+    ...
+
+Please note that the graph feature relies on a valid `graphviz` system
+installation.
+
+By default the application will start on port 8800.
+
+See http://0.0.0.0:8800/docs for more information on available endpoints.
diff --git a/contrib/labgrid-webapp b/contrib/labgrid-webapp
new file mode 100755
index 000000000..bd4a22178
--- /dev/null
+++ b/contrib/labgrid-webapp
@@ -0,0 +1,160 @@
+#!/usr/bin/env python3
+import argparse
+import logging
+import os
+import sys
+from typing import Dict
+
+import graphviz
+import uvicorn
+from fastapi import FastAPI
+from fastapi.responses import Response
+
+from labgrid.remote.client import ClientSession, start_session
+from labgrid.remote.common import Place
+from labgrid.resource import Resource
+from labgrid.util.proxy import proxymanager
+
+
+async def do_graph(session: ClientSession) -> bytes:
+    '''Generate a graphviz graph of the current configuration.
+
+    Graph displays:
+    - all resources, grouped by groupname and exporter.
+    - all places, with a list of tags
+    - solid edges between places and acquired resources
+    - dotted edges between places and unacquired resources
+    - edges between resources and places carry the match name if any.
+    '''
+    def res_node_attr(name: str, resource: Resource) -> Dict[str, str]:
+        return {
+            'shape': 'plaintext',
+            'label': f'''<
+                <table border="0" cellborder="1" cellspacing="0">
+                    <tr>
+                        <td bgcolor="lightgrey" colspan="2">Resource</td>
+                    </tr>
+                    <tr>
+                        <td>{resource.cls}</td>
+                        <td port="name">{name}</td>
+                    </tr>
+                </table>
+            >''',
+        }
+
+    def place_node_attr(name: str, place: Place) -> Dict[str, str]:
+        acquired = ''
+        bgcolor = 'lightblue'
+        if place.acquired:
+            bgcolor = 'cornflowerblue'
+            acquired = f'<tr><td colspan="2">{place.acquired}</td></tr>'
+
+        tags = '<tr><td colspan="2">Tags</td></tr>' if place.tags else ''
+        for k, v in place.tags.items():
+            tags += f'<tr><td colspan="2">{k}={v}</td></tr>'
+
+        return {
+            'shape': 'plaintext',
+            'label': f'''<
+                <table border="0" cellborder="1" cellspacing="0">
+                    <tr>
+                        <td bgcolor="{bgcolor}" colspan="2">Place</td>
+                    </tr>
+                    {acquired}
+                    <tr>
+                        <td colspan="2">{name}</td>
+                    </tr>
+                    {tags}
+                </table>
+            >''',
+        }
+
+    g = graphviz.Digraph('G')
+    g.attr(rankdir='LR')
+
+    paths = {}
+    for exporter, groups in session.resources.items():
+        g_exporter = graphviz.Digraph(f'cluster_{exporter}')
+        g_exporter.attr(label=exporter)
+
+        for group, resources in groups.items():
+            g_group = graphviz.Digraph(f'cluster_{group}')
+            g_group.attr(label=group)
+
+            for r_name, entry in resources.items():
+                res_node = f'{exporter}/{group}/{entry.cls}/{r_name}'.replace(':', '_')
+                paths[res_node] = [exporter, group, entry.cls, r_name]
+                g_group.node(res_node, **res_node_attr(r_name, entry))
+
+            g_exporter.subgraph(g_group)
+
+        g.subgraph(g_exporter)
+
+    for p_node, place in session.places.items():
+        g.node(p_node, **place_node_attr(p_node, place))
+
+        for m in place.matches:
+            for node, p in paths.items():
+                if m.ismatch(p):
+                    g.edge(
+                        f'{node}:name', p_node,
+                        style='solid' if place.acquired else 'dotted',
+                        label=m.rename if m.rename else None,
+                    )
+
+    return g.pipe(format='svg')
+
+
+def main():
+    app = FastAPI()
+    logger = logging.getLogger('uvicorn')
+
+    @app.get('/labgrid/graph')
+    async def get_graph() -> str:
+        '''Show a graph of the current infrastructure.'''
+        svg = await do_graph(session)
+        return Response(content=svg, media_type='image/svg+xml')
+
+    parser = argparse.ArgumentParser(
+        description='Labgrid webapp',
+        formatter_class=argparse.RawDescriptionHelpFormatter,
+    )
+    parser.add_argument(
+        '--crossbar',
+        '-x',
+        metavar='URL',
+        default=os.environ.get('LG_CROSSBAR', 'ws://127.0.0.1:20408/ws'),
+        help='Crossbar websocket URL (default: %(default)s)',
+    )
+    parser.add_argument('--port', type=int, default=8800, help='Port to serve on')
+    parser.add_argument('--proxy', '-P', help='Proxy connections via given ssh host')
+
+    args = parser.parse_args()
+
+    if args.proxy:
+        proxymanager.force_proxy(args.proxy)
+
+    try:
+        session = start_session(
+            args.crossbar, os.environ.get('LG_CROSSBAR_REALM', 'realm1'), {},
+        )
+    except ConnectionRefusedError:
+        logger.fatal('Unable to connect to labgrid crossbar')
+        return
+
+    server = uvicorn.Server(config=uvicorn.Config(
+        loop=session.loop,
+        host='0.0.0.0',
+        port=args.port,
+        app=app,
+    ))
+
+    logger.info('Available routes:')
+    for route in app.routes:
+        reserved_routes = ['/openapi.json', '/docs', '/docs/oauth2-redirect', '/redoc']
+        if route.path not in reserved_routes:
+            logger.info(f' - {route.path}')
+
+    session.loop.run_until_complete(server.serve())
+
+
+if __name__ == '__main__':
+    sys.exit(main())
diff --git a/contrib/requirements-webapp.txt b/contrib/requirements-webapp.txt
new file mode 100644
index 000000000..539b76d5e
--- /dev/null
+++ b/contrib/requirements-webapp.txt
@@ -0,0 +1,3 @@
+fastapi
+graphviz
+uvicorn

From 8ac72cb1ce9b5e14c63690e425650ab5d9ecb019 Mon Sep 17 00:00:00 2001
From: Trevor Gamblin
Date: Mon, 5 Feb 2024 09:26:53 -0500
Subject: [PATCH 145/384] man: labgrid-client.rst: replace 'status' with
 'cycle'

'get' seems to serve the same purpose as 'status', which isn't a valid
option, and 'cycle' is not shown in the doc despite being in the CLI
help output.

Signed-off-by: Trevor Gamblin
---
 man/labgrid-client.1   | 2 +-
 man/labgrid-client.rst | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/man/labgrid-client.1 b/man/labgrid-client.1
index b15645ecc..d2b850b32 100644
--- a/man/labgrid-client.1
+++ b/man/labgrid-client.1
@@ -183,7 +183,7 @@ not at all.
.sp \fBenv\fP Generate a labgrid environment file for a place .sp -\fBpower (pw)\fP action Change (or get) a place\(aqs power status, where action is one of get, on, off, status +\fBpower (pw)\fP action Change (or get) a place\(aqs power status, where action is one of get, on, off, cycle .sp \fBio\fP action Interact with GPIO (OneWire, relays, ...) devices, where action is one of high, low, get .sp diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst index 89bac5959..d9542559e 100644 --- a/man/labgrid-client.rst +++ b/man/labgrid-client.rst @@ -175,7 +175,7 @@ LABGRID-CLIENT COMMANDS ``env`` Generate a labgrid environment file for a place -``power (pw)`` action Change (or get) a place's power status, where action is one of get, on, off, status +``power (pw)`` action Change (or get) a place's power status, where action is one of get, on, off, cycle ``io`` action Interact with GPIO (OneWire, relays, ...) devices, where action is one of high, low, get From e3fc0029e63e996afd771ba9d83da68feca03973 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 19:14:12 +0100 Subject: [PATCH 146/384] driver/resource: mqtt: use callback API v2, bump paho-mqtt>=2.0.0 labgrid's "mqtt" and "dev" extras require paho-mqtt>=1.5.1. With paho-mqtt 2.0.0 [1] released on 2024-02-10, breaking changes were introduced [2]. This was first detected by the scheduled CI jobs via pylint: ************* Module labgrid.resource.mqtt labgrid/resource/mqtt.py:20:17: E1120: No value for argument 'callback_api_version' in constructor call (no-value-for-parameter) ************* Module labgrid.driver.mqtt labgrid/driver/mqtt.py:30:23: E1120: No value for argument 'callback_api_version' in constructor call (no-value-for-parameter) When using 'labgrid-client pw get' with a TasmotaPowerPort resource, the errors looks like this: DEBUG root: Starting session with "ws://labgrid:20408/ws", realm: "realm1" Exception ignored in: Traceback (most recent call last): File "/path/to/venv/lib/python3.11/site-packages/paho/mqtt/client.py", line 874, in __del__ self._reset_sockets() File "/path/to/venv/lib/python3.11/site-packages/paho/mqtt/client.py", line 1133, in _reset_sockets self._sock_close() File "/path/to/venv/lib/python3.11/site-packages/paho/mqtt/client.py", line 1119, in _sock_close if not self._sock: ^^^^^^^^^^ AttributeError: 'Client' object has no attribute '_sock' Traceback (most recent call last): File "/path/to/labgrid/labgrid/factory.py", line 124, in make_resource r = cls(target, name, **args) ^^^^^^^^^^^^^^^^^^^^^^^^^ File "", line 19, in __init__ self.__attrs_post_init__() File "/path/to/labgrid/labgrid/resource/mqtt.py", line 61, in __attrs_post_init__ super().__attrs_post_init__() File "/path/to/labgrid/labgrid/resource/common.py", line 157, in __attrs_post_init__ self.manager._add_resource(self) File "/path/to/labgrid/labgrid/resource/common.py", line 133, in _add_resource self.on_resource_added(resource) File "/path/to/labgrid/labgrid/resource/mqtt.py", line 29, in on_resource_added self._clients[host] = self._create_mqtt_connection(host) ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/path/to/labgrid/labgrid/resource/mqtt.py", line 20, in _create_mqtt_connection client = mqtt.Client() ^^^^^^^^^^^^^ TypeError: Client.__init__() missing 1 required positional argument: 'callback_api_version' The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/path/to/labgrid/labgrid/remote/client.py", line 1892, in main args.func(session) File 
"/path/to/labgrid/labgrid/remote/client.py", line 726, in power target = self._get_target(place) ^^^^^^^^^^^^^^^^^^^^^^^ File "/path/to/labgrid/labgrid/remote/client.py", line 687, in _get_target RemotePlace(target, name=place.name) File "", line 11, in __init__ self.__attrs_post_init__() File "/path/to/labgrid/labgrid/resource/remote.py", line 101, in __attrs_post_init__ super().__attrs_post_init__() File "/path/to/labgrid/labgrid/resource/common.py", line 157, in __attrs_post_init__ self.manager._add_resource(self) File "/path/to/labgrid/labgrid/resource/common.py", line 133, in _add_resource self.on_resource_added(resource) File "/path/to/labgrid/labgrid/resource/remote.py", line 53, in on_resource_added new = target_factory.make_resource( ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ File "/path/to/labgrid/labgrid/factory.py", line 126, in make_resource raise InvalidConfigError( labgrid.exceptions.InvalidConfigError: failed to create TasmotaPowerPort for target 'Target(name='test', env=None)' using {'host': 'mqtt', 'status_topic': 'stat/03374/POWER', 'power_topic': 'cmnd/03374/POWER', 'avail_topic': 'tele/03374/LWT'} This is likely caused by an error in the environment configuration or invalid resource information provided by the coordinator. To fix this, use paho-mqtt's callback API v2 and migrate the on_connect() callback. Require paho-mqtt>=2.0.0 from now on. [1] https://github.com/eclipse/paho.mqtt.python/releases/tag/v2.0.0 [2] https://github.com/eclipse/paho.mqtt.python/blob/master/docs/migrations.rst#change-between-version-1x-and-20 Signed-off-by: Bastian Krause --- labgrid/driver/mqtt.py | 4 ++-- labgrid/resource/mqtt.py | 2 +- pyproject.toml | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/labgrid/driver/mqtt.py b/labgrid/driver/mqtt.py index 5238f201d..3722ad111 100644 --- a/labgrid/driver/mqtt.py +++ b/labgrid/driver/mqtt.py @@ -27,7 +27,7 @@ class TasmotaPowerDriver(Driver, PowerProtocol): def __attrs_post_init__(self): super().__attrs_post_init__() import paho.mqtt.client as mqtt - self._client = mqtt.Client() + self._client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2) def on_activate(self): self._client.on_message = self._on_message @@ -45,7 +45,7 @@ def _on_message(self, client, userdata, msg): status = False self._status = status - def _on_connect(self, client, userdata, flags, rc): + def _on_connect(self, client, userdata, flags, reason_code, properties): client.subscribe(self.power.status_topic) def _publish(self, topic, payload): diff --git a/labgrid/resource/mqtt.py b/labgrid/resource/mqtt.py index b5991537b..c9742556a 100644 --- a/labgrid/resource/mqtt.py +++ b/labgrid/resource/mqtt.py @@ -17,7 +17,7 @@ class MQTTManager(ResourceManager): def _create_mqtt_connection(self, host): import paho.mqtt.client as mqtt - client = mqtt.Client() + client = mqtt.Client(mqtt.CallbackAPIVersion.VERSION2) client.connect(host) client.on_message = self._on_message client.loop_start() diff --git a/pyproject.toml b/pyproject.toml index ca83ffd34..e351279fe 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -60,7 +60,7 @@ graph = ["graphviz>=0.17.0"] kasa = ["python-kasa>=0.4.0"] modbus = ["pyModbusTCP>=0.2.0"] modbusrtu = ["minimalmodbus>=1.0.2"] -mqtt = ["paho-mqtt>=1.5.1"] +mqtt = ["paho-mqtt>=2.0.0"] onewire = ["onewire>=0.2"] pyvisa = [ "pyvisa>=1.11.3", @@ -101,7 +101,7 @@ dev = [ "minimalmodbus>=1.0.2", # labgrid[mqtt] - "paho-mqtt>=1.5.1", + "paho-mqtt>=2.0.0", # labgrid[onewire] "onewire>=0.2", From 7e580bca7c154f20bdb9355b7c3dc7a72acdf1e3 Mon Sep 17 00:00:00 2001 
From: Bastian Krause Date: Tue, 13 Feb 2024 14:24:13 +0100 Subject: [PATCH 147/384] github/workflows: update GitHub actions to latest versions Fixes deprecation warnings such as: Node.js 16 actions are deprecated. Please update the following actions to use Node.js 20: actions/setup-python@v4, actions/cache@v3. For more information see: https://github.blog/changelog/2023-09-22-github-actions-transitioning-from-node-16-to-node-20/. When bumping codecov/codecov-action v3 to v4, we run into [1]. The changes needed for this bump [2] are not clear yet. Let's stick to the previous version for now. [1] https://github.com/codecov/codecov-action/issues/1274 [2] https://about.codecov.io/blog/january-product-update-updating-the-codecov-ci-uploaders-to-the-codecov-cli/ Signed-off-by: Bastian Krause --- .github/workflows/build-and-release.yml | 2 +- .github/workflows/reusable-unit-tests.yml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml index 2edeb4cf0..390dad4cf 100644 --- a/.github/workflows/build-and-release.yml +++ b/.github/workflows/build-and-release.yml @@ -11,7 +11,7 @@ jobs: with: fetch-depth: 0 - name: Set up Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: "3.11" - name: Install python dependencies diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index c79d7318e..16a26f0c6 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -19,10 +19,10 @@ jobs: with: ref: ${{ inputs.branch }} - name: Set up Python ${{ inputs.python-version }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ inputs.python-version }} - - uses: actions/cache@v3 + - uses: actions/cache@v4 with: path: ~/.cache/pip key: ${{ runner.os }}-pip-${{ hashFiles('pyproject.toml') }} From 24c041cdbc02b5a4a7f0dd1869f86d60a9d9f06c Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 17:38:34 +0100 Subject: [PATCH 148/384] doc/configuration: fix unordered lists This fixes list rendering. Signed-off-by: Bastian Krause --- doc/configuration.rst | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index fa1324a63..2ca87f4ed 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -2957,6 +2957,7 @@ To use it, `PyGObject `_ must be installed For Debian, the necessary packages are `python3-gi` and `gir1.2-nm-1.0`. It supports: + - static and DHCP address configuration - WiFi client or AP - connection sharing (DHCP server with NAT) @@ -2991,15 +2992,17 @@ exporting network interfaces for the RawNetworkInterfaceDriver, e.g.: - eth1 It supports: + - recording traffic - replaying traffic - basic statistic collection For now, the RawNetworkInterfaceDriver leaves pre-configuration of the exported network interface to the user, including: + - disabling DHCP - disabling IPv6 Duplicate Address Detection (DAD) by SLAAC (Stateless -Address Autoconfiguration) and Neighbor Discovery + Address Autoconfiguration) and Neighbor Discovery - disabling Generic Receive Offload (GRO) This might change in the future.
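Until labgrid handles this itself, the pre-configuration listed above maps onto standard Linux host tooling. A minimal sketch of one way to apply it before exporting an interface; the interface name (eth1) is an example, and the sysctl/ethtool/nmcli invocations are ordinary host plumbing assumed to be available on the exporter, not a labgrid API:

    import subprocess

    IFACE = "eth1"  # example; use the interface actually being exported

    def run(*cmd):
        # Fail loudly if a step does not apply to this host.
        subprocess.run(cmd, check=True)

    # Disable IPv6 SLAAC and Duplicate Address Detection.
    run("sysctl", "-w", f"net.ipv6.conf.{IFACE}.autoconf=0")
    run("sysctl", "-w", f"net.ipv6.conf.{IFACE}.accept_dad=0")

    # Disable Generic Receive Offload so captured frames match the wire.
    run("ethtool", "-K", IFACE, "gro", "off")

    # One way to keep DHCP away: mark the device unmanaged in NetworkManager.
    run("nmcli", "device", "set", IFACE, "managed", "no")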
From a0e8af6e858d9cfb1ac980cba37c718481149b00 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 15:50:36 +0100 Subject: [PATCH 149/384] doc/configuration: fix header underlines Signed-off-by: Bastian Krause --- doc/configuration.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 2ca87f4ed..8f67c2684 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -638,7 +638,7 @@ Used by: - `UUUDriver`_ RKUSBLoader -~~~~~~~~~~~~ +~~~~~~~~~~~ An RKUSBLoader resource describes a USB device in the rockchip loader state. .. code-block:: yaml @@ -662,7 +662,7 @@ NetworkIMXUSBLoader A NetworkIMXUSBLoader describes an `IMXUSBLoader`_ available on a remote computer. NetworkRKUSBLoader -~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~ A NetworkRKUSBLoader describes an `RKUSBLoader`_ available on a remote computer. AndroidUSBFastboot @@ -737,7 +737,7 @@ Arguments: - ifname (str): name of the interface USBNetworkInterface -~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~ A USBNetworkInterface resource describes a USB network adapter (such as Ethernet or WiFi) @@ -2291,7 +2291,7 @@ Arguments: - None RKUSBDriver -~~~~~~~~~~~~ +~~~~~~~~~~~ A RKUSBDriver is used to upload an image into a device in the rockchip USB loader state. This is useful to bootstrap a bootloader onto a device. From 936509238689722d20fdaac9568ac0dc471c4538 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 15:20:21 +0100 Subject: [PATCH 150/384] doc/configuration: drop unnecessary new lines Signed-off-by: Bastian Krause --- doc/configuration.rst | 28 ---------------------------- 1 file changed, 28 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 8f67c2684..31e16e2d0 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -872,7 +872,6 @@ Used by: NetworkUSBSDMuxDevice ~~~~~~~~~~~~~~~~~~~~~ - A :any:`NetworkUSBSDMuxDevice` resource describes a `USBSDMuxDevice`_ available on a remote computer. @@ -894,7 +893,6 @@ Used by: NetworkLXAUSBMux ~~~~~~~~~~~~~~~~ - A :any:`NetworkLXAUSBMux` resource describes a `LXAUSBMux`_ available on a remote computer. @@ -918,13 +916,11 @@ Used by: NetworkUSBSDWireDevice ~~~~~~~~~~~~~~~~~~~~~~ - A :any:`NetworkUSBSDWireDevice` resource describes a `USBSDWireDevice`_ available on a remote computer. USBVideo ~~~~~~~~ - A :any:`USBVideo` resource describes a USB video camera which is supported by a Video4Linux2 kernel driver. @@ -942,7 +938,6 @@ Used by: SysfsGPIO ~~~~~~~~~ - A :any:`SysfsGPIO` resource describes a GPIO line. .. code-block:: yaml @@ -958,13 +953,11 @@ Used by: NetworkUSBVideo ~~~~~~~~~~~~~~~ - A :any:`NetworkUSBVideo` resource describes a :any:`USBVideo` resource available on a remote computer. USBAudioInput ~~~~~~~~~~~~~ - A :any:`USBAudioInput` resource describes a USB audio input which is supported by an ALSA kernel driver. @@ -984,13 +977,11 @@ Used by: NetworkUSBAudioInput ~~~~~~~~~~~~~~~~~~~~ - A :any:`NetworkUSBAudioInput` resource describes a :any:`USBAudioInput` resource available on a remote computer. USBTMC ~~~~~~ - A :any:`USBTMC` resource describes an oscilloscope connected via the USB TMC protocol. The low-level communication is handled by the ``usbtmc`` kernel driver. @@ -1016,7 +1007,6 @@ Used by: NetworkUSBTMC ~~~~~~~~~~~~~ - A :any:`NetworkUSBTMC` resource describes a :any:`USBTMC` resource available on a remote computer. 
@@ -1103,7 +1093,6 @@ For instance, to flash using 3.5V vcc: DediprogFlasher: vcc: '3.5V' - Used by: - `DediprogFlashDriver`_ @@ -1149,7 +1138,6 @@ Used by: HTTPVideoStream ~~~~~~~~~~~~~~~ - A :any:`HTTPVideoStream` resource describes a IP video stream over HTTP or HTTPS. .. code-block:: yaml @@ -1187,7 +1175,6 @@ labgrid. TFTPProvider / HTTPProvider +++++++++++++++++++++++++++ - .. code-block:: yaml TFTPProvider: @@ -1208,7 +1195,6 @@ Used by: NFSProvider +++++++++++ - .. code-block:: yaml NFSProvider: {} @@ -1318,7 +1304,6 @@ Used by: udev Matching ~~~~~~~~~~~~~ - labgrid allows the exporter (or the client-side environment) to match resources via udev rules. Any udev property key and value can be used, path matching USB devices is @@ -1374,7 +1359,6 @@ use-cases. Matching a USB Serial Converter on a Hub Port +++++++++++++++++++++++++++++++++++++++++++++ - This will match any USB serial converter connected below the hub port 1.2.5.5 on bus 1. The `ID_PATH` value corresponds to the hierarchy of buses and ports as shown @@ -1393,7 +1377,6 @@ This is necessary for the `USBSerialPort` because we actually want to find the Matching an Android USB Fastboot Device +++++++++++++++++++++++++++++++++++++++ - In this case, we want to match the USB device on that port directly, so we don't use a parent match. @@ -1405,7 +1388,6 @@ don't use a parent match. Matching a Specific UART in a Dual-Port Adapter +++++++++++++++++++++++++++++++++++++++++++++++ - On this board, the serial console is connected to the second port of an on-board dual-port USB-UART. The board itself is connected to the bus 3 and port path 10.2.2.2. @@ -1457,7 +1439,6 @@ We use the ``ID_USB_INTERFACE_NUM`` to distinguish between the two ports: Matching a USB UART by Serial Number ++++++++++++++++++++++++++++++++++++ - Most of the USB serial converters in our lab have been programmed with unique serial numbers. This makes it easy to always match the same one even if the USB topology @@ -1715,7 +1696,6 @@ Arguments: BareboxDriver ~~~~~~~~~~~~~ - A BareboxDriver interfaces with a barebox bootloader via a `ConsoleProtocol`. Binds to: @@ -2804,7 +2784,6 @@ Although the driver can be used from Python code by calling the `stream()` method, it is currently mainly useful for the ``video`` subcommand of ``labgrid-client``. - ========== ========================================================= Key Description ========== ========================================================= @@ -2908,7 +2887,6 @@ Implements: Arguments: - None - HttpDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~ A HttpDigitalOutputDriver binds to a `HttpDigitalOutput` to set and get a @@ -2928,7 +2906,6 @@ Implements: Arguments: - None - PyVISADriver ~~~~~~~~~~~~ The PyVISADriver uses a PyVISADevice resource to control test equipment manageable by PyVISA. @@ -3023,7 +3000,6 @@ Arguments: Strategies ---------- - Strategies are used to ensure that the device is in a certain state during a test. Such a state could be the bootloader or a booted Linux kernel with shell. @@ -3241,7 +3217,6 @@ The Reporter can be stopped with a call to the stop function: Stopping the StepReporter if it has not been started will raise an AssertionError, as will starting an already started StepReporter. 
- ConsoleLoggingReporter ~~~~~~~~~~~~~~~~~~~~~~ The ConsoleLoggingReporter outputs read calls from the console transports into @@ -3259,12 +3234,9 @@ The Reporter can be stopped with a call to the stop function: >>> from labgrid import ConsoleLoggingReporter >>> ConsoleLoggingReporter.stop() - Stopping the ConsoleLoggingReporter if it has not been started will raise an AssertionError, as will starting an already started StepReporter. - - Environment Configuration ------------------------- The environment configuration for a test environment consists of a YAML file From 385cc3576c3108dd27c362e126219f231853f752 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 16:35:16 +0100 Subject: [PATCH 151/384] doc/configuration: update driver protocol implementations Signed-off-by: Bastian Krause --- doc/configuration.rst | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index 31e16e2d0..c364063bc 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1503,6 +1503,7 @@ Binds to: Implements: - :any:`ConsoleProtocol` + - :any:`ResetProtocol` Arguments: - txdelay (float, default=0.0): time in seconds to wait before sending each byte @@ -1522,6 +1523,9 @@ Binds to: port: - `ModbusRTU`_ +Implements: + - None (yet) + ShellDriver ~~~~~~~~~~~ A ShellDriver binds on top of a `ConsoleProtocol` and is designed to interact @@ -1533,6 +1537,7 @@ Binds to: Implements: - :any:`CommandProtocol` + - :any:`FileTransferProtocol` .. code-block:: yaml @@ -1620,6 +1625,7 @@ Binds to: Implements: - :any:`CommandProtocol` + - :any:`LinuxBootProtocol` .. code-block:: yaml @@ -1679,6 +1685,7 @@ Binds to: Implements: - :any:`CommandProtocol` + - :any:`LinuxBootProtocol` .. code-block:: yaml @@ -1704,6 +1711,7 @@ Binds to: Implements: - :any:`CommandProtocol` + - :any:`LinuxBootProtocol` .. code-block:: yaml @@ -1851,6 +1859,7 @@ The driver's name will be displayed during interaction. Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1866,6 +1875,7 @@ An ExternalPowerDriver is used to control a target power state via an external c Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1891,6 +1901,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1915,6 +1926,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1935,6 +1947,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1956,6 +1969,10 @@ Binds to: output: - :any:`DigitalOutputProtocol` +Implements: + - :any:`PowerProtocol` + - :any:`ResetProtocol` + .. code-block:: yaml DigitalOutputPowerDriver: @@ -1974,6 +1991,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -1993,6 +2011,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. code-block:: yaml @@ -2394,6 +2413,9 @@ Binds to: - `HTTPProvider`_ - `RemoteHTTPProvider`_ +Implements: + - None (yet) + .. code-block:: yaml TFTPProviderDriver: {} @@ -2415,6 +2437,9 @@ Binds to: - `NFSProvider`_ - `RemoteNFSProvider`_ +Implements: + - None (yet) + .. code-block:: yaml NFSProviderDriver: {} @@ -2526,6 +2551,7 @@ Binds to: Implements: - :any:`PowerProtocol` + - :any:`ResetProtocol` .. 
code-block:: yaml @@ -2811,6 +2837,9 @@ Binds to: - `DediprogFlasher`_ - `NetworkDediprogFlasher`_ +Implements: + - None (yet) + Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to flash onto the target From 0cfdec74daf71daeadec371733e8e545d452f350 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 16:30:46 +0100 Subject: [PATCH 152/384] doc/configuration: update which drivers use which resources Signed-off-by: Bastian Krause --- doc/configuration.rst | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index c364063bc..6118638a9 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -619,6 +619,7 @@ Arguments: Used by: - `IMXUSBDriver`_ - `UUUDriver`_ + - `BDIMXUSBDriver`_ MXSUSBLoader ~~~~~~~~~~~~ @@ -635,6 +636,7 @@ Arguments: Used by: - `MXSUSBDriver`_ + - `IMXUSBDriver`_ - `UUUDriver`_ RKUSBLoader @@ -736,6 +738,10 @@ WiFi) Arguments: - ifname (str): name of the interface +Used by: + - `NetworkInterfaceDriver`_ + - `RawNetworkInterfaceDriver`_ + USBNetworkInterface ~~~~~~~~~~~~~~~~~~~ A USBNetworkInterface resource describes a USB network adapter (such as Ethernet or WiFi) @@ -750,6 +756,10 @@ Ethernet or WiFi) Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ +Used by: + - `NetworkInterfaceDriver`_ + - `RawNetworkInterfaceDriver`_ + RemoteNetworkInterface ~~~~~~~~~~~~~~~~~~~~~~ A :any:`RemoteNetworkInterface` resource describes a :any:`NetworkInterface` or :any:`USBNetworkInterface` resource available on a remote computer. @@ -804,6 +814,9 @@ Arguments: - switch (str): host name of the Ethernet switch - interface (str): interface name +Used by: + - None + SigrokUSBDevice ~~~~~~~~~~~~~~~ A SigrokUSBDevice resource describes a sigrok USB device. @@ -851,6 +864,7 @@ Arguments: Used by: - `SigrokPowerDriver`_ + - `SigrokDmmDriver`_ USBSDMuxDevice ~~~~~~~~~~~~~~ @@ -868,7 +882,8 @@ Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ Used by: - - `USBSDMUXDriver`_ + - `USBSDMuxDriver`_ + - `USBStorageDriver`_ NetworkUSBSDMuxDevice ~~~~~~~~~~~~~~~~~~~~~ @@ -913,6 +928,7 @@ Arguments: Used by: - `USBSDWireDriver`_ + - `USBStorageDriver`_ NetworkUSBSDWireDevice ~~~~~~~~~~~~~~~~~~~~~~ From ae46e6717202ca992baa91588e99be7b7799251d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 16:31:16 +0100 Subject: [PATCH 153/384] doc/configuration: drop driver usage from network resources Other network resources do not list the drivers using them either. Signed-off-by: Bastian Krause --- doc/configuration.rst | 5 ----- 1 file changed, 5 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 6118638a9..ea113dff5 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -578,9 +578,6 @@ NetworkUSBMassStorage A NetworkUSBMassStorage resource describes a USB memory stick or similar device available on a remote computer. -Used by: - - `USBStorageDriver`_ - The NetworkUSBMassStorage can be used in test cases by calling the `write_image()`, and `get_size()` functions.
@@ -1085,8 +1082,6 @@ NetworkUSBFlashableDevice A :any:`NetworkUSBFlashableDevice` resource describes a :any:`USBFlashableDevice` resource available on a remote computer -Used by: - - `FlashScriptDriver`_ DediprogFlasher ~~~~~~~~~~~~~~~ From bd742785a047cb8785981b34635fe6522370de1c Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 16:35:39 +0100 Subject: [PATCH 154/384] doc/configuration: update/fix driver bindings Initially, all driver bindings were documented with their respective binding key. This hasn't been done consistently since then. Especially when using multiple instances of a single driver class, the bindings are needed to specify which resource or other driver the driver should bind to. Until now, most binding keys had to be looked up in the source code. Improve this situation by fixing and adding binding keys, as well as adding all Remote/Network resource variants a driver is able to bind to. Signed-off-by: Bastian Krause --- doc/configuration.rst | 91 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 75 insertions(+), 16 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index ea113dff5..3e625d9ea 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1531,7 +1531,7 @@ local usage and will not work with an exporter. ModbusRTUDriver: {} Binds to: - port: + resource: - `ModbusRTU`_ Implements: @@ -1692,7 +1692,8 @@ This driver needs the following features activated in U-Boot to work: - The U-Boot must support the "bootm" command to boot from a memory location. Binds to: - - :any:`ConsoleProtocol` (see `SerialDriver`_) + console: + - :any:`ConsoleProtocol` (see `SerialDriver`_) Implements: - :any:`CommandProtocol` @@ -1743,6 +1744,9 @@ ExternalConsoleDriver An ExternalConsoleDriver implements the `ConsoleProtocol` on top of a command executed on the local computer. +Binds to: + - None + Implements: - :any:`ConsoleProtocol` @@ -1764,7 +1768,9 @@ network fastboot state. Binds to: fastboot: - `AndroidUSBFastboot`_ + - RemoteAndroidUSBFastboot - `AndroidNetFastboot`_ + - RemoteAndroidNetFastboot Implements: - None (yet) @@ -1792,6 +1798,7 @@ Upgrade) mode. Binds to: dfu: - `DFUDevice`_ + - NetworkDFUDevice Implements: - None (yet) @@ -1818,7 +1825,9 @@ Consider updating your OpenOCD version when using multiple USB Blasters. Binds to: interface: - `AlteraUSBBlaster`_ + - NetworkAlteraUSBBlaster - `USBDebugger`_ + - NetworkUSBDebugger Implements: - :any:`BootstrapProtocol` @@ -1849,7 +1858,9 @@ A QuartusHPSDriver controls the "Quartus Prime Programmer and Tools" to flash a target's QSPI. Binds to: - - `AlteraUSBBlaster`_ + interface: + - `AlteraUSBBlaster`_ + - NetworkAlteraUSBBlaster Implements: - None @@ -1868,6 +1879,9 @@ control is available. The driver's name will be displayed during interaction. +Binds to: + - None + Implements: - :any:`PowerProtocol` - :any:`ResetProtocol` @@ -1884,6 +1898,9 @@ ExternalPowerDriver ~~~~~~~~~~~~~~~~~~~ An ExternalPowerDriver is used to control a target power state via an external command. +Binds to: + - None + Implements: - :any:`PowerProtocol` - :any:`ResetProtocol` @@ -1955,6 +1972,7 @@ target power state without user interaction. Binds to: port: - `YKUSHPowerPort`_ + - `NetworkYKUSHPowerPort`_ Implements: - :any:`PowerProtocol` @@ -1998,7 +2016,9 @@ A USBPowerDriver controls a `USBPowerPort`, allowing control of the target power state without user interaction.
Binds to: - - `USBPowerPort`_ + hub: + - `USBPowerPort`_ + - NetworkUSBPowerPort Implements: - :any:`PowerProtocol` @@ -2018,7 +2038,9 @@ A SiSPMPowerDriver controls a `SiSPMPowerPort`, allowing control of the target power state without user interaction. Binds to: - - `SiSPMPowerPort`_ + port: + - `SiSPMPowerPort`_ + - NetworkSiSPMPowerPort Implements: - :any:`PowerProtocol` @@ -2038,7 +2060,8 @@ A TasmotaPowerDriver controls a `TasmotaPowerPort`, allowing the outlet to be switched on and off. Binds to: - - `TasmotaPowerPort`_ + power: + - `TasmotaPowerPort`_ Implements: - :any:`PowerProtocol` @@ -2059,7 +2082,9 @@ This driver configures GPIO lines via `the sysfs kernel interface `_ tool. +Binds to: + mux: + - `USBSDMuxDevice`_ + - `NetworkUSBSDMuxDevice`_ + Implements: - None yet @@ -2631,6 +2680,11 @@ The :any:`LXAUSBMuxDriver` uses a LXAUSBMux resource to control a USB-Mux device via the `usbmuxctl `_ tool. +Binds to: + mux: + - `LXAUSBMux`_ + - `NetworkLXAUSBMux` + Implements: - None yet @@ -2647,6 +2701,11 @@ The :any:`USBSDWireDriver` uses a USBSDWireDevice resource to control a USB-SD-Wire device via `sd-mux-ctrl `_ tool. +Binds to: + mux: + - `USBSDWireDevice`_ + - `NetworkUSBSDWireDevice` + Implements: - None yet @@ -2696,7 +2755,7 @@ On the receiver, it either uses ``gst-launch`` for simple playback or complex cases (such as measuring the current volume level). Binds to: - video: + res: - `USBAudioInput`_ - `NetworkUSBAudioInput`_ @@ -2781,7 +2840,7 @@ a device. foo: ../images/flash_device.sh Binds to: - flashabledevice_resource: + device: - `USBFlashableDevice`_ - `NetworkUSBFlashableDevice`_ @@ -2844,7 +2903,7 @@ The :any:`DediprogFlashDriver` is used to flash an SPI device using DediprogFlas foo: ../images/image_to_load.raw Binds to: - DediprogFlasher_resource: + flasher: - `DediprogFlasher`_ - `NetworkDediprogFlasher`_ @@ -2983,8 +3042,8 @@ It supports: Binds to: iface: - `NetworkInterface`_ - - `USBNetworkInterface`_ - `RemoteNetworkInterface`_ + - `USBNetworkInterface`_ Implements: - None yet @@ -3027,8 +3086,8 @@ This might change in the future. Binds to: iface: - `NetworkInterface`_ - - `USBNetworkInterface`_ - `RemoteNetworkInterface`_ + - `USBNetworkInterface`_ Implements: - None yet From 7f2c5f5efddfbcdc4cc668784fd5147b9421ff44 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jan 2024 15:38:14 +0100 Subject: [PATCH 155/384] doc/configuration: explicitly document drivers not expecting arguments Signed-off-by: Bastian Krause --- doc/configuration.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index 3e625d9ea..6de41d47f 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1537,6 +1537,9 @@ Binds to: Implements: - None (yet) +Arguments: + - None + ShellDriver ~~~~~~~~~~~ A ShellDriver binds on top of a `ConsoleProtocol` and is designed to interact @@ -2876,6 +2879,9 @@ Binds to: Implements: - :any:`VideoProtocol` +Arguments: + - None + Although the driver can be used from Python code by calling the `stream()` method, it is currently mainly useful for the ``video`` subcommand of ``labgrid-client``. 
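To illustrate why the binding keys documented above matter: when a target has two resources of the same class, each driver instance must map its binding key to a named resource, otherwise the binding is ambiguous. A minimal sketch, assuming labgrid is installed; the resource, driver and device names here are made up for the example:

    import tempfile

    from labgrid import Environment

    # "port" is the binding key documented for SerialDriver under "Binds to:".
    CONFIG = """
    targets:
      main:
        resources:
        - RawSerialPort:
            name: console-port
            port: /dev/ttyUSB0
        - RawSerialPort:
            name: debug-port
            port: /dev/ttyUSB1
        drivers:
        - SerialDriver:
            name: console
            bindings:
              port: console-port
        - SerialDriver:
            name: debug
            bindings:
              port: debug-port
    """

    with tempfile.NamedTemporaryFile("w", suffix=".yaml", delete=False) as f:
        f.write(CONFIG)

    env = Environment(f.name)
    target = env.get_target("main")
    # Select an instance by name; activate later, once the devices exist.
    console = target.get_driver("SerialDriver", name="console", activate=False)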
From 9f19129eb133dd90120607553a61afb7055d5774 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 16:11:41 +0100 Subject: [PATCH 156/384] doc/configuration: add links to resource/driver/strategy classes Signed-off-by: Bastian Krause --- doc/configuration.rst | 423 +++++++++++++++++++++++------------------- 1 file changed, 229 insertions(+), 194 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 6de41d47f..40c511722 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -8,8 +8,8 @@ have no dependencies. .. image:: res/config_graph.svg :width: 50% -Here the resource `RawSerialPort` provides the information for the -`SerialDriver`, which in turn is needed by the `ShellDriver`. +Here the resource `RawSerialPort`_ provides the information for the +`SerialDriver`_, which in turn is needed by the `ShellDriver`_. Driver dependency resolution is done by searching for the driver which implements the dependent protocol, all drivers implement one or more protocols. @@ -21,8 +21,8 @@ Serial Ports RawSerialPort +++++++++++++ -A RawSerialPort is a serial port which is identified via the device path on the -local computer. +A :any:`RawSerialPort` is a serial port which is identified via the device path +on the local computer. Take note that re-plugging USB serial converters can result in a different enumeration order. @@ -44,8 +44,9 @@ Used by: NetworkSerialPort +++++++++++++++++ -A NetworkSerialPort describes a serial port which is exported over the network, -usually using RFC2217 or raw tcp. +A :any:`NetworkSerialPort` describes a serial port which is exported over the +network, usually using `RFC2217 `_ +or raw tcp. .. code-block:: yaml @@ -69,7 +70,7 @@ Used by: ModbusRTU +++++++++ -Describes the resource required to use the ModbusRTU driver. +A :any:`ModbusRTU` resource is required to use the `ModbusRTUDriver`_. `Modbus RTU `_ is a communication protocol used to control many different kinds of electronic systems, such as thermostats, power plants, etc. @@ -77,11 +78,11 @@ Modbus is normally implemented on top of RS-485, though this is not strictly necessary, as long as the Modbus network only has one master (and up to 256 slaves). -The labgrid driver is implemented using the minimalmodbus Python library. +The labgrid driver is implemented using the +`minimalmodbus `_ Python +library. The implementation only supports that labgrid will be the master on the Modbus network. -For more information, see `minimalmodbus -`_. This resource and driver only supports local usage and will not work with an exporter. @@ -105,8 +106,8 @@ Used by: USBSerialPort +++++++++++++ -A USBSerialPort describes a serial port which is connected via USB and is -identified by matching udev properties. +A :any:`USBSerialPort` describes a serial port which is connected via USB and +is identified by matching udev properties. This allows identification through hot-plugging or rebooting. .. code-block:: yaml @@ -133,7 +134,7 @@ Power Ports NetworkPowerPort ++++++++++++++++ -A NetworkPowerPort describes a remotely switchable power port. +A :any:`NetworkPowerPort` describes a remotely switchable power port. .. code-block:: yaml @@ -241,7 +242,7 @@ Used by: PDUDaemonPort +++++++++++++ -A PDUDaemonPort describes a PDU port accessible via `PDUDaemon +A :any:`PDUDaemonPort` describes a PDU port accessible via `PDUDaemon `_. As one PDUDaemon instance can control many PDUs, the instance name from the PDUDaemon configuration file needs to be specified. 
@@ -266,7 +267,7 @@ Used by: YKUSHPowerPort ++++++++++++++ -A YKUSHPowerPort describes a YEPKIT YKUSH USB (HID) switchable USB hub. +A :any:`YKUSHPowerPort` describes a YEPKIT YKUSH USB (HID) switchable USB hub. .. code-block:: yaml @@ -287,11 +288,12 @@ Used by: NetworkYKUSHPowerPort +++++++++++++++++++++ -A NetworkYKUSHPowerPort describes a `YKUSHPowerPort`_ available on a remote computer. +A :any:`NetworkYKUSHPowerPort` describes a `YKUSHPowerPort`_ available on a +remote computer. USBPowerPort ++++++++++++ -A USBPowerPort describes a generic switchable USB hub as supported by +A :any:`USBPowerPort` describes a generic switchable USB hub as supported by `uhubctl `_. .. code-block:: yaml @@ -321,7 +323,7 @@ Used by: SiSPMPowerPort ++++++++++++++ -A SiSPMPowerPort describes a GEMBIRD SiS-PM as supported by +A :any:`SiSPMPowerPort` describes a GEMBIRD SiS-PM as supported by `sispmctl `_. .. code-block:: yaml @@ -343,8 +345,8 @@ Used by: TasmotaPowerPort ++++++++++++++++ -A :any:`TasmotaPowerPort` resource describes a switchable Tasmota power outlet -accessed over MQTT. +A :any:`TasmotaPowerPort` resource describes a switchable `Tasmota +`_ power outlet accessed over MQTT. .. code-block:: yaml @@ -373,7 +375,7 @@ Digital Outputs ModbusTCPCoil +++++++++++++ -A ModbusTCPCoil describes a coil accessible via ModbusTCP. +A :any:`ModbusTCPCoil` describes a coil accessible via ModbusTCP. .. code-block:: yaml @@ -397,7 +399,7 @@ Used by: DeditecRelais8 ++++++++++++++ -A DeditecRelais8 describes a Deditec USB GPO module with 8 relays. +A :any:`DeditecRelais8` describes a Deditec USB GPO module with 8 relays. .. code-block:: yaml @@ -418,7 +420,7 @@ Used by: OneWirePIO ++++++++++ -A OneWirePIO describes a onewire programmable I/O pin. +A :any:`OneWirePIO` describes a onewire programmable I/O pin. .. code-block:: yaml @@ -465,7 +467,7 @@ Used by: NetworkLXAIOBusPIO ++++++++++++++++++ -A NetworkLXAIOBusPIO describes an `LXAIOBusPIO`_ exported over the network. +A :any:`NetworkLXAIOBusPIO` describes an `LXAIOBusPIO`_ exported over the network. HIDRelay ++++++++ @@ -491,7 +493,7 @@ Used by: HttpDigitalOutput +++++++++++++++++ -A ``HttpDigitalOutput`` resource describes a generic digital output that can be +A :any:`HttpDigitalOutput` resource describes a generic digital output that can be controlled via HTTP. .. code-block:: yaml @@ -524,11 +526,11 @@ Used by: NetworkHIDRelay +++++++++++++++ -A NetworkHIDRelay describes an `HIDRelay`_ exported over the network. +A :any:`NetworkHIDRelay` describes an `HIDRelay`_ exported over the network. NetworkService ~~~~~~~~~~~~~~ -A NetworkService describes a remote SSH connection. +A :any:`NetworkService` describes a remote SSH connection. .. code-block:: yaml @@ -559,7 +561,8 @@ Used by: USBMassStorage ~~~~~~~~~~~~~~ -A USBMassStorage resource describes a USB memory stick or similar device. +A :any:`USBMassStorage` resource describes a USB memory stick or similar +device. .. code-block:: yaml @@ -575,7 +578,7 @@ Used by: NetworkUSBMassStorage ~~~~~~~~~~~~~~~~~~~~~ -A NetworkUSBMassStorage resource describes a USB memory stick or similar +A :any:`NetworkUSBMassStorage` resource describes a USB memory stick or similar device available on a remote computer. The NetworkUSBMassStorage can be used in test cases by calling the @@ -583,8 +586,8 @@ The NetworkUSBMassStorage can be used in test cases by calling the SigrokDevice ~~~~~~~~~~~~ -A SigrokDevice resource describes a sigrok device. 
To select a specific device -from all connected supported devices use the `SigrokUSBDevice`_. +A :any:`SigrokDevice` resource describes a sigrok device. To select a specific +device from all connected supported devices use the `SigrokUSBDevice`_. .. code-block:: yaml @@ -602,7 +605,7 @@ Used by: IMXUSBLoader ~~~~~~~~~~~~ -An IMXUSBLoader resource describes a USB device in the imx loader state. +An :any:`IMXUSBLoader` resource describes a USB device in the imx loader state. .. code-block:: yaml @@ -620,7 +623,7 @@ Used by: MXSUSBLoader ~~~~~~~~~~~~ -An MXSUSBLoader resource describes a USB device in the mxs loader state. +An :any:`MXSUSBLoader` resource describes a USB device in the mxs loader state. .. code-block:: yaml @@ -638,7 +641,8 @@ Used by: RKUSBLoader ~~~~~~~~~~~ -An RKUSBLoader resource describes a USB device in the rockchip loader state. +An :any:`RKUSBLoader` resource describes a USB device in the rockchip loader +state. .. code-block:: yaml @@ -654,19 +658,23 @@ Used by: NetworkMXSUSBLoader ~~~~~~~~~~~~~~~~~~~ -A NetworkMXSUSBLoader describes an `MXSUSBLoader`_ available on a remote computer. +A :any:`NetworkMXSUSBLoader` describes an `MXSUSBLoader`_ available on a remote +computer. NetworkIMXUSBLoader ~~~~~~~~~~~~~~~~~~~ -A NetworkIMXUSBLoader describes an `IMXUSBLoader`_ available on a remote computer. +A :any:`NetworkIMXUSBLoader` describes an `IMXUSBLoader`_ available on a remote +computer. NetworkRKUSBLoader ~~~~~~~~~~~~~~~~~~ -A NetworkRKUSBLoader describes an `RKUSBLoader`_ available on a remote computer. +A :any:`NetworkRKUSBLoader` describes an `RKUSBLoader`_ available on a remote +computer. AndroidUSBFastboot ~~~~~~~~~~~~~~~~~~ -An AndroidUSBFastboot resource describes a USB device in the fastboot state. +An :any:`AndroidUSBFastboot` resource describes a USB device in the fastboot +state. Previously, this resource was named AndroidFastboot and this name still supported for backwards compatibility. @@ -688,7 +696,8 @@ Used by: AndroidNetFastboot ~~~~~~~~~~~~~~~~~~ -An AndroidNetFastboot resource describes a network device in fastboot state. +An :any:`AndroidNetFastboot` resource describes a network device in fastboot +state. .. code-block:: yaml @@ -707,8 +716,8 @@ Used by: DFUDevice ~~~~~~~~~ -A DFUDevice resource describes a USB device in DFU (Device Firmware Upgrade) -mode. +A :any:`DFUDevice` resource describes a USB device in DFU (Device Firmware +Upgrade) mode. .. code-block:: yaml @@ -724,8 +733,8 @@ Used by: NetworkInterface ~~~~~~~~~~~~~~~~ -A NetworkInterface resource describes a network adapter (such as Ethernet or -WiFi) +A :any:`NetworkInterface` resource describes a network adapter (such as +Ethernet or WiFi) .. code-block:: yaml @@ -741,7 +750,7 @@ Used by: USBNetworkInterface ~~~~~~~~~~~~~~~~~~~ -A USBNetworkInterface resource describes a USB network adapter (such as +A :any:`USBNetworkInterface` resource describes a USB network adapter (such as Ethernet or WiFi) .. code-block:: yaml @@ -759,12 +768,12 @@ Used by: RemoteNetworkInterface ~~~~~~~~~~~~~~~~~~~~~~ -A :any:`RemoteNetworkInterface` resource describes a :any:`NetworkInterface` or -:any:`USBNetworkInterface` resource available on a remote computer. +A :any:`RemoteNetworkInterface` resource describes a `NetworkInterface`_ or +`USBNetworkInterface`_ resource available on a remote computer. AlteraUSBBlaster ~~~~~~~~~~~~~~~~ -An AlteraUSBBlaster resource describes an Altera USB blaster. +An :any:`AlteraUSBBlaster` resource describes an Altera USB blaster. .. 
code-block:: yaml @@ -781,8 +790,8 @@ Used by: USBDebugger ~~~~~~~~~~~ -An USBDebugger resource describes a JTAG USB adapter (for example an FTDI -FT2232H). +An :any:`USBDebugger` resource describes a JTAG USB adapter (for example an +FTDI FT2232H). .. code-block:: yaml @@ -798,8 +807,8 @@ Used by: SNMPEthernetPort ~~~~~~~~~~~~~~~~ -A SNMPEthernetPort resource describes a port on an Ethernet switch, which is -accessible via SNMP. +A :any:`SNMPEthernetPort` resource describes a port on an Ethernet switch, +which is accessible via SNMP. .. code-block:: yaml @@ -816,7 +825,7 @@ Used by: SigrokUSBDevice ~~~~~~~~~~~~~~~ -A SigrokUSBDevice resource describes a sigrok USB device. +A :any:`SigrokUSBDevice` resource describes a sigrok USB device. .. code-block:: yaml @@ -837,14 +846,15 @@ Used by: NetworkSigrokUSBDevice ~~~~~~~~~~~~~~~~~~~~~~ -A NetworkSigrokUSBDevice resource describes a sigrok USB device connected to a -host which is exported over the network. The SigrokDriver will access it via SSH. +A :any:`NetworkSigrokUSBDevice` resource describes a sigrok USB device +connected to a host which is exported over the network. +The `SigrokDriver`_ will access it via SSH. SigrokUSBSerialDevice ~~~~~~~~~~~~~~~~~~~~~ -A SigrokUSBSerialDevice resource describes a sigrok device which communicates -of a USB serial port instead of being a USB device itself (see -`SigrokUSBDevice` for that case). +A :any:`SigrokUSBSerialDevice` resource describes a sigrok device which +communicates over a USB serial port instead of being a USB device itself (see +`SigrokUSBDevice`_ for that case). .. code-block:: yaml @@ -966,7 +976,7 @@ Used by: NetworkUSBVideo ~~~~~~~~~~~~~~~ -A :any:`NetworkUSBVideo` resource describes a :any:`USBVideo` resource available +A :any:`NetworkUSBVideo` resource describes a `USBVideo`_ resource available on a remote computer. USBAudioInput @@ -990,7 +1000,7 @@ Used by: NetworkUSBAudioInput ~~~~~~~~~~~~~~~~~~~~ -A :any:`NetworkUSBAudioInput` resource describes a :any:`USBAudioInput` resource +A :any:`NetworkUSBAudioInput` resource describes a `USBAudioInput`_ resource available on a remote computer. USBTMC @@ -1020,13 +1030,15 @@ Used by: NetworkUSBTMC ~~~~~~~~~~~~~ -A :any:`NetworkUSBTMC` resource describes a :any:`USBTMC` resource available +A :any:`NetworkUSBTMC` resource describes a `USBTMC`_ resource available on a remote computer. Flashrom ~~~~~~~~ -A Flashrom resource is used to configure the parameters to a local installed flashrom instance. -It is assumed that flashrom is installed on the host and the executable is configured in: +A :any:`Flashrom` resource is used to configure the parameters to a local +installed flashrom instance. +It is assumed that flashrom is installed on the host and the executable is +configured in: .. code-block:: yaml @@ -1052,13 +1064,14 @@ Used by: NetworkFlashrom ~~~~~~~~~~~~~~~ -A NetworkFlashrom describes a `Flashrom`_ available on a remote computer. +A :any:`NetworkFlashrom` describes a `Flashrom`_ available on a remote computer. USBFlashableDevice ~~~~~~~~~~~~~~~~~~ -Represents an "opaque" USB device used by custom flashing programs. There is -usually not anything useful that can be done with the interface other than -running a flashing program with `FlashScriptDriver`_. +A :any:`USBFlashableDevice` represents an "opaque" USB device used by custom +flashing programs. +There is usually not anything useful that can be done with the interface other +than running a flashing program with `FlashScriptDriver`_. .. 
note:: This resource is only intended to be used as a last resort when it is @@ -1079,15 +1092,16 @@ Used by: NetworkUSBFlashableDevice ~~~~~~~~~~~~~~~~~~~~~~~~~ -A :any:`NetworkUSBFlashableDevice` resource describes a :any:`USBFlashableDevice` +A :any:`NetworkUSBFlashableDevice` resource describes a `USBFlashableDevice`_ resource available on a remote computer DediprogFlasher ~~~~~~~~~~~~~~~ -A DediprogFlasher resource is used to configure the parameters to a locally installed -dpmcd instance. It is assumed that dpcmd is installed on the host and the -executable can be configured via: +A :any:`DediprogFlasher` resource is used to configure the parameters to a +locally installed dpmcd instance. +It is assumed that dpcmd is installed on the host and the executable can be +configured via: .. code-block:: yaml @@ -1109,12 +1123,13 @@ Used by: NetworkDediprogFlasher ~~~~~~~~~~~~~~~~~~~~~~ -A NetworkDediprogFlasher describes a `DediprogFlasher`_ available on a remote computer. +A :any:`NetworkDediprogFlasher` describes a `DediprogFlasher`_ available on a +remote computer. XenaManager ~~~~~~~~~~~ -A XenaManager resource describes a Xena Manager instance which is the instance the -`XenaDriver`_ must connect to in order to configure a Xena chassis. +A :any:`XenaManager` resource describes a Xena Manager instance which is the +instance the `XenaDriver`_ must connect to in order to configure a Xena chassis. .. code-block:: yaml @@ -1129,7 +1144,8 @@ Used by: PyVISADevice ~~~~~~~~~~~~ -A PyVISADevice resource describes a test stimuli device controlled by PyVISA. +A :any:`PyVISADevice` resource describes a test stimuli device controlled by +PyVISA. Such device could be a signal generator. .. code-block:: yaml @@ -1169,8 +1185,8 @@ specific protocol. This is useful for software installation in the bootloader (via TFTP) or downloading update artifacts under Linux (via HTTP). -They are used with the ManagedFile helper, which ensures that the file is -available on the server. For HTTP and TFTP, a symlink from the internal +They are used with the :any:`ManagedFile` helper, which ensures that the file +is available on the server. For HTTP and TFTP, a symlink from the internal directory to the uploaded file is created. The path for the target is generated by replacing the internal prefix with the external prefix. @@ -1186,6 +1202,9 @@ labgrid. TFTPProvider / HTTPProvider +++++++++++++++++++++++++++ +A :any:`TFTPProvider` resource describes TFTP server. +A :any:`HTTPProvider` resource describes an HTTP server. + .. code-block:: yaml TFTPProvider: @@ -1206,6 +1225,8 @@ Used by: NFSProvider +++++++++++ +An :any:`NFSProvider` resource describes an NFS server. + .. code-block:: yaml NFSProvider: {} @@ -1221,8 +1242,10 @@ Used by: RemoteTFTPProvider / RemoteHTTPProvider +++++++++++++++++++++++++++++++++++++++ -These describe a `TFTPProvider`_ or `HTTPProvider`_ resource available on a -remote computer. +A :any:`RemoteTFTPProvider` describes a `TFTPProvider`_ resource available on +a remote computer. +A :any:`RemoteHTTPProvider` describes a `HTTPProvider`_ resource available on +a remote computer. .. code-block:: yaml @@ -1247,7 +1270,8 @@ Used by: RemoteNFSProvider +++++++++++++++++ -An `NFSProvider`_ resource available on a remote computer. +A :any:`RemoteNFSProvider` describes an `NFSProvider`_ resource available on a +remote computer. .. code-block:: yaml @@ -1262,7 +1286,8 @@ Used by: RemotePlace ~~~~~~~~~~~ -A RemotePlace describes a set of resources attached to a labgrid remote place. 
+A :any:`RemotePlace` describes a set of resources attached to a labgrid remote +place. .. code-block:: yaml @@ -1281,8 +1306,8 @@ Used by: DockerDaemon ~~~~~~~~~~~~ -A DockerDaemon describes where to contact a docker daemon process. -DockerDaemon also participates in managing `NetworkService` instances +A :any:`DockerDaemon` describes where to contact a docker daemon process. +DockerDaemon also participates in managing `NetworkService`_ instances created through interaction with that daemon. .. code-block:: yaml @@ -1291,16 +1316,16 @@ created through interaction with that daemon. docker_daemon_url: unix://var/run/docker.sock The example describes a docker daemon accessible via the -'/var/run/docker.sock' unix socket. When used by a `DockerDriver`, the -`DockerDriver` will first create a docker container which the +``/var/run/docker.sock`` unix socket. When used by a `DockerDriver`_, the +`DockerDriver`_ will first create a docker container which the DockerDaemon resource will subsequently use to create one/more -`NetworkService` instances - as specified by `DockerDriver` configuration. -Each `NetworkService` instance corresponds to a network service running inside +`NetworkService`_ instances - as specified by `DockerDriver`_ configuration. +Each `NetworkService`_ instance corresponds to a network service running inside the container. Moreover, DockerDaemon will remove any hanging containers if DockerDaemon is used several times in a row - as is the case when -executing test suites. Normally `DockerDriver` - when deactivated - +executing test suites. Normally `DockerDriver`_ - when deactivated - cleans up the created docker container; programming errors, keyboard interrupts or unix kill signals may lead to hanging containers, however; therefore auto-cleanup is important. @@ -1383,7 +1408,7 @@ with ``udevadm info /dev/ttyUSB0``. Note the ``@`` in the ``@ID_PATH`` match, which applies this match to the device's parents instead of directly to itself. -This is necessary for the `USBSerialPort` because we actually want to find the +This is necessary for the `USBSerialPort`_ because we actually want to find the ``ttyUSB?`` device below the USB serial converter device. Matching an Android USB Fastboot Device @@ -1498,8 +1523,8 @@ Drivers SerialDriver ~~~~~~~~~~~~ -A SerialDriver connects to a serial port. It requires one of the serial port -resources. +A :any:`SerialDriver` connects to a serial port. It requires one of the serial +port resources. Binds to: port: @@ -1523,8 +1548,8 @@ Arguments: ModbusRTUDriver ~~~~~~~~~~~~~~~ -A ModbusRTUDriver connects to a ModbusRTU resource. This driver only supports -local usage and will not work with an exporter. +A :any:`ModbusRTUDriver` connects to a ModbusRTU resource. This driver only +supports local usage and will not work with an exporter. .. code-block:: yaml @@ -1542,8 +1567,8 @@ Arguments: ShellDriver ~~~~~~~~~~~ -A ShellDriver binds on top of a `ConsoleProtocol` and is designed to interact -with a login prompt and a Linux shell. +A :any:`ShellDriver` binds on top of a :any:`ConsoleProtocol` and is designed +to interact with a login prompt and a Linux shell. Binds to: console: @@ -1593,8 +1618,8 @@ Arguments: SSHDriver ~~~~~~~~~ -A SSHDriver requires a `NetworkService` resource and allows the execution of -commands and file upload via network. +A :any:`SSHDriver` requires a `NetworkService`_ resource and allows the +execution of commands and file upload via network. It uses SSH's `ServerAliveInterval` option to detect failed connections. 
If a shared SSH connection to the target is already open, it will reuse it when @@ -1617,7 +1642,7 @@ Implements: Arguments: - keyfile (str): optional, filename of private key to login into the remote system - (has precedence over `NetworkService`'s password) + (has precedence over `NetworkService`_'s password) - stderr_merge (bool, default=False): set to True to make `run()` return stderr merged with stdout, and an empty list as second element. - connection_timeout (float, default=30.0): timeout when trying to establish connection to @@ -1626,12 +1651,13 @@ Arguments: explicitly use the SFTP protocol for file transfers instead of scp's default protocol - explicit_scp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will explicitly use the SCP protocol for file transfers instead of scp's default protocol - - username (str, default=username from `NetworkService`): username used by SSH - - password (str, default=password from `NetworkService`): password used by SSH + - username (str, default=username from `NetworkService`_): username used by SSH + - password (str, default=password from `NetworkService`_): password used by SSH UBootDriver ~~~~~~~~~~~ -A UBootDriver interfaces with a U-Boot bootloader via a `ConsoleProtocol`. +A :any:`UBootDriver` interfaces with a U-Boot bootloader via a +:any:`ConsoleProtocol`. Binds to: console: @@ -1665,8 +1691,8 @@ Arguments: SmallUBootDriver ~~~~~~~~~~~~~~~~ -A SmallUBootDriver interfaces with stripped-down U-Boot variants that are -sometimes used in cheap consumer electronics. +A :any:`SmallUBootDriver` interfaces with stripped-down U-Boot variants that +are sometimes used in cheap consumer electronics. SmallUBootDriver is meant as a driver for U-Boot with only little functionality compared to a standard U-Boot. @@ -1718,7 +1744,8 @@ Arguments: BareboxDriver ~~~~~~~~~~~~~ -A BareboxDriver interfaces with a barebox bootloader via a `ConsoleProtocol`. +A :any:`BareboxDriver` interfaces with a *barebox* bootloader via a +:any:`ConsoleProtocol`. Binds to: console: @@ -1744,8 +1771,8 @@ Arguments: ExternalConsoleDriver ~~~~~~~~~~~~~~~~~~~~~ -An ExternalConsoleDriver implements the `ConsoleProtocol` on top of a command -executed on the local computer. +An :any:`ExternalConsoleDriver` implements the :any:`ConsoleProtocol` on top of +a command executed on the local computer. Binds to: - None @@ -1765,8 +1792,8 @@ Arguments: AndroidFastbootDriver ~~~~~~~~~~~~~~~~~~~~~ -An AndroidFastbootDriver allows the upload of images to a device in the USB or -network fastboot state. +An :any:`AndroidFastbootDriver` allows the upload of images to a device in the +USB or network fastboot state. Binds to: fastboot: @@ -1795,8 +1822,8 @@ Arguments: DFUDriver ~~~~~~~~~ -A DFUDriver allows the download of images to a device in DFU (Device Firmware -Upgrade) mode. +A :any:`DFUDriver` allows the download of images to a device in DFU (Device +Firmware Upgrade) mode. Binds to: dfu: @@ -1815,7 +1842,8 @@ Arguments: OpenOCDDriver ~~~~~~~~~~~~~ -An OpenOCDDriver controls OpenOCD to bootstrap a target with a bootloader. +An :any:`OpenOCDDriver` controls OpenOCD to bootstrap a target with a +bootloader. Note that OpenOCD supports specifying USB paths since `a1b308ab `_ which was released with v0.11. @@ -1857,8 +1885,8 @@ Arguments: QuartusHPSDriver ~~~~~~~~~~~~~~~~ -A QuartusHPSDriver controls the "Quartus Prime Programmer and Tools" to flash -a target's QSPI. +A :any:`QuartusHPSDriver` controls the "Quartus Prime Programmer and Tools" to +flash a target's QSPI. 
Binds to: interface: @@ -1876,8 +1904,9 @@ example strategy is included in labgrid. ManualPowerDriver ~~~~~~~~~~~~~~~~~ -A ManualPowerDriver requires the user to control the target power states. This -is required if a strategy is used with the target, but no automatic power +A :any:`ManualPowerDriver` requires the user to control the target power +states. +This is required if a strategy is used with the target, but no automatic power control is available. The driver's name will be displayed during interaction. @@ -1899,7 +1928,8 @@ Arguments: ExternalPowerDriver ~~~~~~~~~~~~~~~~~~~ -An ExternalPowerDriver is used to control a target power state via an external command. +An :any:`ExternalPowerDriver` is used to control a target power state via an +external command. Binds to: - None @@ -1923,8 +1953,8 @@ Arguments: NetworkPowerDriver ~~~~~~~~~~~~~~~~~~ -A NetworkPowerDriver controls a `NetworkPowerPort`, allowing control of the -target power state without user interaction. +A :any:`NetworkPowerDriver` controls a `NetworkPowerPort`_, allowing control of +the target power state without user interaction. Binds to: port: @@ -1944,8 +1974,8 @@ Arguments: PDUDaemonDriver ~~~~~~~~~~~~~~~ -A PDUDaemonDriver controls a `PDUDaemonPort`, allowing control of the target -power state without user interaction. +A :any:`PDUDaemonDriver` controls a `PDUDaemonPort`_, allowing control of the +target power state without user interaction. .. note:: PDUDaemon processes commands in the background, so the actual state change @@ -1969,7 +1999,7 @@ Arguments: YKUSHPowerDriver ~~~~~~~~~~~~~~~~ -A YKUSHPowerDriver controls a `YKUSHPowerPort`, allowing control of the +A :any:`YKUSHPowerDriver` controls a `YKUSHPowerPort`_, allowing control of the target power state without user interaction. Binds to: @@ -1991,8 +2021,8 @@ Arguments: DigitalOutputPowerDriver ~~~~~~~~~~~~~~~~~~~~~~~~ -A DigitalOutputPowerDriver can be used to control the power of a -device using a DigitalOutputDriver. +A :any:`DigitalOutputPowerDriver` can be used to control the power of a device +using a DigitalOutputDriver. Using this driver you probably want an external relay to switch the power of your DUT. @@ -2015,8 +2045,8 @@ Arguments: USBPowerDriver ~~~~~~~~~~~~~~ -A USBPowerDriver controls a `USBPowerPort`, allowing control of the target -power state without user interaction. +A :any:`USBPowerDriver` controls a `USBPowerPort`_, allowing control of the +target power state without user interaction. Binds to: hub: @@ -2037,8 +2067,8 @@ Arguments: SiSPMPowerDriver ~~~~~~~~~~~~~~~~ -A SiSPMPowerDriver controls a `SiSPMPowerPort`, allowing control of the target -power state without user interaction. +A :any:`SiSPMPowerDriver` controls a `SiSPMPowerPort`_, allowing control of the +target power state without user interaction. Binds to: port: @@ -2059,8 +2089,8 @@ Arguments: TasmotaPowerDriver ~~~~~~~~~~~~~~~~~~ -A TasmotaPowerDriver controls a `TasmotaPowerPort`, allowing the outlet to be -switched on and off. +A :any:`TasmotaPowerDriver` controls a `TasmotaPowerPort`_, allowing the outlet +to be switched on and off. Binds to: power: @@ -2079,7 +2109,7 @@ Arguments: GpioDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~ -The GpioDigitalOutputDriver writes a digital signal to a GPIO line. +The :any:`GpioDigitalOutputDriver` writes a digital signal to a GPIO line. This driver configures GPIO lines via `the sysfs kernel interface `. While the driver automatically exports the GPIO, it does not configure it in any other way than as an output. 
@@ -2101,7 +2131,7 @@ Arguments: SerialPortDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The SerialPortDigitalOutputDriver makes it possible to use a UART +The :any:`SerialPortDigitalOutputDriver` makes it possible to use a UART as a 1-Bit general-purpose digital output. This driver acts on top of a SerialDriver and uses the its pyserial port to @@ -2130,7 +2160,7 @@ Arguments: FileDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~ -The FileDigitalOutputDriver uses a file +The :any:`FileDigitalOutputDriver` uses a file to write arbitrary string representations of booleans to a file and read from it. @@ -2159,7 +2189,7 @@ Arguments: DigitalOutputResetDriver ~~~~~~~~~~~~~~~~~~~~~~~~ -A DigitalOutputResetDriver uses a DigitalOutput to reset the target. +A :any:`DigitalOutputResetDriver` uses a DigitalOutput to reset the target. Binds to: output: @@ -2178,7 +2208,7 @@ Arguments: ModbusCoilDriver ~~~~~~~~~~~~~~~~ -A ModbusCoilDriver controls a `ModbusTCPCoil` resource. +A :any:`ModbusCoilDriver` controls a `ModbusTCPCoil`_ resource. It can set and get the current state of the resource. Binds to: @@ -2197,7 +2227,7 @@ Arguments: HIDRelayDriver ~~~~~~~~~~~~~~ -A HIDRelayDriver controls a `HIDRelay` or `NetworkHIDRelay` resource. +A :any:`HIDRelayDriver` controls a `HIDRelay`_ or `NetworkHIDRelay`_ resource. It can set and get the current state of the resource. Binds to: @@ -2217,9 +2247,10 @@ Arguments: ManualSwitchDriver ~~~~~~~~~~~~~~~~~~ -A ManualSwitchDriver requires the user to control a switch or jumper on the -target. This can be used if a driver binds to a :any:`DigitalOutputProtocol`, -but no automatic control is available. +A :any:`ManualSwitchDriver` requires the user to control a switch or jumper on +the target. +This can be used if a driver binds to a :any:`DigitalOutputProtocol`, but no +automatic control is available. Binds to: - None @@ -2237,7 +2268,7 @@ Arguments: DeditecRelaisDriver ~~~~~~~~~~~~~~~~~~~ -A DeditecRelaisDriver controls a Deditec relay resource. +A :any:`DeditecRelaisDriver` controls a Deditec relay resource. It can set and get the current state of the resource. Binds to: @@ -2257,8 +2288,9 @@ Arguments: MXSUSBDriver ~~~~~~~~~~~~ -A MXUSBDriver is used to upload an image into a device in the mxs USB loader -state. This is useful to bootstrap a bootloader onto a device. +An :any:`MXSUSBDriver` is used to upload an image into a device in the mxs USB +loader state. +This is useful to bootstrap a bootloader onto a device. Binds to: loader: @@ -2285,8 +2317,9 @@ Arguments: IMXUSBDriver ~~~~~~~~~~~~ -A IMXUSBDriver is used to upload an image into a device in the imx USB loader -state. This is useful to bootstrap a bootloader onto a device. +A :any:`IMXUSBDriver` is used to upload an image into a device in the imx USB +loader state. +This is useful to bootstrap a bootloader onto a device. This driver uses the imx-usb-loader tool from barebox. Binds to: @@ -2316,8 +2349,8 @@ Arguments: BDIMXUSBDriver ~~~~~~~~~~~~~~ -The BDIMXUSBDriver is used to upload bootloader images into an i.MX device in -the USB SDP mode. +The :any:`BDIMXUSBDriver` is used to upload bootloader images into an i.MX +device in the USB SDP mode. This driver uses the imx_usb tool by Boundary Devices. Compared to the IMXUSBLoader, it supports two-stage upload of U-Boot images. The images paths need to be specified from code instead of in the YAML @@ -2343,8 +2376,9 @@ Arguments: RKUSBDriver ~~~~~~~~~~~ -A RKUSBDriver is used to upload an image into a device in the rockchip USB loader -state. 
This is useful to bootstrap a bootloader onto a device. +A :any:`RKUSBDriver` is used to upload an image into a device in the rockchip +USB loader state. +This is useful to bootstrap a bootloader onto a device. Binds to: loader: @@ -2375,8 +2409,9 @@ Arguments: UUUDriver ~~~~~~~~~ -A UUUDriver is used to upload an image into a device in the NXP USB loader -state. This is useful to bootstrap a bootloader onto a device. +A :any:`UUUDriver` is used to upload an image into a device in the NXP USB +loader state. +This is useful to bootstrap a bootloader onto a device. Binds to: loader: @@ -2407,7 +2442,7 @@ Arguments: USBStorageDriver ~~~~~~~~~~~~~~~~ -A USBStorageDriver allows access to a USB stick or similar local or +A :any:`USBStorageDriver` allows access to a USB stick or similar local or remote device. Binds to: @@ -2439,7 +2474,7 @@ Arguments: OneWirePIODriver ~~~~~~~~~~~~~~~~ -A OneWirePIODriver controls a `OneWirePIO` resource. +A :any:`OneWirePIODriver` controls a `OneWirePIO`_ resource. It can set and get the current state of the resource. Binds to: @@ -2461,8 +2496,8 @@ Arguments: TFTPProviderDriver / HTTPProviderDriver ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -These drivers control their corresponding Provider resources, either locally or -remotely. +The :any:`TFTPProviderDriver` and :any:`HTTPProviderDriver` control their +corresponding Provider resources, either locally or remotely. Binds to: provider: @@ -2488,7 +2523,7 @@ returns the path to be used by the target. NFSProviderDriver ~~~~~~~~~~~~~~~~~ -An NFSProviderDriver controls an `NFSProvider` resource. +An :any:`NFSProviderDriver` controls an `NFSProvider`_ resource. Binds to: provider: @@ -2511,8 +2546,8 @@ attributes. QEMUDriver ~~~~~~~~~~ -The QEMUDriver allows the usage of a QEMU instance as a target. It requires -several arguments, listed below. +The :any:`QEMUDriver` allows the usage of a QEMU instance as a target. +It requires several arguments, listed below. The kernel, flash, rootfs and dtb arguments refer to images and paths declared in the environment configuration. @@ -2579,7 +2614,7 @@ The QEMUDriver also requires the specification of: SigrokDriver ~~~~~~~~~~~~ -The SigrokDriver uses a SigrokDevice resource to record samples and provides +The :any:`SigrokDriver` uses a `SigrokDevice`_ resource to record samples and provides them during test runs. Binds to: @@ -2599,8 +2634,8 @@ The driver can be used in test cases by calling the `capture`, `stop` and SigrokPowerDriver ~~~~~~~~~~~~~~~~~ -The SigrokPowerDriver uses a `SigrokUSBSerialDevice`_ resource to control a -programmable power supply. +The :any:`SigrokPowerDriver` uses a `SigrokUSBSerialDevice`_ resource to +control a programmable power supply. Binds to: sigrok: @@ -2625,8 +2660,8 @@ Arguments: SigrokDmmDriver ~~~~~~~~~~~~~~~ -The `SigrokDmmDriver` uses a `SigrokDevice` resource to record samples from a digital multimeter (DMM) and provides -them during test runs. +The :any:`SigrokDmmDriver` uses a `SigrokDevice`_ resource to record samples +from a digital multimeter (DMM) and provides them during test runs. It is known to work with Unit-T `UT61B` and `UT61C` devices but should also work with other DMMs supported by *sigrok*. @@ -2659,7 +2694,7 @@ Reading a few samples will very likely work - but obtaining a lot of samples may USBSDMuxDriver ~~~~~~~~~~~~~~ -The :any:`USBSDMuxDriver` uses a USBSDMuxDevice resource to control a +The :any:`USBSDMuxDriver` uses a `USBSDMuxDevice`_ resource to control a USB-SD-Mux device via `usbsdmux `_ tool. 
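For instance, a test could hand the SD card to the host, write an image, and give it back to the DUT. A rough sketch (assuming ``t`` is an already configured :any:`Target`; the valid mode names are listed below):

.. code-block:: python

    >>> sdmux = t.get_driver("USBSDMuxDriver")
    >>> sdmux.set_mode("host")  # expose the SD card to the host
    >>> # ... write an image to the now visible block device ...
    >>> sdmux.set_mode("dut")   # hand the SD card back to the DUT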
@@ -2674,14 +2709,13 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `set_mode()` function with -argument being `dut`, `host`, `off`, or `client`. +The driver can be used in test cases by calling its ``set_mode()`` method with +the argument being "dut", "host", "off", or "client". LXAUSBMuxDriver ~~~~~~~~~~~~~~~ -The :any:`LXAUSBMuxDriver` uses a LXAUSBMux resource to control a USB-Mux -device via the `usbmuxctl `_ -tool. +The :any:`LXAUSBMuxDriver` uses a `LXAUSBMux`_ resource to control a USB-Mux +device via the `usbmuxctl `_ tool. Binds to: mux: @@ -2700,7 +2734,7 @@ Not all combinations can be configured at the same time. USBSDWireDriver ~~~~~~~~~~~~~~~ -The :any:`USBSDWireDriver` uses a USBSDWireDevice resource to control a +The :any:`USBSDWireDriver` uses a `USBSDWireDevice`_ resource to control a USB-SD-Wire device via `sd-mux-ctrl `_ tool. @@ -2926,7 +2960,7 @@ DediprogFlasher SF100 for instance, to the device being flashed. XenaDriver ~~~~~~~~~~ -The XenaDriver allows to use Xena networking test equipment. +The :any:`XenaDriver` allows using Xena networking test equipment. Using the `xenavalkyrie` library, a full API to control the tester is available. Binds to: @@ -2938,8 +2972,8 @@ Currently tested on a `XenaCompact` chassis equipped with a `1 GE test module`. DockerDriver ~~~~~~~~~~~~ -A DockerDriver binds to a `DockerDaemon` and is used to create and control one -docker container. +A :any:`DockerDriver` binds to a `DockerDaemon`_ and is used to create and +control one docker container. | The driver uses the docker python module to interact with the docker daemon. | For more information on the parameters see: @@ -2974,8 +3008,8 @@ Arguments: LXAIOBusPIODriver ~~~~~~~~~~~~~~~~~ -An LXAIOBusPIODriver binds to a single `LXAIOBusPIO` to toggle and read the PIO -states. +An :any:`LXAIOBusPIODriver` binds to a single `LXAIOBusPIO`_ to toggle and read +the PIO states. Binds to: pio: @@ -2994,8 +3028,8 @@ Arguments: HttpDigitalOutputDriver ~~~~~~~~~~~~~~~~~~~~~~~ -A HttpDigitalOutputDriver binds to a `HttpDigitalOutput` to set and get a -digital output state via HTTP. +A :any:`HttpDigitalOutputDriver` binds to a `HttpDigitalOutput`_ to set and get +a digital output state via HTTP. Binds to: http: @@ -3013,7 +3047,8 @@ Arguments: PyVISADriver ~~~~~~~~~~~~ -The PyVISADriver uses a PyVISADevice resource to control test equipment manageable by PyVISA. +The :any:`PyVISADriver` uses a `PyVISADevice`_ resource to control test +equipment manageable by PyVISA. Binds to: pyvisa_resource: @@ -3027,8 +3062,8 @@ Arguments: NetworkInterfaceDriver ~~~~~~~~~~~~~~~~~~~~~~ -This driver allows controlling a network interface (such as Ethernet or WiFi) on -the exporter using NetworkManager. +The :any:`NetworkInterfaceDriver` allows controlling a network interface (such +as Ethernet or WiFi) on the exporter using NetworkManager. The configuration is based on dictionaries with contents similar to NM's connection files in INI-format. @@ -3059,8 +3094,8 @@ Arguments: RawNetworkInterfaceDriver ~~~~~~~~~~~~~~~~~~~~~~~~~ -This driver allows "raw" control of a network interface (such as Ethernet or -WiFi). +The :any:`RawNetworkInterfaceDriver` allows "raw" control of a network +interface (such as Ethernet or WiFi). The labgrid-raw-interface helper (``helpers/labgrid-raw-interface``) needs to be installed in the PATH and usable via sudo without password. @@ -3110,7 +3145,7 @@ Such a state could be the bootloader or a booted Linux kernel with shell.
BareboxStrategy ~~~~~~~~~~~~~~~ -A BareboxStrategy has four states: +A :any:`BareboxStrategy` has four states: - unknown - off @@ -3155,11 +3190,11 @@ the ``shell`` state: >>> s.transition("shell") This command would transition from the bootloader into a Linux shell and -activate the ShellDriver. +activate the `ShellDriver`_. ShellStrategy ~~~~~~~~~~~~~ -A ShellStrategy has three states: +A :any:`ShellStrategy` has three states: - unknown - off @@ -3201,11 +3236,11 @@ the ``shell`` state: >>> s = t.get_driver("ShellStrategy") This command would transition directly into a Linux shell and -activate the ShellDriver. +activate the `ShellDriver`_. UBootStrategy ~~~~~~~~~~~~~ -A UBootStrategy has four states: +A :any:`UBootStrategy` has four states: - unknown - off @@ -3250,11 +3285,11 @@ the ``shell`` state: >>> s.transition("shell") This command would transition from the bootloader into a Linux shell and -activate the ShellDriver. +activate the `ShellDriver`_. DockerStrategy ~~~~~~~~~~~~~~ -A DockerStrategy has three states: +A :any:`DockerStrategy` has three states: - unknown - gone @@ -3305,7 +3340,7 @@ Reporters StepReporter ~~~~~~~~~~~~ -The StepReporter outputs individual labgrid steps to `STDOUT`. +The :any:`StepReporter` outputs individual labgrid steps to `STDOUT`. .. doctest:: @@ -3324,7 +3359,7 @@ AssertionError, as will starting an already started StepReporter. ConsoleLoggingReporter ~~~~~~~~~~~~~~~~~~~~~~ -The ConsoleLoggingReporter outputs read calls from the console transports into +The :any:`ConsoleLoggingReporter` outputs read calls from the console transports into files. It takes the path as a parameter. .. doctest:: @@ -3429,8 +3464,8 @@ becomes: - SerialDriver: {} - SerialDriver: {} -This configuration doesn't specify which :any:`RawSerialPort` to use for each -:any:`SerialDriver`, so it will cause an exception when instantiating the +This configuration doesn't specify which `RawSerialPort`_ to use for each +`SerialDriver`_, so it will cause an exception when instantiating the :any:`Target`. To bind the correct driver to the correct resource, explicit ``name`` and ``bindings`` properties are used: @@ -3476,7 +3511,7 @@ As an example: qemu_bin: !template "$BASE/bin/qemu-bin" would resolve the qemu_bin path relative to the BASE dir of the YAML file and -try to use the RemotePlace with the name set in the LG_PLACE environment +try to use the `RemotePlace`_ with the name set in the LG_PLACE environment variable. See the :ref:`labgrid-device-config` man page for documentation on the @@ -3517,7 +3552,7 @@ and `` will be passed to its constructor. For USB resources, you will most likely want to use :ref:`udev-matching` here. As a simple example, here is one group called *usb-hub-in-rack12* containing -a single :any:`USBSerialPort` resource (using udev matching), which will be +a single `USBSerialPort`_ resource (using udev matching), which will be exported as `exportername/usb-hub-in-rack12/NetworkSerialPort/USBSerialPort`: .. code-block:: yaml @@ -3531,9 +3566,9 @@ To export multiple resources of the same class in the same group, you can choose a unique resource name, and then use the ``cls`` parameter to specify the class name instead (which will not be passed as a parameter to the class constructor). 
-In this next example we will export one :any:`USBSerialPort` as +In this next example we will export one `USBSerialPort`_ as `exportername/usb-hub-in-rack12/NetworkSerialPort/console-main`, -and another :any:`USBSerialPort` as +and another `USBSerialPort`_ as `exportername/usb-hub-in-rack12/NetworkSerialPort/console-secondary`: .. code-block:: yaml From a96ee54ccfbcc3008b97a186abd28dd5eb6e5004 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 12:27:24 +0100 Subject: [PATCH 157/384] doc/configuration: make yaml snippets more consistent Explicitly quote strings to prevent strange YAML string auto-detection issues. Also use the more readable dict notation everywhere. Signed-off-by: Bastian Krause --- doc/configuration.rst | 315 +++++++++++++++++++++--------------------- 1 file changed, 161 insertions(+), 154 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 40c511722..fa355c4db 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -29,7 +29,7 @@ enumeration order. .. code-block:: yaml RawSerialPort: - port: /dev/ttyUSB0 + port: '/dev/ttyUSB0' speed: 115200 The example would access the serial port /dev/ttyUSB0 on the local computer with @@ -51,7 +51,7 @@ or raw tcp. .. code-block:: yaml NetworkSerialPort: - host: remote.example.computer + host: 'remote.example.computer' port: 53867 speed: 115200 @@ -90,7 +90,7 @@ exporter. .. code-block:: yaml ModbusRTU: - port: "/dev/ttyUSB0" + port: '/dev/ttyUSB0' address: 16 speed: 115200 timeout: 0.25 @@ -114,7 +114,7 @@ This allows identification through hot-plugging or rebooting. USBSerialPort: match: - ID_SERIAL_SHORT: P-00-00682 + ID_SERIAL_SHORT: 'P-00-00682' speed: 115200 The example would search for a USB serial converter with the key @@ -139,8 +139,8 @@ A :any:`NetworkPowerPort` describes a remotely switchable power port. .. code-block:: yaml NetworkPowerPort: - model: gude - host: powerswitch.example.computer + model: 'gude' + host: 'powerswitch.example.computer' index: 0 The example describes port 0 on the remote power switch @@ -250,8 +250,8 @@ PDUDaemon configuration file needs to be specified. .. code-block:: yaml PDUDaemonPort: - host: pduserver - pdu: apc-snmpv3-noauth + host: 'pduserver' + pdu: 'apc-snmpv3-noauth' index: 1 The example describes port 1 on the PDU configured as `apc-snmpv3-noauth`, with @@ -272,7 +272,7 @@ A :any:`YKUSHPowerPort` describes a YEPKIT YKUSH USB (HID) switchable USB hub. .. code-block:: yaml YKUSHPowerPort: - serial: YK12345 + serial: 'YK12345' index: 1 The example describes port 1 on the YKUSH USB hub with the @@ -300,7 +300,7 @@ A :any:`USBPowerPort` describes a generic switchable USB hub as supported by USBPowerPort: match: - ID_PATH: pci-0000:00:14.0-usb-0:2:1.0 + ID_PATH: 'pci-0000:00:14.0-usb-0:2:1.0' index: 1 The example describes port 1 on the hub with the ID_PATH @@ -330,7 +330,7 @@ A :any:`SiSPMPowerPort` describes a GEMBIRD SiS-PM as supported by SiSPMPowerPort: match: - ID_PATH: platform-1c1a400.usb-usb-0:2 + ID_PATH: 'platform-1c1a400.usb-usb-0:2' index: 1 The example describes port 1 on the hub with the ID_PATH @@ -351,10 +351,10 @@ A :any:`TasmotaPowerPort` resource describes a switchable `Tasmota ..
code-block:: yaml TasmotaPowerPort: - host: this.is.an.example.host.com - status_topic: stat/tasmota_575A2B/POWER - power_topic: cmnd/tasmota_575A2B/POWER - avail_topic: tele/tasmota_575A2B/LWT + host: 'this.is.an.example.host.com' + status_topic: 'stat/tasmota_575A2B/POWER' + power_topic: 'cmnd/tasmota_575A2B/POWER' + avail_topic: 'tele/tasmota_575A2B/LWT' The example uses a mosquitto server at "this.is.an.example.host.com" and has the topics setup for a tasmota power port that has the ID 575A2B. @@ -380,7 +380,7 @@ A :any:`ModbusTCPCoil` describes a coil accessible via ModbusTCP. .. code-block:: yaml ModbusTCPCoil: - host: "192.168.23.42" + host: '192.168.23.42' coil: 1 The example describes the coil with the address 1 on the ModbusTCP device @@ -407,7 +407,7 @@ A :any:`DeditecRelais8` describes a Deditec USB GPO module with 8 relays. index: 1 invert: false match: - ID_PATH: pci-0000:00:14.0-usb-0:2:1.0 + ID_PATH: 'pci-0000:00:14.0-usb-0:2:1.0' Arguments: - index (int): number of the relay to use @@ -425,8 +425,8 @@ A :any:`OneWirePIO` describes a onewire programmable I/O pin. .. code-block:: yaml OneWirePIO: - host: example.computer - path: /29.7D6913000000/PIO.0 + host: 'example.computer' + path: '/29.7D6913000000/PIO.0' invert: false The example describes a `PIO.0` at device address `29.7D6913000000` via the onewire @@ -448,10 +448,10 @@ An :any:`LXAIOBusPIO` resource describes a single PIO pin on an LXAIOBusNode. .. code-block:: yaml LXAIOBusPIO: - host: localhost:8080 - node: IOMux-00000003 - pin: OUT0 - invert: False + host: 'localhost:8080' + node: 'IOMux-00000003' + pin: 'OUT0' + invert: false The example uses an lxa-iobus-server running on localhost:8080, with node IOMux-00000003 and pin OUT0. @@ -479,9 +479,9 @@ It currently supports the widely used "dcttech USBRelay". HIDRelay: index: 2 - invert: False + invert: false match: - ID_PATH: pci-0000:00:14.0-usb-0:2:1.0 + ID_PATH: 'pci-0000:00:14.0-usb-0:2:1.0' Arguments: - index (int, default=1): number of the relay to use @@ -499,9 +499,9 @@ controlled via HTTP. .. code-block:: yaml HttpDigitalOutput: - url: http://host.example/some/endpoint - body_asserted: "On" - body_deasserted: "Off" + url: 'http://host.example/some/endpoint' + body_asserted: 'On' + body_deasserted: 'Off' The example assumes a simple scenario where the same URL is used for PUT requests that set the output state and GET requests to get the current state. @@ -535,8 +535,8 @@ A :any:`NetworkService` describes a remote SSH connection. .. code-block:: yaml NetworkService: - address: example.computer - username: root + address: 'example.computer' + username: 'root' The example describes a remote SSH connection to the computer `example.computer` with the username `root`. @@ -568,7 +568,7 @@ device. USBMassStorage: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0-scsi-0:0:0:3 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0-scsi-0:0:0:3' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -592,8 +592,8 @@ device from all connected supported devices use the `SigrokUSBDevice`_. .. code-block:: yaml SigrokDevice: - driver: fx2lafw - channels: "D0=CLK,D1=DATA" + driver: 'fx2lafw' + channels: 'D0=CLK,D1=DATA' Arguments: - driver (str): name of the sigrok driver to use @@ -611,7 +611,7 @@ An :any:`IMXUSBLoader` resource describes a USB device in the imx loader state. 
IMXUSBLoader: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -629,7 +629,7 @@ An :any:`MXSUSBLoader` resource describes a USB device in the mxs loader state. MXSUSBLoader: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -648,7 +648,7 @@ state. RKUSBLoader: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -682,7 +682,7 @@ supported for backwards compatibility. AndroidUSBFastboot: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - usb_vendor_id (str, default="1d6b"): USB vendor ID to be compared with the @@ -702,7 +702,7 @@ state. .. code-block:: yaml AndroidNetFastboot: - address: "192.168.23.42" + address: '192.168.23.42' Arguments: - address (str): ip address of the fastboot device @@ -723,7 +723,7 @@ Upgrade) mode. DFUDevice: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -739,7 +739,7 @@ Ethernet or WiFi) .. code-block:: yaml NetworkInterface: - ifname: eth0 + ifname: 'eth0' Arguments: - ifname (str): name of the interface @@ -757,7 +757,7 @@ Ethernet or WiFi) USBNetworkInterface: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -779,7 +779,7 @@ An :any:`AlteraUSBBlaster` resource describes an Altera USB blaster. AlteraUSBBlaster: match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -797,7 +797,7 @@ FTDI FT2232H). USBDebugger: match: - ID_PATH: pci-0000:00:10.0-usb-0:1.4 + ID_PATH: 'pci-0000:00:10.0-usb-0:1.4' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -813,8 +813,8 @@ which is accessible via SNMP. .. code-block:: yaml SNMPEthernetPort: - switch: "switch-012" - interface: "17" + switch: 'switch-012' + interface: '17' Arguments: - switch (str): host name of the Ethernet switch @@ -830,10 +830,10 @@ A :any:`SigrokUSBDevice` resource describes a sigrok USB device. .. code-block:: yaml SigrokUSBDevice: - driver: fx2lafw - channels: "D0=CLK,D1=DATA" + driver: 'fx2lafw' + channels: 'D0=CLK,D1=DATA' match: - ID_PATH: pci-0000:06:00.0-usb-0:1.3.2:1.0 + ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0' Arguments: - driver (str): name of the sigrok driver to use @@ -859,9 +859,9 @@ communicates over a USB serial port instead of being a USB device itself (see .. code-block:: yaml SigrokUSBSerialDevice: - driver: manson-hcs-3xxx + driver: 'manson-hcs-3xxx' match: - '@ID_SERIAL_SHORT': P-00-02389 + '@ID_SERIAL_SHORT': 'P-00-02389' Arguments: - driver (str): name of the sigrok driver to use @@ -883,7 +883,7 @@ device. 
USBSDMuxDevice: match: - '@ID_PATH': pci-0000:00:14.0-usb-0:1.2 + '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -905,7 +905,7 @@ A :any:`LXAUSBMux` resource describes a Linux Automation GmbH USB-Mux device. LXAUSBMux: match: - '@ID_PATH': pci-0000:00:14.0-usb-0:1.2 + '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -928,7 +928,7 @@ device. USBSDWireDevice: match: - '@ID_PATH': pci-0000:00:14.0-usb-0:1.2 + '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -951,7 +951,7 @@ Video4Linux2 kernel driver. USBVideo: match: - '@ID_PATH': pci-0000:00:14.0-usb-0:1.2 + '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -988,7 +988,7 @@ by an ALSA kernel driver. USBAudioInput: match: - ID_PATH: pci-0000:00:14.0-usb-0:3:1.0 + ID_PATH: 'pci-0000:00:14.0-usb-0:3:1.0' Arguments: - index (int, default=0): ALSA PCM device number (as in @@ -1014,7 +1014,7 @@ The low-level communication is handled by the ``usbtmc`` kernel driver. USBTMC: match: - '@ID_PATH': pci-0000:00:14.0-usb-0:1.2 + '@ID_PATH': 'pci-0000:00:14.0-usb-0:1.2' Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -1081,7 +1081,7 @@ than running a flashing program with `FlashScriptDriver`_. USBFlashableDevice: match: - SUBSYSTEM: usb + SUBSYSTEM: 'usb' ID_SERIAL: '1234' Arguments: @@ -1134,7 +1134,7 @@ instance the `XenaDriver`_ must connect to in order to configure a Xena chassis. .. code-block:: yaml XenaManager: - hostname: "example.computer" + hostname: 'example.computer' Arguments: - hostname (str): hostname or IP of the management address of the Xena tester @@ -1151,8 +1151,8 @@ Such device could be a signal generator. .. code-block:: yaml PyVISADevice: - type: "TCPIP" - url: "192.168.110.11" + type: 'TCPIP' + url: '192.168.110.11' Arguments: - type (str): device resource type following the pyVISA resource syntax, e.g. @@ -1170,7 +1170,7 @@ A :any:`HTTPVideoStream` resource describes a IP video stream over HTTP or HTTPS .. code-block:: yaml HTTPVideoStream: - url: http://192.168.110.11/0.ts + url: 'http://192.168.110.11/0.ts' Arguments: - url (str): URI of the IP video stream @@ -1208,12 +1208,12 @@ A :any:`HTTPProvider` resource describes an HTTP server. .. code-block:: yaml TFTPProvider: - internal: "/srv/tftp/board-23/" - external: "board-23/" + internal: '/srv/tftp/board-23/' + external: 'board-23/' HTTPProvider: - internal: "/srv/www/board-23/" - external: "http://192.168.1.1/board-23/" + internal: '/srv/www/board-23/' + external: 'http://192.168.1.1/board-23/' Arguments: - internal (str): path prefix to the local directory accessible by the target @@ -1250,14 +1250,14 @@ a remote computer. .. code-block:: yaml RemoteTFTPProvider - host: "tftphost" - internal: "/srv/tftp/board-23/" - external: "board-23/" + host: 'tftphost' + internal: '/srv/tftp/board-23/' + external: 'board-23/' RemoteHTTPProvider: - host: "httphost" - internal: "/srv/www/board-23/" - external: "http://192.168.1.1/board-23/" + host: 'httphost' + internal: '/srv/www/board-23/' + external: 'http://192.168.1.1/board-23/' Arguments: - host (str): hostname of the remote host @@ -1276,7 +1276,7 @@ remote computer. .. 
code-block:: yaml RemoteNFSProvider: - host: "nfshost" + host: 'nfshost' Arguments: - host (str): hostname of the remote host @@ -1292,7 +1292,7 @@ place. .. code-block:: yaml RemotePlace: - name: example-place + name: 'example-place' The example describes the remote place `example-place`. It will connect to the labgrid remote coordinator, wait until the resources become available and expose @@ -1313,7 +1313,7 @@ created through interaction with that daemon. .. code-block:: yaml DockerDaemon: - docker_daemon_url: unix://var/run/docker.sock + docker_daemon_url: 'unix://var/run/docker.sock' The example describes a docker daemon accessible via the ``/var/run/docker.sock`` unix socket. When used by a `DockerDriver`_, the @@ -1404,7 +1404,7 @@ with ``udevadm info /dev/ttyUSB0``. USBSerialPort: match: - '@ID_PATH': pci-0000:05:00.0-usb-0:1.2.5.5 + '@ID_PATH': 'pci-0000:05:00.0-usb-0:1.2.5.5' Note the ``@`` in the ``@ID_PATH`` match, which applies this match to the device's parents instead of directly to itself. @@ -1470,7 +1470,7 @@ We use the ``ID_USB_INTERFACE_NUM`` to distinguish between the two ports: USBSerialPort: match: - '@ID_PATH': pci-0000:05:00.0-usb-2:10.2.2.2' + '@ID_PATH': 'pci-0000:05:00.0-usb-2:10.2.2.2' ID_USB_INTERFACE_NUM: '01' Matching a USB UART by Serial Number @@ -1484,7 +1484,7 @@ changes or a board has been moved between host systems. USBSerialPort: match: - ID_SERIAL_SHORT: P-00-03564 + ID_SERIAL_SHORT: 'P-00-03564' To check if your device has a serial number, you can use ``udevadm info``: @@ -1583,7 +1583,7 @@ Implements: ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' - username: root + username: 'root' Arguments: - prompt (regex): shell prompt to match after logging in @@ -1638,7 +1638,7 @@ Implements: .. code-block:: yaml SSHDriver: - keyfile: example.key + keyfile: 'example.key' Arguments: - keyfile (str): optional, filename of private key to login into the remote system @@ -1672,8 +1672,8 @@ Implements: UBootDriver: prompt: 'Uboot> ' boot_commands: - net: run netboot - spi: run spiboot + net: 'run netboot' + spi: 'run spiboot' Arguments: - prompt (regex, default=""): U-Boot prompt to match @@ -1733,7 +1733,7 @@ Implements: SmallUBootDriver: prompt: 'ap143-2\.0> ' boot_expression: 'Autobooting in 1 seconds' - boot_secret: "tpl" + boot_secret: 'tpl' Arguments: - boot_expression (str, default="U-Boot 20\\d+"): regex to match the U-Boot start string @@ -1808,8 +1808,8 @@ Implements: .. code-block:: yaml AndroidFastbootDriver: - boot_image: mylocal.image - sparse_size: 100M + boot_image: 'mylocal.image' + sparse_size: '100M' Arguments: - boot_image (str): optional, image key referring to the image to boot @@ -1866,14 +1866,14 @@ Implements: .. code-block:: yaml OpenOCDDriver: - config: local-settings.cfg - image: bitstream - interface_config: ftdi/lambdaconcept_ecpix-5.cfg - board_config: lambdaconcept_ecpix-5.cfg + config: 'local-settings.cfg' + image: 'bitstream' + interface_config: 'ftdi/lambdaconcept_ecpix-5.cfg' + board_config: 'lambdaconcept_ecpix-5.cfg' load_commands: - - "init" - - "svf -quiet {filename}" - - "exit" + - 'init' + - 'svf -quiet {filename}' + - 'exit' Arguments: - config (str/list): optional, OpenOCD configuration file(s) @@ -1941,9 +1941,9 @@ Implements: .. 
code-block:: yaml ExternalPowerDriver: - cmd_on: example_command on - cmd_off: example_command off - cmd_cycle: example_command cycle + cmd_on: 'example_command on' + cmd_off: 'example_command off' + cmd_cycle: 'example_command cycle' Arguments: - cmd_on (str): command to turn power to the board on @@ -2147,8 +2147,9 @@ Implements: .. code-block:: yaml SerialPortDigitalOutputDriver: - signal: "dtr" - bindings: { serial : "nameOfSerial" } + signal: 'dtr' + bindings: + serial: 'nameOfSerial' Arguments: - signal (str): control signal to use: "dtr" or "rts" @@ -2180,7 +2181,7 @@ Implements: .. code-block:: yaml FileDigitalOutputDriver: - filepath: "/sys/class/leds/myled/brightness" + filepath: '/sys/class/leds/myled/brightness' Arguments: - filepath (str): file that is used for reads and writes. @@ -2306,10 +2307,10 @@ Implements: main: drivers: MXSUSBDriver: - image: mybootloaderkey + image: 'mybootloaderkey' images: - mybootloaderkey: path/to/mybootloader.img + mybootloaderkey: 'path/to/mybootloader.img' Arguments: - image (str): optional, key in :ref:`images ` containing the path @@ -2338,10 +2339,10 @@ Implements: main: drivers: IMXUSBDriver: - image: mybootloaderkey + image: 'mybootloaderkey' images: - mybootloaderkey: path/to/mybootloader.img + mybootloaderkey: 'path/to/mybootloader.img' Arguments: - image (str): optional, key in :ref:`images ` containing the path @@ -2394,12 +2395,12 @@ Implements: main: drivers: RKUSBDriver: - image: mybootloaderkey - usb_loader: myloaderkey + image: 'mybootloaderkey' + usb_loader: 'myloaderkey' images: - mybootloaderkey: path/to/mybootloader.img - myloaderkey: path/to/myloader.bin + mybootloaderkey: 'path/to/mybootloader.img' + myloaderkey: 'path/to/myloader.bin' Arguments: - image (str): optional, key in :ref:`images ` containing the path @@ -2429,11 +2430,11 @@ Implements: main: drivers: UUUDriver: - image: mybootloaderkey - cmd: spl + image: 'mybootloaderkey' + cmd: 'spl' images: - mybootloaderkey: path/to/mybootloader.img + mybootloaderkey: 'path/to/mybootloader.img' Arguments: - image (str): optional, key in :ref:`images ` containing the path @@ -2454,19 +2455,18 @@ Binds to: - `USBSDWireDevice`_ - `NetworkUSBSDWireDevice`_ - Implements: - None (yet) .. code-block:: yaml USBStorageDriver: - image: flashimage + image: 'flashimage' .. code-block:: yaml images: - flashimage: ../images/myusb.image + flashimage: '../images/myusb.image' Arguments: - image (str): optional, key in :ref:`images ` containing the path @@ -2557,26 +2557,26 @@ Binds to: .. code-block:: yaml QEMUDriver: - qemu_bin: qemu_arm - machine: vexpress-a9 - cpu: cortex-a9 - memory: 512M - boot_args: "root=/dev/root console=ttyAMA0,115200" - extra_args: "" - kernel: kernel - rootfs: rootfs - dtb: dtb - nic: user + qemu_bin: 'qemu_arm' + machine: 'vexpress-a9' + cpu: 'cortex-a9' + memory: '512M' + boot_args: 'root=/dev/root console=ttyAMA0,115200' + extra_args: '' + kernel: 'kernel' + rootfs: 'rootfs' + dtb: 'dtb' + nic: 'user' .. code-block:: yaml tools: - qemu_arm: /bin/qemu-system-arm + qemu_arm: '/bin/qemu-system-arm' paths: - rootfs: ../images/root + rootfs: '../images/root' images: - dtb: ../images/mydtb.dtb - kernel: ../images/vmlinuz + dtb: '../images/mydtb.dtb' + kernel: '../images/vmlinuz' Implements: @@ -2835,7 +2835,7 @@ The :any:`FlashromDriver` is used to flash a rom, using the flashrom utility. FlashromDriver: image: 'foo' images: - foo: ../images/image_to_load.raw + foo: '../images/image_to_load.raw' Binds to: flashrom_resource: @@ -2874,7 +2874,7 @@ a device. .. 
code-block:: yaml images: - foo: ../images/flash_device.sh + foo: '../images/flash_device.sh' Binds to: device: @@ -2940,7 +2940,7 @@ The :any:`DediprogFlashDriver` is used to flash an SPI device using DediprogFlas DediprogFlashDriver: image: 'foo' images: - foo: ../images/image_to_load.raw + foo: '../images/image_to_load.raw' Binds to: flasher: @@ -2989,10 +2989,10 @@ Implements: .. code-block:: yaml DockerDriver: - image_uri: "rastasheep/ubuntu-sshd:16.04" - container_name: "ubuntu-lg-example" - host_config: {"network_mode":"bridge"} - network_services: [{"port":22,"username":"root","password":"root"}] + image_uri: 'rastasheep/ubuntu-sshd:16.04' + container_name: 'ubuntu-lg-example' + host_config: {'network_mode': 'bridge'} + network_services: [{'port': 22, 'username': 'root', 'password': 'root'}] Arguments: - image_uri (str): identifier of the docker image to use (may have a tag suffix) @@ -3106,7 +3106,7 @@ exporting network interfaces for the RawNetworkInterfaceDriver, e.g.: raw-interface: denied-interfaces: - - eth1 + - 'eth1' It supports: @@ -3169,7 +3169,7 @@ Here is an example environment config: ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' - username: root + username: 'root' BareboxStrategy: {} In order to use the BareboxStrategy via labgrid as a library and transition to @@ -3216,7 +3216,7 @@ Here is an example environment config: ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' - username: root + username: 'root' ShellStrategy: {} In order to use the ShellStrategy via labgrid as a library and transition to @@ -3264,7 +3264,7 @@ Here is an example environment config: ShellDriver: prompt: 'root@\w+:[^ ]+ ' login_prompt: ' login: ' - username: root + username: 'root' UBootStrategy: {} In order to use the UBootStrategy via labgrid as a library and transition to @@ -3304,13 +3304,13 @@ Here is an example environment config: main: resources: DockerDaemon: - docker_daemon_url: unix://var/run/docker.sock + docker_daemon_url: 'unix://var/run/docker.sock' drivers: DockerDriver: - image_uri: "rastasheep/ubuntu-sshd:16.04" - container_name: "ubuntu-lg-example" - host_config: {"network_mode":"bridge"} - network_services: [{"port":22,"username":"root","password":"root"}] + image_uri: 'rastasheep/ubuntu-sshd:16.04' + container_name: 'ubuntu-lg-example' + host_config: {'network_mode': 'bridge'} + network_services: [{'port': 22, 'username': 'root', 'password': 'root'}] DockerStrategy: {} In order to use the DockerStrategy via labgrid as a library and transition to @@ -3506,9 +3506,9 @@ As an example: main: resources: RemotePlace: - name: !template $LG_PLACE + name: !template '$LG_PLACE' tools: - qemu_bin: !template "$BASE/bin/qemu-bin" + qemu_bin: !template '$BASE/bin/qemu-bin' would resolve the qemu_bin path relative to the BASE dir of the YAML file and try to use the `RemotePlace`_ with the name set in the LG_PLACE environment @@ -3560,7 +3560,7 @@ exported as `exportername/usb-hub-in-rack12/NetworkSerialPort/USBSerialPort`: usb-hub-in-rack12: USBSerialPort: match: - '@ID_PATH': pci-0000:05:00.0-usb-3-1.3 + '@ID_PATH': 'pci-0000:05:00.0-usb-3-1.3' To export multiple resources of the same class in the same group, you can choose a unique resource name, and then use the ``cls`` parameter to @@ -3575,13 +3575,13 @@ and another `USBSerialPort`_ as usb-hub-in-rack12: console-main: - cls: USBSerialPort + cls: 'USBSerialPort' match: - '@ID_PATH': pci-0000:05:00.0-usb-3-1.3 + '@ID_PATH': 'pci-0000:05:00.0-usb-3-1.3' console-secondary: - cls: USBSerialPort + cls: 
'USBSerialPort' match: - '@ID_PATH': pci-0000:05:00.0-usb-3-1.4 + '@ID_PATH': 'pci-0000:05:00.0-usb-3-1.4' Note that you could also split the resources up into distinct groups instead to achieve the same effect: @@ -3591,11 +3591,11 @@ to achieve the same effect: usb-hub-in-rack12-port3: USBSerialPort: match: - '@ID_PATH': pci-0000:05:00.0-usb-3-1.3 + '@ID_PATH': 'pci-0000:05:00.0-usb-3-1.3' usb-hub-in-rack12-port4: USBSerialPort: match: - '@ID_PATH': pci-0000:05:00.0-usb-3-1.4 + '@ID_PATH': 'pci-0000:05:00.0-usb-3-1.4' Templating ~~~~~~~~~~ @@ -3609,14 +3609,21 @@ is used as a preprocessor for the configuration file: # for idx in range(1, 17) {{ 1000 + idx }}: NetworkSerialPort: - {host: rl1, port: {{ 4000 + idx }}} + host: 'rl1' + port: {{ 4000 + idx }} NetworkPowerPort: # if 1 <= idx <= 8 - {model: apc, host: apc1, index: {{ idx }}} + model: 'apc' + host: 'apc1' + index: {{ idx }} # elif 9 <= idx <= 12 - {model: netio, host: netio4, index: {{ idx - 8 }}} + model: 'netio' + host: 'netio4' + index: {{ idx - 8 }} # elif 13 <= idx <= 16 - {model: netio, host: netio5, index: {{ idx - 12 }}} + model: 'netio' + host: 'netio5' + index: {{ idx - 12 }} # endif # endfor From 11e09f4811dd3ae8b485a3ee2dc04c19360f1e56 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 18:12:52 +0100 Subject: [PATCH 158/384] doc/configuration: use code and italic annotations more consistent Signed-off-by: Bastian Krause --- doc/configuration.rst | 333 ++++++++++++++++++++++-------------------- 1 file changed, 171 insertions(+), 162 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index fa355c4db..bd50e4fb8 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -32,8 +32,8 @@ enumeration order. port: '/dev/ttyUSB0' speed: 115200 -The example would access the serial port /dev/ttyUSB0 on the local computer with -a baud rate of 115200. +The example would access the serial port ``/dev/ttyUSB0`` on the local computer +with a baud rate of ``115200``. Arguments: - port (str): path to the serial device @@ -55,8 +55,9 @@ or raw tcp. port: 53867 speed: 115200 -The example would access the serial port on computer remote.example.computer via -port 53867 and use a baud rate of 115200 with the RFC2217 protocol. +The example would access the serial port on computer +``remote.example.computer`` via port ``53867`` and use a baud rate of +``115200`` with the RFC2217 protocol. Arguments: - host (str): hostname of the remote host @@ -96,7 +97,7 @@ exporter. timeout: 0.25 Arguments: - - port (str): tty the instrument is connected to, e.g. '/dev/ttyUSB0' + - port (str): tty the instrument is connected to, e.g. ``/dev/ttyUSB0`` - address (int): slave address on the modbus, e.g. 16 - speed (int, default=115200): baud rate of the serial port - timeout (float, default=0.25): timeout in seconds @@ -118,9 +119,9 @@ This allows identification through hot-plugging or rebooting. speed: 115200 The example would search for a USB serial converter with the key -`ID_SERIAL_SHORT` and the value `P-00-00682` and use it with a baud rate -of 115200. -The `ID_SERIAL_SHORT` property is set by the usb_id builtin helper program. +``ID_SERIAL_SHORT`` and the value ``P-00-00682`` and use it with a baud rate +of ``115200``. +The ``ID_SERIAL_SHORT`` property is set by the ``usb_id`` builtin helper program. Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ @@ -144,63 +145,63 @@ A :any:`NetworkPowerPort` describes a remotely switchable power port. 
index: 0 The example describes port 0 on the remote power switch -`powerswitch.example.computer`, which is a `gude` model. +``powerswitch.example.computer``, which is a ``gude`` model. Arguments: - model (str): model of the power switch - host (str): hostname of the power switch - index (int): number of the port to switch -The `model` property selects one of several `backend implementations +The ``model`` property selects one of several `backend implementations `_. Currently available are: ``apc`` - Controls an APU PDU via SNMP. + Controls an *APC PDU* via SNMP. ``digipower`` - Controls a DigiPower PDU via a simple HTTP API. + Controls a *DigiPower PDU* via a simple HTTP API. ``digitalloggers_http`` - Control a Digital Loggers PDU that use the legacy HTTP API. Note that + Controls *Digital Loggers PDUs* that use the legacy HTTP API. Note that the host argument must include the protocol, such as ``http://192.168.0.3`` or ``http://admin:pass@192.168.0.4``. ``eaton`` - Controls Eaton ePDUs via SNMP. + Controls *Eaton ePDUs* via SNMP. ``eg_pms2_network`` - Controls the EG_PMS2_LAN & EG_PMS2_WLAN devices, through simple HTTP POST and - GET requests. The device requires a password for logging into the control - interface, this module deliberately uses the standard password '1' and is - not compatible with a different password. + Controls the *EG_PMS2_LAN* & *EG_PMS2_WLAN* devices, through simple HTTP POST + and GET requests. The device requires a password for logging into the + control interface; this module deliberately uses the standard password ``1`` + and is not compatible with a different password. ``eth008`` - Controls a Robot-Electronics eth008 via a simple HTTP API. + Controls a *Robot-Electronics eth008* via a simple HTTP API. ``gude`` - Controls a Gude PDU via a simple HTTP API. + Controls a *Gude PDU* via a simple HTTP API. ``gude24`` - Controls a Gude Expert Power Control 8008 PDU via a simple HTTP API. + Controls a *Gude Expert Power Control 8008 PDU* via a simple HTTP API. ``gude8031`` - Controls a Gude Expert Power Control 8031 PDU via a simple HTTP API. + Controls a *Gude Expert Power Control 8031 PDU* via a simple HTTP API. ``gude8225`` - Controls a Gude Expert Power Control 8225 PDU via a simple HTTP API. + Controls a *Gude Expert Power Control 8225 PDU* via a simple HTTP API. ``gude8316`` - Controls a Gude Expert Power Control 8316 PDU via a simple HTTP API. + Controls a *Gude Expert Power Control 8316 PDU* via a simple HTTP API. ``netio`` - Controls a NETIO 4-Port PDU via a simple HTTP API. + Controls a *NETIO 4-Port PDU* via a simple HTTP API. ``netio_kshell`` - Controls a NETIO 4C PDU via a Telnet interface. + Controls a *NETIO 4C PDU* via a Telnet interface. ``raritan`` - Controls Raritan PDUs via SNMP. + Controls *Raritan PDUs* via SNMP. ``rest`` This is a generic backend for PDU implementations which can be controlled via for details. ``sentry`` - Controls a Sentry PDU via SNMP using Sentry3-MIB. - It was tested on CW-24VDD and 4805-XLS-16. + Controls a *Sentry PDU* via SNMP using Sentry3-MIB. + It was tested on *CW-24VDD* and *4805-XLS-16*. ``shelly_gen1`` - Controls relays of Shelly devices using the Gen 1 Device API. + Controls relays of *Shelly* devices using the Gen 1 Device API. See the `docstring in the module `__ for details. ``siglent`` - Controls Siglent SPD3000X series modules via the `vxi11 Python module + Controls *Siglent SPD3000X* series modules via the `vxi11 Python module `_.
``simplerest`` @@ -231,7 +232,7 @@ Currently available are: for details. ``tplink`` - Controls TP-Link power strips via `python-kasa + Controls *TP-Link power strips* via `python-kasa `_. ``poe_mib`` @@ -254,8 +255,8 @@ PDUDaemon configuration file needs to be specified. pdu: 'apc-snmpv3-noauth' index: 1 -The example describes port 1 on the PDU configured as `apc-snmpv3-noauth`, with -PDUDaemon running on the host `pduserver`. +The example describes port ``1`` on the PDU configured as +``apc-snmpv3-noauth``, with PDUDaemon running on the host ``pduserver``. Arguments: - host (str): name of the host running the PDUDaemon @@ -267,7 +268,8 @@ Used by: YKUSHPowerPort ++++++++++++++ -A :any:`YKUSHPowerPort` describes a YEPKIT YKUSH USB (HID) switchable USB hub. +A :any:`YKUSHPowerPort` describes a *YEPKIT YKUSH* USB (HID) switchable USB +hub. .. code-block:: yaml @@ -276,8 +278,8 @@ A :any:`YKUSHPowerPort` describes a YEPKIT YKUSH USB (HID) switchable USB hub. index: 1 The example describes port 1 on the YKUSH USB hub with the -serial "YK12345". -(use "ykushcmd -l" to get your serial...) +serial ``YK12345``. +Use ``ykushcmd -l`` to get your serial number. Arguments: - serial (str): serial number of the YKUSH hub @@ -304,7 +306,7 @@ A :any:`USBPowerPort` describes a generic switchable USB hub as supported by index: 1 The example describes port 1 on the hub with the ID_PATH -"pci-0000:00:14.0-usb-0:2:1.0". +``pci-0000:00:14.0-usb-0:2:1.0``. (use ``udevadm info /sys/bus/usb/devices/...`` to find the ID_PATH value) Arguments: @@ -323,7 +325,7 @@ Used by: SiSPMPowerPort ++++++++++++++ -A :any:`SiSPMPowerPort` describes a GEMBIRD SiS-PM as supported by +A :any:`SiSPMPowerPort` describes a *GEMBIRD SiS-PM* as supported by `sispmctl `_. .. code-block:: yaml @@ -334,7 +336,7 @@ A :any:`SiSPMPowerPort` describes a GEMBIRD SiS-PM as supported by index: 1 The example describes port 1 on the hub with the ID_PATH -"platform-1c1a400.usb-usb-0:2". +``platform-1c1a400.usb-usb-0:2``. Arguments: - index (int): number of the port to switch @@ -346,7 +348,7 @@ Used by: TasmotaPowerPort ++++++++++++++++ A :any:`TasmotaPowerPort` resource describes a switchable `Tasmota -`_ power outlet accessed over MQTT. +`_ power outlet accessed over *MQTT*. .. code-block:: yaml @@ -356,8 +358,8 @@ A :any:`TasmotaPowerPort` resource describes a switchable `Tasmota power_topic: 'cmnd/tasmota_575A2B/POWER' avail_topic: 'tele/tasmota_575A2B/LWT' -The example uses a mosquitto server at "this.is.an.example.host.com" and has the -topics setup for a tasmota power port that has the ID 575A2B. +The example uses a *Mosquitto* server at ``this.is.an.example.host.com`` and +has the topics setup for a Tasmota power port that has the ID ``575A2B``. Arguments: - host (str): hostname of the MQTT server @@ -375,7 +377,7 @@ Digital Outputs ModbusTCPCoil +++++++++++++ -A :any:`ModbusTCPCoil` describes a coil accessible via ModbusTCP. +A :any:`ModbusTCPCoil` describes a coil accessible via *Modbus TCP*. .. code-block:: yaml @@ -383,12 +385,12 @@ A :any:`ModbusTCPCoil` describes a coil accessible via ModbusTCP. host: '192.168.23.42' coil: 1 -The example describes the coil with the address 1 on the ModbusTCP device -`192.168.23.42`. +The example describes the coil with the address ``1`` on the Modbus TCP device +``192.168.23.42``. Arguments: - - host (str): hostname of the Modbus TCP server e.g. "192.168.23.42:502" - - coil (int): index of the coil e.g. 3 + - host (str): hostname of the Modbus TCP server e.g. 
``192.168.23.42:502`` + - coil (int): index of the coil, e.g. ``3`` - invert (bool, default=False): whether the logic level is inverted (active-low) - write_multiple_coils (bool, default=False): whether to perform write @@ -399,7 +401,7 @@ Used by: DeditecRelais8 ++++++++++++++ -A :any:`DeditecRelais8` describes a Deditec USB GPO module with 8 relays. +A :any:`DeditecRelais8` describes a *Deditec USB GPO module* with 8 relays. .. code-block:: yaml @@ -420,7 +422,7 @@ Used by: OneWirePIO ++++++++++ -A :any:`OneWirePIO` describes a onewire programmable I/O pin. +A :any:`OneWirePIO` describes a *1-Wire* programmable I/O pin. .. code-block:: yaml @@ -429,11 +431,11 @@ A :any:`OneWirePIO` describes a onewire programmable I/O pin. path: '/29.7D6913000000/PIO.0' invert: false -The example describes a `PIO.0` at device address `29.7D6913000000` via the onewire -server on `example.computer`. +The example describes a ``PIO.0`` at device address ``29.7D6913000000`` via the +1-Wire server on ``example.computer``. Arguments: - - host (str): hostname of the remote system running the onewire server + - host (str): hostname of the remote system running the 1-Wire server - path (str): path on the server to the programmable I/O pin - invert (bool, default=False): whether the logic level is inverted (active-low) @@ -453,8 +455,8 @@ An :any:`LXAIOBusPIO` resource describes a single PIO pin on an LXAIOBusNode. pin: 'OUT0' invert: false -The example uses an lxa-iobus-server running on localhost:8080, with node -IOMux-00000003 and pin OUT0. +The example uses an lxa-iobus-server running on ``localhost:8080``, with node +``IOMux-00000003`` and pin ``OUT0``. Arguments: - host (str): hostname with port of the lxa-io-bus server @@ -473,7 +475,7 @@ HIDRelay ++++++++ An :any:`HIDRelay` resource describes a single output of a HID protocol based USB relays. -It currently supports the widely used "dcttech USBRelay". +It currently supports the widely used *dcttech USBRelay*. .. code-block:: yaml @@ -538,8 +540,8 @@ A :any:`NetworkService` describes a remote SSH connection. address: 'example.computer' username: 'root' -The example describes a remote SSH connection to the computer `example.computer` -with the username `root`. +The example describes a remote SSH connection to the computer +``example.computer`` with the username ``root``. Set the optional password password property to make SSH login with a password instead of the key file. @@ -582,12 +584,13 @@ A :any:`NetworkUSBMassStorage` resource describes a USB memory stick or similar device available on a remote computer. The NetworkUSBMassStorage can be used in test cases by calling the -`write_image()`, and `get_size()` functions. +``write_image()``, and ``get_size()`` functions. SigrokDevice ~~~~~~~~~~~~ -A :any:`SigrokDevice` resource describes a sigrok device. To select a specific -device from all connected supported devices use the `SigrokUSBDevice`_. +A :any:`SigrokDevice` resource describes a *Sigrok* device. To select a +specific device from all connected supported devices use the +`SigrokUSBDevice`_. .. code-block:: yaml @@ -623,7 +626,8 @@ Used by: MXSUSBLoader ~~~~~~~~~~~~ -An :any:`MXSUSBLoader` resource describes a USB device in the mxs loader state. +An :any:`MXSUSBLoader` resource describes a USB device in the *MXS loader +state*. .. code-block:: yaml @@ -641,8 +645,8 @@ Used by: RKUSBLoader ~~~~~~~~~~~ -An :any:`RKUSBLoader` resource describes a USB device in the rockchip loader -state. 
+An :any:`RKUSBLoader` resource describes a USB device in the *Rockchip loader +state*. .. code-block:: yaml @@ -673,8 +677,8 @@ computer. AndroidUSBFastboot ~~~~~~~~~~~~~~~~~~ -An :any:`AndroidUSBFastboot` resource describes a USB device in the fastboot -state. +An :any:`AndroidUSBFastboot` resource describes a USB device in the *Fastboot +state*. Previously, this resource was named AndroidFastboot and this name still supported for backwards compatibility. @@ -696,8 +700,8 @@ Used by: AndroidNetFastboot ~~~~~~~~~~~~~~~~~~ -An :any:`AndroidNetFastboot` resource describes a network device in fastboot -state. +An :any:`AndroidNetFastboot` resource describes a network device in *Fastboot +state*. .. code-block:: yaml @@ -791,7 +795,7 @@ Used by: USBDebugger ~~~~~~~~~~~ An :any:`USBDebugger` resource describes a JTAG USB adapter (for example an -FTDI FT2232H). +*FTDI FT2232H*). .. code-block:: yaml @@ -825,7 +829,7 @@ Used by: SigrokUSBDevice ~~~~~~~~~~~~~~~ -A :any:`SigrokUSBDevice` resource describes a sigrok USB device. +A :any:`SigrokUSBDevice` resource describes a *Sigrok* USB device. .. code-block:: yaml @@ -846,13 +850,13 @@ Used by: NetworkSigrokUSBDevice ~~~~~~~~~~~~~~~~~~~~~~ -A :any:`NetworkSigrokUSBDevice` resource describes a sigrok USB device +A :any:`NetworkSigrokUSBDevice` resource describes a *Sigrok* USB device connected to a host which is exported over the network. The `SigrokDriver`_ will access it via SSH. SigrokUSBSerialDevice ~~~~~~~~~~~~~~~~~~~~~ -A :any:`SigrokUSBSerialDevice` resource describes a sigrok device which +A :any:`SigrokUSBSerialDevice` resource describes a *Sigrok* device which communicates over a USB serial port instead of being a USB device itself (see `SigrokUSBDevice`_ for that case). @@ -865,8 +869,8 @@ communicates over a USB serial port instead of being a USB device itself (see Arguments: - driver (str): name of the sigrok driver to use - - channels (str): optional, channel mapping as described in the sigrok-cli - man page + - channels (str): optional, channel mapping as described in the + ``sigrok-cli`` man page - match (dict): key and value pairs for a udev match, see `udev Matching`_ Used by: @@ -899,7 +903,7 @@ on a remote computer. LXAUSBMux ~~~~~~~~~ -A :any:`LXAUSBMux` resource describes a Linux Automation GmbH USB-Mux device. +A :any:`LXAUSBMux` resource describes a *Linux Automation GmbH USB-Mux* device. .. code-block:: yaml @@ -992,7 +996,7 @@ by an ALSA kernel driver. Arguments: - index (int, default=0): ALSA PCM device number (as in - `hw:CARD=,DEV=`) + ``hw:CARD=,DEV=``) - match (dict): key and value pairs for a udev match, see `udev Matching`_ Used by: @@ -1005,9 +1009,9 @@ available on a remote computer. USBTMC ~~~~~~ -A :any:`USBTMC` resource describes an oscilloscope connected via the USB TMC -protocol. -The low-level communication is handled by the ``usbtmc`` kernel driver. +A :any:`USBTMC` resource describes an oscilloscope connected via the *USB TMC +protocol*. +The low-level communication is handled by the "usbtmc" kernel driver. .. code-block:: yaml @@ -1046,12 +1050,13 @@ configured in: flashrom: '/usr/sbin/flashrom' Arguments: - - programmer (str): programmer device as described in `-p, --programmer` in - `man 8 flashrom` + - programmer (str): programmer device as described in ``-p, --programmer`` in + ``man 8 flashrom`` -The resource must configure which programmer to use and the parameters to the programmer. -The programmer parameter is passed directly to the flashrom bin hence man(8) flashrom -can be used for reference. 
+The resource must configure which programmer to use and the parameters to the +programmer. +The programmer parameter is passed directly to the flashrom bin hence +``man 8 flashrom`` can be used for reference. Below an example where the local spidev is used. .. code-block:: yaml @@ -1109,9 +1114,9 @@ configured via: dpcmd: '/usr/sbin/dpcmd' Arguments: - - vcc (str): '3.5V', '2.5V' or '1.8V'. + - vcc (str): ``3.5V``, ``2.5V`` or ``1.8V``. -For instance, to flash using 3.5V vcc: +For instance, to flash using 3.5 V VCC: .. code-block:: yaml @@ -1128,8 +1133,9 @@ remote computer. XenaManager ~~~~~~~~~~~ -A :any:`XenaManager` resource describes a Xena Manager instance which is the -instance the `XenaDriver`_ must connect to in order to configure a Xena chassis. +A :any:`XenaManager` resource describes a *Xena Manager* instance which is the +instance the `XenaDriver`_ must connect to in order to configure a Xena +chassis. .. code-block:: yaml @@ -1294,7 +1300,7 @@ place. RemotePlace: name: 'example-place' -The example describes the remote place `example-place`. It will connect to the +The example describes the remote place ``example-place``. It will connect to the labgrid remote coordinator, wait until the resources become available and expose them to the internal environment. @@ -1306,7 +1312,7 @@ Used by: DockerDaemon ~~~~~~~~~~~~ -A :any:`DockerDaemon` describes where to contact a docker daemon process. +A :any:`DockerDaemon` describes where to contact a *docker daemon* process. DockerDaemon also participates in managing `NetworkService`_ instances created through interaction with that daemon. @@ -1397,7 +1403,7 @@ Matching a USB Serial Converter on a Hub Port +++++++++++++++++++++++++++++++++++++++++++++ This will match any USB serial converter connected below the hub port 1.2.5.5 on bus 1. -The `ID_PATH` value corresponds to the hierarchy of buses and ports as shown +The ``ID_PATH`` value corresponds to the hierarchy of buses and ports as shown with ``udevadm info /dev/ttyUSB0``. .. code-block:: yaml @@ -1420,13 +1426,13 @@ don't use a parent match. AndroidUSBFastboot: match: - ID_PATH: pci-0000:05:00.0-usb-0:1.2.3 + ID_PATH: 'pci-0000:05:00.0-usb-0:1.2.3' Matching a Specific UART in a Dual-Port Adapter +++++++++++++++++++++++++++++++++++++++++++++++ On this board, the serial console is connected to the second port of an on-board dual-port USB-UART. -The board itself is connected to the bus 3 and port path 10.2.2.2. +The board itself is connected to the bus 3 and port path ``10.2.2.2``. The correct value can be shown by running ``udevadm info /dev/ttyUSB9`` in our case: @@ -1620,11 +1626,11 @@ SSHDriver ~~~~~~~~~ A :any:`SSHDriver` requires a `NetworkService`_ resource and allows the execution of commands and file upload via network. -It uses SSH's `ServerAliveInterval` option to detect failed connections. +It uses SSH's ``ServerAliveInterval`` option to detect failed connections. If a shared SSH connection to the target is already open, it will reuse it when running commands. -In that case, `ServerAliveInterval` should be set outside of labgrid, as it +In that case, ``ServerAliveInterval`` should be set outside of labgrid, as it cannot be enabled for an existing connection. 
Binds to: networkservice: @@ -1643,14 +1649,14 @@ Implements: Arguments: - keyfile (str): optional, filename of private key to login into the remote system (has precedence over `NetworkService`_'s password) - - stderr_merge (bool, default=False): set to True to make `run()` return stderr merged with + - stderr_merge (bool, default=False): set to True to make ``run()`` return stderr merged with stdout, and an empty list as second element. - connection_timeout (float, default=30.0): timeout when trying to establish connection to target. - - explicit_sftp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will - explicitly use the SFTP protocol for file transfers instead of scp's default protocol - - explicit_scp_mode (bool, default=False): if set to True, `put()`, `get()`, and `scp()` will - explicitly use the SCP protocol for file transfers instead of scp's default protocol + - explicit_sftp_mode (bool, default=False): if set to True, ``put()``, ``get()``, and ``scp()`` + will explicitly use the SFTP protocol for file transfers instead of scp's default protocol + - explicit_scp_mode (bool, default=False): if set to True, ``put()``, ``get()``, and ``scp()`` + will explicitly use the SCP protocol for file transfers instead of scp's default protocol - username (str, default=username from `NetworkService`_): username used by SSH - password (str, default=password from `NetworkService`_): password used by SSH @@ -1679,7 +1685,7 @@ Arguments: - prompt (regex, default=""): U-Boot prompt to match - autoboot (regex, default="stop autoboot"): autoboot message to match - password (str): optional, U-Boot unlock password - - interrupt (str, default="\\n"): string to interrupt autoboot (use "\\x03" for CTRL-C) + - interrupt (str, default="\\n"): string to interrupt autoboot (use ``\\x03`` for CTRL-C) - init_commands (tuple): optional, tuple of commands to execute after matching the prompt - password_prompt (str, default="enter Password: "): regex to match the U-Boot password prompt @@ -1691,7 +1697,7 @@ Arguments: SmallUBootDriver ~~~~~~~~~~~~~~~~ -A :any:`SmallUBootDriver` interfaces with stripped-down U-Boot variants that +A :any:`SmallUBootDriver` interfaces with stripped-down *U-Boot* variants that are sometimes used in cheap consumer electronics. SmallUBootDriver is meant as a driver for U-Boot with only limited functionality compared to a standard U-Boot. In particular, it copes with the following limitations: - The U-Boot does not have a real password-prompt but can be activated by entering a "secret" after a message was displayed. - The command line does not have a built-in echo command. Thus this driver uses "Unknown Command" messages as markers before and after the output of a command. - Since there is no echo we cannot return the exit code of the command. Commands will always return 0 unless the command was not found. @@ -1793,7 +1799,7 @@ Arguments: AndroidFastbootDriver ~~~~~~~~~~~~~~~~~~~~~ An :any:`AndroidFastbootDriver` allows the upload of images to a device in the USB or network *Fastboot state*. Binds to: fastboot: @@ -1842,7 +1848,7 @@ Arguments: OpenOCDDriver ~~~~~~~~~~~~~ An :any:`OpenOCDDriver` controls *OpenOCD* to bootstrap a target with a bootloader.
Note that OpenOCD supports specifying USB paths since @@ -1899,7 +1905,7 @@ Implements: Arguments: - image (str): optional, filename of image to write into QSPI flash -The driver can be used in test cases by calling the `flash` function. An +The driver can be used in test cases by calling its ``flash()`` method. An example strategy is included in labgrid. ManualPowerDriver @@ -2269,7 +2275,7 @@ Arguments: DeditecRelaisDriver ~~~~~~~~~~~~~~~~~~~ -A :any:`DeditecRelaisDriver` controls a Deditec relay resource. +A :any:`DeditecRelaisDriver` controls a *Deditec* relay resource. It can set and get the current state of the resource. Binds to: @@ -2289,8 +2295,8 @@ Arguments: MXSUSBDriver ~~~~~~~~~~~~ -An :any:`MXSUSBDriver` is used to upload an image into a device in the mxs USB -loader state. +An :any:`MXSUSBDriver` is used to upload an image into a device in the *MXS USB +loader state*. This is useful to bootstrap a bootloader onto a device. Binds to: @@ -2318,10 +2324,10 @@ Arguments: IMXUSBDriver ~~~~~~~~~~~~ -A :any:`IMXUSBDriver` is used to upload an image into a device in the imx USB -loader state. +An :any:`IMXUSBDriver` is used to upload an image into a device in the *i.MX +USB loader state*. This is useful to bootstrap a bootloader onto a device. -This driver uses the imx-usb-loader tool from barebox. +This driver uses the ``imx-usb-loader`` tool from barebox. Binds to: loader: @@ -2350,10 +2356,11 @@ Arguments: BDIMXUSBDriver ~~~~~~~~~~~~~~ -The :any:`BDIMXUSBDriver` is used to upload bootloader images into an i.MX -device in the USB SDP mode. -This driver uses the imx_usb tool by Boundary Devices. -Compared to the IMXUSBLoader, it supports two-stage upload of U-Boot images. +The :any:`BDIMXUSBDriver` is used to upload bootloader images into an *i.MX +device* in the *USB SDP mode*. +This driver uses the ``imx_usb`` tool by Boundary Devices. +Compared to the ``imx-usb-loader``, it supports two-stage upload of U-Boot +images. The images paths need to be specified from code instead of in the YAML environment, as the correct image depends on the system state. @@ -2377,9 +2384,8 @@ Arguments: RKUSBDriver ~~~~~~~~~~~ -A :any:`RKUSBDriver` is used to upload an image into a device in the rockchip -USB loader state. -This is useful to bootstrap a bootloader onto a device. +A :any:`RKUSBDriver` is used to upload an image into a device in the *Rockchip +USB loader state*. This is useful to bootstrap a bootloader onto a device. Binds to: loader: @@ -2410,8 +2416,8 @@ Arguments: UUUDriver ~~~~~~~~~ -A :any:`UUUDriver` is used to upload an image into a device in the NXP USB -loader state. +A :any:`UUUDriver` is used to upload an image into a device in the *NXP USB +loader state*. This is useful to bootstrap a bootloader onto a device. Binds to: @@ -2439,7 +2445,7 @@ Implements: Arguments: - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target - - script (str): run built-in script with "uuu -b", called with image as arg0 + - script (str): run built-in script with ``uuu -b``, called with image as arg0 USBStorageDriver ~~~~~~~~~~~~~~~~ @@ -2518,7 +2524,7 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `stage()` function, which +The driver can be used in test cases by calling its ``stage()`` method, which returns the path to be used by the target. 
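For illustration, a minimal sketch of using such a provider from a test
(assuming an environment that binds a ``TFTPProviderDriver`` and a
pytest-style ``target`` fixture; the file name is an example):

.. code-block:: python

    def test_netboot_artifacts(target):
        provider = target.get_driver("TFTPProviderDriver")
        # stage() copies the local file into the configured TFTP root
        # and returns the path the target should request via TFTP
        remote_path = provider.stage("build/kernel.img")
        assert remote_path
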
NFSProviderDriver @@ -2540,7 +2546,7 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `stage()` function, which +The driver can be used in test cases by calling its ``stage()`` method, which returns an NFSFile object with ``host``, ``export`` and ``relative_file_path`` attributes. @@ -2629,8 +2635,8 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `capture`, `stop` and -`analyze` functions. +The driver can be used in test cases by calling its ``capture()``, ``stop()`` +and ``analyze()`` methods. SigrokPowerDriver ~~~~~~~~~~~~~~~~~ @@ -2661,9 +2667,10 @@ Arguments: SigrokDmmDriver ~~~~~~~~~~~~~~~ The :any:`SigrokDmmDriver` uses a `SigrokDevice`_ resource to record samples -from a digital multimeter (DMM) and provides them during test runs. +from a *digital multimeter* (DMM) and provides them during test runs. -It is known to work with Unit-T `UT61B` and `UT61C` devices but should also work with other DMMs supported by *sigrok*. +It is known to work with *Unit-T UT61B* and *UT61C* devices but should also +work with other DMMs supported by *Sigrok*. Binds to: sigrok: @@ -2678,15 +2685,15 @@ Implements: Arguments: - None -Sampling can be started calling `capture(samples, timeout=None)`. +Sampling can be started calling ``capture(samples, timeout=None)``. It sets up sampling and returns immediately. -The default timeout has been chosen to work with Unit-T `UT61B`. +The default timeout has been chosen to work with *Unit-T UT61B*. Other devices may require a different timeout setting. -Samples can be obtained using `stop()`. -`stop()` will block until either *sigrok* terminates or `timeout` is reached. -This method returns a `(unit, samples)` tuple: -`unit` is the physical unit reported by the DMM; +Samples can be obtained using ``stop()``. +``stop()`` will block until either *sigrok* terminates or *timeout* is reached. +This method returns a ``(unit, samples)`` tuple: +``unit`` is the physical unit reported by the DMM; samples is an iterable of samples. This driver relies on buffering of the subprocess call. @@ -2728,7 +2735,7 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `set_links()` function with +The driver can be used in test cases by calling its ``set_links()`` method with a list containing one or more of "dut-device", "host-dut" and "host-device". Not all combinations can be configured at the same time. @@ -2749,8 +2756,8 @@ Implements: Arguments: - None -The driver can be used in test cases by calling the `set_mode()` function with -argument being `dut`, `host`, `off`, or `client`. +The driver can be used in test cases by calling its ``set_mode()`` method with +argument being "dut", "host", "off", or "client". USBVideoDriver ~~~~~~~~~~~~~~ @@ -2770,12 +2777,12 @@ Implements: Arguments: - None -Although the driver can be used from Python code by calling the `stream()` +Although the driver can be used from Python code by calling the ``stream()`` method, it is currently mainly useful for the ``video`` subcommand of ``labgrid-client``. It supports the `Logitech HD Pro Webcam C920` with the USB ID 046d:082d and a few others. -More cameras can be added to `get_qualities()` and `get_pipeline()` in +More cameras can be added to ``get_qualities()`` and ``get_pipeline()`` in ``labgrid/driver/usbvideodriver.py``. Appropriate configuration parameters can be determined by using the GStreamer ``gst-device-monitor-1.0`` command line utility. 
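For illustration, a minimal sketch of the Python usage mentioned above
(assuming an acquired ``target``; ``stream()`` blocks while streaming):

.. code-block:: python

    video = target.get_driver("USBVideoDriver")
    # opens the camera via GStreamer and displays the stream locally
    video.stream()
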
@@ -2804,8 +2811,8 @@ Arguments: USBTMCDriver ~~~~~~~~~~~~ -The :any:`USBTMCDriver` is used to control a oscilloscope via the USB TMC -protocol. +The :any:`USBTMCDriver` is used to control a oscilloscope via the *USB TMC +protocol*. Binds to: tmc: @@ -2822,7 +2829,7 @@ Currently, it can be used by the ``labgrid-client`` ``tmc`` subcommands to show (and save) a screenshot, to show per channel measurements and to execute raw TMC commands. It only supports the `Keysight DSO-X 2000` series (with the USB ID 0957:1798), -but more devices can be added by extending `on_activate()` in +but more devices can be added by extending ``on_activate()`` in ``labgrid/driver/usbtmcdriver.py`` and writing a corresponding backend in ``labgrid/driver/usbtmc/``. @@ -2916,7 +2923,7 @@ Implements: Arguments: - None -Although the driver can be used from Python code by calling the `stream()` +Although the driver can be used from Python code by calling the ``stream()`` method, it is currently mainly useful for the ``video`` subcommand of ``labgrid-client``. @@ -2928,8 +2935,8 @@ Key Description ========== ========================================================= Properties of these keys can be selected using the Python format string syntax, -e.g. ``{device.devnode}`` to select the device node path of -:any:`USBFlashableDevice` +e.g. ``{device.devnode}`` to select the device node path of a +:any:`USBFlashableDevice`. DediprogFlashDriver ~~~~~~~~~~~~~~~~~~~ @@ -2955,8 +2962,9 @@ Arguments: of an image to flash onto the target The DediprogFlashDriver allows using DediprogFlasher dpcmd to flash or erase SPI -devices. It is assumed that the device flashing is an exporter wired, via -DediprogFlasher SF100 for instance, to the device being flashed. +devices. It is assumed that the device flashing is an exporter wired, via a +*Dediprog SF100 SPI NOR Flash Programmer* for instance, to the device being +flashed. XenaDriver ~~~~~~~~~~ @@ -3071,7 +3079,7 @@ Currently basic wired and wireless configuration options have been tested. To use it, `PyGObject `_ must be installed (on the same system as the network interface). -For Debian, the necessary packages are `python3-gi` and `gir1.2-nm-1.0`. +For Debian, the necessary packages are ``python3-gi`` and ``gir1.2-nm-1.0``. It supports: @@ -3173,7 +3181,7 @@ Here is an example environment config: BareboxStrategy: {} In order to use the BareboxStrategy via labgrid as a library and transition to -the ``shell`` state: +the "shell" state: .. testsetup:: barebox-strategy @@ -3220,7 +3228,7 @@ Here is an example environment config: ShellStrategy: {} In order to use the ShellStrategy via labgrid as a library and transition to -the ``shell`` state: +the "shell" state: .. testsetup:: shell-strategy @@ -3268,7 +3276,7 @@ Here is an example environment config: UBootStrategy: {} In order to use the UBootStrategy via labgrid as a library and transition to -the ``shell`` state: +the "shell" state: .. testsetup:: uboot-strategy @@ -3314,7 +3322,7 @@ Here is an example environment config: DockerStrategy: {} In order to use the DockerStrategy via labgrid as a library and transition to -the ``accessible`` state: +the "accessible" state: .. testsetup:: docker-strategy @@ -3489,14 +3497,15 @@ To bind the correct driver to the correct resource, explicit ``name`` and bindings: port: 'bar' -The property name for the binding (e.g. `port` in the example above) is +The property name for the binding (e.g. ``port`` in the example above) is documented for each individual driver in this chapter. 
The YAML configuration file also supports templating for some substitutions, these are: -- LG_* variables, are replaced with their respective LG_* environment variable -- BASE is substituted with the base directory of the YAML file. +- ``LG_*`` variables, are replaced with their respective ``LG_*`` environment + variable +- ``BASE`` is substituted with the base directory of the YAML file. As an example: @@ -3510,9 +3519,9 @@ As an example: tools: qemu_bin: !template '$BASE/bin/qemu-bin' -would resolve the qemu_bin path relative to the BASE dir of the YAML file and -try to use the `RemotePlace`_ with the name set in the LG_PLACE environment -variable. +would resolve the ``qemu_bin`` path relative to the ``BASE`` dir of the YAML +file and try to use the `RemotePlace`_ with the name set in the ``LG_PLACE`` +environment variable. See the :ref:`labgrid-device-config` man page for documentation on the top-level ``options``, ``images``, ``tools``, and ``examples`` keys in the @@ -3551,9 +3560,9 @@ By default, the class name is inferred from the resource name, and `` will be passed to its constructor. For USB resources, you will most likely want to use :ref:`udev-matching` here. -As a simple example, here is one group called *usb-hub-in-rack12* containing +As a simple example, here is one group called ``usb-hub-in-rack12`` containing a single `USBSerialPort`_ resource (using udev matching), which will be -exported as `exportername/usb-hub-in-rack12/NetworkSerialPort/USBSerialPort`: +exported as ``exportername/usb-hub-in-rack12/NetworkSerialPort/USBSerialPort``: .. code-block:: yaml @@ -3567,9 +3576,9 @@ you can choose a unique resource name, and then use the ``cls`` parameter to specify the class name instead (which will not be passed as a parameter to the class constructor). In this next example we will export one `USBSerialPort`_ as -`exportername/usb-hub-in-rack12/NetworkSerialPort/console-main`, +``exportername/usb-hub-in-rack12/NetworkSerialPort/console-main``, and another `USBSerialPort`_ as -`exportername/usb-hub-in-rack12/NetworkSerialPort/console-secondary`: +``exportername/usb-hub-in-rack12/NetworkSerialPort/console-secondary``: .. code-block:: yaml From 2f88a5af093b810d6a20d3ca0843be37a85db78d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 12 Feb 2024 18:13:40 +0100 Subject: [PATCH 159/384] doc/configuration: fix spelling inconsistencies, improve wording Signed-off-by: Bastian Krause --- doc/configuration.rst | 75 ++++++++++++++++++++++--------------------- 1 file changed, 38 insertions(+), 37 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index bd50e4fb8..850b7ca92 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -46,7 +46,7 @@ NetworkSerialPort +++++++++++++++++ A :any:`NetworkSerialPort` describes a serial port which is exported over the network, usually using `RFC2217 `_ -or raw tcp. +or raw TCP. .. code-block:: yaml @@ -157,13 +157,13 @@ The ``model`` property selects one of several `backend implementations Currently available are: ``apc`` - Controls an *APU PDU* via SNMP. + Controls *APU PDUs* via SNMP. ``digipower`` - Controls a *DigiPower PDU* via a simple HTTP API. + Controls *DigiPower PDUs* via a simple HTTP API. ``digitalloggers_http`` - Control a *Digital Loggers PDUs* that use the legacy HTTP API. Note that + Controls *Digital Loggers PDUs* that use the legacy HTTP API. Note that host argument must include the protocol, such as ``http://192.168.0.3`` or ``http://admin:pass@192.168.0.4``. 
@@ -171,34 +171,34 @@ Currently available are: Controls *Eaton ePDUs* via SNMP. ``eg_pms2_network`` - Controls the *EG_PMS2_LAN* & *EG_PMS2_WLAN* devices, through simple HTTP POST + Controls *EG_PMS2_LAN* and *EG_PMS2_WLAN* devices, through simple HTTP POST and GET requests. The device requires a password for logging into the control interface, this module deliberately uses the standard password ``1`` and is not compatible with a different password. ``eth008`` - Controls a *Robot-Electronics eth008* via a simple HTTP API. + Controls *Robot-Electronics eth008* via a simple HTTP API. ``gude`` - Controls a *Gude PDU* via a simple HTTP API. + Controls *Gude PDUs* via a simple HTTP API. ``gude24`` - Controls a *Gude Expert Power Control 8008 PDU* via a simple HTTP API. + Controls *Gude Expert Power Control 8008 PDUs* via a simple HTTP API. ``gude8031`` - Controls a *Gude Expert Power Control 8031 PDU* via a simple HTTP API. + Controls *Gude Expert Power Control 8031 PDUs* via a simple HTTP API. ``gude8225`` - Controls a *Gude Expert Power Control 8225 PDUs via a simple HTTP API. + Controls *Gude Expert Power Control 8225 PDUs* via a simple HTTP API. ``gude8316`` - Controls a *Gude Expert Power Control 8316 PDU* via a simple HTTP API. + Controls *Gude Expert Power Control 8316 PDUs* via a simple HTTP API. ``netio`` - Controls a *NETIO 4-Port PDU* via a simple HTTP API. + Controls *NETIO 4-Port PDUs* via a simple HTTP API. ``netio_kshell`` - Controls a *NETIO 4C PDU* via a Telnet interface. + Controls *NETIO 4C PDUs* via a Telnet interface. ``raritan`` Controls *Raritan PDUs* via SNMP. @@ -211,7 +211,7 @@ Currently available are: for details. ``sentry`` - Controls a *Sentry PDU* via SNMP using Sentry3-MIB. + Controls *Sentry PDUs* via SNMP using Sentry3-MIB. It was tested on *CW-24VDD* and *4805-XLS-16*. ``shelly_gen1`` @@ -317,7 +317,7 @@ Used by: - `USBPowerDriver`_ .. note:: - Labgrid requires that the interface is contained in the ID_PATH. + labgrid requires that the interface is contained in the ID_PATH. This usually means that the ID_PATH should end with ``:1.0``. Only this first interface is registered with the ``hub`` driver labgrid is looking for, paths without the interface will fail to match since they use @@ -473,7 +473,7 @@ A :any:`NetworkLXAIOBusPIO` describes an `LXAIOBusPIO`_ exported over the networ HIDRelay ++++++++ -An :any:`HIDRelay` resource describes a single output of a HID protocol based +An :any:`HIDRelay` resource describes a single output of an HID protocol based USB relays. It currently supports the widely used *dcttech USBRelay*. @@ -495,8 +495,8 @@ Used by: HttpDigitalOutput +++++++++++++++++ -A :any:`HttpDigitalOutput` resource describes a generic digital output that can be -controlled via HTTP. +An :any:`HttpDigitalOutput` resource describes a generic digital output that +can be controlled via HTTP. .. code-block:: yaml @@ -794,7 +794,7 @@ Used by: USBDebugger ~~~~~~~~~~~ -An :any:`USBDebugger` resource describes a JTAG USB adapter (for example an +A :any:`USBDebugger` resource describes a JTAG USB adapter (for example an *FTDI FT2232H*). .. code-block:: yaml @@ -903,7 +903,7 @@ on a remote computer. LXAUSBMux ~~~~~~~~~ -A :any:`LXAUSBMux` resource describes a *Linux Automation GmbH USB-Mux* device. +An :any:`LXAUSBMux` resource describes a *Linux Automation GmbH USB-Mux* device. .. 
code-block:: yaml @@ -919,14 +919,13 @@ Used by: NetworkLXAUSBMux ~~~~~~~~~~~~~~~~ -A :any:`NetworkLXAUSBMux` resource describes a `LXAUSBMux`_ available on a +A :any:`NetworkLXAUSBMux` resource describes an `LXAUSBMux`_ available on a remote computer. USBSDWireDevice ~~~~~~~~~~~~~~~ A :any:`USBSDWireDevice` resource describes a Tizen -`SD Wire device `_ -device. +`SD Wire device `_. .. code-block:: yaml @@ -949,7 +948,7 @@ on a remote computer. USBVideo ~~~~~~~~ A :any:`USBVideo` resource describes a USB video camera which is supported by a -Video4Linux2 kernel driver. +Video4Linux2 (v4l2) kernel driver. .. code-block:: yaml @@ -1161,7 +1160,7 @@ Such device could be a signal generator. url: '192.168.110.11' Arguments: - - type (str): device resource type following the pyVISA resource syntax, e.g. + - type (str): device resource type following the PyVISA resource syntax, e.g. ASRL, TCPIP... - url (str): device identifier on selected resource, e.g. for TCPIP resource @@ -1171,7 +1170,7 @@ Used by: HTTPVideoStream ~~~~~~~~~~~~~~~ -A :any:`HTTPVideoStream` resource describes a IP video stream over HTTP or HTTPS. +An :any:`HTTPVideoStream` resource describes an IP video stream over HTTP or HTTPS. .. code-block:: yaml @@ -1624,7 +1623,7 @@ Arguments: SSHDriver ~~~~~~~~~ -A :any:`SSHDriver` requires a `NetworkService`_ resource and allows the +An :any:`SSHDriver` requires a `NetworkService`_ resource and allows the execution of commands and file upload via network. It uses SSH's ``ServerAliveInterval`` option to detect failed connections. @@ -2234,7 +2233,7 @@ Arguments: HIDRelayDriver ~~~~~~~~~~~~~~ -A :any:`HIDRelayDriver` controls a `HIDRelay`_ or `NetworkHIDRelay`_ resource. +An :any:`HIDRelayDriver` controls an `HIDRelay`_ or `NetworkHIDRelay`_ resource. It can set and get the current state of the resource. Binds to: @@ -2384,8 +2383,9 @@ Arguments: RKUSBDriver ~~~~~~~~~~~ -A :any:`RKUSBDriver` is used to upload an image into a device in the *Rockchip -USB loader state*. This is useful to bootstrap a bootloader onto a device. +An :any:`RKUSBDriver` is used to upload an image into a device in the *Rockchip +USB loader state*. +This is useful to bootstrap a bootloader onto a device. Binds to: loader: @@ -2721,8 +2721,9 @@ argument being "dut", "host", "off", or "client". LXAUSBMuxDriver ~~~~~~~~~~~~~~~ -The :any:`LXAUSBMuxDriver` uses a `LXAUSBMux`_ resource to control a USB-Mux -device via the `usbmuxctl `_ tool. +The :any:`LXAUSBMuxDriver` uses an `LXAUSBMux`_ resource to control a USB-Mux +device via the `usbmuxctl `_ +tool. Binds to: mux: @@ -2789,7 +2790,7 @@ Appropriate configuration parameters can be determined by using the GStreamer USBAudioInputDriver ~~~~~~~~~~~~~~~~~~~ -The :any:`USBAudioInputDriver` is used to receive a audio stream from a local +The :any:`USBAudioInputDriver` is used to receive an audio stream from a local or remote USB audio input. It uses the GStreamer command line utility ``gst-launch`` on the sender side to stream the audio to the client. @@ -2811,7 +2812,7 @@ Arguments: USBTMCDriver ~~~~~~~~~~~~ -The :any:`USBTMCDriver` is used to control a oscilloscope via the *USB TMC +The :any:`USBTMCDriver` is used to control an oscilloscope via the *USB TMC protocol*. Binds to: @@ -2835,7 +2836,7 @@ but more devices can be added by extending ``on_activate()`` in FlashromDriver ~~~~~~~~~~~~~~ -The :any:`FlashromDriver` is used to flash a rom, using the flashrom utility. +The :any:`FlashromDriver` is used to flash a ROM, using the flashrom utility. .. 
code-block:: yaml
@@ -3036,8 +3037,8 @@ Arguments:

 HttpDigitalOutputDriver
 ~~~~~~~~~~~~~~~~~~~~~~~
-A :any:`HttpDigitalOutputDriver` binds to a `HttpDigitalOutput`_ to set and get
-a digital output state via HTTP.
+A :any:`HttpDigitalOutputDriver` binds to an `HttpDigitalOutput`_ to set and
+get a digital output state via HTTP.

 Binds to:
   http:

From 74eff4bacc9022472a6af7f754fc404469be89d7 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Mon, 12 Feb 2024 18:14:24 +0100
Subject: [PATCH 160/384] doc/configuration: split driver and images section in FlashromDriver yaml snippet

In the previous form, it looks like the environment's "drivers" section
also contains the "images" section, which is not the case. Make that
clear by splitting both sections into their own snippets.

Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 850b7ca92..9ef9317d9 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -2842,6 +2842,9 @@ The :any:`FlashromDriver` is used to flash a ROM, using the flashrom utility.

     FlashromDriver:
       image: 'foo'
+
+.. code-block:: yaml
+
   images:
     foo: '../images/image_to_load.raw'

From 43633ea279d6171d9e202f72e5b931d104b75cd9 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Mon, 12 Feb 2024 15:41:38 +0100
Subject: [PATCH 161/384] doc/configuration: move driver details from ModbusRTU resource to ModbusRTUDriver

Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 9ef9317d9..373b41e98 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -79,14 +79,7 @@ Modbus is normally implemented on top of RS-485, though this is not strictly
 necessary, as long as the Modbus network only has one master (and up to 256
 slaves).

-The labgrid driver is implemented using the
-`minimalmodbus `_ Python
-library.
-The implementation only supports that labgrid will be the master on the Modbus
-network.
-
-This resource and driver only supports local usage and will not work with an
-exporter.
+This resource only supports local usage and will not work with an exporter.

 .. code-block:: yaml

@@ -1556,6 +1549,12 @@ ModbusRTUDriver
 A :any:`ModbusRTUDriver` connects to a ModbusRTU resource.
 This driver only supports local usage and will not work with an exporter.

+The driver is implemented using the
+`minimalmodbus `_ Python
+library.
+The implementation only supports that labgrid will be the master on the Modbus
+network.
+
 .. code-block:: yaml

     ModbusRTUDriver: {}

From 9e43814dda20f9a6f1993db68d466347326fb1c7 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Mon, 12 Feb 2024 17:10:41 +0100
Subject: [PATCH 162/384] doc/configuration: split TFTP/HTTP resources and drivers

Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 85 ++++++++++++++++++++++++++++--------------
 1 file changed, 59 insertions(+), 26 deletions(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 373b41e98..4a81d70cf 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -1195,13 +1195,9 @@ see below.
 For now, the TFTP/NFS/HTTP server needs to be configured before using it from
 labgrid.

-.. _TFTPProvider:
-.. _HTTPProvider:
-
-TFTPProvider / HTTPProvider
-+++++++++++++++++++++++++++
-A :any:`TFTPProvider` resource describes TFTP server.
-A :any:`HTTPProvider` resource describes an HTTP server.
+TFTPProvider +++++++++++++ +A :any:`TFTPProvider` resource describes a TFTP server. .. code-block:: yaml @@ -1209,6 +1205,19 @@ A :any:`HTTPProvider` resource describes an HTTP server. internal: '/srv/tftp/board-23/' external: 'board-23/' +Arguments: + - internal (str): path prefix to the local directory accessible by the target + - external (str): corresponding path prefix for use by the target + +Used by: + - `TFTPProviderDriver`_ + +HTTPProvider +++++++++++++ +An :any:`HTTPProvider` resource describes an HTTP server. + +.. code-block:: yaml + HTTPProvider: internal: '/srv/www/board-23/' external: 'http://192.168.1.1/board-23/' @@ -1218,7 +1227,6 @@ Arguments: - external (str): corresponding path prefix for use by the target Used by: - - `TFTPProviderDriver`_ - `HTTPProviderDriver`_ NFSProvider @@ -1235,15 +1243,10 @@ Arguments: Used by: - `NFSProviderDriver`_ -.. _RemoteTFTPProvider: -.. _RemoteHTTPProvider: - -RemoteTFTPProvider / RemoteHTTPProvider -+++++++++++++++++++++++++++++++++++++++ +RemoteTFTPProvider +++++++++++++++++++ A :any:`RemoteTFTPProvider` describes a `TFTPProvider`_ resource available on a remote computer. -A :any:`RemoteHTTPProvider` describes a `HTTPProvider`_ resource available on -a remote computer. .. code-block:: yaml @@ -1252,6 +1255,21 @@ a remote computer. internal: '/srv/tftp/board-23/' external: 'board-23/' +Arguments: + - host (str): hostname of the remote host + - internal (str): path prefix to the TFTP root directory on ``host`` + - external (str): corresponding path prefix for use by the target + +Used by: + - `TFTPProviderDriver`_ + +RemoteHTTPProvider +++++++++++++++++++ +A :any:`RemoteHTTPProvider` describes an `HTTPProvider`_ resource available on +a remote computer. + +.. code-block:: yaml + RemoteHTTPProvider: host: 'httphost' internal: '/srv/www/board-23/' @@ -1259,11 +1277,10 @@ a remote computer. Arguments: - host (str): hostname of the remote host - - internal (str): path prefix to the HTTP/TFTP root directory on ``host`` + - internal (str): path prefix to the HTTP root directory on ``host`` - external (str): corresponding path prefix for use by the target Used by: - - `TFTPProviderDriver`_ - `HTTPProviderDriver`_ RemoteNFSProvider @@ -2496,20 +2513,15 @@ Implements: Arguments: - None -.. _TFTPProviderDriver: -.. _HTTPProviderDriver: - -TFTPProviderDriver / HTTPProviderDriver -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -The :any:`TFTPProviderDriver` and :any:`HTTPProviderDriver` control their -corresponding Provider resources, either locally or remotely. +TFTPProviderDriver +~~~~~~~~~~~~~~~~~~ +The :any:`TFTPProviderDriver` controls its corresponding TFTP resource, either +locally or remotely. Binds to: provider: - `TFTPProvider`_ - `RemoteTFTPProvider`_ - - `HTTPProvider`_ - - `RemoteHTTPProvider`_ Implements: - None (yet) @@ -2518,6 +2530,27 @@ Implements: TFTPProviderDriver: {} +Arguments: + - None + +The driver can be used in test cases by calling its ``stage()`` method, which +returns the path to be used by the target. + +HTTPProviderDriver +~~~~~~~~~~~~~~~~~~ +The :any:`HTTPProviderDriver` controls its corresponding HTTP resource, either +locally or remotely. + +Binds to: + provider: + - `HTTPProvider`_ + - `RemoteHTTPProvider`_ + +Implements: + - None (yet) + +.. 
code-block:: yaml
+
    HTTPProviderDriver: {}

Arguments:
  - None

From efcc8a22ca41f17d7ae215e49d4911a1c71199a2 Mon Sep 17 00:00:00 2001
From: Christian Hemp
Date: Wed, 21 Feb 2024 12:05:33 +0100
Subject: [PATCH 163/384] resource: udev: add new USB ID for IMXUSBLoader

Add USB ID for i.MX93 SoC to IMXUSBLoader.

Signed-off-by: Christian Hemp
---
 labgrid/resource/udev.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py
index 600bb1a62..f66a0ed21 100644
--- a/labgrid/resource/udev.py
+++ b/labgrid/resource/udev.py
@@ -277,6 +277,7 @@ def filter_match(self, device):
                 ("1fc9", "0128"), ("1fc9", "0126"),
                 ("1fc9", "012b"), ("1fc9", "0134"),
                 ("1fc9", "013e"), ("1fc9", "0146"),
+                ("1fc9", "014e"),
                 ("1b67", "4fff"), ("0525", "b4a4"),  # SPL
                 ("3016", "1001"),
             ]:

From b9e1c927324290b9f415d8417ceb7159e80d3637 Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Fri, 23 Feb 2024 15:24:34 +0100
Subject: [PATCH 164/384] pyproject.toml: limit pysnmp to versions < 6

The new major release 6.0 introduces breaking changes (functions are
now async); limit the dependency until we fix this properly.

Signed-off-by: Rouven Czerwinski
---
 pyproject.toml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pyproject.toml b/pyproject.toml
index e351279fe..0c1c590ee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -66,7 +66,7 @@ pyvisa = [
   "pyvisa>=1.11.3",
   "PyVISA-py>=0.5.2",
 ]
-snmp = ["pysnmp-lextudio>=4.4.12"]
+snmp = ["pysnmp-lextudio>=4.4.12, <6"]
 vxi11 = ["python-vxi11>=0.9"]
 xena = ["xenavalkyrie>=3.0.1"]
 deb = [
@@ -77,7 +77,7 @@ deb = [
   "onewire>=0.2",

   # labgrid[snmp]
-  "pysnmp-lextudio>=4.4.12",
+  "pysnmp-lextudio>=4.4.12, <6",
 ]
 dev = [
   # references to other optional dependency groups
@@ -111,7 +111,7 @@ dev = [
   "PyVISA-py>=0.5.2",

   # labgrid[snmp]
-  "pysnmp-lextudio>=4.4.12",
+  "pysnmp-lextudio>=4.4.12, <6",

   # labgrid[vxi11]
   "python-vxi11>=0.9",

From 65998f685cbdadac10339a7c49034c548a7a6589 Mon Sep 17 00:00:00 2001
From: Stefan Kerkmann
Date: Mon, 4 Mar 2024 17:04:10 +0100
Subject: [PATCH 165/384] driver/usbvideodriver: use playbin3 element

Decoding via VA-API fails for the C920 model and the playbin element on
AMD graphics cards; playbin3 has no problems though, so let's switch to
that.
Signed-off-by: Stefan Kerkmann --- labgrid/driver/usbvideodriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/usbvideodriver.py b/labgrid/driver/usbvideodriver.py index 67cf6f87f..15fad97ed 100644 --- a/labgrid/driver/usbvideodriver.py +++ b/labgrid/driver/usbvideodriver.py @@ -131,7 +131,7 @@ def stream(self, caps_hint=None, controls=None): tx_cmd = self.video.command_prefix + ["gst-launch-1.0", "-q"] tx_cmd += pipeline.split() rx_cmd = ["gst-launch-1.0"] - rx_cmd += "playbin uri=fd://0".split() + rx_cmd += "playbin3 uri=fd://0".split() tx = subprocess.Popen( tx_cmd, From f9f6ba359811ad8802e24ee24c2d4755c8e05fe8 Mon Sep 17 00:00:00 2001 From: Jerome Brunet Date: Mon, 11 Mar 2024 17:01:32 +0100 Subject: [PATCH 166/384] doc/configuration: move sysfsgpio documentation Move the SysfsGPIO documentation under the digital output section Signed-off-by: Jerome Brunet --- doc/configuration.rst | 31 ++++++++++++++++--------------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 4a81d70cf..872f921d1 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -523,6 +523,22 @@ NetworkHIDRelay +++++++++++++++ A :any:`NetworkHIDRelay` describes an `HIDRelay`_ exported over the network. +SysfsGPIO ++++++++++ + +A :any:`SysfsGPIO` resource describes a GPIO line. + +.. code-block:: yaml + + SysfsGPIO: + index: 12 + +Arguments: + - index (int): index of the GPIO line + +Used by: + - `GpioDigitalOutputDriver`_ + NetworkService ~~~~~~~~~~~~~~ A :any:`NetworkService` describes a remote SSH connection. @@ -955,21 +971,6 @@ Arguments: Used by: - `USBVideoDriver`_ -SysfsGPIO -~~~~~~~~~ -A :any:`SysfsGPIO` resource describes a GPIO line. - -.. code-block:: yaml - - SysfsGPIO: - index: 12 - -Arguments: - - index (int): index of the GPIO line - -Used by: - - `GpioDigitalOutputDriver`_ - NetworkUSBVideo ~~~~~~~~~~~~~~~ A :any:`NetworkUSBVideo` resource describes a `USBVideo`_ resource available From ea1812b44c415c113e9d68638d200e2bedf39095 Mon Sep 17 00:00:00 2001 From: Joerg Hofrichter Date: Fri, 1 Mar 2024 08:35:09 +0100 Subject: [PATCH 167/384] resource: udev: also suggest ID_USB_INTERFACE_NUM for USBResource A USB serial device often provides multiple interfaces for the same serial number. In these cases it is not possible to distinguish the interfaces solely based on the serial number (ID_SERIAL_SHORT). However, if additionally the interface number (ID_USB_INTERFACE_NUM) is provided, it is possible to distinguish all interfaces without the need of using the full device path (ID_PATH). Example: (...) === suggested matches === (...) USBSerialPort: match: ID_SERIAL_SHORT: ABCDEF00001 ID_USB_INTERFACE_NUM: '00' (...) === suggested matches === (...) 
USBSerialPort:
  match:
    ID_SERIAL_SHORT: ABCDEF00001
    ID_USB_INTERFACE_NUM: '01'

Signed-off-by: Joerg Hofrichter
---
 labgrid/resource/udev.py | 14 ++++++++++++--
 1 file changed, 12 insertions(+), 2 deletions(-)

diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py
index f66a0ed21..0b4354bca 100644
--- a/labgrid/resource/udev.py
+++ b/labgrid/resource/udev.py
@@ -91,12 +91,22 @@ def suggest_match(self, device):
             suggestions.append({'@ID_PATH': path})

         serial = self.device.properties.get('ID_SERIAL_SHORT')
+        interface_num = self.device.properties.get('ID_USB_INTERFACE_NUM')
         if serial:
-            suggestions.append({'ID_SERIAL_SHORT': serial})
+            if interface_num is not None:
+                suggestions.append({'ID_SERIAL_SHORT': serial,
+                                    'ID_USB_INTERFACE_NUM': interface_num})
+            else:
+                suggestions.append({'ID_SERIAL_SHORT': serial})
         elif self.match.get('@SUBSYSTEM', None) == 'usb':
             serial = self._get_usb_device().properties.get('ID_SERIAL_SHORT')
+            interface_num = self._get_usb_device().properties.get('ID_USB_INTERFACE_NUM')
             if serial:
-                suggestions.append({'@ID_SERIAL_SHORT': serial})
+                if interface_num is not None:
+                    suggestions.append({'@ID_SERIAL_SHORT': serial,
+                                        '@ID_USB_INTERFACE_NUM': interface_num})
+                else:
+                    suggestions.append({'@ID_SERIAL_SHORT': serial})

         return meta, suggestions

From c7b634b806b344aca4ff925f6ff621d0b3ecce9c Mon Sep 17 00:00:00 2001
From: Joerg Hofrichter
Date: Fri, 1 Mar 2024 08:53:33 +0100
Subject: [PATCH 168/384] resource: udev: also suggest ID_USB_INTERFACE_NUM - docu update

Signed-off-by: Joerg Hofrichter
---
 doc/configuration.rst | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index 4a81d70cf..d3cb73dc9 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -109,12 +109,15 @@ This allows identification through hot-plugging or rebooting.
     USBSerialPort:
       match:
         ID_SERIAL_SHORT: 'P-00-00682'
+        ID_USB_INTERFACE_NUM: '00'
       speed: 115200

-The example would search for a USB serial converter with the key
-``ID_SERIAL_SHORT`` and the value ``P-00-00682`` and use it with a baud rate
-of ``115200``.
-The ``ID_SERIAL_SHORT`` property is set by the ``usb_id`` builtin helper program.
+The example would search for a USB serial converter with a given serial number
+(``ID_SERIAL_SHORT`` = ``P-00-00682``) and use the first interface
+(``ID_USB_INTERFACE_NUM`` = ``00``) with a baud rate of 115200.
+
+The ``ID_SERIAL_SHORT`` and ``ID_USB_INTERFACE_NUM`` properties are set by the
+``usb_id`` builtin helper program.

 Arguments:
   - match (dict): key and value pairs for a udev match, see `udev Matching`_

From 63873ee5623739d7a151f5d9347b96ff2e5fb3f7 Mon Sep 17 00:00:00 2001
From: Jerome Brunet
Date: Mon, 11 Mar 2024 18:17:09 +0100
Subject: [PATCH 169/384] resource: matchedsysfsgpio: add udev matched gpios

Add support for a GPIO line whose gpiochip is identified by udev and
whose pin number is local to the identified gpiochip. This is useful
when there are several GPIO controllers and the order in which the
controllers are probed is not predictable. Common examples are the
GPIOs provided by some USB FTDI chips.

This resource reuses most of the support already done for SysfsGPIO.
The global GPIO index is computed at runtime, based on the udev
properties of the matched gpiochip.
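To illustrate the computation (example values; the implementation reads
the matched gpiochip's sysfs attributes):

    # matched gpiochip exports e.g. base=480 and ngpio=8
    base = 480
    pin = 2             # local to the matched gpiochip, from the config
    index = base + pin  # global SysfsGPIO index, here 482
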
Signed-off-by: Jerome Brunet --- doc/configuration.rst | 27 +++++++++++++++++++++++++++ labgrid/remote/exporter.py | 13 ++++++++----- labgrid/resource/__init__.py | 1 + labgrid/resource/suggest.py | 4 ++++ labgrid/resource/udev.py | 29 +++++++++++++++++++++++++++++ 5 files changed, 69 insertions(+), 5 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 872f921d1..093cc7837 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -539,6 +539,32 @@ Arguments: Used by: - `GpioDigitalOutputDriver`_ +MatchedSysfsGpio +++++++++++++++++ +A MatchedSysfsGpio resource describes a GPIO line, like a SysfsGPIO. +The gpiochip is identified by matching udev properties. This allows +identification through hot-plugging or rebooting for controllers like +USB based gpiochips. + +.. code-block:: yaml + + MatchedSysfsGpio: + match: + '@SUBSYSTEM': 'usb' + '@ID_SERIAL_SHORT': 'D38EJ8LF' + pin: 0 + +The example would search for a USB gpiochip with the key `ID_SERIAL_SHORT` +and the value `D38EJ8LF` and use the pin 0 of this device. +The `ID_SERIAL_SHORT` property is set by the usb_id builtin helper program. + +Arguments: + - match (dict): key and value pairs for a udev match, see `udev Matching`_ + - pin (int): gpio pin number within the matched gpiochip. + +Used by: + - `GpioDigitalOutputDriver`_ + NetworkService ~~~~~~~~~~~~~~ A :any:`NetworkService` describes a remote SSH connection. @@ -2139,6 +2165,7 @@ While the driver automatically exports the GPIO, it does not configure it in any Binds to: gpio: - `SysfsGPIO`_ + - `MatchedSysfsGPIO`_ - NetworkSysfsGPIO Implements: diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 31a881524..eb7a27ed3 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -579,11 +579,13 @@ class GPIOSysFSExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() - local_cls_name = self.cls - self.data['cls'] = f"Network{self.cls}" - from ..resource import base - local_cls = getattr(base, local_cls_name) - self.local = local_cls(target=None, name=None, **self.local_params) + if self.cls == "SysfsGPIO": + from ..resource.base import SysfsGPIO + self.local = SysfsGPIO(target=None, name=None, **self.local_params) + elif self.cls == "MatchedSysfsGPIO": + from ..resource.udev import MatchedSysfsGPIO + self.local = MatchedSysfsGPIO(target=None, name=None, **self.local_params) + self.data['cls'] = "NetworkSysfsGPIO" self.export_path = Path(GPIOSysFSExport._gpio_sysfs_path_prefix, f'gpio{self.local.index}') self.system_exported = False @@ -624,6 +626,7 @@ def _stop(self, start_params): unexport.write(str(index).encode('utf-8')) exports["SysfsGPIO"] = GPIOSysFSExport +exports["MatchedSysfsGPIO"] = GPIOSysFSExport @attr.s diff --git a/labgrid/resource/__init__.py b/labgrid/resource/__init__.py index fd9b09bfb..dd7554dff 100644 --- a/labgrid/resource/__init__.py +++ b/labgrid/resource/__init__.py @@ -15,6 +15,7 @@ HIDRelay, IMXUSBLoader, LXAUSBMux, + MatchedSysfsGPIO, MXSUSBLoader, RKUSBLoader, SiSPMPowerPort, diff --git a/labgrid/resource/suggest.py b/labgrid/resource/suggest.py index 2b9e862ad..707779bf8 100644 --- a/labgrid/resource/suggest.py +++ b/labgrid/resource/suggest.py @@ -23,6 +23,7 @@ HIDRelay, USBDebugger, USBPowerPort, + MatchedSysfsGPIO ) from ..util import dump @@ -56,6 +57,7 @@ def __init__(self, args): self.resources.append(HIDRelay(**args)) self.resources.append(USBDebugger(**args)) self.resources.append(USBPowerPort(**args, index=0)) + 
self.resources.append(MatchedSysfsGPIO(**args, pin=0)) def suggest_callback(self, resource, meta, suggestions): cls = type(resource).__name__ @@ -84,6 +86,8 @@ def suggest_callback(self, resource, meta, suggestions): )) if cls == 'USBPowerPort': print(' index: ?') + if cls == 'MatchedSysfsGPIO': + print(' pin: ?') print(" ---") print() diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index f66a0ed21..97a80be90 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -708,3 +708,32 @@ def filter_match(self, device): return False return super().filter_match(device) + +@target_factory.reg_resource +@attr.s(eq=False) +class MatchedSysfsGPIO(USBResource): + """The MatchedSysfsGPIO described a SysfsGPIO matched by Udev + + Args: + pin (int): gpio pin number within the matched gpiochip.""" + pin = attr.ib(default=None, validator=attr.validators.instance_of(int)) + index = None + + def __attrs_post_init__(self): + self.match['SUBSYSTEM'] = 'gpio' + super().__attrs_post_init__() + + def filter_match(self, device): + # Filter out the char device + if device.properties.get('DEVNAME') is not None: + return False + return super().filter_match(device) + + def update(self): + super().update() + if self.device is not None: + if self.pin >= int(self.read_attr('ngpio')): + raise ValueError("MatchedSysfsGPIO pin out of bound") + self.index = int(self.read_attr('base')) + self.pin + else: + self.index = None From a89eb64715f3a95cb9139627fed543d6009f069c Mon Sep 17 00:00:00 2001 From: Liam Beguin Date: Wed, 1 Nov 2023 18:50:45 -0400 Subject: [PATCH 170/384] driver/pyvisadriver: add optional backend property Allow users to override PyVISA backed to use something like PyVISA-sim. Signed-off-by: Liam Beguin --- doc/configuration.rst | 1 + labgrid/driver/pyvisadriver.py | 2 +- labgrid/resource/pyvisa.py | 2 ++ 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index d3cb73dc9..2cb90c64f 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1160,6 +1160,7 @@ Arguments: ASRL, TCPIP... - url (str): device identifier on selected resource, e.g. for TCPIP resource + - backend (str): Visa library backend, e.g. '@sim' for pyvisa-sim backend Used by: - `PyVISADriver`_ diff --git a/labgrid/driver/pyvisadriver.py b/labgrid/driver/pyvisadriver.py index 5d3b4b249..044375cc5 100644 --- a/labgrid/driver/pyvisadriver.py +++ b/labgrid/driver/pyvisadriver.py @@ -18,7 +18,7 @@ class PyVISADriver(Driver): def __attrs_post_init__(self): super().__attrs_post_init__() _py_pyvisa_module = import_module('pyvisa') - self._pyvisa_resource_manager = _py_pyvisa_module.ResourceManager() + self._pyvisa_resource_manager = _py_pyvisa_module.ResourceManager(self.pyvisa_resource.backend) self.pyvisa_device = None def on_activate(self): diff --git a/labgrid/resource/pyvisa.py b/labgrid/resource/pyvisa.py index d1a93e89f..2228ea74b 100644 --- a/labgrid/resource/pyvisa.py +++ b/labgrid/resource/pyvisa.py @@ -12,6 +12,8 @@ class PyVISADevice(Resource): Args: type (str): device resource type following the pyVISA resource syntax, e.g. ASRL, TCPIP... url (str): device identifier on selected resource, e.g. for TCPIP resource + backend (str, default=''): Visa library backend, e.g. 
'@sim' for pyvisa-sim backend """ type = attr.ib(validator=attr.validators.instance_of(str)) url = attr.ib(validator=attr.validators.instance_of(str)) + backend = attr.ib(default='', validator=attr.validators.instance_of(str)) From 7bba58936a337cf52e2d81753221395cdd97e7c6 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 21 Mar 2024 09:31:52 +0100 Subject: [PATCH 171/384] driver/sshdriver: fix caching of _ssh_version map() returns an iterator, which only works once. Return and cache a tuple instead. Fixes: 2c062e3ac6bb ("driver/sshdriver: store OpenSSH version in cached property") Signed-off-by: Jan Luebbe --- labgrid/driver/sshdriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py index 9c7af8f52..055a7e419 100644 --- a/labgrid/driver/sshdriver.py +++ b/labgrid/driver/sshdriver.py @@ -447,7 +447,7 @@ def get_status(self): def _ssh_version(self): version = subprocess.run(["ssh", "-V"], capture_output=True, text=True) version = re.match(r"^OpenSSH_(\d+)\.(\d+)", version.stderr) - return map(int, version.groups()) + return tuple(int(x) for x in version.groups()) def _scp_supports_explicit_sftp_mode(self): major, minor = self._ssh_version From 1ac04df4d7a6701adf6660ab7b3f875bfac90efe Mon Sep 17 00:00:00 2001 From: Stefan Wiehler Date: Sun, 25 Jun 2023 12:15:09 +0200 Subject: [PATCH 172/384] driver/usbstoragedriver: have write_image wait for partition to appear write_image currently waits for only the block device to appear, even if the image should be written to a partition. Improve this a bit by polling for the partition itself to appear with a non-zero size. While at it, we refactor the code a bit to improve code reuse when we add the write_files method in the follow-up commit. 
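To illustrate the polling idea (a sketch with example values, not the
driver code itself):

    import pathlib
    import time

    def wait_for_medium(dev="sdb1", timeout=10.0, sleep=0.5):
        # the sysfs size attribute is missing or empty until the kernel
        # has scanned the partition table of the (re)inserted medium
        size = pathlib.Path(f"/sys/class/block/{dev}/size")
        deadline = time.monotonic() + timeout
        while time.monotonic() < deadline:
            try:
                if int(size.read_text()) > 0:
                    return
            except (FileNotFoundError, ValueError):
                pass
            time.sleep(sleep)
        raise TimeoutError(f"medium {dev} did not become ready")
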
Signed-off-by: Stefan Wiehler Co-authored-by: Ahmad Fatoum Signed-off-by: Ahmad Fatoum --- labgrid/driver/usbstoragedriver.py | 55 ++++++++++++++++++++---------- 1 file changed, 37 insertions(+), 18 deletions(-) diff --git a/labgrid/driver/usbstoragedriver.py b/labgrid/driver/usbstoragedriver.py index 5bc42856a..65f9a2303 100644 --- a/labgrid/driver/usbstoragedriver.py +++ b/labgrid/driver/usbstoragedriver.py @@ -40,6 +40,8 @@ class USBStorageDriver(Driver): default=None, validator=attr.validators.optional(attr.validators.instance_of(str)) ) + WAIT_FOR_MEDIUM_TIMEOUT = 10.0 # s + WAIT_FOR_MEDIUM_SLEEP = 0.5 # s def on_activate(self): pass @@ -68,22 +70,10 @@ def write_image(self, filename=None, mode=Mode.DD, partition=None, skip=0, seek= mf = ManagedFile(filename, self.storage) mf.sync_to_resource() - # wait for medium - timeout = Timeout(10.0) - while not timeout.expired: - try: - if self.get_size() > 0: - break - time.sleep(0.5) - except ValueError: - # when the medium gets ready the sysfs attribute is empty for a short time span - continue - else: - raise ExecutionError("Timeout while waiting for medium") + self._wait_for_medium(partition) - partition = "" if partition is None else partition + target = self._get_devpath(partition) remote_path = mf.get_remote_path() - target = f"{self.storage.path}{partition}" if mode == Mode.DD: self.logger.info('Writing %s to %s using dd.', remote_path, target) @@ -139,12 +129,41 @@ def write_image(self, filename=None, mode=Mode.DD, partition=None, skip=0, seek= print_on_silent_log=True ) + def _get_devpath(self, partition): + partition = "" if partition is None else partition + # simple concatenation is sufficient for USB mass storage + return f"{self.storage.path}{partition}" + @Driver.check_active - @step(result=True) - def get_size(self): - args = ["cat", f"/sys/class/block/{self.storage.path[5:]}/size"] + def _wait_for_medium(self, partition): + timeout = Timeout(self.WAIT_FOR_MEDIUM_TIMEOUT) + while not timeout.expired: + if self.get_size(partition) > 0: + break + time.sleep(self.WAIT_FOR_MEDIUM_SLEEP) + else: + raise ExecutionError("Timeout while waiting for medium") + + @Driver.check_active + @step(args=['partition'], result=True) + def get_size(self, partition=None): + """ + Get the size of the bound USB storage root device or partition. + + Args: + partition (int or None): optional, get size of the specified partition or None for + getting the size of the root device (defaults to None) + + Returns: + int: size in bytes + """ + args = ["cat", f"/sys/class/block/{self._get_devpath(partition)[5:]}/size"] size = subprocess.check_output(self.storage.command_prefix + args) - return int(size)*512 + try: + return int(size) * 512 + except ValueError: + # when the medium gets ready the sysfs attribute is empty for a short time span + return 0 @target_factory.reg_driver From 80e748cf8a02ec7b9b05509a6aae70df29a38526 Mon Sep 17 00:00:00 2001 From: Ahmad Fatoum Date: Tue, 27 Jun 2023 21:23:56 +0200 Subject: [PATCH 173/384] util: agents: add udisks2 agent for remote mounting While udisksctl exists as a client tool to talk to the udisks2 daemon, it's not well-suited for programmatic use. We thus introduce an agent that is copied to the remote system and that uses PyGObject to directly talk DBus to the daemon to mount and unmount file systems. 
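For illustration, a sketch of how a driver can use the agent through the
existing AgentWrapper (host and device names are examples):

    from labgrid.util.agentwrapper import AgentWrapper

    wrapper = AgentWrapper("exporter-host")  # None for the local machine
    proxy = wrapper.load('udisks2')
    mountpoint = proxy.mount('/dev/sdb1')    # returns the mount path
    # ... work with the mounted file system ...
    proxy.unmount('/dev/sdb1')
    wrapper.close()
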
Signed-off-by: Ahmad Fatoum Signed-off-by: Bastian Krause --- doc/configuration.rst | 8 +++ labgrid/util/agents/udisks2.py | 102 +++++++++++++++++++++++++++++++++ 2 files changed, 110 insertions(+) create mode 100644 labgrid/util/agents/udisks2.py diff --git a/doc/configuration.rst b/doc/configuration.rst index a759e6f9a..9ada5f2fe 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -610,6 +610,14 @@ device. match: ID_PATH: 'pci-0000:06:00.0-usb-0:1.3.2:1.0-scsi-0:0:0:3' +Writing images to disk requires installation of ``dd`` or optionally +``bmaptool`` on the same system as the block device. + +For mounting the file system and writing into it, +`PyGObject `_ must be installed. +For Debian, the necessary packages are `python3-gi` and `gir1.2-udisks-2.0`. +This is not required for writing images to disks. + Arguments: - match (dict): key and value pairs for a udev match, see `udev Matching`_ diff --git a/labgrid/util/agents/udisks2.py b/labgrid/util/agents/udisks2.py new file mode 100644 index 000000000..6def4da7c --- /dev/null +++ b/labgrid/util/agents/udisks2.py @@ -0,0 +1,102 @@ +""" +This module implements mounting file systems via communication with udisksd. +""" +import logging +import time + +import gi +gi.require_version('UDisks', '2.0') +from gi.repository import GLib, UDisks + +class UDisks2Device: + UNMOUNT_MAX_RETRIES = 5 + UNMOUNT_BUSY_WAIT = 3 # s + + def __init__(self, devpath): + self._logger = logging.getLogger("Device: ") + self.devpath = devpath + client = UDisks.Client.new_sync(None) + + manager = client.get_object_manager() + for obj in manager.get_objects(): + block = obj.get_block() + if not block: + continue + + device_path = block.get_cached_property("Device").get_bytestring().decode('utf-8') + if device_path == devpath: + self.fs = obj.get_filesystem() + if self.fs is None: + raise ValueError(f"no filesystem found on {devpath}") + + return + + raise ValueError(f"No udisks2 device found for {devpath}") + + def mount(self, readonly=False): + opts = GLib.Variant('a{sv}', {'options': GLib.Variant('s', 'ro' if readonly else 'rw')}) + + try: + mountpoint = self.fs.call_mount_sync(opts, None) + except GLib.GError as err: + if not err.matches(UDisks.error_quark(), UDisks.Error.ALREADY_MOUNTED): + raise err + + self._logger.warning('Unmounting lazily and remounting %s...', self.devpath) + self._unmount_lazy() + + mountpoint = self.fs.call_mount_sync(opts, None) + + return mountpoint + + def _unmount_lazy(self): + opts = GLib.Variant('a{sv}', {'force': GLib.Variant('b', True)}) + + try: + self.fs.call_unmount_sync(opts, None) + except GLib.GError as err: + if not err.matches(UDisks.error_quark(), UDisks.Error.NOT_MOUNTED): + raise err + + def _unmount(self): + opts = GLib.Variant('a{sv}', {'force': GLib.Variant('b', False)}) + + for _ in range(self.UNMOUNT_MAX_RETRIES): + try: + self.fs.call_unmount_sync(opts, None) + return + except GLib.GError as err: + if not err.matches(UDisks.error_quark(), UDisks.Error.DEVICE_BUSY): + raise err + + self._logger.warning('waiting %s s for busy %s', + self.UNMOUNT_BUSY_WAIT, self.devpath) + time.sleep(self.UNMOUNT_BUSY_WAIT) + + raise TimeoutError("Timeout waiting for device to become non-busy") + + def unmount(self, lazy=False): + if lazy: + self._unmount_lazy() + else: + self._unmount() + +_devs = {} + +def _get_udisks2_dev(devpath): + if devpath not in _devs: + _devs[devpath] = UDisks2Device(devpath=devpath) + return _devs[devpath] + +def handle_mount(devpath): + dev = _get_udisks2_dev(devpath) + return dev.mount() + +def 
handle_unmount(devpath, lazy=False): + dev = _get_udisks2_dev(devpath) + return dev.unmount(lazy=lazy) + +methods = { + 'mount': handle_mount, + 'unmount': handle_unmount, +} From 5537bab567bc25b15b33f98af50f793cf2f3d1b0 Mon Sep 17 00:00:00 2001 From: Stefan Wiehler Date: Wed, 11 Dec 2019 13:56:24 +0100 Subject: [PATCH 174/384] driver/usbstoragedriver: add write_files method write_image, especially with the seek and skip parameters, can be useful for fast iterations during bootloader development: Only the bootloader is copied and the rest of the storage medium stays intact. Some boot firmware however loads subsequent boot stages from a, usually FAT32, filesystem, notably UEFI firmware from the EFI system partition. Make development on such targets easier by adding a write_files method that temporarily mounts the target device using udisks2 and writes the specified files there. Signed-off-by: Stefan Wiehler Co-authored-by: Ahmad Fatoum Signed-off-by: Ahmad Fatoum Signed-off-by: Bastian Krause --- doc/configuration.rst | 2 +- labgrid/driver/usbstoragedriver.py | 62 +++++++++++++++++++++++++++++- 2 files changed, 61 insertions(+), 3 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 9ada5f2fe..2ef3f5271 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -630,7 +630,7 @@ A :any:`NetworkUSBMassStorage` resource describes a USB memory stick or similar device available on a remote computer. The NetworkUSBMassStorage can be used in test cases by calling the -``write_image()``, and ``get_size()`` functions. +``write_files()``, ``write_image()``, and ``get_size()`` functions. SigrokDevice ~~~~~~~~~~~~ diff --git a/labgrid/driver/usbstoragedriver.py b/labgrid/driver/usbstoragedriver.py index 65f9a2303..3207b787f 100644 --- a/labgrid/driver/usbstoragedriver.py +++ b/labgrid/driver/usbstoragedriver.py @@ -1,17 +1,20 @@ import enum import os +import pathlib import time import subprocess import attr from ..factory import target_factory +from ..resource.remote import RemoteUSBResource from ..step import step from ..util.managedfile import ManagedFile from .common import Driver from ..driver.exception import ExecutionError from ..util.helper import processwrapper +from ..util.agentwrapper import AgentWrapper from ..util import Timeout @@ -43,11 +46,66 @@ class USBStorageDriver(Driver): WAIT_FOR_MEDIUM_TIMEOUT = 10.0 # s WAIT_FOR_MEDIUM_SLEEP = 0.5 # s + def __attrs_post_init__(self): + super().__attrs_post_init__() + self.wrapper = None + def on_activate(self): - pass + host = self.storage.host if isinstance(self.storage, RemoteUSBResource) else None + self.wrapper = AgentWrapper(host) + self.proxy = self.wrapper.load('udisks2') def on_deactivate(self): - pass + self.wrapper.close() + self.wrapper = None + self.proxy = None + + @Driver.check_active + @step(args=['sources', 'target', 'partition', 'target_is_directory']) + def write_files(self, sources, target, partition, target_is_directory=True): + """ + Write the file(s) specified by filename(s) to the + bound USB storage partition. + + Args: + sources (List[str]): path(s) to the file(s) to be copied to the bound USB storage + partition. + target (str): target directory or file to copy to + partition (int): mount the specified partition or None to mount the whole disk + target_is_directory (bool): Whether target is a directory + """ + + self.devpath = self._get_devpath(partition) + mount_path = self.proxy.mount(self.devpath) + + try: + # (pathlib.PurePath(...) 
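A usage sketch (paths and partition number are examples):

    import pathlib

    drv = target.get_driver("USBStorageDriver")
    # copy a freshly built EFI binary into the ESP on partition 1
    drv.write_files([pathlib.PurePath("build/bootx64.efi")],
                    pathlib.PurePath("/EFI/BOOT"),
                    partition=1, target_is_directory=True)
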
/ "/") == "/", so we turn absolute paths into relative + # paths with respect to the mount point here + target_rel = target.relative_to(target.root) if target.root is not None else target + target_path = str(pathlib.PurePath(mount_path) / target_rel) + + copied_sources = [] + + for f in sources: + mf = ManagedFile(f, self.storage) + mf.sync_to_resource() + copied_sources.append(mf.get_remote_path()) + + if target_is_directory: + args = ["cp", "-t", target_path] + copied_sources + else: + if len(sources) != 1: + raise ValueError("single source argument required when target_is_directory=False") + + args = ["cp", "-T", copied_sources[0], target_path] + + processwrapper.check_output(self.storage.command_prefix + args) + self.proxy.unmount(self.devpath) + except: + # We are going to die with an exception anyway, so no point in waiting + # to make sure everything has been written before continuing + self.proxy.unmount(self.devpath, lazy=True) + raise @Driver.check_active @step(args=['filename']) From a712ee2b899076ce3e011ae8a67ebba6417ee2ac Mon Sep 17 00:00:00 2001 From: Stefan Wiehler Date: Wed, 11 Dec 2019 13:58:15 +0100 Subject: [PATCH 175/384] remote/client: add write-files command Analogously to write-image, add a write-file command that can be used to overwrite (or create anew) a single file in a file system located on a USBStorageDriver. This is useful for faster iteration during bootloader development when a bootloader is loaded from a file system. Following usages are supported: labgrid-client write-files a b # writes files a, b into /mnt/ labgrid-client write-files -t a b c # writes files b, c into /mnt/a/ labgrid-client write-files -T a b # writes file b to /mnt/a Where /mnt is some dynamic path on the, possibly remote, host exporting the USBStorageDriver. Signed-off-by: Stefan Wiehler Co-authored-by: Ahmad Fatoum Signed-off-by: Ahmad Fatoum Signed-off-by: Bastian Krause --- CHANGES.rst | 2 ++ contrib/completion/labgrid-client.bash | 35 +++++++++++++++++++ labgrid/remote/client.py | 48 ++++++++++++++++++++++++++ man/labgrid-client.1 | 2 ++ man/labgrid-client.rst | 2 ++ 5 files changed, 89 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 0c7968c4d..25e699250 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -12,6 +12,8 @@ New Features in 24.0 to the serial console during testing. - The `QEMUDriver` now has an additional ``disk_opts`` property which can be used to pass additional options for the disk directly to QEMU +- labgrid-client now has a ``write-files`` subcommand to copy files onto mass + storage devices. 
Bug fixes in 24.0 ~~~~~~~~~~~~~~~~~ diff --git a/contrib/completion/labgrid-client.bash b/contrib/completion/labgrid-client.bash index d8bd59abf..7bc0d8499 100644 --- a/contrib/completion/labgrid-client.bash +++ b/contrib/completion/labgrid-client.bash @@ -769,6 +769,40 @@ _labgrid_client_write_image() esac } +_labgrid_client_write_files() +{ + local cur prev words cword + _init_completion || return + + case "$prev" in + -w|--wait) + ;& + -p|--partition) + ;& + -t|--target-directory) + ;& + -T) + ;& + -n|--name) + _labgrid_complete match-names "$cur" + return + ;; + esac + + case "$cur" in + -*) + local options="--wait --partition --target-directory --name $_labgrid_shared_options" + COMPREPLY=( $(compgen -W "$options" -- "$cur") ) + ;; + *) + local args + _labgrid_count_args "@(-w|--wait|-p|--partition|-t|--target-directory|-T|-n|--name)" || return + + _filedir + ;; + esac +} + _labgrid_client_reserve() { _labgrid_client_generic_subcommand "--wait --shell --prio" @@ -888,6 +922,7 @@ _labgrid_client() audio \ tmc \ write-image \ + write-files \ reserve \ cancel-reservation \ wait \ diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 235238187..ddcd66637 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -5,6 +5,7 @@ import contextlib import enum import os +import pathlib import subprocess import traceback import logging @@ -1214,6 +1215,32 @@ def tmc_channel(self): for k, v in sorted(data.items()): print(f"{k:<16s} {str(v):<10s}") + def write_files(self): + place = self.get_acquired_place() + target = self._get_target(place) + name = self.args.name + drv = self._get_driver_or_new(target, "USBStorageDriver", activate=False, name=name) + drv.storage.timeout = self.args.wait + target.activate(drv) + + try: + if self.args.partition == 0: + self.args.partition = None + + if self.args.rename: + if len(self.args.SOURCE) != 2: + self.args.parser.error("the following arguments are required: SOURCE DEST") + + drv.write_files([self.args.SOURCE[0]], self.args.SOURCE[1], + self.args.partition, target_is_directory=False) + else: + drv.write_files(self.args.SOURCE, self.args.target_directory, + self.args.partition, target_is_directory=True) + except subprocess.CalledProcessError as e: + raise UserError(f"could not copy files to network usb storage: {e}") + except FileNotFoundError as e: + raise UserError(e) + def write_image(self): place = self.get_acquired_place() target = self._get_target(place) @@ -1761,6 +1788,27 @@ def main(): tmc_subparser.add_argument('action', choices=['info', 'values']) tmc_subparser.set_defaults(func=ClientSession.tmc_channel) + subparser = subparsers.add_parser('write-files', help="copy files onto mass storage device", + usage="%(prog)s [OPTION]... -T SOURCE DEST\n" + + " %(prog)s [OPTION]... 
[-t DIRECTORY] SOURCE...")
+    subparser.add_argument('-w', '--wait', type=float, default=10.0,
+                           help='storage poll timeout in seconds')
+    subparser.add_argument('-p', '--partition', type=int, choices=range(0, 256),
+                           metavar='0-255', default=1,
+                           help='partition number to mount or 0 to mount whole disk (default: %(default)s)')
+    group = subparser.add_mutually_exclusive_group()
+    group.add_argument('-t', '--target-directory', type=pathlib.PurePath, metavar='DIRECTORY',
+                       default=pathlib.PurePath("/"),
+                       help='copy all SOURCE files into DIRECTORY (default: partition root)')
+    group.add_argument('-T', action='store_true', dest='rename',
+                       help='copy SOURCE file and rename to DEST')
+    subparser.add_argument('--name', '-n', help="optional resource name")
+    subparser.add_argument('SOURCE', type=pathlib.PurePath, nargs='+',
+                           help='source file(s) to copy')
+    subparser.add_argument('DEST', type=pathlib.PurePath, nargs='?',
+                           help='destination file name for SOURCE')
+    subparser.set_defaults(func=ClientSession.write_files, parser=subparser)
+
     subparser = subparsers.add_parser('write-image', help="write an image onto mass storage")
     subparser.add_argument('-w', '--wait', type=float, default=10.0)
     subparser.add_argument('-p', '--partition', type=int, help="partition number to write to")
diff --git a/man/labgrid-client.1 b/man/labgrid-client.1
index d2b850b32..07e94ac58 100644
--- a/man/labgrid-client.1
+++ b/man/labgrid-client.1
@@ -219,6 +219,8 @@ not at all.
 .sp
 \fBtmc\fP command Control a USB TMC device
 .sp
+\fBwrite\-files\fP filename(s) Copy files onto mass storage device
+.sp
 \fBwrite\-image\fP Write images onto block devices (USBSDMux, USB Sticks, …)
 .sp
 \fBreserve\fP filter Create a reservation
diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst
index d9542559e..3a8c5eaf5 100644
--- a/man/labgrid-client.rst
+++ b/man/labgrid-client.rst
@@ -211,6 +211,8 @@ LABGRID-CLIENT COMMANDS
 ``tmc`` command Control a USB TMC device

+``write-files`` filename(s) Copy files onto mass storage device
+
 ``write-image`` Write images onto block devices (USBSDMux, USB Sticks, …)

 ``reserve`` filter Create a reservation

From 3ea19dee4babd4633eb569d9b867a96731861911 Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Mon, 25 Mar 2024 15:40:16 +0100
Subject: [PATCH 176/384] doc/overview: mention SSH requirement

Mention that SSH access to the exporter is required. Also document how
the SSH jump host function can be used with plain SSH.

Signed-off-by: Rouven Czerwinski
---
 doc/overview.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/overview.rst b/doc/overview.rst
index 104cd7147..8152406bf 100644
--- a/doc/overview.rst
+++ b/doc/overview.rst
@@ -319,6 +319,12 @@ published as declared in the configuration file.
 This is useful to register externally configured resources such as network
 power switches or serial port servers with a labgrid coordinator.

+.. note::
+   Users will require SSH access to the exporter to access services and command
+   line utilities. You also have to ensure that users can access USB devices,
+   e.g. for imx-usb-loader. To test an SSH jump to a device over the exporter
+   outside of labgrid, `ssh -J EXPORTER USER@DEVICE` can be used.
+
 .. _overview-client:

 Client

From 8e471bfa21bad08d16db4a152cb5a4d2e882cd84 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 27 Mar 2024 13:17:23 +0100
Subject: [PATCH 177/384] doc/configuration: fix spelling of MatchedSysfsGPIO
 class name

The exact class name must be used in the configuration snippet.
Stick to that name also in the headline as well as in the text. While at it, link MatchedSysfsGPIO and SysfsGPIO. Fixes: 63873ee5 ("resource: matchedsysfsgpio: add udev matched gpios") Signed-off-by: Bastian Krause --- doc/configuration.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 2ef3f5271..cb4d8dd82 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -542,16 +542,16 @@ Arguments: Used by: - `GpioDigitalOutputDriver`_ -MatchedSysfsGpio +MatchedSysfsGPIO ++++++++++++++++ -A MatchedSysfsGpio resource describes a GPIO line, like a SysfsGPIO. +A :any:`MatchedSysfsGPIO` describes a GPIO line, like a `SysfsGPIO`_. The gpiochip is identified by matching udev properties. This allows identification through hot-plugging or rebooting for controllers like USB based gpiochips. .. code-block:: yaml - MatchedSysfsGpio: + MatchedSysfsGPIO: match: '@SUBSYSTEM': 'usb' '@ID_SERIAL_SHORT': 'D38EJ8LF' From 987aa904e66cec9120b15051341acb687710576b Mon Sep 17 00:00:00 2001 From: Peter Hoyes Date: Thu, 14 Dec 2023 00:20:46 +0000 Subject: [PATCH 178/384] dockerfiles: Allow passing arbitrary arguments to build.sh To allow customizing the arguments passed to docker/podman build(x) beyond just the platform, modify build.sh to pass all its arguments through to the build command. For the platform case, this means it is now necessary to pass "--platform linux/arm64" instead of just "linux/arm64" so modify README.md to reflect this. This change also has the effect of automatically using buildx if supported, even if no arguments are passed, so move the information about buildx further up in the text. The link to podman's buildx is dropped as (unlike Docker) "podman buildx" is just an alias for "podman build". Signed-off-by: Peter Hoyes --- dockerfiles/README.rst | 13 ++++++------- dockerfiles/build.sh | 21 ++++++++++++--------- 2 files changed, 18 insertions(+), 16 deletions(-) diff --git a/dockerfiles/README.rst b/dockerfiles/README.rst index 9ab661fec..963412d90 100644 --- a/dockerfiles/README.rst +++ b/dockerfiles/README.rst @@ -27,8 +27,9 @@ Example showing how to build labgrid-client image: Using `BuildKit `_ is recommended to reduce build times. -You can also choose to build all 3 images, -with the included script. +You can also choose to build all 3 images with the included script. The script +will automatically use `docker buildx +`` if available. .. code-block:: bash @@ -43,15 +44,13 @@ The script supports ``podman`` as well. $ ./dockerfiles/build.sh It builds for the native platform by default. However, building -for foreign platforms is also supported using `docker buildx -` or `podman -buildx ` -by passing the platform of choice, e.g. `linux/arm64`. +for foreign platforms is also supported by passing the platform(s) of choice, +e.g. `linux/arm64` as an additional argument. .. code-block:: bash $ pip install --upgrade setuptools_scm - $ ./dockerfiles/build.sh linux/arm64 + $ ./dockerfiles/build.sh --platform linux/arm64 Usage diff --git a/dockerfiles/build.sh b/dockerfiles/build.sh index 501667a17..4ff05a0af 100755 --- a/dockerfiles/build.sh +++ b/dockerfiles/build.sh @@ -53,12 +53,14 @@ perform_regular_build() { docker_cmd="${1}" script_dir="${2}" version="${3}" + extra_args=("${@:4}") log_info "building for native platform only." for t in client exporter coordinator; do "${docker_cmd}" build --build-arg VERSION="${version}" \ - --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" . 
+ --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" \ + "${extra_args[@]}" . done } @@ -67,16 +69,17 @@ perform_docker_buildx_build() { docker_cmd="${1}" script_dir="${2}" version="${3}" + extra_args=("${@:4}") for t in client exporter coordinator; do - "${docker_cmd}" buildx build --platform "${platform}" --build-arg VERSION="${version}" \ - --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" . + "${docker_cmd}" buildx build --build-arg VERSION="${version}" \ + --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" \ + "${extra_args[@]}" . done } main() { - local platform script_dir version - platform="${1}" + local script_dir version if ! has_docker && ! has_podman; then die "Neither docker nor podman could be found." @@ -88,11 +91,11 @@ main() { cd "${script_dir}/.." || die "Could not cd into repo root dir" - if has_buildx "${docker_cmd}" && [ -n "${platform}" ]; then - perform_docker_buildx_build "${docker_cmd}" "${script_dir}" "${version}" "${platform}" + if has_buildx "${docker_cmd}"; then + perform_docker_buildx_build "${docker_cmd}" "${script_dir}" "${version}" "${@}" else - perform_regular_build "${docker_cmd}" "${script_dir}" "${version}" + perform_regular_build "${docker_cmd}" "${script_dir}" "${version}" "${@}" fi } -main "${1}" +main "${@}" From 21ce832cc89ddf367d08da76d944a8b15aebab48 Mon Sep 17 00:00:00 2001 From: Peter Hoyes Date: Thu, 14 Dec 2023 00:32:32 +0000 Subject: [PATCH 179/384] dockerfiles: Use same default image name as DockerHub To simplify usage of build.sh on CI, use the same image names for local development as on the Docker hub (i.e. labgrid/client instead of labgrid-client). Allow both the prefix and the tag to be overridden using environment variables. Use the same prefix in docker-compose.yml, customizable using the same environment variable. Adjust the documentation to reflect the new prefix. It is no longer necessary to build the images locally before running docker-compose so remove this part of the staging instructions. Signed-off-by: Peter Hoyes --- dockerfiles/README.rst | 20 +++++++------------- dockerfiles/build.sh | 6 ++++-- dockerfiles/staging/docker-compose.yml | 6 +++--- 3 files changed, 14 insertions(+), 18 deletions(-) diff --git a/dockerfiles/README.rst b/dockerfiles/README.rst index 963412d90..3268de03d 100644 --- a/dockerfiles/README.rst +++ b/dockerfiles/README.rst @@ -22,7 +22,7 @@ Example showing how to build labgrid-client image: .. code-block:: bash - $ docker build --target labgrid-client -t labgrid-client -f dockerfiles/Dockerfile . + $ docker build --target labgrid-client -t docker.io/labgrid/client -f dockerfiles/Dockerfile . Using `BuildKit `_ is recommended to reduce build times. @@ -76,7 +76,7 @@ so you can restart the service without loosing state. .. code-block:: bash $ docker run -t -p 20408:20408 -v $HOME/crossbar:/opt/crossbar \ - labgrid-coordinator + docker.io/labgrid/coordinator labgrid-client usage @@ -89,14 +89,14 @@ ws://192.168.1.42:20408/ws .. code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws labgrid-client \ + $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \ labgrid-client places Or running all pytest/labgrid tests at current directory: .. 
code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws labgrid-client \ + $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \ pytest @@ -115,7 +115,7 @@ Start it with something like: $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws \ -v $HOME/exporter-conf:/opt/conf \ - labgrid-exporter + docker.io/labgrid/exporter If using ser2net or if "exporting" e.g. a serial device, the devices needed must be added to Docker container (``docker run --device`` option). @@ -135,14 +135,8 @@ create a setup with the following instances The environment serves both to allow checking if the environment still function after changes, and can act as an example how to configure the docker images needed to run a minimal setup. -To use the staging environment to conduct a smoke test first build the images as instructed below: - -.. code-block:: bash - - $ pip install --upgrade setuptools_scm - $ ./dockerfiles/build.sh - -Then use docker compose to start all services except the client: +To use the staging environment to conduct a smoke test, first run docker compose to start all services except the +client: .. code-block:: bash diff --git a/dockerfiles/build.sh b/dockerfiles/build.sh index 4ff05a0af..6758c9376 100755 --- a/dockerfiles/build.sh +++ b/dockerfiles/build.sh @@ -1,6 +1,8 @@ #!/bin/bash export DOCKER_BUILDKIT=1 +: "${IMAGE_PREFIX:=docker.io/labgrid/}" +: "${IMAGE_TAG:=latest}" die () { local msg @@ -59,7 +61,7 @@ perform_regular_build() { for t in client exporter coordinator; do "${docker_cmd}" build --build-arg VERSION="${version}" \ - --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" \ + --target labgrid-${t} -t "${IMAGE_PREFIX}${t}:${IMAGE_TAG}" -f "${script_dir}/Dockerfile" \ "${extra_args[@]}" . done } @@ -73,7 +75,7 @@ perform_docker_buildx_build() { for t in client exporter coordinator; do "${docker_cmd}" buildx build --build-arg VERSION="${version}" \ - --target labgrid-${t} -t labgrid-${t}:latest -f "${script_dir}/Dockerfile" \ + --target labgrid-${t} -t "${IMAGE_PREFIX}${t}:${IMAGE_TAG}" -f "${script_dir}/Dockerfile" \ "${extra_args[@]}" . 
    done
 }

diff --git a/dockerfiles/staging/docker-compose.yml b/dockerfiles/staging/docker-compose.yml
index a5c906944..7f0f9524e 100644
--- a/dockerfiles/staging/docker-compose.yml
+++ b/dockerfiles/staging/docker-compose.yml
@@ -1,7 +1,7 @@
 version: '3.3'
 services:
   coordinator:
-    image: "labgrid-coordinator"
+    image: "${IMAGE_PREFIX:-docker.io/labgrid/}coordinator"
     volumes:
       - "./crossbar:/home/root/crossbar"
     tty: true
@@ -9,7 +9,7 @@ services:
     command: bash -c "cp /home/root/crossbar/places_example.yaml /opt/crossbar/places.yaml && /opt/labgrid/crossbar-venv/bin/crossbar start --config /opt/labgrid/.crossbar/config-anonymous.yaml"
   client:
-    image: "labgrid-client"
+    image: "${IMAGE_PREFIX:-docker.io/labgrid/}client"
     volumes:
       - "./client/simple-test:/simple-test"
       - "./client/.ssh:/root/.ssh"
@@ -33,7 +33,7 @@
       - exporter
       - dut
   exporter:
-    image: "labgrid-exporter"
+    image: "${IMAGE_PREFIX:-docker.io/labgrid/}exporter"
     volumes:
       - "./exporter-conf:/opt/conf"
       - "/run/udev:/run/udev:ro"

From cfe650a27d443a3f300b97a75faea3cebf7c1838 Mon Sep 17 00:00:00 2001
From: Peter Hoyes
Date: Thu, 14 Dec 2023 00:49:06 +0000
Subject: [PATCH 180/384] github/workflows: Use buildx to build images for
 multiple platforms

docker buildx is able to create multi-platform docker images in a
single invocation (using emulation where required), but multi-platform
images cannot be stored locally, only pushed directly to a registry. So
refactor the workflow a little.

Set common configuration as environment variables. Add actions to
initialize QEMU and buildx.

The remaining steps are modified to:

* First build just the amd64 image and use this for validation using
  docker-compose (--load only supports single-platform images)
* Build, tag and push the "latest" images for all platforms in a single
  invocation
* For tags, override IMAGE_TAG and run the same command again.

Built layers are cached so no redundant work is performed, despite
running the same command multiple times.
Signed-off-by: Peter Hoyes --- .github/workflows/docker.yml | 38 ++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 89cf99bf8..89ce71b09 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -6,6 +6,11 @@ on: tags: - '*' +env: + QEMU_PLATFORMS: arm64 + IMAGE_PLATFORMS: linux/amd64,linux/arm64 + IMAGE_PREFIX: ${{ secrets.DOCKERHUB_PREFIX }} + jobs: docker: runs-on: ubuntu-latest @@ -16,32 +21,27 @@ jobs: sudo apt install -yq python3-pip pip install --upgrade setuptools pip install setuptools_scm + - name: Set up QEMU + uses: docker/setup-qemu-action@v3 + with: + platforms: ${QEMU_PLATFORMS} + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v3 - name: Login to DockerHub uses: docker/login-action@v3 with: username: ${{ secrets.DOCKERHUB_USERNAME }} password: ${{ secrets.DOCKERHUB_TOKEN }} - - name: Build docker image + - name: Build amd64 docker image and validate run: | - ./dockerfiles/build.sh + ./dockerfiles/build.sh --load docker-compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client docker-compose -f dockerfiles/staging/docker-compose.yml down docker images - - name: Tag latest images - run: | - docker tag labgrid-client ${{ secrets.DOCKERHUB_PREFIX }}client - docker tag labgrid-exporter ${{ secrets.DOCKERHUB_PREFIX }}exporter - docker tag labgrid-coordinator ${{ secrets.DOCKERHUB_PREFIX }}coordinator - - name: Tag release image + - name: Build, tag and push latest image for all platforms + run: ./dockerfiles/build.sh --platform ${IMAGE_PLATFORMS} --push + - name: Tag and push release image for all platforms if: startsWith(github.ref, 'refs/tags') - run: | - docker tag labgrid-client ${{ secrets.DOCKERHUB_PREFIX }}client:${GITHUB_REF_NAME} - docker tag labgrid-exporter ${{ secrets.DOCKERHUB_PREFIX }}exporter:${GITHUB_REF_NAME} - docker tag labgrid-coordinator ${{ secrets.DOCKERHUB_PREFIX }}coordinator:${GITHUB_REF_NAME} - - name: Push to dockerhub - run: | - docker push --all-tags ${{ secrets.DOCKERHUB_PREFIX }}client - docker push --all-tags ${{ secrets.DOCKERHUB_PREFIX }}exporter - docker push --all-tags ${{ secrets.DOCKERHUB_PREFIX }}coordinator - - name: Show images again - run: docker images + env: + IMAGE_TAG: ${{ github.ref_name }} + run: ./dockerfiles/build.sh --platform ${IMAGE_PLATFORMS} --push From 58fdba64c0e11c4221bbf77204c5ffceda916723 Mon Sep 17 00:00:00 2001 From: Peter Hoyes Date: Thu, 14 Dec 2023 01:01:23 +0000 Subject: [PATCH 181/384] github/workflows: Build Docker images on release creation It is observed that there are no images on Docker hub for Labgrid release tags, despite listening to tag events. This may be because this event is not triggered for tags created via the Github release UI, so add this event too. Additionally allow the workflow to be triggered manually for testing purposes. 
Signed-off-by: Peter Hoyes
---
 .github/workflows/docker.yml | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml
index 89ce71b09..bd111b4eb 100644
--- a/.github/workflows/docker.yml
+++ b/.github/workflows/docker.yml
@@ -5,6 +5,9 @@ on:
     branches: [ master ]
     tags:
       - '*'
+  release:
+    types: [ released ]
+  workflow_dispatch:

 env:
   QEMU_PLATFORMS: arm64

From 1dbd6996b75e326cf11714e6464cfb98172a04db Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 3 Apr 2024 14:37:08 +0200
Subject: [PATCH 182/384] driver/sshdriver: redirect /dev/null to stdin on
 run()

By default, no redirection will occur. That means a command run on the
target consumes the stdin of our process. That is especially unfortunate
if we expect interactive input, such as for the ManualPowerDriver,
ManualSwitchDriver or in a REPL:

  shell.run_check("sleep 100 &")
  pidfile = "/tmp/pidfile"
  shell.run_check(f"pgrep sleep > {pidfile}")
  try:
      ssh.run(f"pwait --pidfile {pidfile}", timeout=1)
  except:
      from IPython import embed
      embed()

This example shows that not all input reaches the IPython REPL.

Fix this by redirecting /dev/null to stdin, so commands run via SSH do
not receive unexpected input and do not compete over it.

Signed-off-by: Bastian Krause
---
 labgrid/driver/sshdriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/sshdriver.py b/labgrid/driver/sshdriver.py
index 055a7e419..b9efe7c06 100644
--- a/labgrid/driver/sshdriver.py
+++ b/labgrid/driver/sshdriver.py
@@ -214,7 +214,7 @@ def _run(self, cmd, codec="utf-8", decodeerrors="strict", timeout=None):
         stderr_pipe = subprocess.PIPE
         try:
             sub = subprocess.Popen(
-                complete_cmd, stdout=subprocess.PIPE, stderr=stderr_pipe
+                complete_cmd, stdin=subprocess.DEVNULL, stdout=subprocess.PIPE, stderr=stderr_pipe
             )
         except:
             raise ExecutionError(

From 5008e8a08a6060b8b837a90a5f8f279b7ff38904 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 4 Apr 2024 13:36:04 +0200
Subject: [PATCH 183/384] examples/network-test: simplify ethernet frame
 generation

There is no need to put the frame as a single item into a list.

Signed-off-by: Bastian Krause
---
 examples/network-test/pkg-replay-record.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/examples/network-test/pkg-replay-record.py b/examples/network-test/pkg-replay-record.py
index 65cb51702..124991ca0 100755
--- a/examples/network-test/pkg-replay-record.py
+++ b/examples/network-test/pkg-replay-record.py
@@ -12,9 +12,9 @@ from labgrid.logging import basicConfig, StepLogger

 def generate_frame():
-    frame = [Ether(dst="11:22:33:44:55:66", src="66:55:44:33:22:11", type=0x9000)]
+    frame = Ether(dst="11:22:33:44:55:66", src="66:55:44:33:22:11", type=0x9000)
     padding = "\x00" * (conf.min_pkt_size - len(frame))
-    frame = frame[0] / Raw(load=padding)
+    frame /= Raw(load=padding)
     return frame

From fbca8a07aa9e9a90c781d3c2285d65236769a578 Mon Sep 17 00:00:00 2001
From: Jan Luebbe
Date: Tue, 9 Apr 2024 16:36:10 +0200
Subject: [PATCH 184/384] driver/usbstoragedriver: use udisks2 only when
 calling write_files

Otherwise, udisks2 and gi are needed for all installations, even if
write_files is never used.
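The pattern boils down to plain lazy initialization. A minimal sketch
with stand-in names, not the actual driver code (object() replaces the
real AgentWrapper and the loaded udisks2 agent):

```python
class LazyAgentUser:
    """Minimal sketch of the lazy-init pattern used by this patch."""

    def __init__(self):
        self.wrapper = None  # heavy dependency is NOT loaded on activation
        self.proxy = None

    def _start_wrapper(self):
        if self.wrapper:  # already initialized, nothing to do
            return
        self.wrapper = object()  # stand-in for AgentWrapper(host)
        self.proxy = object()    # stand-in for self.wrapper.load('udisks2')

    def write_files(self):
        self._start_wrapper()  # dependency is only pulled in on first use
```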
Fixes: #1219 Signed-off-by: Jan Luebbe --- labgrid/driver/usbstoragedriver.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/labgrid/driver/usbstoragedriver.py b/labgrid/driver/usbstoragedriver.py index 3207b787f..414528429 100644 --- a/labgrid/driver/usbstoragedriver.py +++ b/labgrid/driver/usbstoragedriver.py @@ -49,14 +49,21 @@ class USBStorageDriver(Driver): def __attrs_post_init__(self): super().__attrs_post_init__() self.wrapper = None + self.proxy = None - def on_activate(self): + def _start_wrapper(self): + if self.wrapper: + return host = self.storage.host if isinstance(self.storage, RemoteUSBResource) else None self.wrapper = AgentWrapper(host) self.proxy = self.wrapper.load('udisks2') + def on_activate(self): + pass + def on_deactivate(self): - self.wrapper.close() + if self.wrapper: + self.wrapper.close() self.wrapper = None self.proxy = None @@ -75,6 +82,8 @@ def write_files(self, sources, target, partition, target_is_directory=True): target_is_directory (bool): Whether target is a directory """ + self._start_wrapper() + self.devpath = self._get_devpath(partition) mount_path = self.proxy.mount(self.devpath) From 15b1e16596fc27900e20961011107f1b12ac8745 Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Wed, 10 Apr 2024 11:08:12 -0600 Subject: [PATCH 185/384] contrib: labgrid-webapp: Show how to get the graph Add a line indicating how to actually see the graph. Signed-off-by: Simon Glass --- contrib/README.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/README.rst b/contrib/README.rst index 30e77c078..f9898f512 100644 --- a/contrib/README.rst +++ b/contrib/README.rst @@ -38,4 +38,6 @@ installation. By default the application will start on port 8800. +To see the graph, go to http://0.0.0.0:8800/labgrid/graph + See http://0.0.0.0:8800/docs for more information on available endpoints. From d2bd508547be3d7b231e58301ba4537eb3040db9 Mon Sep 17 00:00:00 2001 From: Simon Glass Date: Wed, 10 Apr 2024 11:13:55 -0600 Subject: [PATCH 186/384] contrib: labgrid-webapp: Fix minor docs issues Add a link to graphviz and drop the unwanted --help from the invocation line. Signed-off-by: Simon Glass --- contrib/README.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/README.rst b/contrib/README.rst index f9898f512..5e537d850 100644 --- a/contrib/README.rst +++ b/contrib/README.rst @@ -24,7 +24,7 @@ Quick Start --port PORT Port to serve on --proxy PROXY, -P PROXY - venv $ ./contrib/labgrid-webapp --help + venv $ ./contrib/labgrid-webapp INFO: Available routes: INFO: - /labgrid/graph INFO: Started server process [2378028] @@ -34,7 +34,7 @@ Quick Start ... Please note that the graph feature relies on a valid `graphviz` system -installation. +installation. See http://graphviz.org/download/ By default the application will start on port 8800. From 56e3cfab5bc0d8012c43f9d561fef793016e1dd9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 15 Apr 2024 13:14:08 +0200 Subject: [PATCH 187/384] man/labgrid-client: fix set-tag argument Signed-off-by: Bastian Krause --- man/labgrid-client.1 | 2 +- man/labgrid-client.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/man/labgrid-client.1 b/man/labgrid-client.1 index 07e94ac58..d78c70e2b 100644 --- a/man/labgrid-client.1 +++ b/man/labgrid-client.1 @@ -157,7 +157,7 @@ matches anything. 
.sp \fBset\-comment\fP comment Update or set the place comment .sp -\fBset\-tags\fP comment Set place tags (key=value) +\fBset\-tags\fP key=value Set place tags (key=value) .sp \fBadd\-match\fP match Add one (or multiple) match pattern(s) to a place, see MATCHES .sp diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst index 3a8c5eaf5..0c05e0f65 100644 --- a/man/labgrid-client.rst +++ b/man/labgrid-client.rst @@ -152,7 +152,7 @@ LABGRID-CLIENT COMMANDS ``set-comment`` comment Update or set the place comment -``set-tags`` comment Set place tags (key=value) +``set-tags`` key=value Set place tags (key=value) ``add-match`` match Add one (or multiple) match pattern(s) to a place, see MATCHES From a2dedd0846fba20e6510965e5ef77d9411919621 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 15 Apr 2024 13:14:25 +0200 Subject: [PATCH 188/384] man/labgrid-client: add missing positional arguments Signed-off-by: Bastian Krause --- man/labgrid-client.1 | 14 +++++++------- man/labgrid-client.rst | 14 +++++++------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/man/labgrid-client.1 b/man/labgrid-client.1 index d78c70e2b..c5772ae0b 100644 --- a/man/labgrid-client.1 +++ b/man/labgrid-client.1 @@ -185,9 +185,9 @@ not at all. .sp \fBpower (pw)\fP action Change (or get) a place\(aqs power status, where action is one of get, on, off, cycle .sp -\fBio\fP action Interact with GPIO (OneWire, relays, ...) devices, where action is one of high, low, get +\fBio\fP action [name] Interact with GPIO (OneWire, relays, ...) devices, where action is one of high, low, get .sp -\fBconsole (con)\fP Connect to the console +\fBconsole (con)\fP [name] Connect to the console .sp \fBdfu\fP arg Run dfu commands .sp @@ -201,13 +201,13 @@ not at all. .sp \fBusb\-mux\fP action Switch USB Muxer, where action is one of off, dut\-device, host\-dut, host\-device, host\-dut+host\-device .sp -\fBssh\fP Connect via SSH +\fBssh\fP [command] Connect via SSH. Additional arguments are passed to ssh. .sp -\fBscp\fP Transfer file via scp (use \(aq:dir/file\(aq for the remote side) +\fBscp\fP source destination Transfer file via scp (use \(aq:dir/file\(aq for the remote side) .sp -\fBrsync\fP Transfer files via rsync (use \(aq:dir/file\(aq for the remote side) +\fBrsync\fP source destination Transfer files via rsync (use \(aq:dir/file\(aq for the remote side) .sp -\fBsshfs\fP Mount a remote path via sshfs +\fBsshfs\fP remotepath mountpoint Mount a remote path via sshfs .sp \fBforward\fP Forward local port to remote target .sp @@ -221,7 +221,7 @@ not at all. .sp \fBwrite\-files\fP filename(s) Copy files onto mass storage device .sp -\fBwrite\-image\fP Write images onto block devices (USBSDMux, USB Sticks, …) +\fBwrite\-image\fP filename Write images onto block devices (USBSDMux, USB Sticks, …) .sp \fBreserve\fP filter Create a reservation .sp diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst index 0c05e0f65..27259bfed 100644 --- a/man/labgrid-client.rst +++ b/man/labgrid-client.rst @@ -177,9 +177,9 @@ LABGRID-CLIENT COMMANDS ``power (pw)`` action Change (or get) a place's power status, where action is one of get, on, off, cycle -``io`` action Interact with GPIO (OneWire, relays, ...) devices, where action is one of high, low, get +``io`` action [name] Interact with GPIO (OneWire, relays, ...) 
devices, where action is one of high, low, get

-``console (con)`` Connect to the console
+``console (con)`` [name] Connect to the console

 ``dfu`` arg Run dfu commands

@@ -193,13 +193,13 @@ LABGRID-CLIENT COMMANDS

 ``usb-mux`` action Switch USB Muxer, where action is one of off, dut-device, host-dut, host-device, host-dut+host-device

-``ssh`` Connect via SSH
+``ssh`` [command] Connect via SSH. Additional arguments are passed to ssh.

-``scp`` Transfer file via scp (use ':dir/file' for the remote side)
+``scp`` source destination Transfer file via scp (use ':dir/file' for the remote side)

-``rsync`` Transfer files via rsync (use ':dir/file' for the remote side)
+``rsync`` source destination Transfer files via rsync (use ':dir/file' for the remote side)

-``sshfs`` Mount a remote path via sshfs
+``sshfs`` remotepath mountpoint Mount a remote path via sshfs

 ``forward`` Forward local port to remote target

@@ -213,7 +213,7 @@ LABGRID-CLIENT COMMANDS

 ``write-files`` filename(s) Copy files onto mass storage device

-``write-image`` Write images onto block devices (USBSDMux, USB Sticks, …)
+``write-image`` filename Write images onto block devices (USBSDMux, USB Sticks, …)

 ``reserve`` filter Create a reservation

From a1e693ed2571ed45d5f9e02e765e419d4869e5fb Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 16 Apr 2024 14:15:19 +0200
Subject: [PATCH 189/384] dockerfiles: replace wait-for-it dependency with
 simple bash loop

Instead of relying on an external repository that basically does the
same job, use a simple loop that opens a socket via bash's
pseudo-device files below /dev/tcp.

Signed-off-by: Bastian Krause
---
 dockerfiles/Dockerfile                 | 3 +--
 dockerfiles/staging/docker-compose.yml | 6 +++---
 2 files changed, 4 insertions(+), 5 deletions(-)

diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile
index 695453840..1b749fe4f 100644
--- a/dockerfiles/Dockerfile
+++ b/dockerfiles/Dockerfile
@@ -11,8 +11,7 @@ RUN set -e ;\
     apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential libsnappy-dev ;\
     pip3 install --break-system-packages -U pip;\
     apt clean ;\
-    rm -rf /var/lib/apt/lists/* ;\
-    git clone https://github.com/vishnubob/wait-for-it.git opt/wait-for-it && cd opt/wait-for-it && git reset --hard 54d1f0bfeb6557adf8a3204455389d0901652242
+    rm -rf /var/lib/apt/lists/*

 #
 # Client

diff --git a/dockerfiles/staging/docker-compose.yml b/dockerfiles/staging/docker-compose.yml
index a5c906944..fed1b59d8 100644
--- a/dockerfiles/staging/docker-compose.yml
+++ b/dockerfiles/staging/docker-compose.yml
@@ -17,12 +17,12 @@ services:
     stdin_open: true
     network_mode: "host"
     tmpfs: "/tmp"
-    # Use wait-for-it to ensure exporter service is up, as exporter is assuming exporter to
+    # Wait until coordinator is up
     # Use labgrid-client r to ensure the exporter has populated the resource list in the coordinator
     # Use sleep to fix the problem that sometimes the coordinator is not ready even though the service is up
     command: timeout 60 bash -c "set -e &&
      cd /simple-test &&
-      /opt/wait-for-it/wait-for-it.sh 127.0.0.1:20408 &&
+      until echo > /dev/tcp/localhost/20408; do sleep 1; done &&
      sleep 5 &&
      while [ -z $$(/usr/local/bin/labgrid-client r) ]; do echo 'Wait one sec on coordinator' && sleep 1; done &&
      /usr/local/bin/labgrid-client -p example-place lock &&
@@ -43,7 +43,7 @@ services:
     network_mode: "host"
     stdin_open: true
     command: bash -c "set -e &&
-      /opt/wait-for-it/wait-for-it.sh 127.0.0.1:20408 -- labgrid-exporter /opt/conf/exporter.yaml"
+      until echo > 
/dev/tcp/localhost/20408; do sleep 1; done && labgrid-exporter /opt/conf/exporter.yaml"
   dut:
     build:
       context: "./dut"

From f2c25686e028796f65119d280ec24703ff48bf4a Mon Sep 17 00:00:00 2001
From: Simon Glass
Date: Sat, 20 Apr 2024 15:29:34 -0600
Subject: [PATCH 190/384] doc: Install [dev] so that docs can be built

The 'make html' command fails with the [doc] build. Update the
documentation to show how to resolve this.

Fixes: #1352
Signed-off-by: Simon Glass
---
 doc/development.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/development.rst b/doc/development.rst
index f6f069ccf..1b099a825 100644
--- a/doc/development.rst
+++ b/doc/development.rst
@@ -679,7 +679,7 @@ When contributing to documentation it's practical to be able to build it also lo

     git clone https://github.com/labgrid-project/labgrid.git
     cd labgrid
-    pip install -e ".[doc]"
+    pip install -e ".[dev]"
     cd doc
     make html

From 92882138d33bea91868b3c7935500e257ca006c4 Mon Sep 17 00:00:00 2001
From: Ahmad Fatoum
Date: Wed, 24 Apr 2024 11:28:47 +0200
Subject: [PATCH 191/384] doc/configuration: UUUDriver: align example with
 reality

Besides the image name, the UUUDriver supports a script attribute. This
is described in the Arguments: section, but not in the example, which
uses the non-existent cmd attribute instead.

Fix that and while at it, correctly indicate that script is optional.
Omitting it will just remove the -b $script from the uuu invocation.

Signed-off-by: Ahmad Fatoum
---
 doc/configuration.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index cb4d8dd82..a4c83b040 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -2493,7 +2493,7 @@ Implements:
    drivers:
      UUUDriver:
        image: 'mybootloaderkey'
-       cmd: 'spl'
+       script: 'spl'

    images:
      mybootloaderkey: 'path/to/mybootloader.img'

@@ -2501,7 +2501,7 @@ Implements:
 Arguments:
   - image (str): optional, key in :ref:`images ` containing the path of an image to bootstrap onto the target
-  - script (str): run built-in script with ``uuu -b``, called with image as arg0
+  - script (str): optional, run built-in script with ``uuu -b``, called with image as arg0

From 6ab51630bec04b39a71e655155aba1e3b9c05779 Mon Sep 17 00:00:00 2001
From: Simon Glass
Date: Thu, 11 Apr 2024 09:35:23 -0600
Subject: [PATCH 192/384] udisks2: Be more tolerant of device-startup time

When switching an SDWire to 'host' mode, it may take time for the
partition to appear. During that time the device (e.g. '/dev/sdo1') may
not exist, so trying to cat e.g. '/sys/class/block/sdo1' fails. Handle
this by returning a zero size in that case, as is done when the
returned size is empty.

Even when the block device is present, udisks2 may take a short time to
make the device available. Handle this by retrying a few times, until
things settle.

Add the call to _wait_for_medium() in write_files() while we are here,
to match what is done in write_image().

Also fix the parameter type for write_files() and mention the exception
it may raise.
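The retry itself is a bounded poll loop. A minimal sketch, assuming a
probe() callable that raises ValueError while the device is still
missing (a placeholder, not the agent's real API):

```python
import time

def retry_until_ready(probe, retries=5, delay=1.0):
    """Call probe() until it succeeds or the retry budget is exhausted."""
    while True:
        try:
            return probe()
        except ValueError:
            if retries <= 0:
                raise  # device really is gone, give up
            retries -= 1
            time.sleep(delay)  # give udev/udisks2 time to settle
```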
Fixes: #1357 Signed-off-by: Simon Glass --- labgrid/driver/usbstoragedriver.py | 17 ++++++++--- labgrid/util/agents/udisks2.py | 49 ++++++++++++++++++++++++------ 2 files changed, 52 insertions(+), 14 deletions(-) diff --git a/labgrid/driver/usbstoragedriver.py b/labgrid/driver/usbstoragedriver.py index 414528429..5ceec293d 100644 --- a/labgrid/driver/usbstoragedriver.py +++ b/labgrid/driver/usbstoragedriver.py @@ -45,6 +45,7 @@ class USBStorageDriver(Driver): ) WAIT_FOR_MEDIUM_TIMEOUT = 10.0 # s WAIT_FOR_MEDIUM_SLEEP = 0.5 # s + MOUNT_RETRIES = 5 def __attrs_post_init__(self): super().__attrs_post_init__() @@ -77,15 +78,19 @@ def write_files(self, sources, target, partition, target_is_directory=True): Args: sources (List[str]): path(s) to the file(s) to be copied to the bound USB storage partition. - target (str): target directory or file to copy to + target (PurePath): target directory or file to copy to partition (int): mount the specified partition or None to mount the whole disk target_is_directory (bool): Whether target is a directory + + Raises: + Exception if anything goes wrong """ + self._wait_for_medium(partition) self._start_wrapper() self.devpath = self._get_devpath(partition) - mount_path = self.proxy.mount(self.devpath) + mount_path = self.proxy.mount(self.devpath, self.MOUNT_RETRIES) try: # (pathlib.PurePath(...) / "/") == "/", so we turn absolute paths into relative @@ -222,10 +227,14 @@ def get_size(self, partition=None): getting the size of the root device (defaults to None) Returns: - int: size in bytes + int: size in bytes, or 0 if the partition is not found """ args = ["cat", f"/sys/class/block/{self._get_devpath(partition)[5:]}/size"] - size = subprocess.check_output(self.storage.command_prefix + args) + try: + size = subprocess.check_output(self.storage.command_prefix + args) + except subprocess.CalledProcessError: + # while the medium is getting ready, the file does not yet exist + return 0 try: return int(size) * 512 except ValueError: diff --git a/labgrid/util/agents/udisks2.py b/labgrid/util/agents/udisks2.py index 6def4da7c..f5c9832cf 100644 --- a/labgrid/util/agents/udisks2.py +++ b/labgrid/util/agents/udisks2.py @@ -15,8 +15,15 @@ class UDisks2Device: def __init__(self, devpath): self._logger = logging.getLogger("Device: ") self.devpath = devpath - client = UDisks.Client.new_sync(None) + self.fs = None + + def _setup(self): + """Try to find the devpath + Raises: + ValueError: no udisks2 device or no filesystem found on devpath + """ + client = UDisks.Client.new_sync(None) manager = client.get_object_manager() for obj in manager.get_objects(): block = obj.get_block() @@ -24,16 +31,16 @@ def __init__(self, devpath): continue device_path = block.get_cached_property("Device").get_bytestring().decode('utf-8') - if device_path == devpath: + if device_path == self.devpath: self.fs = obj.get_filesystem() if self.fs is None: - raise ValueError(f"no filesystem found on {devpath}") + raise ValueError(f"no filesystem found on {self.devpath}") return - raise ValueError(f"No udisks2 device found for {devpath}") + raise ValueError(f"No udisks2 device found for {self.devpath}") - def mount(self, readonly=False): + def mount(self, readonly=False, retries=0): opts = GLib.Variant('a{sv}', {'options': GLib.Variant('s', 'ro' if readonly else 'rw')}) try: @@ -83,17 +90,39 @@ def unmount(self, lazy=False): _devs = {} -def _get_udisks2_dev(devpath): +def _get_udisks2_dev(devpath, retries): + """Try to get the udisks2 device + + Args: + devpath (str): Device name + retries (int): 
Number of retries to allow
+
+    Raises:
+        ValueError: Failed to obtain the device (e.g. does not exist)
+    """
     if devpath not in _devs:
-        _devs[devpath] = UDisks2Device(devpath=devpath)
+        dev = UDisks2Device(devpath=devpath)
+        while True:
+            try:
+                dev._setup()
+                break
+            except ValueError as exc:
+                if 'No udisks2 device' not in str(exc) or not retries:
+                    raise
+                retries -= 1
+                dev._logger.warning('udisks2: Retrying %s...', devpath)
+                time.sleep(1)
+
+        # Success, so record the new device
+        _devs[devpath] = dev
     return _devs[devpath]

-def handle_mount(devpath):
-    dev = _get_udisks2_dev(devpath)
+def handle_mount(devpath, retries=0):
+    dev = _get_udisks2_dev(devpath, retries)
     return dev.mount()

 def handle_unmount(devpath, lazy=False):
-    dev = _get_udisks2_dev(devpath)
+    dev = _get_udisks2_dev(devpath, 0)
     return dev.unmount(lazy=lazy)

 methods = {

From 6d0b571d687eb889f6d1f5a1500a0ce928422c28 Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Fri, 26 Apr 2024 07:31:29 +0200
Subject: [PATCH 193/384] shelldriver: allow dashes and dots in device names

The `get_ip_addresses` may be called with a specific device and returns
all IP addresses on that interface. Due to a limitation in the regex,
devices including either dashes or dots aren't detected.

On OpenWrt distributions the default device is called `br-lan`, usually
connecting ethernet ports and WiFi devices. Likewise, dots are appended
to the device name to specify VLANs.

This commit extends the regex to support both dashes and dots.

Signed-off-by: Paul Spooren
---
 labgrid/driver/shelldriver.py |  2 +-
 tests/test_shelldriver.py     | 17 +++++++++++++++++
 2 files changed, 18 insertions(+), 1 deletion(-)

diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py
index c8b748d5e..c9766387e 100644
--- a/labgrid/driver/shelldriver.py
+++ b/labgrid/driver/shelldriver.py
@@ -596,7 +596,7 @@ def get_ip_addresses(self, device=None):
             device = self.get_default_interface_device_name()

         regex = r"""\d+: # leading number
-                \s+\w+ # interface name
+                \s+[\w\.-]+ # interface name
                 \s+inet6?\s+(\S+) # IP address, prefix
                 .*global # global scope, not host scope"""

diff --git a/tests/test_shelldriver.py b/tests/test_shelldriver.py
index 0f24928ad..f0c4eecc3 100644
--- a/tests/test_shelldriver.py
+++ b/tests/test_shelldriver.py
@@ -3,6 +3,8 @@ from labgrid.driver import ShellDriver, ExecutionError
 from labgrid.exceptions import NoDriverFoundError

+from ipaddress import IPv4Interface
+

 class TestShellDriver:
     def test_instance(self, target, serial_driver):
@@ -45,3 +47,18 @@ def test_run_with_timeout(self, target_with_fakeconsole, mocker):
         assert res == ['success']
         res = d.run("test")
         assert res == (['success'], [], 0)
+
+    def test_get_ip_addresses(self, target_with_fakeconsole, mocker):
+        fake_ip_addr_show = r"""
+18: br-lan.42    inet 192.168.42.1/24 brd 192.168.42.255 scope global br-lan.42\       valid_lft forever preferred_lft forever
+18: br-lan.42    inet6 fe80::9683:c4ff:fea6:fb6b/64 scope link \       valid_lft forever preferred_lft forever
+"""
+
+        t = target_with_fakeconsole
+        d = ShellDriver(t, "shell", prompt='dummy', login_prompt='dummy', username='dummy')
+        d.on_activate = mocker.MagicMock()
+        d = t.get_driver('ShellDriver')
+        d._run = mocker.MagicMock(return_value=([fake_ip_addr_show], [], 0))
+
+        res = d.get_ip_addresses("br-lan.42")
+        assert res[0] == IPv4Interface("192.168.42.1/24")

From f7a1d38e89bead91cb92fb3c101353255070556b Mon Sep 17 00:00:00 2001
From: Trevor Gamblin
Date: Mon, 6 May 2024 09:53:12 -0400
Subject: [PATCH 194/384] exporter: check ser2net
version in exporter The following ser2net versions return nonzero even on a successful use of the -v flag to get the version: v4.4.0 v4.5.0 v4.5.1 v4.6.0 v4.6.1 This was fixed in v4.6.2, which was only recently released. Modify _start() so that it uses subprocess.run() to capture stdout and the returncode, then check against the version and effectively ignore the return value if there's a match. If the ser2net version is one of the unaffected ones (e.g. v4.6.2) and still returns nonzero, then raise an ExporterError. Signed-off-by: Trevor Gamblin --- labgrid/remote/exporter.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index eb7a27ed3..577d3c926 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -225,8 +225,17 @@ def _start(self, start_params): self.port = get_free_port() # Ser2net has switched to using YAML format at version 4.0.0. - _, _, version = str(subprocess.check_output([self.ser2net_bin,'-v'])).split(' ') + result = subprocess.run([self.ser2net_bin,'-v'], capture_output=True, text=True) + _, _, version = str(result.stdout).split(' ') major_version = version.split('.')[0] + + # There is a bug in ser2net between 4.4.0 and 4.6.1 where it + # returns 1 on a successful call to 'ser2net -v'. We don't want + # a failure because of this, so raise an error only if ser2net + # is not one of those versions. + if version.strip() not in ["4.4.0", "4.5.0", "4.5.1", "4.6.0", "4.6.1"] and result.returncode == 1: + raise ExporterError(f"ser2net {version} returned a nonzero code during version check.") + if int(major_version) >= 4: cmd = [ self.ser2net_bin, From ea47e51cfd9720adb67b12d108350005632144e5 Mon Sep 17 00:00:00 2001 From: Maciej Grela Date: Mon, 13 May 2024 15:00:55 +0200 Subject: [PATCH 195/384] resource: udev: new USB ID for USBDebugger Add the USB IDs for an FT232HL chip in the Digilent JTAG-SMT2 surface-mount programming module used as a JTAG programmer on FPGA boards. Signed-off-by: Maciej Grela --- labgrid/resource/udev.py | 1 + 1 file changed, 1 insertion(+) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index fec175505..d3903eb95 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -704,6 +704,7 @@ def filter_match(self, device): match = (device.properties.get('ID_VENDOR_ID'), device.properties.get('ID_MODEL_ID')) if match not in [("0403", "6010"), # FT2232C/D/H Dual UART/FIFO IC + ("0403", "6014"), # FT232HL/Q ("0483", "374b"), # STLINK-V3 ("0483", "374f"), # STLINK-V3 ("15ba", "0003"), # Olimex ARM-USB-OCD From 95f812761f7491a99e51676becb446c69b1b0d55 Mon Sep 17 00:00:00 2001 From: Maciej Grela Date: Mon, 13 May 2024 17:23:57 +0200 Subject: [PATCH 196/384] labgrid/driver/power: Backend for tinycontrol.eu IP Power Socket 6G10A v2 Signed-off-by: Maciej Grela --- doc/configuration.rst | 5 +++++ labgrid/driver/power/tinycontrol.py | 34 +++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 labgrid/driver/power/tinycontrol.py diff --git a/doc/configuration.rst b/doc/configuration.rst index a4c83b040..048c9ecbe 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -231,6 +231,11 @@ Currently available are: Controls *TP-Link power strips* via `python-kasa `_. +``tinycontrol`` + Controls a tinycontrol.eu IP Power Socket via HTTP. + It was tested on the *6G10A v2* model. + `Manual `__ + ``poe_mib`` Controls PoE switches using the PoE SNMP administration MiBs. 
diff --git a/labgrid/driver/power/tinycontrol.py b/labgrid/driver/power/tinycontrol.py
new file mode 100644
index 000000000..fad5e9224
--- /dev/null
+++ b/labgrid/driver/power/tinycontrol.py
@@ -0,0 +1,34 @@
+""" A driver to control the Tinycontrol IP Power Socket 6G10A v2
+    Reference: https://tinycontrol.pl/media/documents/manual_IP_Power_Socket__6G10A_v2_LANLIS-010-015_En-1.pdf
+
+    Example configuration to use port #3 on a device with URL 'http://172.17.180.53:9999/'
+
+    NetworkPowerPort:
+      model: tinycontrol
+      host: 'http://172.17.180.53:9999/'
+      index: 3
+"""
+
+import requests
+from urllib.parse import urljoin
+import xml.etree.ElementTree as ET
+
+
+def power_set(host, port, index, value):
+    assert port is None
+
+    index = int(index)
+    value = 1 if value else 0
+    r = requests.get(urljoin(host, f"/outs.cgi?out{index}={value}"))
+    r.raise_for_status()
+
+
+def power_get(host, port, index):
+    assert port is None
+
+    index = int(index)
+    r = requests.get(urljoin(host, "/st0.xml"))
+    r.raise_for_status()
+    root = ET.fromstring(r.text)
+    output = root.find(f"out{index}")
+    return output.text == '1'

From 49aa0eb00067d387efbc4ba243eb006c58b3dcc9 Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Thu, 13 Jun 2024 13:52:34 +0200
Subject: [PATCH 197/384] treewide: fixes to make pylint happier

Right now the CI fails and that makes me and my PRs unhappy.

Signed-off-by: Paul Spooren
---
 labgrid/driver/mqtt.py   | 2 ++
 labgrid/remote/client.py | 6 ++++++
 labgrid/util/yaml.py     | 3 +++
 3 files changed, 11 insertions(+)

diff --git a/labgrid/driver/mqtt.py b/labgrid/driver/mqtt.py
index 3722ad111..a451ac005 100644
--- a/labgrid/driver/mqtt.py
+++ b/labgrid/driver/mqtt.py
@@ -43,6 +43,8 @@ def _on_message(self, client, userdata, msg):
             status = True
         elif msg.payload == b'OFF':
             status = False
+        else:
+            raise ValueError(f"Unknown status: {msg.payload}. Must be 'ON' or 'OFF'")
         self._status = status

     def _on_connect(self, client, userdata, flags, reason_code, properties):

diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py
index ddcd66637..6e2c92d5a 100755
--- a/labgrid/remote/client.py
+++ b/labgrid/remote/client.py
@@ -1212,6 +1212,9 @@ def tmc_channel(self):
             data = drv.get_channel_info(channel)
         elif action == 'values':
             data = drv.get_channel_values(channel)
+        else:
+            raise ValueError(f"unknown action {action}")
+
         for k, v in sorted(data.items()):
             print(f"{k:<16s} {str(v):<10s}")

@@ -1323,6 +1326,9 @@ async def export(self, place, target):
             data = "\n".join(lines) + "\n"
         elif self.args.format is ExportFormat.JSON:
             data = json.dumps(exported)
+        else:
+            raise NotImplementedError(f"unsupported format {self.args.format}")
+
         if self.args.filename == "-":
             sys.stdout.write(data)
         else:

diff --git a/labgrid/util/yaml.py b/labgrid/util/yaml.py
index 64a522a0c..1c70b6999 100644
--- a/labgrid/util/yaml.py
+++ b/labgrid/util/yaml.py
@@ -84,6 +84,9 @@ def resolve_templates(data, mapping):
         items = enumerate(data)
     elif isinstance(data, dict):
         items = data.items()
+    else:
+        raise TypeError(f"Expected list or dict, got {type(data)}")
+
     for k, val in items:
         if isinstance(val, Template):
             try:

From bc76201af37f7ad51da2144b65b1d37cda5f8b8d Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Mon, 17 Jun 2024 10:59:07 +0200
Subject: [PATCH 198/384] util/agent: disable pylint warnings

pylint warns about a possibly-used-before-assignment, which is only
valid in case python gobject introspection is not used.
The reason the variable is hidden behind a type check is a hang related
to sphinx, which hangs forever when trying to import the module for
autodoc. The normal case is that the `nm` variable is always assigned
before use, so disable the related warnings.

Signed-off-by: Rouven Czerwinski
---
 labgrid/util/agents/network_interface.py | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/labgrid/util/agents/network_interface.py b/labgrid/util/agents/network_interface.py
index c48da4eb2..c480386b3 100644
--- a/labgrid/util/agents/network_interface.py
+++ b/labgrid/util/agents/network_interface.py
@@ -95,7 +95,7 @@ def connection_from_dict(data):
 class NMDev:
     def __init__(self, interface):
         self._interface = interface
-        self._nm_dev = nm.get_device_by_iface(interface)
+        self._nm_dev = nm.get_device_by_iface(interface)  # pylint: disable=possibly-used-before-assignment

     def _delete_connection(self, con):
         future = Future()
@@ -114,7 +114,7 @@ def cb(con, res, error):
         )

     def get_settings(self):
-        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")
+        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")  # pylint: disable=possibly-used-before-assignment
         if lg_con:
             return dict(lg_con.to_dbus(NM.ConnectionSerializationFlags.ALL))

@@ -125,7 +125,7 @@ def get_active_settings(self):
         return dict(con.to_dbus(NM.ConnectionSerializationFlags.ALL))

     def configure(self, data):
-        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")
+        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")  # pylint: disable=possibly-used-before-assignment
         if lg_con:
             self._delete_connection(lg_con)
         data['connection'].update({
@@ -139,12 +139,12 @@ def cb(dev, res, error):
             assert error is None
             try:
-                res = nm.add_and_activate_connection_finish(res)
+                res = nm.add_and_activate_connection_finish(res)  # pylint: disable=possibly-used-before-assignment
                 future.set(res)
             except Exception as e:
                 future.set(e)

-        nm.add_and_activate_connection_async(
+        nm.add_and_activate_connection_async(  # pylint: disable=possibly-used-before-assignment
             con,
             self._nm_dev,
             None,  # specific_object
@@ -172,7 +172,7 @@ def wait_state(self, expected, timeout):
         )

     def disable(self):
-        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")
+        lg_con = nm.get_connection_by_id(f"labgrid-{self._interface}")  # pylint: disable=possibly-used-before-assignment
         if lg_con:
             self._delete_connection(lg_con)

From c56347ec71427b0ef9bee42a23591f3eb1cb940b Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Thu, 13 Jun 2024 13:39:06 +0200
Subject: [PATCH 199/384] util: use short `ln` options for Busybox compat

If the host system runs Busybox, long options may be disabled. Switch
to the short options but comment them for readability.

Remove the pylint comment as a bonus since the line is no longer too
long!

Signed-off-by: Paul Spooren
---
 labgrid/util/managedfile.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/labgrid/util/managedfile.py b/labgrid/util/managedfile.py
index 07bc46f4a..c3b19ed27 100644
--- a/labgrid/util/managedfile.py
+++ b/labgrid/util/managedfile.py
@@ -74,9 +74,9 @@ def sync_to_resource(self, symlink=None):
                 conn.run_check(f"test !
-e {symlink} -o -L {symlink}")
             except ExecutionError:
                 raise ManagedFileError(f"Path {symlink} exists but is not a symlink.")
-            conn.run_check(
-                f"ln --symbolic --force --no-dereference {self.rpath}{os.path.basename(self.local_path)} {symlink}"  # pylint: disable=line-too-long
-            )
+            # use short options to be compatible with busybox
+            # --symbolic --force --no-dereference
+            conn.run_check(f"ln -sfn {self.rpath}{os.path.basename(self.local_path)} {symlink}")

     def _on_nfs(self, conn):

From a6f63a9402b63efe4441cd3de9d06bf8af7d1237 Mon Sep 17 00:00:00 2001
From: Rouven Czerwinski
Date: Fri, 3 May 2024 10:12:30 +0200
Subject: [PATCH 200/384] doc/development: document await_resources

Document how await_resources inside a strategy can be used to wait for
the availability of a resource.

Signed-off-by: Rouven Czerwinski
---
 doc/development.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/development.rst b/doc/development.rst
index 1b099a825..8b832f757 100644
--- a/doc/development.rst
+++ b/doc/development.rst
@@ -304,6 +304,12 @@ while the `shell` state uses the barebox state to cycle the board and
 then boot the linux kernel.
 The `off` state switches the power off.

+Oftentimes it is also necessary to wait for specific resources to appear before
+a transition can be continued. The `await_resources` function of the target
+implements this functionality; it expects a list of resources to wait for and
+optionally takes a timeout and whether the resource should be available or
+unavailable.
+
 Tips for Writing and Debugging Tests
 ------------------------------------

From aae45b67e5cbece451149c6c445726cff360b546 Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Tue, 21 May 2024 17:08:50 +0300
Subject: [PATCH 201/384] power: add ubus NetworkPowerPort

Using PoE switches as power supply is convenient since a switch is
needed in many cases anyway and with the right adapter most embedded
devices can be powered.

OpenWrt offers `ubus` as micro bus system to control the system,
including PoE ports on switches. This commit adds a driver to handle
PoE switches running OpenWrt.

An ACL example is given below:

```shell
root@switch:~# cat /usr/share/rpcd/acl.d/unauthenticated.json
{
        "unauthenticated": {
                "description": "Access controls for unauthenticated requests",
                "read": {
                        "ubus": {
                                "session": [ "access", "login" ],
                                "poe": [ "info", "manage" ]
                        }
                }
        }
}
```

Signed-off-by: Paul Spooren
---
 doc/configuration.rst        |  4 +++
 labgrid/driver/power/ubus.py | 48 ++++++++++++++++++++++++++++++++
 tests/test_powerdriver.py    | 40 ++++++++++++++++++++++++--
 3 files changed, 90 insertions(+), 2 deletions(-)
 create mode 100644 labgrid/driver/power/ubus.py

diff --git a/doc/configuration.rst b/doc/configuration.rst
index a4c83b040..67e687eb5 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -234,6 +234,10 @@ Currently available are:
 ``poe_mib``
     Controls PoE switches using the PoE SNMP administration MiBs.

+``ubus``
+    Controls *PoE switches* running OpenWrt using the *ubus* interface.
+    Further information is available at
+
 Used by:
   - `NetworkPowerDriver`_

diff --git a/labgrid/driver/power/ubus.py b/labgrid/driver/power/ubus.py
new file mode 100644
index 000000000..3efcc268f
--- /dev/null
+++ b/labgrid/driver/power/ubus.py
@@ -0,0 +1,48 @@
+"""
+    UBUS jsonrpc interface for PoE management on OpenWrt devices. This comes in
+    handy if devices are connected to a PoE switch running OpenWrt.
Signed-off-by: Rouven Czerwinski
---
 doc/development.rst | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/doc/development.rst b/doc/development.rst
index 1b099a825..8b832f757 100644
--- a/doc/development.rst
+++ b/doc/development.rst
@@ -304,6 +304,12 @@ while the `shell` state uses the barebox state to cycle the board and
 then boot the linux kernel.
 The `off` state switches the power off.

+Oftentimes it is also necessary to wait for specific resources to appear before
+a transition can be continued. The `await_resources` function of the target
+implements this functionality; it expects a list of resources to wait for and
+optionally takes a timeout and whether the resources should be available or
+unavailable.
+
 Tips for Writing and Debugging Tests
 ------------------------------------

From aae45b67e5cbece451149c6c445726cff360b546 Mon Sep 17 00:00:00 2001
From: Paul Spooren
Date: Tue, 21 May 2024 17:08:50 +0300
Subject: [PATCH 201/384] power: add ubus NetworkPowerPort

Using PoE switches as a power supply is convenient: a switch is needed
in many cases anyway, and with the right adapter most embedded devices
can be powered.

OpenWrt offers `ubus` as a micro bus system to control the system,
including PoE ports on switches. This commit adds a driver to handle
PoE switches running OpenWrt.

An ACL example is given below:

```shell
root@switch:~# cat /usr/share/rpcd/acl.d/unauthenticated.json
{
        "unauthenticated": {
                "description": "Access controls for unauthenticated requests",
                "read": {
                        "ubus": {
                                "session": [ "access", "login" ],
                                "poe": [ "info", "manage" ]
                        }
                }
        }
}
```
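
For reference, the JSON-RPC request the new driver issues to switch a
port looks roughly like this (a sketch; the host and port name are
placeholders, and the all-zero session ID only works with the
unauthenticated ACL above):

```shell
curl http://192.168.1.1/ubus -d '{
    "jsonrpc": "2.0",
    "id": 1,
    "method": "call",
    "params": ["00000000000000000000000000000000", "poe", "manage",
               {"port": "lan1", "enable": true}]
}'
```
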
Signed-off-by: Paul Spooren
---
 doc/configuration.rst        |  4 +++
 labgrid/driver/power/ubus.py | 48 ++++++++++++++++++++++++++++++++++++
 tests/test_powerdriver.py    | 40 ++++++++++++++++++++++++++++++--
 3 files changed, 90 insertions(+), 2 deletions(-)
 create mode 100644 labgrid/driver/power/ubus.py

diff --git a/doc/configuration.rst b/doc/configuration.rst
index a4c83b040..67e687eb5 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -234,6 +234,10 @@ Currently available are:
 ``poe_mib``
     Controls PoE switches using the PoE SNMP administration MiBs.

+``ubus``
+    Controls *PoE switches* running OpenWrt using the *ubus* interface.
+    Further information available at https://openwrt.org/docs/techref/ubus
+
 Used by:
   - `NetworkPowerDriver`_

diff --git a/labgrid/driver/power/ubus.py b/labgrid/driver/power/ubus.py
new file mode 100644
index 000000000..3efcc268f
--- /dev/null
+++ b/labgrid/driver/power/ubus.py
@@ -0,0 +1,48 @@
+"""
+  UBUS jsonrpc interface for PoE management on OpenWrt devices. This comes in
+  handy if devices are connected to a PoE switch running OpenWrt.
+
+  The URL given as `host` in exporter.yaml must accept unauthenticated UBUS
+  calls for the two `poe` calls `info` and `manage`.
+
+  Further information is available at https://openwrt.org/docs/techref/ubus#acls
+
+  NetworkPowerPort:
+      model: ubus
+      host: 'http://192.168.1.1/ubus'
+      index: 1
+"""
+
+import requests
+
+
+def jsonrpc_call(host, path, method, message):
+    r = requests.post(
+        host,
+        json={
+            "jsonrpc": "2.0",
+            "id": 1,
+            "method": "call",
+            "params": ["00000000000000000000000000000000", path, method, message],
+        },
+    )
+    r.raise_for_status()
+    return r.json()["result"]
+
+
+def power_set(host, port, index, value):
+    assert port is None
+
+    jsonrpc_call(host, "poe", "manage", {"port": f"lan{index}", "enable": value == 1})
+
+
+def power_get(host, port, index):
+    assert port is None
+
+    poe_info = jsonrpc_call(host, "poe", "info", {})[1]
+
+    assert (
+        f"lan{index}" in poe_info["ports"]
+    ), f"Port lan{index} not found in {poe_info['ports']}"
+
+    return poe_info["ports"][f"lan{index}"] != "Disabled"
diff --git a/tests/test_powerdriver.py b/tests/test_powerdriver.py
index 37d6c2af7..657b76e41 100644
--- a/tests/test_powerdriver.py
+++ b/tests/test_powerdriver.py
@@ -234,11 +234,46 @@ def test_create_shelly_gen1_backend_with_url_in_host(self, target, mocker, host)
             expected_host = f"{host}/relay/{index}"
             url = urlparse(expected_host)
             if url.port is None:
-                implicit_port = 443 if url.scheme == 'https' else 80
-                expected_host = expected_host.replace(url.netloc, f'{url.netloc}:{implicit_port}')
+                implicit_port = 443 if url.scheme == "https" else 80
+                expected_host = expected_host.replace(
+                    url.netloc, f"{url.netloc}:{implicit_port}"
+                )

             get.assert_called_with(expected_host)

+    def test_create_ubus_backend(self, target, mocker):
+        post = mocker.patch("requests.post")
+        post.return_value.json.return_value = {
+            "jsonrpc": "2.0",
+            "id": 1,
+            "result": [
+                0,
+                {
+                    "firmware": "v80.1",
+                    "budget": 77.000000,
+                    "consumption": 1.700000,
+                    "ports": {
+                        "lan1": {
+                            "priority": 0,
+                            "mode": "PoE+",
+                            "status": "Delivering power",
+                            "consumption": 1.700000,
+                        }
+                    },
+                },
+            ],
+        }
+
+        index = "1"
+        NetworkPowerPort(
+            target, "power", model="ubus", host="http://example.com/ubus", index=index
+        )
+        d = NetworkPowerDriver(target, "power")
+        target.activate(d)
+
+        d.cycle()
+        assert d.get() is True
+
     def test_import_backends(self):
         import labgrid.driver.power
         import labgrid.driver.power.apc
@@ -253,6 +288,7 @@ def test_import_backends(self):
         import labgrid.driver.power.sentry
         import labgrid.driver.power.eg_pms2_network
         import labgrid.driver.power.shelly_gen1
+        import labgrid.driver.power.ubus

     def test_import_backend_eaton(self):
         pytest.importorskip("pysnmp")

From dcb4eac4666d04c71d614223a8c28cb483749ee2 Mon Sep 17 00:00:00 2001
From: Maciej Grela
Date: Mon, 13 May 2024 15:10:14 +0200
Subject: [PATCH 202/384] console: Make settle() indicate success/failure
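
With the boolean return value, callers can now tell whether the console
actually became quiet. A minimal usage sketch (the driver name and the
timings are made up for the example):

```python
# wait until no output arrives for 2 s, giving up after 60 s
if not console.settle(2.0, timeout=60.0):
    raise RuntimeError("console did not settle before the timeout")
```
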
Signed-off-by: Maciej Grela
---
 labgrid/driver/consoleexpectmixin.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/labgrid/driver/consoleexpectmixin.py b/labgrid/driver/consoleexpectmixin.py
index 536b12227..9bb1cc15e 100644
--- a/labgrid/driver/consoleexpectmixin.py
+++ b/labgrid/driver/consoleexpectmixin.py
@@ -61,13 +61,14 @@ def expect(self, pattern, timeout=-1):

     @Driver.check_active
     @step(args=['quiet_time'])
-    def settle(self, quiet_time, timeout=120.0):
+    def settle(self, quiet_time, timeout=120.0) -> bool:
         t = Timeout(timeout)
         while not t.expired:
             try:
                 self.read(timeout=quiet_time)
             except pexpect.TIMEOUT:
-                break
+                return True
+        return False

     def resolve_conflicts(self, client):
         for other in self.clients:

From b0506afda846f03941b156ec1d03cfa390d3dae4 Mon Sep 17 00:00:00 2001
From: Felix Zwettler
Date: Tue, 2 Apr 2024 15:24:15 +0200
Subject: [PATCH 203/384] driver: fix SigrokDriver

Explicitly set the time field in the csv conversion. Apparently some
versions of sigrok-cli on some distros (for example Ubuntu 22.04)
default to false, resulting in invalid parsing.

Signed-off-by: Felix Zwettler
---
 labgrid/driver/sigrokdriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index 874a33302..5315e11f6 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -169,7 +169,7 @@ def stop(self):
         # Convert from .sr to .csv
         cmd = [
             '-i',
-            os.path.join(self._tmpdir, self._basename), '-O', 'csv', '-o',
+            os.path.join(self._tmpdir, self._basename), '-O', 'csv:time=true', '-o',
             os.path.join(self._tmpdir, csv_filename)
         ]
         self._call(*cmd)

From 581296e968bcb8db7ed584f19356a70abe3c7882 Mon Sep 17 00:00:00 2001
From: Felix Zwettler
Date: Tue, 2 Apr 2024 15:35:09 +0200
Subject: [PATCH 204/384] driver: SigrokDriver analyze method checks/uses wrong
 (local) file path, happens only when the resource is remote

Signed-off-by: Felix Zwettler
---
 labgrid/driver/sigrokdriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index 5315e11f6..c12d16eb8 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -212,7 +212,7 @@ def stop(self):
     def analyze(self, args, filename=None):
         annotation_regex = re.compile(r'(?P\d+)-(?P\d+) (?P[\w\-]+): (?P[\w\-]+): (?P".*)')  # pylint: disable=line-too-long
         if not filename and self._filename:
-            filename = self._filename
+            filename = os.path.join(self._tmpdir, self._basename)
         else:
             filename = os.path.abspath(filename)
         check_file(filename, command_prefix=self.sigrok.command_prefix)

From b69ff8405629274efa069b7f7df339717f07a342 Mon Sep 17 00:00:00 2001
From: Felix Zwettler
Date: Tue, 2 Apr 2024 15:42:51 +0200
Subject: [PATCH 205/384] driver: SigrokDriver capture file is transferred to
 wrong path when specified filename is not absolute and using a remote sigrok
 resource

Usually, when an absolute path is supplied as `filename`, the call to
`os.path.join()` resolves to `filename`. But when the file path is
relative, it resolves to a relative path appended to the tmp dir,
instead of relative to where the user calls this method.

Signed-off-by: Felix Zwettler
---
 labgrid/driver/sigrokdriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index c12d16eb8..cc3d171cb 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -179,7 +179,7 @@ def stop(self):
         if isinstance(self.sigrok, NetworkSigrokUSBDevice):
             subprocess.call([
                 'scp', f'{self.sigrok.host}:{os.path.join(self._tmpdir, self._basename)}',
-                os.path.join(self._local_tmpdir, self._filename)
+                os.path.abspath(self._filename)
             ],
                 stdin=subprocess.DEVNULL,
                 stdout=subprocess.DEVNULL,

From c547b0576a948f0c808c9506b3d6d6ac3bdd77b6 Mon Sep 17 00:00:00 2001
From: Felix Zwettler
Date: Thu, 25 Apr 2024 14:26:01 +0200
Subject: [PATCH 206/384] driver: SigrokDriver avoid deadlock when sigrok-cli
 process failed

The driver expected the sigrok-cli call to succeed and start capturing
in continuous mode. When the call failed, the driver waited forever for
the capture file to be created.

Fixed by checking whether the sigrok-cli process terminated prematurely
while waiting for the capture file to appear.

Signed-off-by: Felix Zwettler
---
 labgrid/driver/sigrokdriver.py | 10 ++++++++++
 1 file changed, 10 insertions(+)

diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index cc3d171cb..dc3099885 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -149,6 +149,16 @@ def capture(self, filename, samplerate="200k"):
         args = self.sigrok.command_prefix + ['test', '-e', filename]

         while subprocess.call(args):
+            # in case the sigrok-cli call fails, this would wait forever.
+            # to avoid this, we also check the spawned sigrok process
+            if self._process.poll() is not None:
+                ret = self._process.returncode
+                if ret != 0:
+                    stdout, stderr = self._process.communicate()
+                    self.logger.debug("sigrok-cli call terminated prematurely with non-zero return-code")
+                    self.logger.debug("stdout: %s", stdout)
+                    self.logger.debug("stderr: %s", stderr)
+                    raise ExecutionError(f"sigrok-cli call terminated prematurely with return-code '{ret}'.")
             sleep(0.1)

         self._running = True

From 8646297ae0b2437eb52336349d96d82659f908bf Mon Sep 17 00:00:00 2001
From: Felix Zwettler
Date: Mon, 29 Apr 2024 10:02:51 +0200
Subject: [PATCH 207/384] driver: SigrokDriver unsuccessful sigrok-cli
 termination when used remotely

SIGINT, the signal that is supposed to terminate the process, was not
being propagated through ssh, so the driver waited forever for
termination.

Fixed by quitting sigrok-cli through an emulated keypress, sending a
character over ssh.

Signed-off-by: Felix Zwettler
---
 labgrid/driver/sigrokdriver.py | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/labgrid/driver/sigrokdriver.py b/labgrid/driver/sigrokdriver.py
index dc3099885..747f3ac66 100644
--- a/labgrid/driver/sigrokdriver.py
+++ b/labgrid/driver/sigrokdriver.py
@@ -2,7 +2,6 @@
 import re
 import subprocess
 import shutil
-import signal
 import tempfile
 import time
 import uuid
@@ -171,8 +170,8 @@ def stop(self):
         fnames.extend(self.sigrok.channels.split(','))
         csv_filename = f'{os.path.splitext(self._basename)[0]}.csv'

-        self._process.send_signal(signal.SIGINT)
-        stdout, stderr = self._process.communicate()
+        # sigrok-cli can be quit through any keypress
+        stdout, stderr = self._process.communicate(input="q")
         self.logger.debug("stdout: %s", stdout)
         self.logger.debug("stderr: %s", stderr)

From 2d6edbca1803977c29899daa4eba450c640e0c15 Mon Sep 17 00:00:00 2001
From: Jan Luebbe
Date: Thu, 20 Jun 2024 09:32:56 +0200
Subject: [PATCH 208/384] pyproject.toml: add ruff config

Signed-off-by: Jan Luebbe
---
 pyproject.toml | 17 +++++++++++++++++
 1 file changed, 17 insertions(+)

diff --git a/pyproject.toml b/pyproject.toml
index 0c1c590ee..e6ef3a7ee 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -231,3 +231,20 @@ commands =
 extras = dev
 commands = pylint -f colorized labgrid
 """
+
+[tool.ruff]
+line-length = 119
+exclude = [
+    "__pycache__",
+    "labgrid.egg-info",
+    ".pybuild",
+    "build",
+    "debian",
+    "env",
+    "venv",
+    "envs",
+    "dist",
+]
+
+[tool.ruff.lint]
+select = ["B", "E", "F", "I", "SIM", "UP"]

From bd6ee8d01aeed49925a8d6c435171c015c46733e Mon Sep 17 00:00:00 2001
From: Jan Luebbe
Date: Thu, 20 Jun 2024 09:35:14 +0200
Subject: [PATCH 209/384] remote: run ruff format

This should make it easier to review the gRPC migration.
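
With the configuration added in the previous commit, the reformat
presumably boils down to something like:

```shell
ruff format labgrid/remote/
```
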
Signed-off-by: Jan Luebbe --- labgrid/remote/client.py | 890 +++++++++++++++++----------------- labgrid/remote/common.py | 111 +++-- labgrid/remote/config.py | 16 +- labgrid/remote/coordinator.py | 214 ++++---- labgrid/remote/exporter.py | 454 ++++++++--------- 5 files changed, 828 insertions(+), 857 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 6e2c92d5a..63c0cf859 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -1,5 +1,6 @@ """The remote.client module contains the functionality to connect to a coordinator, acquire a place and interact with the connected resources""" + import argparse import asyncio import contextlib @@ -20,11 +21,21 @@ from datetime import datetime from pprint import pformat import txaio + txaio.use_asyncio() from autobahn.asyncio.wamp import ApplicationSession -from .common import (ResourceEntry, ResourceMatch, Place, Reservation, ReservationState, TAG_KEY, - TAG_VAL, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option) +from .common import ( + ResourceEntry, + ResourceMatch, + Place, + Reservation, + ReservationState, + TAG_KEY, + TAG_VAL, + enable_tcp_nodelay, + monkey_patch_max_msg_payload_size_ws_option, +) from .. import Environment, Target, target_factory from ..exceptions import NoDriverFoundError, NoResourceFoundError, InvalidConfigError from ..resource.remote import RemotePlaceManager, RemotePlace @@ -59,20 +70,20 @@ class ClientSession(ApplicationSession): the coordinator.""" def gethostname(self): - return os.environ.get('LG_HOSTNAME', gethostname()) + return os.environ.get("LG_HOSTNAME", gethostname()) def getuser(self): - return os.environ.get('LG_USERNAME', getuser()) + return os.environ.get("LG_USERNAME", getuser()) def onConnect(self): """Actions which are executed if a connection is successfully opened.""" - self.loop = self.config.extra['loop'] - self.connected = self.config.extra['connected'] - self.args = self.config.extra.get('args') - self.env = self.config.extra.get('env', None) - self.role = self.config.extra.get('role', None) - self.prog = self.config.extra.get('prog', os.path.basename(sys.argv[0])) - self.monitor = self.config.extra.get('monitor', False) + self.loop = self.config.extra["loop"] + self.connected = self.config.extra["connected"] + self.args = self.config.extra.get("args") + self.env = self.config.extra.get("env", None) + self.role = self.config.extra.get("role", None) + self.prog = self.config.extra.get("prog", os.path.basename(sys.argv[0])) + self.monitor = self.config.extra.get("monitor", False) enable_tcp_nodelay(self) self.join( self.config.realm, @@ -83,32 +94,31 @@ def onConnect(self): def onChallenge(self, challenge): import warnings - warnings.warn("Ticket authentication is deprecated. Please update your coordinator.", - DeprecationWarning) + + warnings.warn("Ticket authentication is deprecated. Please update your coordinator.", DeprecationWarning) logging.warning("Ticket authentication is deprecated. Please update your coordinator.") return "dummy-ticket" async def onJoin(self, details): # FIXME race condition? 
- resources = await self.call('org.labgrid.coordinator.get_resources') + resources = await self.call("org.labgrid.coordinator.get_resources") self.resources = {} for exporter, groups in resources.items(): for group_name, group in sorted(groups.items()): for resource_name, resource in sorted(group.items()): await self.on_resource_changed(exporter, group_name, resource_name, resource) - places = await self.call('org.labgrid.coordinator.get_places') + places = await self.call("org.labgrid.coordinator.get_places") self.places = {} for placename, config in places.items(): await self.on_place_changed(placename, config) - await self.subscribe(self.on_resource_changed, 'org.labgrid.coordinator.resource_changed') - await self.subscribe(self.on_place_changed, 'org.labgrid.coordinator.place_changed') + await self.subscribe(self.on_resource_changed, "org.labgrid.coordinator.resource_changed") + await self.subscribe(self.on_place_changed, "org.labgrid.coordinator.place_changed") await self.connected(self) async def on_resource_changed(self, exporter, group_name, resource_name, resource): - group = self.resources.setdefault(exporter, - {}).setdefault(group_name, {}) + group = self.resources.setdefault(exporter, {}).setdefault(group_name, {}) # Do not replace the ResourceEntry object, as other components may keep # a reference to it and want to see changes. if resource_name not in group: @@ -134,8 +144,8 @@ async def on_place_changed(self, name, config): print(f"Place {name} deleted") return config = config.copy() - config['name'] = name - config['matches'] = [ResourceMatch(**match) for match in config['matches']] + config["name"] = name + config["matches"] = [ResourceMatch(**match) for match in config["matches"]] config = filter_dict(config, Place, warn=True) if name not in self.places: place = Place(**config) @@ -158,19 +168,19 @@ async def do_monitor(self): await asyncio.sleep(3600.0) async def complete(self): - if self.args.type == 'resources': + if self.args.type == "resources": for exporter, groups in sorted(self.resources.items()): for group_name, group in sorted(groups.items()): for _, resource in sorted(group.items()): print(f"{exporter}/{group_name}/{resource.cls}") - elif self.args.type == 'places': + elif self.args.type == "places": for name in sorted(self.places.keys()): print(name) - elif self.args.type == 'matches': + elif self.args.type == "matches": place = self.get_place() for match in place.matches: print(repr(match)) - elif self.args.type == 'match-names': + elif self.args.type == "match-names": place = self.get_place() match_names = {match.rename for match in place.matches if match.rename is not None} print("\n".join(match_names)) @@ -198,8 +208,7 @@ async def print_resources(self): continue if self.args.acquired and resource.acquired is None: continue - if match and not match.ismatch((exporter, group_name, - resource.cls, resource_name)): + if match and not match.ismatch((exporter, group_name, resource.cls, resource_name)): continue filtered[exporter][group_name][resource_name] = resource @@ -211,9 +220,11 @@ async def print_resources(self): for group_name, group in sorted(groups.items()): print(f" Group '{group_name}' ({exporter}/{group_name}/*):") for resource_name, resource in sorted(group.items()): - print(" Resource '{res}' ({exporter}/{group}/{res_cls}[/{res}]):" - .format(res=resource_name, exporter=exporter, group=group_name, - res_cls=resource.cls)) + print( + " Resource '{res}' ({exporter}/{group}/{res_cls}[/{res}]):".format( + res=resource_name, exporter=exporter, 
group=group_name, res_cls=resource.cls + ) + ) print(indent(pformat(resource.asdict()), prefix=" ")) else: results = [] @@ -234,10 +245,7 @@ async def print_resources(self): for places, exporter, group_name, resource_cls in results: if self.args.sort_by_matched_place_change: - places_strs = [ - f"{p.name}: {datetime.fromtimestamp(p.changed):%Y-%m-%d}" - for p in places - ] + places_strs = [f"{p.name}: {datetime.fromtimestamp(p.changed):%Y-%m-%d}" for p in places] places_info = ", ".join(places_strs) if places_strs else "not used by any place" else: @@ -272,18 +280,18 @@ async def print_places(self): def print_who(self): """Print acquired places by user""" - result = ['User Host Place Changed'.split()] + result = ["User Host Place Changed".split()] if self.args.show_exporters: - result[0].append('Exporters') + result[0].append("Exporters") for name, place in self.places.items(): if place.acquired is None: continue - host, user = place.acquired.split('/') + host, user = place.acquired.split("/") result.append([user, host, name, str(datetime.fromtimestamp(place.changed))]) if self.args.show_exporters: exporters = {resource_path[0] for resource_path in place.acquired_resources} - result[-1].append(', '.join(sorted(exporters))) + result[-1].append(", ".join(sorted(exporters))) result.sort() widths = [max(map(len, c)) for c in zip(*result)] @@ -305,10 +313,10 @@ def _match_places(self, pattern): # reservation token lookup token = None - if pattern.startswith('+'): + if pattern.startswith("+"): token = pattern[1:] if not token: - token = os.environ.get('LG_TOKEN', None) + token = os.environ.get("LG_TOKEN", None) if not token: return [] for name, place in self.places.items(): @@ -323,8 +331,8 @@ def _match_places(self, pattern): if pattern in name: result.add(name) for alias in place.aliases: - if ':' in alias: - namespace, alias = alias.split(':', 1) + if ":" in alias: + namespace, alias = alias.split(":", 1) if namespace != self.getuser(): continue if alias == pattern: # prefer user namespace @@ -336,16 +344,12 @@ def _match_places(self, pattern): def _check_allowed(self, place): if not place.acquired: raise UserError(f"place {place.name} is not acquired") - if f'{self.gethostname()}/{self.getuser()}' not in place.allowed: - host, user = place.acquired.split('/') + if f"{self.gethostname()}/{self.getuser()}" not in place.allowed: + host, user = place.acquired.split("/") if user != self.getuser(): - raise UserError( - f"place {place.name} is not acquired by your user, acquired by {user}" - ) + raise UserError(f"place {place.name} is not acquired by your user, acquired by {user}") if host != self.gethostname(): - raise UserError( - f"place {place.name} is not acquired on this computer, acquired on {host}" - ) + raise UserError(f"place {place.name} is not acquired on this computer, acquired on {host}") def get_place(self, place=None): pattern = place or self.args.place @@ -408,7 +412,7 @@ async def add_place(self): raise UserError("missing place name. Set with -p or via env var $PLACE") if name in self.places: raise UserError(f"{name} already exists") - res = await self.call('org.labgrid.coordinator.add_place', name) + res = await self.call("org.labgrid.coordinator.add_place", name) if not res: raise ServerError(f"failed to add place {name}") return res @@ -426,7 +430,7 @@ async def del_place(self): raise UserError("missing place name. 
Set with -p or via env var $PLACE") if name not in self.places: raise UserError(f"{name} does not exist") - res = await self.call('org.labgrid.coordinator.del_place', name) + res = await self.call("org.labgrid.coordinator.del_place", name) if not res: raise ServerError(f"failed to delete place {name}") return res @@ -437,7 +441,7 @@ async def add_alias(self): alias = self.args.alias if alias in place.aliases: raise UserError(f"place {place.name} already has alias {alias}") - res = await self.call('org.labgrid.coordinator.add_place_alias', place.name, alias) + res = await self.call("org.labgrid.coordinator.add_place_alias", place.name, alias) if not res: raise ServerError(f"failed to add alias {alias} for place {place.name}") return res @@ -448,7 +452,7 @@ async def del_alias(self): alias = self.args.alias if alias not in place.aliases: raise UserError(f"place {place.name} has no alias {alias}") - res = await self.call('org.labgrid.coordinator.del_place_alias', place.name, alias) + res = await self.call("org.labgrid.coordinator.del_place_alias", place.name, alias) if not res: raise ServerError(f"failed to delete alias {alias} for place {place.name}") return res @@ -456,8 +460,8 @@ async def del_alias(self): async def set_comment(self): """Set the comment on a place""" place = self.get_place() - comment = ' '.join(self.args.comment) - res = await self.call('org.labgrid.coordinator.set_place_comment', place.name, comment) + comment = " ".join(self.args.comment) + res = await self.call("org.labgrid.coordinator.set_place_comment", place.name, comment) if not res: raise ServerError(f"failed to set comment {comment} for place {place.name}") return res @@ -468,7 +472,7 @@ async def set_tags(self): tags = {} for pair in self.args.tags: try: - k, v = pair.split('=') + k, v = pair.split("=") except ValueError: raise UserError(f"tag '{pair}' needs to match '='") if not TAG_KEY.match(k): @@ -476,11 +480,9 @@ async def set_tags(self): if not TAG_VAL.match(v): raise UserError(f"tag value '{v}' needs to match the rexex '{TAG_VAL.pattern}'") tags[k] = v - res = await self.call('org.labgrid.coordinator.set_place_tags', place.name, tags) + res = await self.call("org.labgrid.coordinator.set_place_tags", place.name, tags) if not res: - raise ServerError( - f"failed to set tags {' '.join(self.args.tags)} for place {place.name}" - ) + raise ServerError(f"failed to set tags {' '.join(self.args.tags)} for place {place.name}") return res async def add_match(self): @@ -491,13 +493,11 @@ async def add_match(self): raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: - raise UserError( - f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" - ) + raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' exists, skipping", file=sys.stderr) continue - res = await self.call('org.labgrid.coordinator.add_place_match', place.name, pattern) + res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern) if not res: raise ServerError(f"failed to add match {pattern} for place {place.name}") @@ -508,12 +508,10 @@ async def del_match(self): raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: - raise UserError( - f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" - ) + raise UserError(f"invalid pattern format 
'{pattern}' (use 'exporter/group/cls/name')") if not place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' not found, skipping", file=sys.stderr) - res = await self.call('org.labgrid.coordinator.del_place_match', place.name, pattern) + res = await self.call("org.labgrid.coordinator.del_place_match", place.name, pattern) if not res: raise ServerError(f"failed to delete match {pattern} for place {place.name}") @@ -530,11 +528,11 @@ async def add_named_match(self): raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if place.hasmatch(pattern.split("/")): raise UserError(f"pattern '{pattern}' exists") - if '*' in pattern: + if "*" in pattern: raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") if not name: raise UserError(f"invalid name '{name}'") - res = await self.call('org.labgrid.coordinator.add_place_match', place.name, pattern, name) + res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern, name) if not res: raise ServerError(f"failed to add match {pattern} for place {place.name}") @@ -559,7 +557,7 @@ async def acquire(self): if not self.args.allow_unmatched: self.check_matches(place) - res = await self.call('org.labgrid.coordinator.acquire_place', place.name) + res = await self.call("org.labgrid.coordinator.acquire_place", place.name) if res: print(f"acquired place {place.name}") @@ -578,7 +576,9 @@ async def acquire(self): name = resource_name if match.rename: name = match.rename - print(f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'") # pylint: disable=line-too-long + print( + f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" + ) # pylint: disable=line-too-long raise ServerError(f"failed to acquire place {place.name}") @@ -587,12 +587,14 @@ async def release(self): place = self.get_place() if not place.acquired: raise UserError(f"place {place.name} is not acquired") - _, user = place.acquired.split('/') + _, user = place.acquired.split("/") if user != self.getuser(): if not self.args.kick: - raise UserError(f"place {place.name} is acquired by a different user ({place.acquired}), use --kick if you are sure") # pylint: disable=line-too-long + raise UserError( + f"place {place.name} is acquired by a different user ({place.acquired}), use --kick if you are sure" + ) # pylint: disable=line-too-long print(f"warning: kicking user ({place.acquired})") - res = await self.call('org.labgrid.coordinator.release_place', place.name) + res = await self.call("org.labgrid.coordinator.release_place", place.name) if not res: raise ServerError(f"failed to release place {place.name}") @@ -602,7 +604,9 @@ async def release_from(self): """Release a place, but only if acquired by a specific user""" place = self.get_place() res = await self.call( - 'org.labgrid.coordinator.release_place_from', place.name, self.args.acquired, + "org.labgrid.coordinator.release_place_from", + place.name, + self.args.acquired, ) if not res: raise ServerError(f"failed to release place {place.name}") @@ -614,14 +618,12 @@ async def allow(self): place = self.get_place() if not place.acquired: raise UserError(f"place {place.name} is not acquired") - _, user = place.acquired.split('/') + _, user = place.acquired.split("/") if user != self.getuser(): - raise UserError( - f"place {place.name} is acquired by a different user ({place.acquired})" - ) - if '/' not 
in self.args.user: + raise UserError(f"place {place.name} is acquired by a different user ({place.acquired})") + if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") - res = await self.call('org.labgrid.coordinator.allow_place', place.name, self.args.user) + res = await self.call("org.labgrid.coordinator.allow_place", place.name, self.args.user) if not res: raise ServerError(f"failed to allow {self.args.user} for place {place.name}") @@ -641,18 +643,18 @@ def get_target_resources(self, place): def get_target_config(self, place): config = {} - resources = config['resources'] = [] + resources = config["resources"] = [] for (name, _), resource in self.get_target_resources(place).items(): args = OrderedDict() if name != resource.cls: - args['name'] = name + args["name"] = name args.update(resource.args) resources.append({resource.cls: args}) return config def print_env(self): place = self.get_acquired_place() - env = {'targets': {place.name: self.get_target_config(place)}} + env = {"targets": {place.name: self.get_target_config(place)}} print(dump(env)) def _prepare_manager(self): @@ -756,7 +758,7 @@ def power(self): if delay is not None: drv.delay = delay res = getattr(drv, action)() - if action == 'get': + if action == "get": print(f"power{' ' + name if name else ''} for place {place.name} is {'on' if res else 'off'}") def digital_io(self): @@ -765,8 +767,7 @@ def digital_io(self): name = self.args.name target = self._get_target(place) from ..resource import ModbusTCPCoil, OneWirePIO, HttpDigitalOutput - from ..resource.remote import (NetworkDeditecRelais8, NetworkSysfsGPIO, NetworkLXAIOBusPIO, - NetworkHIDRelay) + from ..resource.remote import NetworkDeditecRelais8, NetworkSysfsGPIO, NetworkLXAIOBusPIO, NetworkHIDRelay drv = None try: @@ -792,16 +793,17 @@ def digital_io(self): if not drv: raise UserError("target has no compatible resource available") - if action == 'get': + if action == "get": print(f"digital IO{' ' + name if name else ''} for place {place.name} is {'high' if drv.get() else 'low'}") - elif action == 'high': + elif action == "high": drv.set(True) - elif action == 'low': + elif action == "low": drv.set(False) async def _console(self, place, target, timeout, *, logfile=None, loop=False, listen_only=False): name = self.args.name from ..resource import NetworkSerialPort + resource = target.get_resource(NetworkSerialPort, name=name, wait_avail=False) # async await resources @@ -824,7 +826,7 @@ async def _console(self, place, target, timeout, *, logfile=None, loop=False, li # check for valid resources assert port is not None, "Port is not set" - call = ['microcom', '-s', str(resource.speed), '-t', f"{host}:{port}"] + call = ["microcom", "-s", str(resource.speed), "-t", f"{host}:{port}"] if listen_only: call.append("--listenonly") @@ -860,8 +862,9 @@ async def _console(self, place, target, timeout, *, logfile=None, loop=False, li async def console(self, place, target): while True: - res = await self._console(place, target, 10.0, logfile=self.args.logfile, - loop=self.args.loop, listen_only=self.args.listenonly) + res = await self._console( + place, target, 10.0, logfile=self.args.logfile, loop=self.args.loop, listen_only=self.args.listenonly + ) # place released if res == 255: break @@ -872,23 +875,24 @@ async def console(self, place, target): raise exc break await asyncio.sleep(1.0) + console.needs_target = True def dfu(self): place = self.get_acquired_place() target = self._get_target(place) name = self.args.name - if self.args.action == 
'download' and not self.args.filename: - raise UserError('not enough arguments for dfu download') + if self.args.action == "download" and not self.args.filename: + raise UserError("not enough arguments for dfu download") drv = self._get_driver_or_new(target, "DFUDriver", activate=False, name=name) drv.dfu.timeout = self.args.wait target.activate(drv) - if self.args.action == 'download': + if self.args.action == "download": drv.download(self.args.altsetting, os.path.abspath(self.args.filename)) - if self.args.action == 'detach': + if self.args.action == "detach": drv.detach(self.args.altsetting) - if self.args.action == 'list': + if self.args.action == "list": drv.list() def fastboot(self): @@ -903,17 +907,17 @@ def fastboot(self): try: action = args[0] - if action == 'flash': + if action == "flash": drv.flash(args[1], os.path.abspath(args[2])) - elif action == 'boot': + elif action == "boot": args[1:] = map(os.path.abspath, args[1:]) drv.boot(args[1]) - elif action == 'oem' and args[1] == 'exec': - drv.run(' '.join(args[2:])) + elif action == "oem" and args[1] == "exec": + drv.run(" ".join(args[2:])) else: drv(*args) except IndexError: - raise UserError('not enough arguments for fastboot action') + raise UserError("not enough arguments for fastboot action") except subprocess.CalledProcessError as e: raise UserError(str(e)) @@ -929,24 +933,27 @@ def bootstrap(self): place = self.get_acquired_place() target = self._get_target(place) name = self.args.name - from ..resource.remote import (NetworkMXSUSBLoader, NetworkIMXUSBLoader, NetworkRKUSBLoader, - NetworkAlteraUSBBlaster) + from ..resource.remote import ( + NetworkMXSUSBLoader, + NetworkIMXUSBLoader, + NetworkRKUSBLoader, + NetworkAlteraUSBBlaster, + ) from ..driver import OpenOCDDriver + drv = None try: drv = target.get_driver("BootstrapProtocol", name=name) except NoDriverFoundError: for resource in target.resources: if isinstance(resource, NetworkIMXUSBLoader): - drv = self._get_driver_or_new(target, "IMXUSBDriver", activate=False, - name=name) + drv = self._get_driver_or_new(target, "IMXUSBDriver", activate=False, name=name) drv.loader.timeout = self.args.wait elif isinstance(resource, NetworkMXSUSBLoader): - drv = self._get_driver_or_new(target, "MXSUSBDriver", activate=False, - name=name) + drv = self._get_driver_or_new(target, "MXSUSBDriver", activate=False, name=name) drv.loader.timeout = self.args.wait elif isinstance(resource, NetworkAlteraUSBBlaster): - args = dict(arg.split('=', 1) for arg in self.args.bootstrap_args) + args = dict(arg.split("=", 1) for arg in self.args.bootstrap_args) try: drv = target.get_driver("OpenOCDDriver", activate=False, name=name) except NoDriverFoundError: @@ -981,7 +988,7 @@ def sd_mux(self): if not drv: raise UserError("target has no compatible resource available") - if action == 'get': + if action == "get": print(drv.get_mode()) else: try: @@ -993,10 +1000,10 @@ def usb_mux(self): place = self.get_acquired_place() name = self.args.name links = self.args.links - if links == 'off': + if links == "off": links = [] - elif links == 'host-dut+host-device': - links = ['host-dut', 'host-device'] + elif links == "host-dut+host-device": + links = ["host-dut", "host-device"] else: links = [links] target = self._get_target(place) @@ -1021,11 +1028,11 @@ def _get_ip(self, place): return resource.address matches = [] - for details in resource.extra.get('macs').values(): - ips = details.get('ips', []) + for details in resource.extra.get("macs").values(): + ips = details.get("ips", []) if not ips: continue - 
matches.append((details['timestamp'], ips)) + matches.append((details["timestamp"], ips)) matches.sort() newest = matches[-1][1] if len(ips) > 1: @@ -1042,13 +1049,14 @@ def _get_ssh(self): return drv except NoDriverFoundError: from ..resource import NetworkService + try: resource = target.get_resource(NetworkService, name=self.args.name) except NoResourceFoundError: ip = self._get_ip(place) if not ip: return - resource = NetworkService(target, address=str(ip), username='root') + resource = NetworkService(target, address=str(ip), username="root") drv = self._get_driver_or_new(target, "SSHDriver", name=resource.name) return drv @@ -1113,7 +1121,7 @@ def telnet(self): ip = self._get_ip(place) if not ip: return - args = ['telnet', str(ip)] + args = ["telnet", str(ip)] res = subprocess.call(args) if res: exc = InteractiveCommandError("telnet error") @@ -1129,6 +1137,7 @@ def video(self): from ..resource.httpvideostream import HTTPVideoStream from ..resource.udev import USBVideo from ..resource.remote import NetworkUSBVideo + drv = None try: drv = target.get_driver("VideoProtocol", name=name) @@ -1143,10 +1152,10 @@ def video(self): if not drv: raise UserError("target has no compatible resource available") - if quality == 'list': + if quality == "list": default, variants = drv.get_qualities() for name, caps in variants: - mark = '*' if default == name else ' ' + mark = "*" if default == name else " " print(f"{mark} {name:<10s} {caps:s}") else: res = drv.stream(quality, controls=controls) @@ -1175,10 +1184,10 @@ def _get_tmc(self): def tmc_command(self): drv = self._get_tmc() - command = ' '.join(self.args.command) + command = " ".join(self.args.command) if not command: raise UserError("no command given") - if '?' in command: + if "?" in command: result = drv.query(command) print(result) else: @@ -1186,7 +1195,7 @@ def tmc_command(self): def tmc_query(self): drv = self._get_tmc() - query = ' '.join(self.args.query) + query = " ".join(self.args.query) if not query: raise UserError("no query given") result = drv.query(query) @@ -1195,22 +1204,22 @@ def tmc_query(self): def tmc_screen(self): drv = self._get_tmc() action = self.args.action - if action in ['show', 'save']: + if action in ["show", "save"]: extension, data = drv.get_screenshot() - filename = 'tmc-screen_{0:%Y-%m-%d}_{0:%H:%M:%S}.{1}'.format(datetime.now(), extension) - with open(filename, 'wb') as f: + filename = "tmc-screen_{0:%Y-%m-%d}_{0:%H:%M:%S}.{1}".format(datetime.now(), extension) + with open(filename, "wb") as f: f.write(data) print(f"Saved as {filename}") - if action == 'show': - subprocess.call(['xdg-open', filename]) + if action == "show": + subprocess.call(["xdg-open", filename]) def tmc_channel(self): drv = self._get_tmc() channel = self.args.channel action = self.args.action - if action == 'info': + if action == "info": data = drv.get_channel_info(channel) - elif action == 'values': + elif action == "values": data = drv.get_channel_values(channel) else: raise ValueError(f"unknown action {action}") @@ -1234,11 +1243,13 @@ def write_files(self): if len(self.args.SOURCE) != 2: self.args.parser.error("the following arguments are required: SOURCE DEST") - drv.write_files([self.args.SOURCE[0]], self.args.SOURCE[1], - self.args.partition, target_is_directory=False) + drv.write_files( + [self.args.SOURCE[0]], self.args.SOURCE[1], self.args.partition, target_is_directory=False + ) else: - drv.write_files(self.args.SOURCE, self.args.target_directory, - self.args.partition, target_is_directory=True) + drv.write_files( + 
self.args.SOURCE, self.args.target_directory, self.args.partition, target_is_directory=True + ) except subprocess.CalledProcessError as e: raise UserError(f"could not copy files to network usb storage: {e}") except FileNotFoundError as e: @@ -1253,17 +1264,22 @@ def write_image(self): target.activate(drv) try: - drv.write_image(self.args.filename, partition=self.args.partition, skip=self.args.skip, - seek=self.args.seek, mode=self.args.write_mode) + drv.write_image( + self.args.filename, + partition=self.args.partition, + skip=self.args.skip, + seek=self.args.seek, + mode=self.args.write_mode, + ) except subprocess.CalledProcessError as e: raise UserError(f"could not write image to network usb storage: {e}") except FileNotFoundError as e: raise UserError(e) async def create_reservation(self): - filters = ' '.join(self.args.filters) + filters = " ".join(self.args.filters) prio = self.args.prio - res = await self.call('org.labgrid.coordinator.create_reservation', filters, prio=prio) + res = await self.call("org.labgrid.coordinator.create_reservation", filters, prio=prio) if res is None: raise ServerError("failed to create reservation") ((token, config),) = res.items() # we get a one-item dict @@ -1281,13 +1297,13 @@ async def create_reservation(self): async def cancel_reservation(self): token = self.args.token - res = await self.call('org.labgrid.coordinator.cancel_reservation', token) + res = await self.call("org.labgrid.coordinator.cancel_reservation", token) if not res: raise ServerError(f"failed to cancel reservation {token}") async def _wait_reservation(self, token, verbose=True): while True: - config = await self.call('org.labgrid.coordinator.poll_reservation', token) + config = await self.call("org.labgrid.coordinator.poll_reservation", token) if config is None: raise ServerError("reservation not found") config = filter_dict(config, Reservation, warn=True) @@ -1304,8 +1320,8 @@ async def wait_reservation(self): await self._wait_reservation(token) async def print_reservations(self): - reservations = await self.call('org.labgrid.coordinator.get_reservations') - for token, config in sorted(reservations.items(), key=lambda x: (-x[1]['prio'], x[1]['created'])): # pylint: disable=line-too-long + reservations = await self.call("org.labgrid.coordinator.get_reservations") + for token, config in sorted(reservations.items(), key=lambda x: (-x[1]["prio"], x[1]["created"])): # pylint: disable=line-too-long config = filter_dict(config, Reservation, warn=True) res = Reservation(token=token, **config) print(f"Reservation '{res.token}':") @@ -1340,6 +1356,7 @@ async def export(self, place, target): await asyncio.sleep(1.0) except GeneratorExit: print("Exiting...\n", file=sys.stderr) + export.needs_target = True def print_version(self): @@ -1357,8 +1374,8 @@ async def connected(session): # pylint: disable=unused-argument if not extra: extra = {} - extra['loop'] = loop - extra['connected'] = connected + extra["loop"] = loop + extra["connected"] = connected session = [None] @@ -1376,10 +1393,9 @@ def make(*args, **kwargs): # there is no other notification when the WAMP connection setup times out, # so we need to wait for one of these protocol futures to resolve - done, pending = loop.run_until_complete(asyncio.wait( - {protocol.is_open, protocol.is_closed}, - timeout=30, - return_when=asyncio.FIRST_COMPLETED)) + done, pending = loop.run_until_complete( + asyncio.wait({protocol.is_open, protocol.is_closed}, timeout=30, return_when=asyncio.FIRST_COMPLETED) + ) if protocol.is_closed in done: raise 
Error("connection closed during setup") if protocol.is_open in pending: @@ -1388,23 +1404,26 @@ def make(*args, **kwargs): loop.run_until_complete(ready.wait()) return session[0] + def find_role_by_place(config, place): for role, role_config in config.items(): resources, _ = target_factory.normalize_config(role_config) - remote_places = resources.get('RemotePlace', {}) + remote_places = resources.get("RemotePlace", {}) remote_place = remote_places.get(place) if remote_place: return role return None + def find_any_role_with_place(config): for role, role_config in config.items(): resources, _ = target_factory.normalize_config(role_config) - remote_places = resources.get('RemotePlace', {}) + remote_places = resources.get("RemotePlace", {}) for place in remote_places: return (role, place) return None, None + class LocalPort(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: @@ -1424,6 +1443,7 @@ def __call__(self, parser, namespace, value, option_string): v.append((local, remote)) setattr(namespace, self.dest, v) + class RemotePort(argparse.Action): def __init__(self, option_strings, dest, nargs=None, **kwargs): if nargs is not None: @@ -1459,411 +1479,371 @@ def main(): processwrapper.enable_logging() # Support both legacy variables and properly namespaced ones - place = os.environ.get('PLACE', None) - place = os.environ.get('LG_PLACE', place) - state = os.environ.get('STATE', None) - state = os.environ.get('LG_STATE', state) - initial_state = os.environ.get('LG_INITIAL_STATE', None) - token = os.environ.get('LG_TOKEN', None) + place = os.environ.get("PLACE", None) + place = os.environ.get("LG_PLACE", place) + state = os.environ.get("STATE", None) + state = os.environ.get("LG_STATE", state) + initial_state = os.environ.get("LG_INITIAL_STATE", None) + token = os.environ.get("LG_TOKEN", None) parser = argparse.ArgumentParser() parser.add_argument( - '-x', - '--crossbar', - metavar='URL', + "-x", + "--crossbar", + metavar="URL", type=str, - help="crossbar websocket URL (default: value from env variable LG_CROSSBAR, otherwise ws://127.0.0.1:20408/ws)" + help="crossbar websocket URL (default: value from env variable LG_CROSSBAR, otherwise ws://127.0.0.1:20408/ws)", ) + parser.add_argument("-c", "--config", type=str, default=os.environ.get("LG_ENV"), help="config file") + parser.add_argument("-p", "--place", type=str, default=place, help="place name/alias") + parser.add_argument("-s", "--state", type=str, default=state, help="strategy state to switch into before command") parser.add_argument( - '-c', - '--config', - type=str, - default=os.environ.get("LG_ENV"), - help="config file" - ) - parser.add_argument( - '-p', - '--place', - type=str, - default=place, - help="place name/alias" - ) - parser.add_argument( - '-s', - '--state', - type=str, - default=state, - help="strategy state to switch into before command" - ) - parser.add_argument( - '-i', - '--initial-state', + "-i", + "--initial-state", type=str, default=initial_state, - help="strategy state to force into before switching to desired state" - ) - parser.add_argument( - '-d', - '--debug', - action='store_true', - default=False, - help="enable debug mode (show python tracebacks)" - ) - parser.add_argument( - '-v', - '--verbose', - action='count', - default=0 + help="strategy state to force into before switching to desired state", ) parser.add_argument( - '-P', - '--proxy', - type=str, - help="proxy connections via given ssh host" + "-d", "--debug", action="store_true", default=False, 
help="enable debug mode (show python tracebacks)" ) + parser.add_argument("-v", "--verbose", action="count", default=0) + parser.add_argument("-P", "--proxy", type=str, help="proxy connections via given ssh host") subparsers = parser.add_subparsers( - dest='command', - title='available subcommands', + dest="command", + title="available subcommands", metavar="COMMAND", ) - subparser = subparsers.add_parser('help') + subparser = subparsers.add_parser("help") - subparser = subparsers.add_parser('complete') - subparser.add_argument('type', choices=['resources', 'places', 'matches', 'match-names']) + subparser = subparsers.add_parser("complete") + subparser.add_argument("type", choices=["resources", "places", "matches", "match-names"]) subparser.set_defaults(func=ClientSession.complete) - subparser = subparsers.add_parser('monitor', - help="monitor events from the coordinator") + subparser = subparsers.add_parser("monitor", help="monitor events from the coordinator") subparser.set_defaults(func=ClientSession.do_monitor) - subparser = subparsers.add_parser('resources', aliases=('r',), - help="list available resources") - subparser.add_argument('-a', '--acquired', action='store_true') - subparser.add_argument('-e', '--exporter') - subparser.add_argument('--sort-by-matched-place-change', action='store_true', - help="sort by matched place's changed date (oldest first) and show place and date") # pylint: disable=line-too-long - subparser.add_argument('match', nargs='?') + subparser = subparsers.add_parser("resources", aliases=("r",), help="list available resources") + subparser.add_argument("-a", "--acquired", action="store_true") + subparser.add_argument("-e", "--exporter") + subparser.add_argument( + "--sort-by-matched-place-change", + action="store_true", + help="sort by matched place's changed date (oldest first) and show place and date", + ) # pylint: disable=line-too-long + subparser.add_argument("match", nargs="?") subparser.set_defaults(func=ClientSession.print_resources) - subparser = subparsers.add_parser('places', aliases=('p',), - help="list available places") - subparser.add_argument('-a', '--acquired', action='store_true') - subparser.add_argument('--sort-last-changed', action='store_true', - help='sort by last changed date (oldest first)') + subparser = subparsers.add_parser("places", aliases=("p",), help="list available places") + subparser.add_argument("-a", "--acquired", action="store_true") + subparser.add_argument("--sort-last-changed", action="store_true", help="sort by last changed date (oldest first)") subparser.set_defaults(func=ClientSession.print_places) - subparser = subparsers.add_parser('who', - help="list acquired places by user") - subparser.add_argument('-e', '--show-exporters', action='store_true', - help='show exporters currently used by each place') + subparser = subparsers.add_parser("who", help="list acquired places by user") + subparser.add_argument( + "-e", "--show-exporters", action="store_true", help="show exporters currently used by each place" + ) subparser.set_defaults(func=ClientSession.print_who) - subparser = subparsers.add_parser('show', - help="show a place and related resources") + subparser = subparsers.add_parser("show", help="show a place and related resources") subparser.set_defaults(func=ClientSession.print_place) - subparser = subparsers.add_parser('create', help="add a new place") + subparser = subparsers.add_parser("create", help="add a new place") subparser.set_defaults(func=ClientSession.add_place) - subparser = 
subparsers.add_parser('delete', help="delete an existing place") + subparser = subparsers.add_parser("delete", help="delete an existing place") subparser.set_defaults(func=ClientSession.del_place) - subparser = subparsers.add_parser('add-alias', - help="add an alias to a place") - subparser.add_argument('alias') + subparser = subparsers.add_parser("add-alias", help="add an alias to a place") + subparser.add_argument("alias") subparser.set_defaults(func=ClientSession.add_alias) - subparser = subparsers.add_parser('del-alias', - help="delete an alias from a place") - subparser.add_argument('alias') + subparser = subparsers.add_parser("del-alias", help="delete an alias from a place") + subparser.add_argument("alias") subparser.set_defaults(func=ClientSession.del_alias) - subparser = subparsers.add_parser('set-comment', - help="update the place comment") - subparser.add_argument('comment', nargs='+') + subparser = subparsers.add_parser("set-comment", help="update the place comment") + subparser.add_argument("comment", nargs="+") subparser.set_defaults(func=ClientSession.set_comment) - subparser = subparsers.add_parser('set-tags', - help="update the place tags") - subparser.add_argument('tags', metavar='KEY=VALUE', nargs='+', - help="use an empty value for deletion") + subparser = subparsers.add_parser("set-tags", help="update the place tags") + subparser.add_argument("tags", metavar="KEY=VALUE", nargs="+", help="use an empty value for deletion") subparser.set_defaults(func=ClientSession.set_tags) - subparser = subparsers.add_parser('add-match', - help="add one (or multiple) match pattern(s) to a place") - subparser.add_argument('patterns', metavar='PATTERN', nargs='+') + subparser = subparsers.add_parser("add-match", help="add one (or multiple) match pattern(s) to a place") + subparser.add_argument("patterns", metavar="PATTERN", nargs="+") subparser.set_defaults(func=ClientSession.add_match) - subparser = subparsers.add_parser('del-match', - help="delete one (or multiple) match pattern(s) from a place") - subparser.add_argument('patterns', metavar='PATTERN', nargs='+') + subparser = subparsers.add_parser("del-match", help="delete one (or multiple) match pattern(s) from a place") + subparser.add_argument("patterns", metavar="PATTERN", nargs="+") subparser.set_defaults(func=ClientSession.del_match) - subparser = subparsers.add_parser('add-named-match', - help="add one match pattern with a name to a place") - subparser.add_argument('pattern', metavar='PATTERN') - subparser.add_argument('name', metavar='NAME') + subparser = subparsers.add_parser("add-named-match", help="add one match pattern with a name to a place") + subparser.add_argument("pattern", metavar="PATTERN") + subparser.add_argument("name", metavar="NAME") subparser.set_defaults(func=ClientSession.add_named_match) - subparser = subparsers.add_parser('acquire', - aliases=('lock',), - help="acquire a place") - subparser.add_argument('--allow-unmatched', action='store_true', - help="allow missing resources for matches when locking the place") + subparser = subparsers.add_parser("acquire", aliases=("lock",), help="acquire a place") + subparser.add_argument( + "--allow-unmatched", action="store_true", help="allow missing resources for matches when locking the place" + ) subparser.set_defaults(func=ClientSession.acquire) - subparser = subparsers.add_parser('release', - aliases=('unlock',), - help="release a place") - subparser.add_argument('-k', '--kick', action='store_true', - help="release a place even if it is acquired by a different 
user") + subparser = subparsers.add_parser("release", aliases=("unlock",), help="release a place") + subparser.add_argument( + "-k", "--kick", action="store_true", help="release a place even if it is acquired by a different user" + ) subparser.set_defaults(func=ClientSession.release) - subparser = subparsers.add_parser('release-from', - help="atomically release a place, but only if locked by a specific user") - subparser.add_argument("acquired", - metavar="HOST/USER", - help="User and host to match against when releasing") + subparser = subparsers.add_parser( + "release-from", help="atomically release a place, but only if locked by a specific user" + ) + subparser.add_argument("acquired", metavar="HOST/USER", help="User and host to match against when releasing") subparser.set_defaults(func=ClientSession.release_from) - subparser = subparsers.add_parser('allow', help="allow another user to access a place") - subparser.add_argument('user', help="/") + subparser = subparsers.add_parser("allow", help="allow another user to access a place") + subparser.add_argument("user", help="/") subparser.set_defaults(func=ClientSession.allow) - subparser = subparsers.add_parser('env', - help="generate a labgrid environment file for a place") + subparser = subparsers.add_parser("env", help="generate a labgrid environment file for a place") subparser.set_defaults(func=ClientSession.print_env) - subparser = subparsers.add_parser('power', - aliases=('pw',), - help="change (or get) a place's power status") - subparser.add_argument('action', choices=['on', 'off', 'cycle', 'get']) - subparser.add_argument('-t', '--delay', type=float, default=None, - help='wait time in seconds between off and on during cycle') - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("power", aliases=("pw",), help="change (or get) a place's power status") + subparser.add_argument("action", choices=["on", "off", "cycle", "get"]) + subparser.add_argument( + "-t", "--delay", type=float, default=None, help="wait time in seconds between off and on during cycle" + ) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.power) - subparser = subparsers.add_parser('io', - help="change (or get) a digital IO status") - subparser.add_argument('action', choices=['high', 'low', 'get'], help="action") - subparser.add_argument('name', help="optional resource name", nargs='?') + subparser = subparsers.add_parser("io", help="change (or get) a digital IO status") + subparser.add_argument("action", choices=["high", "low", "get"], help="action") + subparser.add_argument("name", help="optional resource name", nargs="?") subparser.set_defaults(func=ClientSession.digital_io) - subparser = subparsers.add_parser('console', - aliases=('con',), - help="connect to the console") - subparser.add_argument('-l', '--loop', action='store_true', - help="keep trying to connect if the console is unavailable") - subparser.add_argument('-o', '--listenonly', action='store_true', - help="do not modify local terminal, do not send input from stdin") - subparser.add_argument('name', help="optional resource name", nargs='?') - subparser.add_argument('--logfile', metavar="FILE", help="Log output to FILE", default=None) + subparser = subparsers.add_parser("console", aliases=("con",), help="connect to the console") + subparser.add_argument( + "-l", "--loop", action="store_true", help="keep trying to connect if the console is unavailable" + ) + subparser.add_argument( + 
"-o", "--listenonly", action="store_true", help="do not modify local terminal, do not send input from stdin" + ) + subparser.add_argument("name", help="optional resource name", nargs="?") + subparser.add_argument("--logfile", metavar="FILE", help="Log output to FILE", default=None) subparser.set_defaults(func=ClientSession.console) - subparser = subparsers.add_parser('dfu', - help="communicate with device in DFU mode") - subparser.add_argument('action', choices=['download', 'detach', 'list'], help='action') - subparser.add_argument('altsetting', help='altsetting name or number (download, detach only)', - nargs='?') - subparser.add_argument('filename', help='file to write into device (download only)', nargs='?') - subparser.add_argument('--wait', type=float, default=10.0) - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("dfu", help="communicate with device in DFU mode") + subparser.add_argument("action", choices=["download", "detach", "list"], help="action") + subparser.add_argument("altsetting", help="altsetting name or number (download, detach only)", nargs="?") + subparser.add_argument("filename", help="file to write into device (download only)", nargs="?") + subparser.add_argument("--wait", type=float, default=10.0) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.dfu) - subparser = subparsers.add_parser('fastboot', - help="run fastboot") - subparser.add_argument('fastboot_args', metavar='ARG', nargs=argparse.REMAINDER, - help='fastboot arguments') - subparser.add_argument('--wait', type=float, default=10.0) - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("fastboot", help="run fastboot") + subparser.add_argument("fastboot_args", metavar="ARG", nargs=argparse.REMAINDER, help="fastboot arguments") + subparser.add_argument("--wait", type=float, default=10.0) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.fastboot) - subparser = subparsers.add_parser('flashscript', - help="run flash script") - subparser.add_argument('script', help="Flashing script") - subparser.add_argument('script_args', metavar='ARG', nargs=argparse.REMAINDER, - help='script arguments') - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("flashscript", help="run flash script") + subparser.add_argument("script", help="Flashing script") + subparser.add_argument("script_args", metavar="ARG", nargs=argparse.REMAINDER, help="script arguments") + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.flashscript) - subparser = subparsers.add_parser('bootstrap', - help="start a bootloader") - subparser.add_argument('-w', '--wait', type=float, default=10.0) - subparser.add_argument('filename', help='filename to boot on the target') - subparser.add_argument('bootstrap_args', metavar='ARG', nargs=argparse.REMAINDER, - help='extra bootstrap arguments') - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("bootstrap", help="start a bootloader") + subparser.add_argument("-w", "--wait", type=float, default=10.0) + subparser.add_argument("filename", help="filename to boot on the target") + subparser.add_argument("bootstrap_args", metavar="ARG", nargs=argparse.REMAINDER, help="extra bootstrap arguments") + 
subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.bootstrap) - subparser = subparsers.add_parser('sd-mux', - help="switch USB SD Muxer or get current mode") - subparser.add_argument('action', choices=['dut', 'host', 'off', 'client', 'get']) - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("sd-mux", help="switch USB SD Muxer or get current mode") + subparser.add_argument("action", choices=["dut", "host", "off", "client", "get"]) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.sd_mux) - subparser = subparsers.add_parser('usb-mux', - help="switch USB Muxer") - subparser.add_argument('links', choices=['off', 'dut-device', 'host-dut', 'host-device', 'host-dut+host-device']) - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser("usb-mux", help="switch USB Muxer") + subparser.add_argument("links", choices=["off", "dut-device", "host-dut", "host-device", "host-dut+host-device"]) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.usb_mux) - subparser = subparsers.add_parser('ssh', - help="connect via ssh (with optional arguments)", - epilog="Additional arguments are passed to the ssh subprocess.") - subparser.add_argument('--name', '-n', help="optional resource name") + subparser = subparsers.add_parser( + "ssh", + help="connect via ssh (with optional arguments)", + epilog="Additional arguments are passed to the ssh subprocess.", + ) + subparser.add_argument("--name", "-n", help="optional resource name") subparser.set_defaults(func=ClientSession.ssh) - subparser = subparsers.add_parser('scp', - help="transfer file via scp") - subparser.add_argument('--name', '-n', help="optional resource name") - subparser.add_argument('src', help='source path (use :dir/file for remote side)') - subparser.add_argument('dst', help='destination path (use :dir/file for remote side)') + subparser = subparsers.add_parser("scp", help="transfer file via scp") + subparser.add_argument("--name", "-n", help="optional resource name") + subparser.add_argument("src", help="source path (use :dir/file for remote side)") + subparser.add_argument("dst", help="destination path (use :dir/file for remote side)") subparser.set_defaults(func=ClientSession.scp) - subparser = subparsers.add_parser('rsync', - help="transfer files via rsync", - epilog="Additional arguments are passed to the rsync subprocess.") - subparser.add_argument('--name', '-n', help="optional resource name") - subparser.add_argument('src', help='source path (use :dir/file for remote side)') - subparser.add_argument('dst', help='destination path (use :dir/file for remote side)') + subparser = subparsers.add_parser( + "rsync", help="transfer files via rsync", epilog="Additional arguments are passed to the rsync subprocess." 
+    )
+    subparser.add_argument("--name", "-n", help="optional resource name")
+    subparser.add_argument("src", help="source path (use :dir/file for remote side)")
+    subparser.add_argument("dst", help="destination path (use :dir/file for remote side)")
     subparser.set_defaults(func=ClientSession.rsync)

-    subparser = subparsers.add_parser('sshfs',
-                                      help="mount via sshfs (blocking)")
-    subparser.add_argument('--name', '-n', help="optional resource name")
-    subparser.add_argument('path', help='remote path on the target')
-    subparser.add_argument('mountpoint', help='local path')
+    subparser = subparsers.add_parser("sshfs", help="mount via sshfs (blocking)")
+    subparser.add_argument("--name", "-n", help="optional resource name")
+    subparser.add_argument("path", help="remote path on the target")
+    subparser.add_argument("mountpoint", help="local path")
     subparser.set_defaults(func=ClientSession.sshfs)

-    subparser = subparsers.add_parser('forward',
-                                      help="forward local port to remote target")
-    subparser.add_argument('--name', '-n', help="optional resource name")
-    subparser.add_argument("--local", "-L", metavar="[LOCAL:]REMOTE",
-                           action=LocalPort,
-                           help="Forward local port LOCAL to remote port REMOTE. If LOCAL is unspecified, an arbitrary port will be chosen")
-    subparser.add_argument("--remote", "-R", metavar="REMOTE:LOCAL",
-                           action=RemotePort,
-                           help="Forward remote port REMOTE to local port LOCAL")
+    subparser = subparsers.add_parser("forward", help="forward local port to remote target")
+    subparser.add_argument("--name", "-n", help="optional resource name")
+    subparser.add_argument(
+        "--local",
+        "-L",
+        metavar="[LOCAL:]REMOTE",
+        action=LocalPort,
+        help="Forward local port LOCAL to remote port REMOTE. If LOCAL is unspecified, an arbitrary port will be chosen",
+    )
+    subparser.add_argument(
+        "--remote",
+        "-R",
+        metavar="REMOTE:LOCAL",
+        action=RemotePort,
+        help="Forward remote port REMOTE to local port LOCAL",
+    )
     subparser.set_defaults(func=ClientSession.forward)

-    subparser = subparsers.add_parser('telnet',
-                                      help="connect via telnet")
+    subparser = subparsers.add_parser("telnet", help="connect via telnet")
     subparser.set_defaults(func=ClientSession.telnet)

-    subparser = subparsers.add_parser('video',
-                                      help="start a video stream")
-    subparser.add_argument('-q', '--quality', type=str,
-                           help="select a video quality (use 'list' to show options)")
-    subparser.add_argument('-c', '--controls', type=str,
-                           help="configure v4l controls (such as 'focus_auto=0,focus_absolute=40')")
-    subparser.add_argument('--name', '-n', help="optional resource name")
+    subparser = subparsers.add_parser("video", help="start a video stream")
+    subparser.add_argument("-q", "--quality", type=str, help="select a video quality (use 'list' to show options)")
+    subparser.add_argument(
+        "-c", "--controls", type=str, help="configure v4l controls (such as 'focus_auto=0,focus_absolute=40')"
+    )
+    subparser.add_argument("--name", "-n", help="optional resource name")
     subparser.set_defaults(func=ClientSession.video)

-    subparser = subparsers.add_parser('audio', help="start a audio stream")
-    subparser.add_argument('--name', '-n', help="optional resource name")
+    subparser = subparsers.add_parser("audio", help="start an audio stream")
+    subparser.add_argument("--name", "-n", help="optional resource name")
     subparser.set_defaults(func=ClientSession.audio)

-    tmc_parser = subparsers.add_parser('tmc', help="control a USB TMC device")
-    tmc_parser.add_argument('--name', '-n', help="optional resource name")
+    tmc_parser = subparsers.add_parser("tmc", help="control a USB TMC device")
subparsers.add_parser("tmc", help="control a USB TMC device") + tmc_parser.add_argument("--name", "-n", help="optional resource name") tmc_parser.set_defaults(func=lambda _: tmc_parser.print_help(file=sys.stderr)) tmc_subparsers = tmc_parser.add_subparsers( - dest='subcommand', - title='available subcommands', + dest="subcommand", + title="available subcommands", metavar="SUBCOMMAND", ) - tmc_subparser = tmc_subparsers.add_parser('cmd', - aliases=('c',), - help="execute raw command") - tmc_subparser.add_argument('command', nargs='+') + tmc_subparser = tmc_subparsers.add_parser("cmd", aliases=("c",), help="execute raw command") + tmc_subparser.add_argument("command", nargs="+") tmc_subparser.set_defaults(func=ClientSession.tmc_command) - tmc_subparser = tmc_subparsers.add_parser('query', - aliases=('q',), - help="execute raw query") - tmc_subparser.add_argument('query', nargs='+') + tmc_subparser = tmc_subparsers.add_parser("query", aliases=("q",), help="execute raw query") + tmc_subparser.add_argument("query", nargs="+") tmc_subparser.set_defaults(func=ClientSession.tmc_query) - tmc_subparser = tmc_subparsers.add_parser('screen', help="show or save a screenshot") - tmc_subparser.add_argument('action', choices=['show', 'save']) + tmc_subparser = tmc_subparsers.add_parser("screen", help="show or save a screenshot") + tmc_subparser.add_argument("action", choices=["show", "save"]) tmc_subparser.set_defaults(func=ClientSession.tmc_screen) - tmc_subparser = tmc_subparsers.add_parser('channel', help="use a channel") - tmc_subparser.add_argument('channel', type=int) - tmc_subparser.add_argument('action', choices=['info', 'values']) + tmc_subparser = tmc_subparsers.add_parser("channel", help="use a channel") + tmc_subparser.add_argument("channel", type=int) + tmc_subparser.add_argument("action", choices=["info", "values"]) tmc_subparser.set_defaults(func=ClientSession.tmc_channel) - subparser = subparsers.add_parser('write-files', help="copy files onto mass storage device", - usage="%(prog)s [OPTION]... -T SOURCE DEST\n" + - " %(prog)s [OPTION]... [-t DIRECTORY] SOURCE...") - subparser.add_argument('-w', '--wait', type=float, default=10.0, - help='storage poll timeout in seconds') - subparser.add_argument('-p', '--partition', type=int, choices=range(0, 256), - metavar='0-255', default=1, - help='partition number to mount or 0 to mount whole disk (default: %(default)s)') + subparser = subparsers.add_parser( + "write-files", + help="copy files onto mass storage device", + usage="%(prog)s [OPTION]... -T SOURCE DEST\n" + " %(prog)s [OPTION]... 
[-t DIRECTORY] SOURCE...", + ) + subparser.add_argument("-w", "--wait", type=float, default=10.0, help="storage poll timeout in seconds") + subparser.add_argument( + "-p", + "--partition", + type=int, + choices=range(0, 256), + metavar="0-255", + default=1, + help="partition number to mount or 0 to mount whole disk (default: %(default)s)", + ) group = subparser.add_mutually_exclusive_group() - group.add_argument('-t', '--target-directory', type=pathlib.PurePath, metavar='DIRECTORY', - default=pathlib.PurePath("/"), - help='copy all SOURCE files into DIRECTORY (default: partition root)') - group.add_argument('-T', action='store_true', dest='rename', - help='copy SOURCE file and rename to DEST') - subparser.add_argument('--name', '-n', help="optional resource name") - subparser.add_argument('SOURCE', type=pathlib.PurePath, nargs='+', - help='source file(s) to copy') - subparser.add_argument('DEST', type=pathlib.PurePath, nargs='?', - help='destination file name for SOURCE') + group.add_argument( + "-t", + "--target-directory", + type=pathlib.PurePath, + metavar="DIRECTORY", + default=pathlib.PurePath("/"), + help="copy all SOURCE files into DIRECTORY (default: partition root)", + ) + group.add_argument("-T", action="store_true", dest="rename", help="copy SOURCE file and rename to DEST") + subparser.add_argument("--name", "-n", help="optional resource name") + subparser.add_argument("SOURCE", type=pathlib.PurePath, nargs="+", help="source file(s) to copy") + subparser.add_argument("DEST", type=pathlib.PurePath, nargs="?", help="destination file name for SOURCE") subparser.set_defaults(func=ClientSession.write_files, parser=subparser) - subparser = subparsers.add_parser('write-image', help="write an image onto mass storage") - subparser.add_argument('-w', '--wait', type=float, default=10.0) - subparser.add_argument('-p', '--partition', type=int, help="partition number to write to") - subparser.add_argument('--skip', type=int, default=0, - help="skip n 512-sized blocks at start of input") - subparser.add_argument('--seek', type=int, default=0, - help="skip n 512-sized blocks at start of output") - subparser.add_argument('--mode', dest='write_mode', - type=Mode, choices=Mode, default=Mode.DD, - help="Choose tool for writing images (default: %(default)s)") - subparser.add_argument('--name', '-n', help="optional resource name") - subparser.add_argument('filename', help='filename to boot on the target') + subparser = subparsers.add_parser("write-image", help="write an image onto mass storage") + subparser.add_argument("-w", "--wait", type=float, default=10.0) + subparser.add_argument("-p", "--partition", type=int, help="partition number to write to") + subparser.add_argument("--skip", type=int, default=0, help="skip n 512-sized blocks at start of input") + subparser.add_argument("--seek", type=int, default=0, help="skip n 512-sized blocks at start of output") + subparser.add_argument( + "--mode", + dest="write_mode", + type=Mode, + choices=Mode, + default=Mode.DD, + help="Choose tool for writing images (default: %(default)s)", + ) + subparser.add_argument("--name", "-n", help="optional resource name") + subparser.add_argument("filename", help="filename to boot on the target") subparser.set_defaults(func=ClientSession.write_image) - subparser = subparsers.add_parser('reserve', help="create a reservation") - subparser.add_argument('--wait', action='store_true', - help="wait until the reservation is allocated") - subparser.add_argument('--shell', action='store_true', - help="format output as shell 
variables") - subparser.add_argument('--prio', type=float, default=0.0, - help="priority relative to other reservations (default 0)") - subparser.add_argument('filters', metavar='KEY=VALUE', nargs='+', - help="required tags") + subparser = subparsers.add_parser("reserve", help="create a reservation") + subparser.add_argument("--wait", action="store_true", help="wait until the reservation is allocated") + subparser.add_argument("--shell", action="store_true", help="format output as shell variables") + subparser.add_argument( + "--prio", type=float, default=0.0, help="priority relative to other reservations (default 0)" + ) + subparser.add_argument("filters", metavar="KEY=VALUE", nargs="+", help="required tags") subparser.set_defaults(func=ClientSession.create_reservation) - subparser = subparsers.add_parser('cancel-reservation', help="cancel a reservation") - subparser.add_argument('token', type=str, default=token, nargs='?' if token else None) + subparser = subparsers.add_parser("cancel-reservation", help="cancel a reservation") + subparser.add_argument("token", type=str, default=token, nargs="?" if token else None) subparser.set_defaults(func=ClientSession.cancel_reservation) - subparser = subparsers.add_parser('wait', help="wait for a reservation to be allocated") - subparser.add_argument('token', type=str, default=token, nargs='?' if token else None) + subparser = subparsers.add_parser("wait", help="wait for a reservation to be allocated") + subparser.add_argument("token", type=str, default=token, nargs="?" if token else None) subparser.set_defaults(func=ClientSession.wait_reservation) - subparser = subparsers.add_parser('reservations', help="list current reservations") + subparser = subparsers.add_parser("reservations", help="list current reservations") subparser.set_defaults(func=ClientSession.print_reservations) - subparser = subparsers.add_parser('export', help="export driver information to a file (needs environment with drivers)") - subparser.add_argument('--format', dest='format', - type=ExportFormat, choices=ExportFormat, default=ExportFormat.SHELL_EXPORT, - help="output format (default: %(default)s)") - subparser.add_argument('filename', help='output filename') + subparser = subparsers.add_parser( + "export", help="export driver information to a file (needs environment with drivers)" + ) + subparser.add_argument( + "--format", + dest="format", + type=ExportFormat, + choices=ExportFormat, + default=ExportFormat.SHELL_EXPORT, + help="output format (default: %(default)s)", + ) + subparser.add_argument("filename", help="output filename") subparser.set_defaults(func=ClientSession.export) - subparser = subparsers.add_parser('version', help="show version") + subparser = subparsers.add_parser("version", help="show version") subparser.set_defaults(func=ClientSession.print_version) # make any leftover arguments available for some commands args, leftover = parser.parse_known_args() - if args.command not in ['ssh', 'rsync', 'forward']: + if args.command not in ["ssh", "rsync", "forward"]: args = parser.parse_args() else: args.leftover = leftover @@ -1890,9 +1870,9 @@ def main(): env = Environment(config_file=args.config) role = None - if args.command != 'reserve' and env and env.config.get_targets(): + if args.command != "reserve" and env and env.config.get_targets(): if args.place: - if not args.place.startswith('+'): + if not args.place.startswith("+"): role = find_role_by_place(env.config.get_targets(), args.place) if not role: print(f"RemotePlace {args.place} not found in configuration 
file", file=sys.stderr) @@ -1906,36 +1886,35 @@ def main(): print(f"Selected role {role} and place {args.place} from configuration file") extra = { - 'args': args, - 'env': env, - 'role': role, - 'prog': parser.prog, + "args": args, + "env": env, + "role": role, + "prog": parser.prog, } - if args.command and args.command != 'help': + if args.command and args.command != "help": exitcode = 0 try: signal.signal(signal.SIGTERM, lambda *_: sys.exit(0)) try: - crossbar_url = args.crossbar or env.config.get_option('crossbar_url') + crossbar_url = args.crossbar or env.config.get_option("crossbar_url") except (AttributeError, KeyError): # in case of no env or not set, use LG_CROSSBAR env variable or default crossbar_url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") try: - crossbar_realm = env.config.get_option('crossbar_realm') + crossbar_realm = env.config.get_option("crossbar_realm") except (AttributeError, KeyError): # in case of no env, use LG_CROSSBAR_REALM env variable or default crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") - logging.debug('Starting session with "%s", realm: "%s"', crossbar_url, - crossbar_realm) + logging.debug('Starting session with "%s", realm: "%s"', crossbar_url, crossbar_realm) session = start_session(crossbar_url, crossbar_realm, extra) try: if asyncio.iscoroutinefunction(args.func): - if getattr(args.func, 'needs_target', False): + if getattr(args.func, "needs_target", False): place = session.get_acquired_place() target = session._get_target(place) coro = args.func(session, place, target) @@ -1958,11 +1937,20 @@ def main(): for res in e.found: print(f"{res.name}", file=sys.stderr) else: - print("This may be caused by disconnected exporter or wrong match entries.\nYou can use the 'show' command to review all matching resources.", file=sys.stderr) # pylint: disable=line-too-long + print( + "This may be caused by disconnected exporter or wrong match entries.\nYou can use the 'show' command to review all matching resources.", + file=sys.stderr, + ) # pylint: disable=line-too-long elif isinstance(e, NoDriverFoundError): - print("This is likely caused by an error or missing driver in the environment configuration.", file=sys.stderr) # pylint: disable=line-too-long + print( + "This is likely caused by an error or missing driver in the environment configuration.", + file=sys.stderr, + ) # pylint: disable=line-too-long elif isinstance(e, InvalidConfigError): - print("This is likely caused by an error in the environment configuration or invalid\nresource information provided by the coordinator.", file=sys.stderr) # pylint: disable=line-too-long + print( + "This is likely caused by an error in the environment configuration or invalid\nresource information provided by the coordinator.", + file=sys.stderr, + ) # pylint: disable=line-too-long exitcode = 1 except ConnectionError as e: diff --git a/labgrid/remote/common.py b/labgrid/remote/common.py index 142455eb2..2ea1d2f1a 100644 --- a/labgrid/remote/common.py +++ b/labgrid/remote/common.py @@ -10,15 +10,15 @@ import attr __all__ = [ - 'TAG_KEY', - 'TAG_VAL', - 'ResourceEntry', - 'ResourceMatch', - 'Place', - 'ReservationState', - 'Reservation', - 'enable_tcp_nodelay', - 'monkey_patch_max_msg_payload_size_ws_option', + "TAG_KEY", + "TAG_VAL", + "ResourceEntry", + "ResourceMatch", + "Place", + "ReservationState", + "Reservation", + "enable_tcp_nodelay", + "monkey_patch_max_msg_payload_size_ws_option", ] TAG_KEY = re.compile(r"[a-z][a-z0-9_]+") @@ -30,59 +30,59 @@ class ResourceEntry: data = attr.ib() 
# cls, params def __attrs_post_init__(self): - self.data.setdefault('acquired', None) - self.data.setdefault('avail', False) + self.data.setdefault("acquired", None) + self.data.setdefault("avail", False) @property def acquired(self): - return self.data['acquired'] + return self.data["acquired"] @property def avail(self): - return self.data['avail'] + return self.data["avail"] @property def cls(self): - return self.data['cls'] + return self.data["cls"] @property def params(self): - return self.data['params'] + return self.data["params"] @property def args(self): """arguments for resource construction""" - args = self.data['params'].copy() - args.pop('extra', None) + args = self.data["params"].copy() + args.pop("extra", None) return args @property def extra(self): """extra resource information""" - return self.data['params'].get('extra', {}) + return self.data["params"].get("extra", {}) def asdict(self): return { - 'cls': self.cls, - 'params': self.params, - 'acquired': self.acquired, - 'avail': self.avail, + "cls": self.cls, + "params": self.params, + "acquired": self.acquired, + "avail": self.avail, } def update(self, data): """apply updated information from the exporter on the coordinator""" data = data.copy() - data.setdefault('acquired', None) - data.setdefault('avail', False) + data.setdefault("acquired", None) + data.setdefault("avail", False) self.data = data def acquire(self, place_name): - assert self.data['acquired'] is None - self.data['acquired'] = place_name + assert self.data["acquired"] is None + self.data["acquired"] = place_name def release(self): # ignore repeated releases - self.data['acquired'] = None + self.data["acquired"] = None @attr.s(eq=True, repr=False, str=False) @@ -99,9 +99,7 @@ class ResourceMatch: @classmethod def fromstr(cls, pattern): if not 2 <= pattern.count("/") <= 3: - raise ValueError( - f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')" - ) + raise ValueError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") return cls(*pattern.split("/")) def __repr__(self): @@ -160,30 +158,30 @@ def asdict(self): acquired_resources.append(resource.path) return { - 'aliases': list(self.aliases), - 'comment': self.comment, - 'tags': self.tags, - 'matches': [attr.asdict(x) for x in self.matches], - 'acquired': self.acquired, - 'acquired_resources': acquired_resources, - 'allowed': list(self.allowed), - 'created': self.created, - 'changed': self.changed, - 'reservation': self.reservation, + "aliases": list(self.aliases), + "comment": self.comment, + "tags": self.tags, + "matches": [attr.asdict(x) for x in self.matches], + "acquired": self.acquired, + "acquired_resources": acquired_resources, + "allowed": list(self.allowed), + "created": self.created, + "changed": self.changed, + "reservation": self.reservation, } def update(self, config): fields = attr.fields_dict(type(self)) for k, v in config.items(): assert k in fields - if k == 'name': + if k == "name": # we cannot rename places assert v == self.name continue setattr(self, k, v) def show(self, level=0): - indent = ' ' * level + indent = " " * level if self.aliases: print(indent + f"aliases: {', '.join(sorted(self.aliases))}") if self.comment: @@ -240,7 +238,6 @@ def unmatched(self, resource_paths): if not any([match.ismatch(resource) for resource in resource_paths]): return match - def touch(self): self.changed = time.time() @@ -256,12 +253,14 @@ class ReservationState(enum.Enum): @attr.s(eq=False) class Reservation: owner = attr.ib(validator=attr.validators.instance_of(str)) 
-    token = attr.ib(default=attr.Factory(
-        lambda: ''.join(random.choice(string.ascii_uppercase+string.digits) for i in range(10))))
+    token = attr.ib(
+        default=attr.Factory(lambda: "".join(random.choice(string.ascii_uppercase + string.digits) for i in range(10)))
+    )
     state = attr.ib(
-        default='waiting',
+        default="waiting",
         converter=lambda x: x if isinstance(x, ReservationState) else ReservationState[x],
-        validator=attr.validators.instance_of(ReservationState))
+        validator=attr.validators.instance_of(ReservationState),
+    )
     prio = attr.ib(default=0.0, validator=attr.validators.instance_of(float))
     # a dictionary of name -> filter dicts
     filters = attr.ib(default=attr.Factory(dict), validator=attr.validators.instance_of(dict))
@@ -272,13 +271,13 @@ class Reservation:

     def asdict(self):
         return {
-            'owner': self.owner,
-            'state': self.state.name,
-            'prio': self.prio,
-            'filters': self.filters,
-            'allocations': self.allocations,
-            'created': self.created,
-            'timeout': self.timeout,
+            "owner": self.owner,
+            "state": self.state.name,
+            "prio": self.prio,
+            "filters": self.filters,
+            "allocations": self.allocations,
+            "created": self.created,
+            "timeout": self.timeout,
         }

     def refresh(self, delta=60):
@@ -289,7 +288,7 @@ def expired(self):
         return self.timeout < time.time()

     def show(self, level=0):
-        indent = ' ' * level
+        indent = " " * level
         print(indent + f"owner: {self.owner}")
         print(indent + f"token: {self.token}")
         print(indent + f"state: {self.state.name}")
@@ -311,7 +310,7 @@ def enable_tcp_nodelay(session):
     asyncio/autobahn does not set TCP_NODELAY by default, so we need to do it
     like this for now.
     """
-    s = session._transport.transport.get_extra_info('socket')
+    s = session._transport.transport.get_extra_info("socket")
     s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True)

diff --git a/labgrid/remote/config.py b/labgrid/remote/config.py
index e519b6ae2..fa5e237cb 100644
--- a/labgrid/remote/config.py
+++ b/labgrid/remote/config.py
@@ -12,24 +12,20 @@
 @attr.s(eq=False)
 class ResourceConfig:
     filename = attr.ib(validator=attr.validators.instance_of(str))
-    template_env = attr.ib(
-        default=attr.Factory(dict), validator=attr.validators.instance_of(dict)
-    )
+    template_env = attr.ib(default=attr.Factory(dict), validator=attr.validators.instance_of(dict))

     def __attrs_post_init__(self):
         env = jinja2.Environment(
             loader=jinja2.FileSystemLoader(os.path.dirname(self.filename)),
-            line_statement_prefix='#',
-            line_comment_prefix='##',
+            line_statement_prefix="#",
+            line_comment_prefix="##",
         )
         try:
             with open(self.filename) as file:
                 template = env.from_string(file.read())
         except FileNotFoundError:
-            raise NoConfigFoundError(
-                f"{self.filename} could not be found"
-            )
+            raise NoConfigFoundError(f"{self.filename} could not be found")
         rendered = template.render(self.template_env)
-        pprint(('rendered', rendered))
+        pprint(("rendered", rendered))
         self.data = load(rendered)
-        pprint(('loaded', self.data))
+        pprint(("loaded", self.data))
diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py
index 29b45d83b..e3ba8210f 100644
--- a/labgrid/remote/coordinator.py
+++ b/labgrid/remote/coordinator.py
@@ -1,4 +1,5 @@
 """The coordinator module coordinates exported resources and clients accessing them."""
+
 # pylint: disable=no-member,unused-argument
 import asyncio
 import sys
@@ -31,6 +32,7 @@ class Action(Enum):
 @attr.s(init=False, eq=False)
 class RemoteSession:
     """class encapsulating a session, used by ExporterSession and ClientSession"""
+
     coordinator = attr.ib()
     session = attr.ib()
     authid = attr.ib()
@@ -44,13 +46,14 @@ def key(self):
     @property
     def name(self):
         """Name of the session"""
-        return self.authid.split('/', 1)[1]
+        return self.authid.split("/", 1)[1]


 @attr.s(eq=False)
 class ExporterSession(RemoteSession):
     """An ExporterSession is opened for each Exporter connecting to the
     coordinator, allowing the Exporter to get and set resources"""
+
     groups = attr.ib(default=attr.Factory(dict), init=False)

     def set_resource(self, groupname, resourcename, resourcedata):
@@ -61,8 +64,7 @@ def set_resource(self, groupname, resourcename, resourcedata):
             new = old
         elif resourcedata and not old:
             new = group[resourcename] = ResourceImport(
-                resourcedata,
-                path=(self.name, groupname, resourcedata['cls'], resourcename)
+                resourcedata, path=(self.name, groupname, resourcedata["cls"], resourcename)
             )
         elif not resourcedata and old:
             new = None
@@ -72,8 +74,7 @@ def set_resource(self, groupname, resourcename, resourcedata):
             new = None

         self.coordinator.publish(
-            'org.labgrid.coordinator.resource_changed', self.name,
-            groupname, resourcename, new.asdict() if new else {}
+            "org.labgrid.coordinator.resource_changed", self.name, groupname, resourcename, new.asdict() if new else {}
         )

         if old and new:
@@ -107,6 +108,7 @@ class ResourceImport(ResourceEntry):

     The ResourceEntry attributes contain the information for the client.
     """
+
     path = attr.ib(kw_only=True, validator=attr.validators.instance_of(tuple))


@@ -115,8 +117,10 @@ def locked(func):
     async def wrapper(self, *args, **kwargs):
         async with self.lock:
             return await func(self, *args, **kwargs)
+
     return wrapper

+
 class CoordinatorComponent(ApplicationSession):
     def __init__(self, *args, **kwargs):
         super().__init__(*args, **kwargs)
@@ -143,93 +147,60 @@ async def onConnect(self):

     @locked
     async def onJoin(self, details):
-        await self.subscribe(self.on_session_join, 'wamp.session.on_join')
-        await self.subscribe(
-            self.on_session_leave, 'wamp.session.on_leave'
-        )
+        await self.subscribe(self.on_session_join, "wamp.session.on_join")
+        await self.subscribe(self.on_session_leave, "wamp.session.on_leave")
         await self.register(
-            self.attach,
-            'org.labgrid.coordinator.attach',
-            options=RegisterOptions(details_arg='details')
+            self.attach, "org.labgrid.coordinator.attach", options=RegisterOptions(details_arg="details")
         )

         # resources
         await self.register(
-            self.set_resource,
-            'org.labgrid.coordinator.set_resource',
-            options=RegisterOptions(details_arg='details')
-        )
-        await self.register(
-            self.get_resources,
-            'org.labgrid.coordinator.get_resources'
+            self.set_resource, "org.labgrid.coordinator.set_resource", options=RegisterOptions(details_arg="details")
         )
+        await self.register(self.get_resources, "org.labgrid.coordinator.get_resources")

         # places
+        await self.register(self.add_place, "org.labgrid.coordinator.add_place")
+        await self.register(self.del_place, "org.labgrid.coordinator.del_place")
+        await self.register(self.add_place_alias, "org.labgrid.coordinator.add_place_alias")
+        await self.register(self.del_place_alias, "org.labgrid.coordinator.del_place_alias")
+        await self.register(self.set_place_tags, "org.labgrid.coordinator.set_place_tags")
+        await self.register(self.set_place_comment, "org.labgrid.coordinator.set_place_comment")
+        await self.register(self.add_place_match, "org.labgrid.coordinator.add_place_match")
+        await self.register(self.del_place_match, "org.labgrid.coordinator.del_place_match")
         await self.register(
-            self.add_place, 'org.labgrid.coordinator.add_place'
-        )
-        await self.register(
-            self.del_place, 'org.labgrid.coordinator.del_place'
-        )
-        await self.register(
-            self.add_place_alias, 'org.labgrid.coordinator.add_place_alias'
-        )
-        await self.register(
-            self.del_place_alias, 'org.labgrid.coordinator.del_place_alias'
-        )
-        await self.register(
-            self.set_place_tags, 'org.labgrid.coordinator.set_place_tags'
+            self.acquire_place, "org.labgrid.coordinator.acquire_place", options=RegisterOptions(details_arg="details")
         )
         await self.register(
-            self.set_place_comment, 'org.labgrid.coordinator.set_place_comment'
-        )
-        await self.register(
-            self.add_place_match, 'org.labgrid.coordinator.add_place_match'
-        )
-        await self.register(
-            self.del_place_match, 'org.labgrid.coordinator.del_place_match'
-        )
-        await self.register(
-            self.acquire_place,
-            'org.labgrid.coordinator.acquire_place',
-            options=RegisterOptions(details_arg='details')
-        )
-        await self.register(
-            self.release_place,
-            'org.labgrid.coordinator.release_place',
-            options=RegisterOptions(details_arg='details')
+            self.release_place, "org.labgrid.coordinator.release_place", options=RegisterOptions(details_arg="details")
         )
         await self.register(
             self.release_place_from,
-            'org.labgrid.coordinator.release_place_from',
-            options=RegisterOptions(details_arg='details')
+            "org.labgrid.coordinator.release_place_from",
+            options=RegisterOptions(details_arg="details"),
         )
         await self.register(
-            self.allow_place,
-            'org.labgrid.coordinator.allow_place',
-            options=RegisterOptions(details_arg='details')
-        )
-        await self.register(
-            self.get_places, 'org.labgrid.coordinator.get_places'
+            self.allow_place, "org.labgrid.coordinator.allow_place", options=RegisterOptions(details_arg="details")
         )
+        await self.register(self.get_places, "org.labgrid.coordinator.get_places")

         # reservations
         await self.register(
             self.create_reservation,
-            'org.labgrid.coordinator.create_reservation',
-            options=RegisterOptions(details_arg='details'),
+            "org.labgrid.coordinator.create_reservation",
+            options=RegisterOptions(details_arg="details"),
         )
         await self.register(
             self.cancel_reservation,
-            'org.labgrid.coordinator.cancel_reservation',
+            "org.labgrid.coordinator.cancel_reservation",
         )
         await self.register(
             self.poll_reservation,
-            'org.labgrid.coordinator.poll_reservation',
+            "org.labgrid.coordinator.poll_reservation",
         )
         await self.register(
             self.get_reservations,
-            'org.labgrid.coordinator.get_reservations',
+            "org.labgrid.coordinator.get_reservations",
         )

         self.poll_task = asyncio.get_event_loop().create_task(self.poll())
@@ -250,7 +221,7 @@ async def onDisconnect(self):
         if self.poll_task:
             self.poll_task.cancel()
             await asyncio.wait([self.poll_task])
-            await asyncio.sleep(0.5) # give others a chance to clean up
+            await asyncio.sleep(0.5)  # give others a chance to clean up

     async def _poll_step(self):
         # save changes
@@ -259,26 +230,24 @@ async def _poll_step(self):
         # poll exporters
         for session in list(self.sessions.values()):
             if isinstance(session, ExporterSession):
-                fut = self.call(
-                    f'org.labgrid.exporter.{session.name}.version'
-                )
+                fut = self.call(f"org.labgrid.exporter.{session.name}.version")
                 done, _ = await asyncio.wait([fut], timeout=5)
                 if not done:
-                    print(f'kicking exporter ({session.key}/{session.name})')
-                    await self.call('wamp.session.kill', session.key, message="timeout detected by coordinator")
-                    print(f'cleaning up exporter ({session.key}/{session.name})')
+                    print(f"kicking exporter ({session.key}/{session.name})")
+                    await self.call("wamp.session.kill", session.key, message="timeout detected by coordinator")
+                    print(f"cleaning up exporter ({session.key}/{session.name})")
                     await self.on_session_leave(session.key)
-                    print(f'removed exporter ({session.key}/{session.name})')
+                    print(f"removed exporter ({session.key}/{session.name})")
                     continue
                 try:
                     session.version = done.pop().result()
                 except wamp.exception.ApplicationError as e:
                     if e.error == "wamp.error.no_such_procedure":
-                        pass # old client
+                        pass  # old client
                     elif e.error == "wamp.error.canceled":
-                        pass # disconnected
+                        pass  # disconnected
                     elif e.error == "wamp.error.no_such_session":
-                        pass # client has already disconnected
+                        pass  # client has already disconnected
                     else:
                         raise

         # update reservations
@@ -309,26 +278,26 @@ async def save(self):
         places = places.encode()

         loop = asyncio.get_event_loop()
-        await loop.run_in_executor(None, atomic_replace, 'resources.yaml', resources)
-        await loop.run_in_executor(None, atomic_replace, 'places.yaml', places)
+        await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources)
+        await loop.run_in_executor(None, atomic_replace, "places.yaml", places)

     def load(self):
         try:
             self.places = {}
-            with open('places.yaml', 'r') as f:
+            with open("places.yaml", "r") as f:
                 self.places = yaml.load(f.read())
             for placename, config in self.places.items():
-                config['name'] = placename
+                config["name"] = placename
                 # FIXME maybe recover previously acquired places here?
-                if 'acquired' in config:
-                    del config['acquired']
-                if 'acquired_resources' in config:
-                    del config['acquired_resources']
-                if 'allowed' in config:
-                    del config['allowed']
-                if 'reservation' in config:
-                    del config['reservation']
-                config['matches'] = [ResourceMatch(**match) for match in config['matches']]
+                if "acquired" in config:
+                    del config["acquired"]
+                if "acquired_resources" in config:
+                    del config["acquired_resources"]
+                if "allowed" in config:
+                    del config["allowed"]
+                if "reservation" in config:
+                    del config["reservation"]
+                config["matches"] = [ResourceMatch(**match) for match in config["matches"]]
                 place = Place(**config)
                 self.places[placename] = place
         except FileNotFoundError:
@@ -371,28 +340,26 @@ async def _update_acquired_places(self, action, resource, callback=True):
         self._publish_place(place)

     def _publish_place(self, place):
-        self.publish(
-            'org.labgrid.coordinator.place_changed', place.name, place.asdict()
-        )
+        self.publish("org.labgrid.coordinator.place_changed", place.name, place.asdict())

     def _publish_resource(self, resource):
         self.publish(
-            'org.labgrid.coordinator.resource_changed',
-            resource.path[0], # exporter name
-            resource.path[1], # group name
-            resource.path[3], # resource name
+            "org.labgrid.coordinator.resource_changed",
+            resource.path[0],  # exporter name
+            resource.path[1],  # group name
+            resource.path[3],  # resource name
             resource.asdict(),
         )

     @locked
     async def on_session_join(self, session_details):
-        print('join')
+        print("join")
         pprint(session_details)
-        session = session_details['session']
-        authid = session_details['authextra'].get('authid') or session_details['authid']
-        if authid.startswith('client/'):
+        session = session_details["session"]
+        authid = session_details["authextra"].get("authid") or session_details["authid"]
+        if authid.startswith("client/"):
             session = ClientSession(self, session, authid)
-        elif authid.startswith('exporter/'):
+        elif authid.startswith("exporter/"):
             session = ExporterSession(self, session, authid)
         else:
             return
@@ -400,7 +367,7 @@ async def on_session_join(self, session_details):

     @locked
     async def on_session_leave(self, session_id):
-        print(f'leave ({session_id})')
+        print(f"leave ({session_id})")
         try:
             session = self.sessions.pop(session_id)
         except KeyError:
@@ -417,7 +384,7 @@ async def attach(self, name, details=None):
         # TODO check if name is in use
         session = self.sessions[details.caller]
         session_details = self.sessions[session]
-        session_details['name'] = name
+        session_details["name"] = name
         self.exporters[name] = defaultdict(dict)

     # not @locked because set_resource may be triggered by an acquire() call to
@@ -473,9 +440,7 @@ async def del_place(self, name, details=None):
         if name not in self.places:
             return False
         del self.places[name]
-        self.publish(
-            'org.labgrid.coordinator.place_changed', name, {}
-        )
+        self.publish("org.labgrid.coordinator.place_changed", name, {})
         self.save_later()
         return True

@@ -551,7 +516,7 @@ async def add_place_match(self, placename, pattern, rename=None, details=None):
             place = self.places[placename]
         except KeyError:
             return False
-        match = ResourceMatch(*pattern.split('/'), rename=rename)
+        match = ResourceMatch(*pattern.split("/"), rename=rename)
         if match in place.matches:
             return False
         place.matches.append(match)
@@ -566,7 +531,7 @@ async def del_place_match(self, placename, pattern, rename=None, details=None):
             place = self.places[placename]
         except KeyError:
             return False
-        match = ResourceMatch(*pattern.split('/'), rename=rename)
+        match = ResourceMatch(*pattern.split("/"), rename=rename)
         try:
             place.matches.remove(match)
         except ValueError:
@@ -577,7 +542,7 @@ async def del_place_match(self, placename, pattern, rename=None, details=None):
         return True

     async def _acquire_resources(self, place, resources):
-        resources = resources.copy() # we may modify the list
+        resources = resources.copy()  # we may modify the list
         # all resources need to be free
         for resource in resources:
             if resource.acquired:
@@ -589,8 +554,9 @@ async def _acquire_resources(self, place, resources):
             for resource in resources:
                 # this triggers an update from the exporter which is published
                 # to the clients
-                await self.call(f'org.labgrid.exporter.{resource.path[0]}.acquire',
-                                resource.path[1], resource.path[3], place.name)
+                await self.call(
+                    f"org.labgrid.exporter.{resource.path[0]}.acquire", resource.path[1], resource.path[3], place.name
+                )
                 acquired.append(resource)
         except:
             print(f"failed to acquire {resource}", file=sys.stderr)
@@ -604,7 +570,7 @@ async def _acquire_resources(self, place, resources):
         return True

     async def _release_resources(self, place, resources, callback=True):
-        resources = resources.copy() # we may modify the list
+        resources = resources.copy()  # we may modify the list

         for resource in resources:
             try:
@@ -617,8 +583,9 @@ async def _release_resources(self, place, resources, callback=True):
                 # this triggers an update from the exporter which is published
                 # to the clients
                 if callback:
-                    await self.call(f'org.labgrid.exporter.{resource.path[0]}.release',
-                                    resource.path[1], resource.path[3])
+                    await self.call(
+                        f"org.labgrid.exporter.{resource.path[0]}.release", resource.path[1], resource.path[3]
+                    )
             except:
                 print(f"failed to release {resource}", file=sys.stderr)
                 # at least try to notify the clients
@@ -758,10 +725,10 @@ def schedule_reservations(self):
                     res.state = ReservationState.expired
                     res.allocations.clear()
                     res.refresh()
-                    print(f'reservation ({res.owner}/{res.token}) is now {res.state.name}')
+                    print(f"reservation ({res.owner}/{res.token}) is now {res.state.name}")
                 else:
                     del self.reservations[res.token]
-                    print(f'removed {res.state.name} reservation ({res.owner}/{res.token})')
+                    print(f"removed {res.state.name} reservation ({res.owner}/{res.token})")

         # check which places are already allocated and handle state transitions
         allocated_places = set()
@@ -775,7 +742,7 @@ def schedule_reservations(self):
                     res.state = ReservationState.invalid
                     res.allocations.clear()
                     res.refresh(300)
-                    print(f'reservation ({res.owner}/{res.token}) is now {res.state.name}')
+                    print(f"reservation ({res.owner}/{res.token}) is now {res.state.name}")
                 if place.acquired is not None:
                     acquired_places.add(name)
                 assert name not in allocated_places, "conflicting allocation"
@@ -784,12 +751,12 @@ def schedule_reservations(self):
                 # an allocated place was acquired
                 res.state = ReservationState.acquired
                 res.refresh()
-                print(f'reservation ({res.owner}/{res.token}) is now {res.state.name}')
+                print(f"reservation ({res.owner}/{res.token}) is now {res.state.name}")
             if not acquired_places and res.state is ReservationState.acquired:
                 # all allocated places were released
                 res.state = ReservationState.allocated
                 res.refresh()
-                print(f'reservation ({res.owner}/{res.token}) is now {res.state.name}')
+                print(f"reservation ({res.owner}/{res.token}) is now {res.state.name}")

         # check which places are available for allocation
         available_places = set()
@@ -811,21 +778,21 @@ def schedule_reservations(self):
         for name in available_places:
             tags = set(self.places[name].tags.items())
             # support place names
-            tags |= {('name', name)}
+            tags |= {("name", name)}
             # support place aliases
             place_tagsets.append(TagSet(name, tags))
         filter_tagsets = []
         for res in pending_reservations:
-            filter_tagsets.append(TagSet(res.token, set(res.filters['main'].items())))
+            filter_tagsets.append(TagSet(res.token, set(res.filters["main"].items())))
         allocation = schedule(place_tagsets, filter_tagsets)
         # apply allocations
         for res_token, place_name in allocation.items():
             res = self.reservations[res_token]
-            res.allocations = {'main': [place_name]}
+            res.allocations = {"main": [place_name]}
             res.state = ReservationState.allocated
             res.refresh()
-            print(f'reservation ({res.owner}/{res.token}) is now {res.state.name}')
+            print(f"reservation ({res.owner}/{res.token}) is now {res.state.name}")

         # update reservation property of each place and notify
         old_map = {}
@@ -853,7 +820,7 @@ async def create_reservation(self, spec, prio=0.0, details=None):
         filter_ = {}
         for pair in spec.split():
             try:
-                k, v = pair.split('=')
+                k, v = pair.split("=")
             except ValueError:
                 return None
             if not TAG_KEY.match(k):
@@ -862,7 +829,7 @@ async def create_reservation(self, spec, prio=0.0, details=None):
                 return None
             filter_[k] = v

-        filters = {'main': filter_} # currently, only one group is implemented
+        filters = {"main": filter_}  # currently, only one group is implemented

         owner = self.sessions[details.caller].name
         res = Reservation(owner=owner, prio=prio, filters=filters)
@@ -893,7 +860,8 @@ async def poll_reservation(self, token, details=None):
     async def get_reservations(self, details=None):
         return {k: v.asdict() for k, v in self.reservations.items()}

-if __name__ == '__main__':
+
+if __name__ == "__main__":
     runner = ApplicationRunner(
         url=environ.get("WS", "ws://127.0.0.1:20408/ws"),
         realm="realm1",
diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py
index 577d3c926..03a8cd26c 100755
--- a/labgrid/remote/exporter.py
+++ b/labgrid/remote/exporter.py
@@ -1,5 +1,6 @@
 """The remote.exporter module exports resources to the coordinator and
 makes them available to other clients on the same coordinator"""
+
 import argparse
 import asyncio
 import logging
@@ -28,6 +29,7 @@
 exports: Dict[str, Type[ResourceEntry]] = {}
 reexec = False

+
 class ExporterError(Exception):
     pass
@@ -40,19 +42,21 @@ def log_subprocess_kernel_stack(logger, child):
     if child.poll() is not None:  # nothing to check if no longer running
         return
     try:
-        with open(f'/proc/{child.pid}/stack', 'r') as f:
+        with open(f"/proc/{child.pid}/stack", "r") as f:
             stack = f.read()
             stack = stack.strip()
     except PermissionError:
         return
     logger.info("current kernel stack of %s is:\n%s", child.args, stack)

+
 @attr.s(eq=False)
 class ResourceExport(ResourceEntry):
     """Represents a local resource exported via a specific protocol.

     The ResourceEntry attributes contain the information for the client.
     """
+
     host = attr.ib(default=gethostname(), validator=attr.validators.instance_of(str))
     proxy = attr.ib(default=None)
     proxy_required = attr.ib(default=False)
@@ -86,7 +90,7 @@ def broken(self, reason):
         # resource. For now, when trying to acquire a place with a match for
         # this resource, we get 'resource is already in used by ',
         # instead of an unspecific error.
-        self.data['acquired'] = ''
+        self.data["acquired"] = ""
         self.logger.error("marked as broken: %s", reason)

     def _get_start_params(self):  # pylint: disable=no-self-use
@@ -145,17 +149,17 @@ def poll(self):
         # check if resulting information has changed
         dirty = False
         if self.avail != (self.local.avail and not self.broken):
-            self.data['avail'] = self.local.avail and not self.broken
+            self.data["avail"] = self.local.avail and not self.broken
             dirty = True
         params = self._get_params()
-        if not params.get('extra'):
-            params['extra'] = {}
-        params['extra']['proxy_required'] = self.proxy_required
-        params['extra']['proxy'] = self.proxy
+        if not params.get("extra"):
+            params["extra"] = {}
+        params["extra"]["proxy_required"] = self.proxy_required
+        params["extra"]["proxy"] = self.proxy
         if self.broken:
-            params['extra']['broken'] = self.broken
+            params["extra"]["broken"] = self.broken
         if self.params != params:
-            self.data['params'].update(params)
+            self.data["params"].update(params)
             dirty = True

         return dirty
@@ -181,11 +185,13 @@ def __attrs_post_init__(self):
         super().__attrs_post_init__()
         if self.cls == "RawSerialPort":
             from ..resource.serialport import RawSerialPort
+
             self.local = RawSerialPort(target=None, name=None, **self.local_params)
         elif self.cls == "USBSerialPort":
             from ..resource.udev import USBSerialPort
+
             self.local = USBSerialPort(target=None, name=None, **self.local_params)
-        self.data['cls'] = "NetworkSerialPort"
+        self.data["cls"] = "NetworkSerialPort"
         self.child = None
         self.port = None
         self.ser2net_bin = shutil.which("ser2net")
@@ -203,31 +209,31 @@ def __del__(self):

     def _get_start_params(self):
         return {
-            'path': self.local.port,
+            "path": self.local.port,
         }

     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'port': self.port,
-            'speed': self.local.speed,
-            'extra': {
-                'path': self.local.port,
-            }
+            "host": self.host,
+            "port": self.port,
+            "speed": self.local.speed,
+            "extra": {
+                "path": self.local.port,
+            },
         }

     def _start(self, start_params):
         """Start ``ser2net`` subprocess"""
         assert self.local.avail
         assert self.child is None
-        assert start_params['path'].startswith('/dev/')
+        assert start_params["path"].startswith("/dev/")
         self.port = get_free_port()

         # Ser2net has switched to using YAML format at version 4.0.0.
-        result = subprocess.run([self.ser2net_bin,'-v'], capture_output=True, text=True)
-        _, _, version = str(result.stdout).split(' ')
-        major_version = version.split('.')[0]
+        result = subprocess.run([self.ser2net_bin, "-v"], capture_output=True, text=True)
+        _, _, version = str(result.stdout).split(" ")
+        major_version = version.split(".")[0]

         # There is a bug in ser2net between 4.4.0 and 4.6.1 where it
         # returns 1 on a successful call to 'ser2net -v'. We don't want
@@ -239,20 +245,24 @@ def _start(self, start_params):
         if int(major_version) >= 4:
             cmd = [
                 self.ser2net_bin,
-                '-d',
-                '-n',
-                '-Y', f'connection: &con01# accepter: telnet(rfc2217,mode=server),{self.port}',
-                '-Y', f' connector: serialdev(nouucplock=true),{start_params["path"]},{self.local.speed}n81,local',  # pylint: disable=line-too-long
-                '-Y', ' options:',
-                '-Y', ' max-connections: 10',
+                "-d",
+                "-n",
+                "-Y",
+                f"connection: &con01# accepter: telnet(rfc2217,mode=server),{self.port}",
+                "-Y",
+                f' connector: serialdev(nouucplock=true),{start_params["path"]},{self.local.speed}n81,local',  # pylint: disable=line-too-long
+                "-Y",
+                " options:",
+                "-Y",
+                " max-connections: 10",
             ]
         else:
             cmd = [
                 self.ser2net_bin,
-                '-d',
-                '-n',
-                '-u',
-                '-C',
+                "-d",
+                "-n",
+                "-u",
+                "-C",
                 f'{self.port}:telnet:0:{start_params["path"]}:{self.local.speed} NONE 8DATABITS 1STOPBIT LOCAL',  # pylint: disable=line-too-long
             ]
         self.logger.info("Starting ser2net with: %s", " ".join(cmd))
@@ -263,7 +273,7 @@ def _start(self, start_params):
         except subprocess.TimeoutExpired:
             # good, ser2net didn't exit immediately
             pass
-        self.logger.info("started ser2net for %s on port %d", start_params['path'], self.port)
+        self.logger.info("started ser2net for %s on port %d", start_params["path"], self.port)

     def _stop(self, start_params):
         """Stop ``ser2net`` subprocess"""
@@ -276,16 +286,17 @@ def _stop(self, start_params):
         try:
             child.wait(2.0)  # ser2net takes about a second to react
         except subprocess.TimeoutExpired:
-            self.logger.warning("ser2net for %s still running after SIGTERM", start_params['path'])
+            self.logger.warning("ser2net for %s still running after SIGTERM", start_params["path"])
             log_subprocess_kernel_stack(self.logger, child)
             child.kill()
             child.wait(1.0)
-        self.logger.info("stopped ser2net for %s on port %d", start_params['path'], port)
+        self.logger.info("stopped ser2net for %s on port %d", start_params["path"], port)


 exports["USBSerialPort"] = SerialPortExport
 exports["RawSerialPort"] = SerialPortExport

+
 @attr.s(eq=False)
 class NetworkInterfaceExport(ResourceExport):
     """ResourceExport for a network interface"""
@@ -294,21 +305,23 @@ def __attrs_post_init__(self):
         super().__attrs_post_init__()
         if self.cls == "NetworkInterface":
             from ..resource.base import NetworkInterface
+
             self.local = NetworkInterface(target=None, name=None, **self.local_params)
         elif self.cls == "USBNetworkInterface":
             from ..resource.udev import USBNetworkInterface
+
             self.local = USBNetworkInterface(target=None, name=None, **self.local_params)
-        self.data['cls'] = "RemoteNetworkInterface"
+        self.data["cls"] = "RemoteNetworkInterface"

     def _get_params(self):
         """Helper function to return parameters"""
         params = {
-            'host': self.host,
-            'ifname': self.local.ifname,
+            "host": self.host,
+            "ifname": self.local.ifname,
         }
         if self.cls == "USBNetworkInterface":
-            params['extra'] = {
-                'state': self.local.if_state,
+            params["extra"] = {
+                "state": self.local.if_state,
             }
         return params
@@ -317,6 +330,7 @@ def _get_params(self):
 exports["USBNetworkInterface"] = NetworkInterfaceExport
 exports["NetworkInterface"] = NetworkInterfaceExport

+
 @attr.s(eq=False)
 class USBGenericExport(ResourceExport):
     """ResourceExport for USB devices accessed directly from userspace"""
@@ -324,22 +338,24 @@ class USBGenericExport(ResourceExport):
     def __attrs_post_init__(self):
         super().__attrs_post_init__()
         local_cls_name = self.cls
-        self.data['cls'] = f"Network{self.cls}"
+        self.data["cls"] = f"Network{self.cls}"
         from ..resource import udev
+
         local_cls = getattr(udev, local_cls_name)
         self.local = local_cls(target=None, name=None, **self.local_params)

     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
         }

+
 @attr.s(eq=False)
 class USBSigrokExport(USBGenericExport):
     """ResourceExport for USB devices accessed directly from userspace"""
@@ -350,16 +366,17 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'driver': self.local.driver,
-            'channels': self.local.channels
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "driver": self.local.driver,
+            "channels": self.local.channels,
         }

+
 @attr.s(eq=False)
 class USBSDMuxExport(USBGenericExport):
     """ResourceExport for USB devices accessed directly from userspace"""
@@ -370,15 +387,16 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'control_path': self.local.control_path,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "control_path": self.local.control_path,
         }

+
 @attr.s(eq=False)
 class USBSDWireExport(USBGenericExport):
     """ResourceExport for USB devices accessed directly from userspace"""
@@ -389,15 +407,16 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'control_serial': self.local.control_serial,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "control_serial": self.local.control_serial,
         }

+
 @attr.s(eq=False)
 class USBAudioInputExport(USBGenericExport):
     """ResourceExport for ports on switchable USB hubs"""
@@ -408,16 +427,17 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'index': self.local.index,
-            'alsa_name': self.local.alsa_name,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "index": self.local.index,
+            "alsa_name": self.local.alsa_name,
         }

+
 @attr.s(eq=False)
 class SiSPMPowerPortExport(USBGenericExport):
     """ResourceExport for ports on GEMBRID switches"""
@@ -428,15 +448,16 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'index': self.local.index,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "index": self.local.index,
         }

+
 @attr.s(eq=False)
 class USBPowerPortExport(USBGenericExport):
     """ResourceExport for ports on switchable USB hubs"""
@@ -447,15 +468,16 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'index': self.local.index,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "index": self.local.index,
         }

+
 @attr.s(eq=False)
 class USBDeditecRelaisExport(USBGenericExport):
     """ResourceExport for outputs on deditec relais"""
@@ -466,15 +488,16 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'index': self.local.index,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "index": self.local.index,
         }

+
 @attr.s(eq=False)
 class USBHIDRelayExport(USBGenericExport):
     """ResourceExport for outputs on simple USB HID relays"""
@@ -485,31 +508,35 @@ def __attrs_post_init__(self):
     def _get_params(self):
         """Helper function to return parameters"""
         return {
-            'host': self.host,
-            'busnum': self.local.busnum,
-            'devnum': self.local.devnum,
-            'path': self.local.path,
-            'vendor_id': self.local.vendor_id,
-            'model_id': self.local.model_id,
-            'index': self.local.index,
+            "host": self.host,
+            "busnum": self.local.busnum,
+            "devnum": self.local.devnum,
+            "path": self.local.path,
+            "vendor_id": self.local.vendor_id,
+            "model_id": self.local.model_id,
+            "index": self.local.index,
         }

+
 @attr.s(eq=False)
 class USBFlashableExport(USBGenericExport):
     """ResourceExport for Flashable USB devices"""
+
     def __attrs_post_init__(self):
         super().__attrs_post_init__()

     def _get_params(self):
         p = super()._get_params()
-        p['devnode'] = self.local.devnode
+        p["devnode"] = self.local.devnode
         return p

+
 @attr.s(eq=False)
 class USBGenericRemoteExport(USBGenericExport):
     def __attrs_post_init__(self):
         super().__attrs_post_init__()
-        self.data['cls'] = f"Remote{self.cls}".replace("Network", "")
self.data["cls"] = f"Remote{self.cls}".replace("Network", "") + exports["AndroidFastboot"] = USBGenericExport exports["AndroidUSBFastboot"] = USBGenericRemoteExport @@ -535,6 +562,7 @@ def __attrs_post_init__(self): exports["USBFlashableDevice"] = USBFlashableExport exports["LXAUSBMux"] = USBGenericExport + @attr.s(eq=False) class ProviderGenericExport(ResourceExport): """ResourceExport for Resources derived from BaseProvider""" @@ -542,23 +570,26 @@ class ProviderGenericExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() local_cls_name = self.cls - self.data['cls'] = f"Remote{self.cls}" + self.data["cls"] = f"Remote{self.cls}" from ..resource import provider + local_cls = getattr(provider, local_cls_name) self.local = local_cls(target=None, name=None, **self.local_params) def _get_params(self): """Helper function to return parameters""" return { - 'host': self.host, - 'internal': self.local.internal, - 'external': self.local.external, + "host": self.host, + "internal": self.local.internal, + "external": self.local.external, } + exports["TFTPProvider"] = ProviderGenericExport exports["NFSProvider"] = ProviderGenericExport exports["HTTPProvider"] = ProviderGenericExport + @attr.s class EthernetPortExport(ResourceExport): """ResourceExport for a ethernet interface""" @@ -566,23 +597,21 @@ class EthernetPortExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() from ..resource.ethernetport import SNMPEthernetPort - self.data['cls'] = "EthernetPort" + + self.data["cls"] = "EthernetPort" self.local = SNMPEthernetPort(target=None, name=None, **self.local_params) def _get_params(self): """Helper function to return parameters""" - return { - 'switch': self.local.switch, - 'interface': self.local.interface, - 'extra': self.local.extra - } + return {"switch": self.local.switch, "interface": self.local.interface, "extra": self.local.extra} + exports["SNMPEthernetPort"] = EthernetPortExport @attr.s(eq=False) class GPIOSysFSExport(ResourceExport): - _gpio_sysfs_path_prefix = '/sys/class/gpio' + _gpio_sysfs_path_prefix = "/sys/class/gpio" """ResourceExport for GPIO lines accessed directly from userspace""" @@ -590,49 +619,51 @@ def __attrs_post_init__(self): super().__attrs_post_init__() if self.cls == "SysfsGPIO": from ..resource.base import SysfsGPIO + self.local = SysfsGPIO(target=None, name=None, **self.local_params) elif self.cls == "MatchedSysfsGPIO": from ..resource.udev import MatchedSysfsGPIO + self.local = MatchedSysfsGPIO(target=None, name=None, **self.local_params) - self.data['cls'] = "NetworkSysfsGPIO" - self.export_path = Path(GPIOSysFSExport._gpio_sysfs_path_prefix, - f'gpio{self.local.index}') + self.data["cls"] = "NetworkSysfsGPIO" + self.export_path = Path(GPIOSysFSExport._gpio_sysfs_path_prefix, f"gpio{self.local.index}") self.system_exported = False def _get_params(self): """Helper function to return parameters""" return { - 'host': self.host, - 'index': self.local.index, + "host": self.host, + "index": self.local.index, } def _get_start_params(self): return { - 'index': self.local.index, + "index": self.local.index, } def _start(self, start_params): """Start a GPIO export to userspace""" - index = start_params['index'] + index = start_params["index"] if self.export_path.exists(): self.system_exported = True return - export_sysfs_path = os.path.join(GPIOSysFSExport._gpio_sysfs_path_prefix, 'export') - with open(export_sysfs_path, mode='wb') as export: - export.write(str(index).encode('utf-8')) + export_sysfs_path = 
os.path.join(GPIOSysFSExport._gpio_sysfs_path_prefix, "export") + with open(export_sysfs_path, mode="wb") as export: + export.write(str(index).encode("utf-8")) def _stop(self, start_params): """Disable a GPIO export to userspace""" - index = start_params['index'] + index = start_params["index"] if self.system_exported: return - export_sysfs_path = os.path.join(GPIOSysFSExport._gpio_sysfs_path_prefix, 'unexport') - with open(export_sysfs_path, mode='wb') as unexport: - unexport.write(str(index).encode('utf-8')) + export_sysfs_path = os.path.join(GPIOSysFSExport._gpio_sysfs_path_prefix, "unexport") + with open(export_sysfs_path, mode="wb") as unexport: + unexport.write(str(index).encode("utf-8")) + exports["SysfsGPIO"] = GPIOSysFSExport exports["MatchedSysfsGPIO"] = GPIOSysFSExport @@ -649,9 +680,10 @@ class NetworkServiceExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() from ..resource.networkservice import NetworkService - self.data['cls'] = "NetworkService" + + self.data["cls"] = "NetworkService" self.local = NetworkService(target=None, name=None, **self.local_params) - if '%' in self.local_params['address']: + if "%" in self.local_params["address"]: self.proxy_required = True def _get_params(self): @@ -660,22 +692,28 @@ def _get_params(self): **self.local_params, } + exports["NetworkService"] = NetworkServiceExport + @attr.s class HTTPVideoStreamExport(ResourceExport): """ResourceExport for an HTTPVideoStream""" + def __attrs_post_init__(self): super().__attrs_post_init__() from ..resource.httpvideostream import HTTPVideoStream - self.data['cls'] = "HTTPVideoStream" + + self.data["cls"] = "HTTPVideoStream" self.local = HTTPVideoStream(target=None, name=None, **self.local_params) def _get_params(self): return self.local_params + exports["HTTPVideoStream"] = HTTPVideoStreamExport + @attr.s(eq=False) class LXAIOBusNodeExport(ResourceExport): """ResourceExport for LXAIOBusNode devices accessed via the HTTP API""" @@ -683,32 +721,38 @@ class LXAIOBusNodeExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() local_cls_name = self.cls - self.data['cls'] = f"Network{self.cls}" + self.data["cls"] = f"Network{self.cls}" from ..resource import lxaiobus + local_cls = getattr(lxaiobus, local_cls_name) self.local = local_cls(target=None, name=None, **self.local_params) def _get_params(self): return self.local_params + exports["LXAIOBusPIO"] = LXAIOBusNodeExport + @attr.s(eq=False) class AndroidNetFastbootExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() local_cls_name = self.cls - self.data['cls'] = f"Remote{self.cls}" + self.data["cls"] = f"Remote{self.cls}" from ..resource import fastboot + local_cls = getattr(fastboot, local_cls_name) self.local = local_cls(target=None, name=None, **self.local_params) def _get_params(self): """Helper function to return parameters""" - return {'host' : self.host, **self.local_params} + return {"host": self.host, **self.local_params} + exports["AndroidNetFastboot"] = AndroidNetFastbootExport + @attr.s(eq=False) class YKUSHPowerPortExport(ResourceExport): """ResourceExport for YKUSHPowerPort devices""" @@ -716,29 +760,29 @@ class YKUSHPowerPortExport(ResourceExport): def __attrs_post_init__(self): super().__attrs_post_init__() local_cls_name = self.cls - self.data['cls'] = f"Network{local_cls_name}" + self.data["cls"] = f"Network{local_cls_name}" from ..resource import ykushpowerport + local_cls = getattr(ykushpowerport, local_cls_name) self.local = 
local_cls(target=None, name=None, **self.local_params) def _get_params(self): - return { - "host": self.host, - **self.local_params - } + return {"host": self.host, **self.local_params} + exports["YKUSHPowerPort"] = YKUSHPowerPortExport + class ExporterSession(ApplicationSession): def onConnect(self): """Set up internal datastructures on successful connection: - Setup loop, name, authid and address - Join the coordinator as an exporter""" - self.loop = self.config.extra['loop'] - self.name = self.config.extra['name'] - self.hostname = self.config.extra['hostname'] - self.isolated = self.config.extra['isolated'] - self.address = self._transport.transport.get_extra_info('sockname')[0] + self.loop = self.config.extra["loop"] + self.name = self.config.extra["name"] + self.hostname = self.config.extra["hostname"] + self.isolated = self.config.extra["isolated"] + self.address = self._transport.transport.get_extra_info("sockname")[0] self.checkpoint = time.monotonic() self.poll_task = None @@ -765,35 +809,31 @@ async def onJoin(self, details): """ print(details) - prefix = f'org.labgrid.exporter.{self.name}' + prefix = f"org.labgrid.exporter.{self.name}" try: - await self.register(self.acquire, f'{prefix}.acquire') - await self.register(self.release, f'{prefix}.release') - await self.register(self.version, f'{prefix}.version') + await self.register(self.acquire, f"{prefix}.acquire") + await self.register(self.release, f"{prefix}.release") + await self.register(self.version, f"{prefix}.version") config_template_env = { - 'env': os.environ, - 'isolated': self.isolated, - 'hostname': self.hostname, - 'name': self.name, + "env": os.environ, + "isolated": self.isolated, + "hostname": self.hostname, + "name": self.name, } - resource_config = ResourceConfig( - self.config.extra['resources'], config_template_env - ) + resource_config = ResourceConfig(self.config.extra["resources"], config_template_env) for group_name, group in resource_config.data.items(): group_name = str(group_name) for resource_name, params in group.items(): resource_name = str(resource_name) - if resource_name == 'location': + if resource_name == "location": continue if params is None: continue - cls = params.pop('cls', resource_name) + cls = params.pop("cls", resource_name) # this may call back to acquire the resource immediately - await self.add_resource( - group_name, resource_name, cls, params - ) + await self.add_resource(group_name, resource_name, cls, params) self.checkpoint = time.monotonic() except Exception: # pylint: disable=broad-except @@ -817,7 +857,7 @@ async def onDisconnect(self): if self.poll_task: self.poll_task.cancel() await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up + await asyncio.sleep(0.5) # give others a chance to clean up self.loop.stop() async def acquire(self, group_name, resource_name, place_name): @@ -871,25 +911,22 @@ async def poll(self): async def add_resource(self, group_name, resource_name, cls, params): """Add a resource to the exporter and update status on the coordinator""" - print( - f"add resource {group_name}/{resource_name}: {cls}/{params}" - ) + print(f"add resource {group_name}/{resource_name}: {cls}/{params}") group = self.groups.setdefault(group_name, {}) assert resource_name not in group export_cls = exports.get(cls, ResourceEntry) config = { - 'avail': export_cls is ResourceEntry, - 'cls': cls, - 'params': params, + "avail": export_cls is ResourceEntry, + "cls": cls, + "params": params, } proxy_req = self.isolated if 
issubclass(export_cls, ResourceExport): - group[resource_name] = export_cls(config, host=self.hostname, proxy=getfqdn(), - proxy_required=proxy_req) + group[resource_name] = export_cls(config, host=self.hostname, proxy=getfqdn(), proxy_required=proxy_req) else: - config['params']['extra'] = { - 'proxy': getfqdn(), - 'proxy_required': proxy_req, + config["params"]["extra"] = { + "proxy": getfqdn(), + "proxy_required": proxy_req, } group[resource_name] = export_cls(config) await self.update_resource(group_name, resource_name) @@ -899,73 +936,56 @@ async def update_resource(self, group_name, resource_name): resource = self.groups[group_name][resource_name] data = resource.asdict() print(data) - await self.call( - 'org.labgrid.coordinator.set_resource', group_name, resource_name, - data - ) + await self.call("org.labgrid.coordinator.set_resource", group_name, resource_name, data) def main(): parser = argparse.ArgumentParser() parser.add_argument( - '-x', - '--crossbar', - metavar='URL', + "-x", + "--crossbar", + metavar="URL", type=str, default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="crossbar websocket URL" + help="crossbar websocket URL", ) parser.add_argument( - '-n', - '--name', - dest='name', + "-n", + "--name", + dest="name", type=str, default=None, - help='public name of this exporter (defaults to the system hostname)' + help="public name of this exporter (defaults to the system hostname)", ) parser.add_argument( - '--hostname', - dest='hostname', + "--hostname", + dest="hostname", type=str, default=None, - help='hostname (or IP) published for accessing resources (defaults to the system hostname)' + help="hostname (or IP) published for accessing resources (defaults to the system hostname)", ) parser.add_argument( - '--fqdn', - action='store_true', - default=False, - help='Use fully qualified domain name as default for hostname' + "--fqdn", action="store_true", default=False, help="Use fully qualified domain name as default for hostname" ) + parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") parser.add_argument( - '-d', - '--debug', - action='store_true', + "-i", + "--isolated", + action="store_true", default=False, - help="enable debug mode" - ) - parser.add_argument( - '-i', - '--isolated', - action='store_true', - default=False, - help="enable isolated mode (always request SSH forwards)" - ) - parser.add_argument( - 'resources', - metavar='RESOURCES', - type=str, - help='resource config file name' + help="enable isolated mode (always request SSH forwards)", ) + parser.add_argument("resources", metavar="RESOURCES", type=str, help="resource config file name") args = parser.parse_args() - level = 'debug' if args.debug else 'info' + level = "debug" if args.debug else "info" extra = { - 'name': args.name or gethostname(), - 'hostname': args.hostname or (getfqdn() if args.fqdn else gethostname()), - 'resources': args.resources, - 'isolated': args.isolated + "name": args.name or gethostname(), + "hostname": args.hostname or (getfqdn() if args.fqdn else gethostname()), + "resources": args.resources, + "isolated": args.isolated, } crossbar_url = args.crossbar @@ -977,7 +997,7 @@ def main(): print(f"exporter hostname: {extra['hostname']}") print(f"resource config file: {extra['resources']}") - extra['loop'] = loop = asyncio.get_event_loop() + extra["loop"] = loop = asyncio.get_event_loop() if args.debug: loop.set_debug(True) runner = ApplicationRunner(url=crossbar_url, realm=crossbar_realm, extra=extra) From 
7acfa35dcd555dbcbb4df321f0d332d010ccfac7 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 14 Jun 2024 17:44:07 +0200 Subject: [PATCH 210/384] use setuptools_scm to generate a version file This avoids the overhead of using importlib.metadata to search for the version. Signed-off-by: Jan Luebbe --- .gitignore | 1 + labgrid/__init__.py | 5 +++++ labgrid/util/version.py | 6 ++++++ pyproject.toml | 1 + 4 files changed, 13 insertions(+) diff --git a/.gitignore b/.gitignore index 84192a924..cc74652b4 100644 --- a/.gitignore +++ b/.gitignore @@ -17,6 +17,7 @@ /dist /.pytest_cache/ /htmlcov/ +/labgrid/_version.py /dockerfiles/staging/crossbar/* !/dockerfiles/staging/crossbar/places_example.yaml /.idea diff --git a/labgrid/__init__.py b/labgrid/__init__.py index 209352f68..4d4366e49 100644 --- a/labgrid/__init__.py +++ b/labgrid/__init__.py @@ -6,3 +6,8 @@ from .step import step, steps from .stepreporter import StepReporter from .consoleloggingreporter import ConsoleLoggingReporter + +try: + from ._version import __version__ +except ImportError: + __version__ = "unknown" diff --git a/labgrid/util/version.py b/labgrid/util/version.py index 89bbe4158..dbd79c7df 100644 --- a/labgrid/util/version.py +++ b/labgrid/util/version.py @@ -4,6 +4,12 @@ def labgrid_version(): + try: + from .._version import __version__ + return __version__ + except ModuleNotFoundError: + pass + import contextlib from importlib.metadata import PackageNotFoundError, version diff --git a/pyproject.toml b/pyproject.toml index 0c1c590ee..270c5750c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -153,6 +153,7 @@ packages = [ [tool.setuptools_scm] local_scheme = "no-local-version" +version_file = "labgrid/_version.py" [tool.pytest.ini_options] testpaths = [ From 72825417f4a7f38ff2c19d04b7142e13af14232b Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 21 Jun 2024 10:58:20 +0200 Subject: [PATCH 211/384] remote/exporter: refactor ser2net version check Turn the version variable into a tuple which consists of (major, minor, micro). Convert the exit code check to use tuples instead of strings and also correct the version check for the YAML configuration to use a tuple. No functional changes intended. Signed-off-by: Rouven Czerwinski --- labgrid/remote/exporter.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 03a8cd26c..b26a1162c 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -233,16 +233,16 @@ def _start(self, start_params): # Ser2net has switched to using YAML format at version 4.0.0. result = subprocess.run([self.ser2net_bin, "-v"], capture_output=True, text=True) _, _, version = str(result.stdout).split(" ") - major_version = version.split(".")[0] + version = tuple(map(int, version.strip().split("."))) # There is a bug in ser2net between 4.4.0 and 4.6.1 where it # returns 1 on a successful call to 'ser2net -v'. We don't want # a failure because of this, so raise an error only if ser2net # is not one of those versions. 
- if version.strip() not in ["4.4.0", "4.5.0", "4.5.1", "4.6.0", "4.6.1"] and result.returncode == 1: + if version not in [(4, 4, 0), (4, 5, 0), (4, 5, 1), (4, 6, 0), (4, 6, 1)] and result.returncode == 1: raise ExporterError(f"ser2net {version} returned a nonzero code during version check.") - if int(major_version) >= 4: + if version >= (4, 0, 0): cmd = [ self.ser2net_bin, "-d", From d0b08a101a97345432f0bdd7448a1878e719162e Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 21 Jun 2024 11:23:00 +0200 Subject: [PATCH 212/384] remote/exporter: ser2net YAML config since 4.2.0 Contrary to the code, ser2net started using the YAML configuration in version 4.2.0, correct the exporter version check. Signed-off-by: Rouven Czerwinski --- labgrid/remote/exporter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index b26a1162c..5018206c6 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -242,7 +242,7 @@ def _start(self, start_params): if version not in [(4, 4, 0), (4, 5, 0), (4, 5, 1), (4, 6, 0), (4, 6, 1)] and result.returncode == 1: raise ExporterError(f"ser2net {version} returned a nonzero code during version check.") - if version >= (4, 0, 0): + if version >= (4, 2, 0): cmd = [ self.ser2net_bin, "-d", From a3dbfb559237469ac16b04e8a3967e00f05c3105 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 21 Jun 2024 11:23:47 +0200 Subject: [PATCH 213/384] remote/exporter: always set tcp for ser2net Always indicate that the network connection should use tcp. This was not mandatory for versions before 4.6.0, but is for versions afterwards. This should not impact the behavior for earlier versions, since all versions since the introduction of YAML configurations in 4.2.0 support this parameter. Fixes #1416 Signed-off-by: Rouven Czerwinski --- labgrid/remote/exporter.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 5018206c6..dde83bb7a 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -248,7 +248,7 @@ def _start(self, start_params): "-d", "-n", "-Y", - f"connection: &con01# accepter: telnet(rfc2217,mode=server),{self.port}", + f"connection: &con01# accepter: telnet(rfc2217,mode=server),tcp,{self.port}", "-Y", f' connector: serialdev(nouucplock=true),{start_params["path"]},{self.local.speed}n81,local', # pylint: disable=line-too-long "-Y", From e8cbdbfc231a64ccf2b7fde178ffba222344e52d Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Thu, 2 May 2024 10:35:20 +0200 Subject: [PATCH 214/384] CHANGES: update changelog for 24.0 release Signed-off-by: Rouven Czerwinski --- CHANGES.rst | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 75 insertions(+), 4 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 25e699250..56996efad 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -10,10 +10,44 @@ New Features in 24.0 - A new log level called ``CONSOLE`` has been added between the default ``INFO`` and ``DEBUG`` levels. This level will show all reads and writes made to the serial console during testing. -- The `QEMUDriver` now has an additional ``disk_opts`` property which can be +- The docker support was extended to support buildx, allowing the build of arm64 + container images. +- The tool lookup function has been extended to return the original name in case + the path can't be found. This makes specification of the qemu binary easier to + use. 
+- The ``bindings`` base class has been extended, allowing the user to retrieve
+  all resources used by a driver.
+- Support for STLink V2 was added.
+- ``UBootStrategy`` was extended with a ``force()`` function.
+- labgrid was switched from pysnmp to pysnmp-lextudio.
+- Support for Segger J-Link was added.
+- Place tags are now exposed by the RemotePlace.
+- The sync-places contrib script has gained support for named matches.
+- Remote support for YKush Devices was added.
+- Support for sigrok DMMs was added.
+- Support for Digital Outputs switched via HTTP was added.
+- The ``QEMUDriver`` has a new get_qemu_base_args() function which can be used to
+  extract the arguments passed to qemu.
+- The ``SSHDriver`` has gained support to forward unix sockets.
+- The exporter has gained an ``--fqdn`` argument to set the hostname to the
+  fully qualified domain name instead of the hostname.
+- The ``QEMUDriver`` now has an additional ``disk_opts`` property which can be
   used to pass additional options for the disk directly to QEMU
+- All drivers now inherit a logger from the ``Driver`` base class and many
+  drivers were changed to use this logger.
+- The new ``poe_mib`` backend allows switching of power over Ethernet-capable
+  ports on switches that use the corresponding SNMP MIB.
+- The ``RawNetworkInterfaceDriver`` allows the replay and recording of network
+  packets on ethernet interfaces.
+- The i.MX93 usb loader USB ID has been added to the ``IMXUSBLoader`` resource.
+- Support for udev matched GPIOs has been added.
 - labgrid-client now has a ``write-files`` subcommand to copy files onto mass
   storage devices.
+- The ``NetworkPowerPort`` supports a new backend ``ubus``. It controls PoE
+  switches running OpenWrt using the ubus interface.
+- The pyproject.toml gained a config for `ruff `_.
+- ``setuptools_scm`` is now used to generate a version file.
+

 Bug fixes in 24.0
 ~~~~~~~~~~~~~~~~~
@@ -22,19 +56,55 @@
   of pip.
 - Several tests have gained an importorskip() call to skip them if the module
   is not available.
+- labgrid now uses its own pyserial fork from pypi since installation from
+  github as an egg is no longer properly supported.
 - The build-and-release workflow supports building wheels.
-- The markers now are restricted to patterns which won't match WARN,
-  ERROR, INFO and similar log notifiers.
 - Fix named SSH lookups in conjunction with an environment file in
   labgrid-client.
+- The crossbar virtual-environment now needs to be separate from the labgrid
+  environment, for more information please consult the `current documentation `_.
+- The markers now are restricted to patterns which won't match WARN,
+  ERROR, INFO and similar log notifiers.
+- A race inside the ``SSHDriver`` cleanup has been fixed.
+- The ``labgrid-client monitor`` command now outputs the full resource identifier.
+- Many of the USB loader commands e.g. imx-usb-loader will now print to the
+  console when logging is not enabled.
+- An ``UnboundLocalError`` inside the atomic_replace code which is used inside the
+  coordinator was fixed.
+- Resources of different classes can now have the same name.
+- A bug within the pytest logging setup was fixed.
+- The ``QemuDriver`` correctly handles the different command lines for virgl
+  enablement.
+- A bug was fixed where resource names were ignored during lookup of the correct
+  power driver.
+- ManagedFile was fixed to work with the stat command on Darwin.
+- Instead of using a private member on the pytest config, the labgrid plugin now
+  uses the pytest config stash.
+- The ``ShellDriver`` was fixed to set the correct status attribute.
+- The USBNetworkInterface now warns if the interface name is set, as it will be
+  overwritten by the ResourceManager to assign the correct interface name.
 - Fix sftp option issue in SSH driver that caused sftp to only work once per
   test run.
 - ManagedFile NFS detection heuristic now does symlink resolution on the local
   host.
-- The password for the ShellDriver can now be an empty string.
+- XModem support within the ShellDriver was fixed by removing the newline from
+  the marker.
+- A typo in the ``NFSProviderDriver`` class was fixed. Documentation was already
+  correct, however the classname contained an additional P.
+- The ``--loop`` argument for labgrid-client console was fixed.
+- The password for the ``ShellDriver`` can now be an empty string.
+- The default crossbar configuration now enables auto-fragmentation to handle
+  bigger labs where the payload size can be bigger than 1 megabyte.
+- The ``SSHDriver`` redirects ``/dev/null`` to stdin of commands run via SSH.
+  This prevents unexpected input, especially when using the
+  ``ManualPowerDriver`` or a REPL.
+- The ``ser2net`` version check for YAML configurations in the exporter was
+  fixed.
+- The exporter forces ``ser2net`` TCP connections for versions >=4.2.0.


 Breaking changes in 24.0
 ~~~~~~~~~~~~~~~~~~~~~~~~
+- Support for Python 3.7 was dropped.
 - Support for the legacy ticket authentication was dropped: If the coordinator
   logs ModuleNotFoundError on startup, switch the crossbar config to anonymous
   authentication (see ``.crossbar/config-anonymous.yaml`` for an example).
@@ -82,6 +152,7 @@ Breaking changes in 24.0

 Known issues in 24.0
 ~~~~~~~~~~~~~~~~~~~~
+- Some client commands return 0 even if the command failed.


 Release 23.0 (Released Apr 24, 2023)

From a5a8b6590f3dc57360e86346dd1f18b6071fe4a6 Mon Sep 17 00:00:00 2001
From: Nick Cao
Date: Thu, 23 May 2024 15:29:13 -0400
Subject: [PATCH 215/384] remote/client: fallback to telnet when microcom is not available

microcom is generally not available on rpm based distributions[1],
making it hard to set up labgrid client on them.

[1] https://repology.org/project/microcom/versions

Signed-off-by: Nick Cao
[r.czerwinski@pengutronix.de: rebased, added CHANGES.rst entry]
Signed-off-by: Rouven Czerwinski
---
 CHANGES.rst              |  1 +
 doc/getting_started.rst  |  4 ++--
 labgrid/remote/client.py | 27 +++++++++++++++++++++------
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/CHANGES.rst b/CHANGES.rst
index 56996efad..c8a4d9545 100644
--- a/CHANGES.rst
+++ b/CHANGES.rst
@@ -47,6 +47,7 @@ New Features in 24.0
   switches running OpenWrt using the ubus interface.
 - The pyproject.toml gained a config for `ruff `_.
 - ``setuptools_scm`` is now used to generate a version file.
+- labgrid-client console will fall back to telnet if microcom is not available.


 Bug fixes in 24.0

diff --git a/doc/getting_started.rst b/doc/getting_started.rst
index f07086129..7d1990bb5 100644
--- a/doc/getting_started.rst
+++ b/doc/getting_started.rst
@@ -349,8 +349,8 @@ Now we can connect to the serial console:

    labgrid-venv $ labgrid-client -p example-place console

-.. note:: Using remote connection requires ``microcom`` installed on the host
-   where the labgrid-client is called.
+.. note:: Using remote connection requires ``microcom`` or ``telnet`` installed
+   on the host where the labgrid-client is called.
See :ref:`remote-usage` for some more advanced features. For a complete reference have a look at the :doc:`labgrid-client(1) ` diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 63c0cf859..8b9ef847a 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -13,6 +13,7 @@ import signal import sys import shlex +import shutil import json from textwrap import indent from socket import gethostname @@ -826,18 +827,32 @@ async def _console(self, place, target, timeout, *, logfile=None, loop=False, li # check for valid resources assert port is not None, "Port is not set" - call = ["microcom", "-s", str(resource.speed), "-t", f"{host}:{port}"] + microcom_bin = shutil.which("microcom") - if listen_only: - call.append("--listenonly") + if microcom_bin is not None: + call = [microcom_bin, "-s", str(resource.speed), "-t", f"{host}:{port}"] + + if listen_only: + call.append("--listenonly") + + if logfile: + call.append(f"--logfile={logfile}") + else: + call = ["telnet", host, str(port)] + + logging.info("microcom not available, using telnet instead") + + if listen_only: + logging.warning("--listenonly option not supported by telnet, ignoring") + + if logfile: + logging.warning("--logfile option not supported by telnet, ignoring") - if logfile: - call.append(f"--logfile={logfile}") print(f"connecting to {resource} calling {' '.join(call)}") try: p = await asyncio.create_subprocess_exec(*call) except FileNotFoundError as e: - raise ServerError(f"failed to execute microcom: {e}") + raise ServerError(f"failed to execute remote console command: {e}") while p.returncode is None: try: await asyncio.wait_for(p.wait(), 1.0) From a0fc26d2beacdedbce0a7f57bec9747bab9bb923 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 13:02:52 +0200 Subject: [PATCH 216/384] driver/serialdigitaloutput: fix get() for DTR signal If self.signal is "dtr", a ValueError is raised. That happens because the if/else logic is wrong for that case. Fix that. Signed-off-by: Bastian Krause --- labgrid/driver/serialdigitaloutput.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/serialdigitaloutput.py b/labgrid/driver/serialdigitaloutput.py index 2a0cd64b7..cc1c1d821 100644 --- a/labgrid/driver/serialdigitaloutput.py +++ b/labgrid/driver/serialdigitaloutput.py @@ -41,7 +41,7 @@ def __attrs_post_init__(self): def get(self): if self.signal == "dtr": val = self._p.dtr - if self.signal == "rts": + elif self.signal == "rts": val = self._p.rts else: raise ValueError("Expected signal to be dtr or rts") From 63685d62c9cb4137cbbb4905079d31b2a56de4c8 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 12:30:17 +0200 Subject: [PATCH 217/384] util/ssh: stop keepalive in disconnect() when existing connection is used SSHConnection does not open a new connection if an existing connection is open. It reuses the connection and starts a keepalive command (cat). On disconnect(), the keepalive command is only stopped for newly initiated connections, not for existing ones. Fix that by stopping the keepalive first, unconditionally. 
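As a simplified sketch, the teardown order after this change looks like
this (condensed from the disconnect() hunk below; logging and error
handling are unchanged):

    def disconnect(self):
        assert self._connected
        try:
            # stop the keepalive 'cat' unconditionally; it is also
            # started when an existing master connection is reused
            self._stop_keepalive()
            if self._socket:
                self._stop_own_master()
        finally:
            self._connected = False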
Signed-off-by: Bastian Krause --- labgrid/util/ssh.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/labgrid/util/ssh.py b/labgrid/util/ssh.py index 8df205510..62bbf4cbb 100644 --- a/labgrid/util/ssh.py +++ b/labgrid/util/ssh.py @@ -521,9 +521,10 @@ def _stop_keepalive(self): def disconnect(self): assert self._connected try: + self._stop_keepalive() + if self._socket: self._logger.info("Closing SSH connection to %s", self.host) - self._stop_keepalive() self._stop_own_master() finally: self._connected = False From ad07f493555dc4ec77e2e4d3bdaa636e7d7210c6 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 14:27:13 +0200 Subject: [PATCH 218/384] doc/configuration: drop additional empty lines Signed-off-by: Bastian Krause --- doc/configuration.rst | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 67e687eb5..dde59d4a9 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -1047,7 +1047,6 @@ A :any:`USBTMC` resource describes an oscilloscope connected via the *USB TMC protocol*. The low-level communication is handled by the "usbtmc" kernel driver. - .. code-block:: yaml USBTMC: @@ -1134,7 +1133,6 @@ NetworkUSBFlashableDevice A :any:`NetworkUSBFlashableDevice` resource describes a `USBFlashableDevice`_ resource available on a remote computer - DediprogFlasher ~~~~~~~~~~~~~~~ A :any:`DediprogFlasher` resource is used to configure the parameters to a @@ -2660,7 +2658,6 @@ Binds to: dtb: '../images/mydtb.dtb' kernel: '../images/vmlinuz' - Implements: - :any:`ConsoleProtocol` - :any:`PowerProtocol` From c7237e04b3374785a12dac4163adba1f81f975a8 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 14:29:49 +0200 Subject: [PATCH 219/384] doc/configuration: use consistent optional notation for arguments Signed-off-by: Bastian Krause --- doc/configuration.rst | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index dde59d4a9..3b9252cc5 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -519,9 +519,9 @@ Arguments: - body_deasserted (str): Request body to send to de-assert the output - method (str, default="PUT"): HTTP method to set a new state - - url_get (str): URL to use instead of ``url`` for getting the state - - body_get_asserted (str): Regular Expression that matches an asserted response body - - body_get_deasserted (str): Regular Expression that matches a de-asserted response body + - url_get (str): optional, URL to use instead of ``url`` for getting the state + - body_get_asserted (str): optional, regular expression that matches an asserted response body + - body_get_deasserted (str): optional, regular expression that matches a de-asserted response body Used by: - `HttpDigitalOutputDriver`_ @@ -1197,7 +1197,7 @@ Arguments: ASRL, TCPIP... - url (str): device identifier on selected resource, e.g. for TCPIP resource - - backend (str): Visa library backend, e.g. '@sim' for pyvisa-sim backend + - backend (str): optional, Visa library backend, e.g. 
'@sim' for pyvisa-sim backend Used by: - `PyVISADriver`_ @@ -3081,10 +3081,10 @@ Implements: Arguments: - image_uri (str): identifier of the docker image to use (may have a tag suffix) - - command (str): command to run in the container (optional, depends on image) - - volumes (list): list to configure volumes mounted inside the container (optional) + - command (str): optional, command to run in the container (depends on image) + - volumes (list): optional, list to configure volumes mounted inside the container - container_name (str): name of the container - - environment (list): list of environment variables (optional) + - environment (list): optional, list of environment variables - host_config (dict): dictionary of host configurations - network_services (list): dictionaries that describe individual `NetworkService`_ instances that come alive when the container is created. The "address" argument From c47ce18972a8446a536c83fb285331e7d6514cfb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 14:30:32 +0200 Subject: [PATCH 220/384] doc/configuration: use consistent lower case description for arguments Signed-off-by: Bastian Krause --- doc/configuration.rst | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 3b9252cc5..e3264bf14 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -515,8 +515,8 @@ expressions. Arguments: - url (str): URL to use for setting a new state - - body_asserted (str): Request body to send to assert the output - - body_deasserted (str): Request body to send to de-assert the output + - body_asserted (str): request body to send to assert the output + - body_deasserted (str): request body to send to de-assert the output - method (str, default="PUT"): HTTP method to set a new state - url_get (str): optional, URL to use instead of ``url`` for getting the state @@ -1387,7 +1387,7 @@ interrupts or unix kill signals may lead to hanging containers, however; therefore auto-cleanup is important. Arguments: - - docker_daemon_url (str): The url of the daemon to use for this target + - docker_daemon_url (str): url of the daemon to use for this target Used by: - `DockerDriver`_ From bfbfd25e9823c96b3f565b0c0bb670468fe58392 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 14:31:05 +0200 Subject: [PATCH 221/384] doc/configuration: add required invert argument to SerialPortDigitalOutputDriver example Signed-off-by: Bastian Krause --- doc/configuration.rst | 1 + 1 file changed, 1 insertion(+) diff --git a/doc/configuration.rst b/doc/configuration.rst index e3264bf14..dc61441ff 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -2211,6 +2211,7 @@ Implements: SerialPortDigitalOutputDriver: signal: 'dtr' + invert: false bindings: serial: 'nameOfSerial' From 4c4715d30153b3ea4457d345025f31438ed1ed4f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 27 Jun 2024 14:31:29 +0200 Subject: [PATCH 222/384] doc/configuration: split DediprogFlashDriver example Driver and image are not expected on the same indentation level. Other drivers use an example with two sections, one for the driver and one for global options like "images". Do the same for the DediprogFlashDriver to prevent confusion. 
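For reference, a minimal sketch of the resulting layout in an environment
config (the targets/drivers nesting shown here follows the usual labgrid
convention and is not part of this diff):

    targets:
      main:
        drivers:
          DediprogFlashDriver:
            image: 'foo'

    images:
      foo: '../images/image_to_load.raw'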
Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index dc61441ff..ad2a75e66 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -3024,6 +3024,9 @@ The :any:`DediprogFlashDriver` is used to flash an SPI device using DediprogFlas
       DediprogFlashDriver:
         image: 'foo'
+
+.. code-block:: yaml
+
   images:
     foo: '../images/image_to_load.raw'

From c3a3fe586c9211bd5e1331ed8d25bafbfb9455a8 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Thu, 27 Jun 2024 14:35:59 +0200
Subject: [PATCH 223/384] doc/configuration: fix a typo: infromation -> information

Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index ad2a75e66..2bedaec47 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -236,7 +236,7 @@ Currently available are:

 ``ubus``
     Controls *PoE switches* running OpenWrt using the *ubus* interface.
-    Further infromation available at
+    Further information available at

 Used by:
   - `NetworkPowerDriver`_

From 11cce1d27551e61853c225a686f74f7bebab37bb Mon Sep 17 00:00:00 2001
From: Ederson de Souza
Date: Thu, 27 Jun 2024 15:46:40 -0700
Subject: [PATCH 224/384] driver/power: Remove check for maximum number of outlets for raritan

No need to ensure a maximum number - new models can have more outlets.

Signed-off-by: Ederson de Souza
---
 labgrid/driver/power/raritan.py | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/labgrid/driver/power/raritan.py b/labgrid/driver/power/raritan.py
index 1fa7b2385..597e72d0b 100644
--- a/labgrid/driver/power/raritan.py
+++ b/labgrid/driver/power/raritan.py
@@ -10,12 +10,9 @@

 OID = ".1.3.6.1.4.1.13742.6.4.1.2.1"

-NUMBER_OF_OUTLETS = 16

 def power_set(host, port, index, value):
-    assert 1 <= int(index) <= NUMBER_OF_OUTLETS
-
     _snmp = SimpleSNMP(host, 'private', port=port)
     outlet_control_oid = "{}.2.1.{}".format(OID, index)

@@ -23,8 +20,6 @@

 def power_get(host, port, index):
-    assert 1 <= int(index) <= NUMBER_OF_OUTLETS
-
     _snmp = SimpleSNMP(host, 'public', port=port)
     output_status_oid = "{}.3.1.{}".format(OID, index)

From 169b525c8669e3736663c1427fd5d2ed80a31514 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Sun, 7 Jul 2024 21:33:34 +0200
Subject: [PATCH 225/384] driver/externalconsoledriver: use bufsize=0 for opening external process

The ExternalConsoleDriver works on top of a command executed on the
local computer. Until now the system default of io.DEFAULT_BUFFER_SIZE
(typically 8192 bytes) was used for bufsize. This does not work well
for this use case: we want unbuffered reads and writes. So set the
bufsize to 0, see
https://docs.python.org/3/library/subprocess.html#popen-constructor .
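For illustration, the resulting call is (condensed from the hunk below):

    self._child = subprocess.Popen(
        cmd,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=None,
        bufsize=0,  # unbuffered: reads and writes pass through immediately
    )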
Signed-off-by: Bastian Krause
---
 labgrid/driver/externalconsoledriver.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/externalconsoledriver.py b/labgrid/driver/externalconsoledriver.py
index 2ab16ada5..b5c7ffd17 100644
--- a/labgrid/driver/externalconsoledriver.py
+++ b/labgrid/driver/externalconsoledriver.py
@@ -33,7 +33,7 @@ def open(self):
             return
         cmd = shlex.split(self.cmd)
         self._child = subprocess.Popen(
-            cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None
+            cmd, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=None, bufsize=0
         )

         # make stdout non-blocking

From 0cb2186e9a448443eb7926070cd68e02b1914b38 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 17 Jul 2024 17:58:35 +0200
Subject: [PATCH 226/384] driver/power/tinycontrol: fix import order

Signed-off-by: Bastian Krause
---
 labgrid/driver/power/tinycontrol.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/driver/power/tinycontrol.py b/labgrid/driver/power/tinycontrol.py
index fad5e9224..403a0176e 100644
--- a/labgrid/driver/power/tinycontrol.py
+++ b/labgrid/driver/power/tinycontrol.py
@@ -9,10 +9,10 @@
     index: 3
 """

-import requests
 from urllib.parse import urljoin
 import xml.etree.ElementTree as ET

+import requests

 def power_set(host, port, index, value):
     assert port is None

From bc6edfe90a834dcb439fcf3b0701095a88e82af8 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Mon, 22 Jul 2024 15:11:46 +0200
Subject: [PATCH 227/384] logging: fix class name in StepLogger warning

The class name was not adjusted when the warning was copied from
StepReporter.

Signed-off-by: Bastian Krause
---
 labgrid/logging.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/labgrid/logging.py b/labgrid/logging.py
index 097168a1e..a3807ecf1 100644
--- a/labgrid/logging.py
+++ b/labgrid/logging.py
@@ -136,7 +136,7 @@ def __attrs_post_init__(self):
         from warnings import warn

         warn(
-            "StepLogger should not be instantiated, use StepReporter.start()/.stop() instead.",
+            "StepLogger should not be instantiated, use StepLogger.start()/.stop() instead.",
             DeprecationWarning,
             stacklevel=2,
         )

From 2dd40348de9c52d932872e026af4c9669d34f48e Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Mon, 22 Jul 2024 16:51:43 +0200
Subject: [PATCH 228/384] doc: document StepLogger

Signed-off-by: Bastian Krause
---
 doc/configuration.rst | 29 +++++++++++++++++++++++++++++
 doc/development.rst   |  2 +-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/doc/configuration.rst b/doc/configuration.rst
index df04e2b74..3697dc744 100644
--- a/doc/configuration.rst
+++ b/doc/configuration.rst
@@ -3434,6 +3434,9 @@ Reporters

 StepReporter
 ~~~~~~~~~~~~
+.. warning::
+   The StepReporter is deprecated, use the `StepLogger`_ instead.
+
 The :any:`StepReporter` outputs individual labgrid steps to `STDOUT`.

 .. doctest::
@@ -3471,6 +3474,32 @@ The Reporter can be stopped with a call to the stop function:

 Stopping the ConsoleLoggingReporter if it has not been started will raise an
 AssertionError, as will starting an already started StepReporter.

+Loggers
+-------
+
+StepLogger
+~~~~~~~~~~
+The :any:`StepLogger` logs individual labgrid steps.
+
+Logging can be set up via ``labgrid.logging.basicConfig()``.
+
+.. doctest::
+
+   >>> import logging
+   >>> from labgrid.logging import basicConfig, StepLogger
+   >>> basicConfig(level=logging.INFO)
+   >>> StepLogger.start()
+
+The logger can be stopped with a call to the stop function:
+
+.. 
doctest:: + + >>> from labgrid.logging import StepLogger + >>> StepLogger.stop() + +Stopping the StepLogger if it has not been started will raise an +AssertionError, as will starting an already started StepLogger. + Environment Configuration ------------------------- The environment configuration for a test environment consists of a YAML file diff --git a/doc/development.rst b/doc/development.rst index 8b832f757..8b0351076 100644 --- a/doc/development.rst +++ b/doc/development.rst @@ -764,7 +764,7 @@ Step Tracing The Step infrastructure already collects timing and nesting information on executed commands, but is currently only used in the pytest plugin or via the -standalone StepReporter. +standalone StepLogger (or deprecated StepReporter). By writing these events to a file (or sqlite database) as a trace, we can collect data over multiple runs for later analysis. This would become more useful by passing recognized events (stack traces, From fb5e982d7c3954bfbe702b567dfe4f46ea02c684 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:54:47 +0200 Subject: [PATCH 229/384] autoinstall/main: use labgrid's StepLogger/basicConfig StepReporter is deprecated, use StepLogger instead. While at it, make use of labgrid's basicConfig(). It provides a very similar format string, so drop all redundant arguments. Signed-off-by: Bastian Krause --- labgrid/autoinstall/main.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/labgrid/autoinstall/main.py b/labgrid/autoinstall/main.py index 77e03db48..cf70dcd61 100644 --- a/labgrid/autoinstall/main.py +++ b/labgrid/autoinstall/main.py @@ -2,13 +2,12 @@ import ast import argparse import logging -import sys import multiprocessing import textwrap from time import sleep from .. import Environment, target_factory -from ..stepreporter import StepReporter +from ..logging import basicConfig, StepLogger from ..exceptions import NoResourceFoundError @@ -177,11 +176,7 @@ def join(self): handler.join() def main(): - logging.basicConfig( - level=logging.INFO, - format='%(levelname)7s %(name)-20s %(message)s', - stream=sys.stderr, - ) + basicConfig(level=logging.INFO) parser = argparse.ArgumentParser() parser.add_argument( @@ -210,7 +205,7 @@ def main(): env = Environment(config_file=args.config) - StepReporter.start() + StepLogger.start() manager = Manager(env, args) if not manager.configure(): From fe7b57075ebf3cc979c99937337e36ac252e76c1 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:53:30 +0200 Subject: [PATCH 230/384] doc/usage: adjust docs for legacy --lg-colored-steps, LG_COLOR_SCHEME Signed-off-by: Bastian Krause --- doc/usage.rst | 22 +++------------------- 1 file changed, 3 insertions(+), 19 deletions(-) diff --git a/doc/usage.rst b/doc/usage.rst index 1e1cd814c..4caff26bc 100644 --- a/doc/usage.rst +++ b/doc/usage.rst @@ -447,12 +447,9 @@ Other labgrid-related pytest plugin options are: If option is specified without path the current working directory is used. ``--lg-colored-steps`` - Enables the ColoredStepReporter. - Different events have different colors. - The more colorful, the more important. - In order to make less important output "blend into the background" different - color schemes are available. - See :ref:`LG_COLOR_SCHEME `. + Previously enabled the ColoredStepReporter, which has been removed with the + StepLogger introduction. + Kept for compatibility reasons without effect. ``--lg-initial-state=STATE_NAME`` Sets the Strategy's initial state. 
@@ -470,19 +467,6 @@ LG_ENV ^^^^^^ Behaves like ``LG_ENV`` for :doc:`labgrid-client `. -.. _usage-lgcolorscheme: - -LG_COLOR_SCHEME -^^^^^^^^^^^^^^^ -Influences the color scheme used for the Colored Step Reporter. -``dark`` is meant for dark terminal background. -``light`` is optimized for light terminal background. -``dark-256color`` and ``light-256color`` are respective variants for terminals -that support 256 colors. -By default, ``dark`` or ``dark-256color`` (depending on the terminal) are used. - -Takes effect only when used with ``--lg-colored-steps``. - LG_PROXY ^^^^^^^^ Specifies a SSH proxy host to be used for port forwards to access the From 04d6d07ef5bc7c6f5d4ebf45fe472f499c9726c2 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:27:28 +0200 Subject: [PATCH 231/384] examples/qemu-networking/qemunetworkstrategy: fix NameErrors Add missing imports and use correct variable names. Signed-off-by: Bastian Krause --- examples/qemu-networking/qemunetworkstrategy.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/examples/qemu-networking/qemunetworkstrategy.py b/examples/qemu-networking/qemunetworkstrategy.py index 995223494..48becd0da 100644 --- a/examples/qemu-networking/qemunetworkstrategy.py +++ b/examples/qemu-networking/qemunetworkstrategy.py @@ -17,7 +17,7 @@ import attr from labgrid import target_factory, step -from labgrid.strategy import Strategy +from labgrid.strategy import Strategy, StrategyError from labgrid.util import get_free_port @@ -75,7 +75,7 @@ def update_network_service(self): networkservice.port = local_port else: networkservice.address = new_address - networkserivce.port = self.__remote_port + networkservice.port = self.__remote_port @step(args=["state"]) def transition(self, state, *, step): @@ -83,7 +83,7 @@ def transition(self, state, *, step): state = Status[state] if state == Status.unknown: - raise StrategyError(f"can not transition to {new_status}") + raise StrategyError(f"can not transition to {state}") elif self.status == state: step.skip("nothing to do") @@ -99,4 +99,4 @@ def transition(self, state, *, step): self.target.activate(self.shell) self.update_network_service() - self.status = status + self.status = state From c8128f6345eda3bf75e8939f6142d10ca3b1b37e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:05:36 +0200 Subject: [PATCH 232/384] examples: drop unused imports Signed-off-by: Bastian Krause --- examples/deditec-relais8/deditec.py | 3 +-- examples/deditec-relais8/deditec_remote.py | 4 +--- examples/strategy/test_barebox_strategy.py | 2 -- examples/sysfsgpio/sysfsgpio.py | 2 +- examples/sysfsgpio/sysfsgpio_remote.py | 3 +-- examples/usbpower/test_example.py | 2 -- 6 files changed, 4 insertions(+), 12 deletions(-) diff --git a/examples/deditec-relais8/deditec.py b/examples/deditec-relais8/deditec.py index 189cbefe8..e6dd13112 100644 --- a/examples/deditec-relais8/deditec.py +++ b/examples/deditec-relais8/deditec.py @@ -3,8 +3,7 @@ import logging import time -from labgrid import Environment, StepReporter -from labgrid.strategy.bareboxstrategy import Status +from labgrid import StepReporter from labgrid.driver.deditecrelaisdriver import DeditecRelaisDriver # enable debug logging diff --git a/examples/deditec-relais8/deditec_remote.py b/examples/deditec-relais8/deditec_remote.py index b9f2ca16c..ae62ebea4 100644 --- a/examples/deditec-relais8/deditec_remote.py +++ b/examples/deditec-relais8/deditec_remote.py @@ -3,9 +3,7 @@ import logging import time -from labgrid import 
Environment, StepReporter -from labgrid.strategy.bareboxstrategy import Status -from labgrid.driver.deditecrelaisdriver import DeditecRelaisDriver +from labgrid import StepReporter # enable debug logging logging.basicConfig( diff --git a/examples/strategy/test_barebox_strategy.py b/examples/strategy/test_barebox_strategy.py index d64431757..6d950145e 100644 --- a/examples/strategy/test_barebox_strategy.py +++ b/examples/strategy/test_barebox_strategy.py @@ -1,7 +1,5 @@ import pytest -from labgrid.exceptions import NoDriverFoundError - @pytest.fixture(scope="function") def in_bootloader(strategy, capsys): diff --git a/examples/sysfsgpio/sysfsgpio.py b/examples/sysfsgpio/sysfsgpio.py index d915d7e8b..1fc367faf 100644 --- a/examples/sysfsgpio/sysfsgpio.py +++ b/examples/sysfsgpio/sysfsgpio.py @@ -3,7 +3,7 @@ import logging import time -from labgrid import Environment, StepReporter +from labgrid import StepReporter from labgrid.driver.gpiodriver import GpioDigitalOutputDriver # enable debug logging diff --git a/examples/sysfsgpio/sysfsgpio_remote.py b/examples/sysfsgpio/sysfsgpio_remote.py index b85807843..df7a7ff9e 100644 --- a/examples/sysfsgpio/sysfsgpio_remote.py +++ b/examples/sysfsgpio/sysfsgpio_remote.py @@ -3,8 +3,7 @@ import logging import time -from labgrid import Environment, StepReporter -from labgrid.driver.gpiodriver import GpioDigitalOutputDriver +from labgrid import StepReporter # enable debug logging logging.basicConfig( diff --git a/examples/usbpower/test_example.py b/examples/usbpower/test_example.py index ea2b9dcdc..fe07d0cbf 100644 --- a/examples/usbpower/test_example.py +++ b/examples/usbpower/test_example.py @@ -1,7 +1,5 @@ import pytest -from labgrid.exceptions import NoDriverFoundError - @pytest.fixture(scope="function") def bootloader(target, strategy, capsys): From c872e7fa9564df3869205d13c7ba3255901df7af Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:26:15 +0200 Subject: [PATCH 233/384] examples: make imports consistent The examples try to demonstrate how labgrid can be used. So use sane, consistent imports for this purpose. 
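As a guideline, the import pattern the example strategies follow after
this change is (taken from the hunks below):

    from labgrid import target_factory, step
    from labgrid.driver import BareboxDriver, ShellDriver
    from labgrid.protocol import PowerProtocol
    from labgrid.strategy import Strategy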
Signed-off-by: Bastian Krause --- examples/deditec-relais8/deditec.py | 10 +++++----- examples/deditec-relais8/deditec_remote.py | 5 ++--- examples/networkmanager/nm.py | 5 +++-- examples/strategy/bareboxrebootstrategy.py | 5 ++--- examples/strategy/quartusstrategy.py | 5 ++--- examples/sysfsgpio/sysfsgpio.py | 10 +++++----- examples/sysfsgpio/sysfsgpio_remote.py | 5 ++--- examples/usbpower/examplestrategy.py | 3 +-- 8 files changed, 22 insertions(+), 26 deletions(-) diff --git a/examples/deditec-relais8/deditec.py b/examples/deditec-relais8/deditec.py index e6dd13112..8dc38abf7 100644 --- a/examples/deditec-relais8/deditec.py +++ b/examples/deditec-relais8/deditec.py @@ -1,10 +1,10 @@ import sys -import labgrid import logging import time -from labgrid import StepReporter -from labgrid.driver.deditecrelaisdriver import DeditecRelaisDriver +from labgrid import Target, StepReporter +from labgrid.resource.udev import DeditecRelais8 +from labgrid.driver import DeditecRelaisDriver # enable debug logging logging.basicConfig( @@ -16,8 +16,8 @@ # show labgrid steps on the console StepReporter.start() -t = labgrid.Target('main') -r = labgrid.resource.udev.DeditecRelais8(t, name=None, index=1) +t = Target('main') +r = DeditecRelais8(t, name=None, index=1) d = DeditecRelaisDriver(t, name=None) p = t.get_driver("DigitalOutputProtocol") diff --git a/examples/deditec-relais8/deditec_remote.py b/examples/deditec-relais8/deditec_remote.py index ae62ebea4..c941060aa 100644 --- a/examples/deditec-relais8/deditec_remote.py +++ b/examples/deditec-relais8/deditec_remote.py @@ -1,9 +1,8 @@ import sys -import labgrid import logging import time -from labgrid import StepReporter +from labgrid import Environment, StepReporter # enable debug logging logging.basicConfig( @@ -15,7 +14,7 @@ # show labgrid steps on the console StepReporter.start() -e = labgrid.Environment('import-dedicontrol.yaml') +e = Environment('import-dedicontrol.yaml') t = e.get_target() p = t.get_driver("DigitalOutputProtocol") diff --git a/examples/networkmanager/nm.py b/examples/networkmanager/nm.py index 587e39085..d5d3943d6 100644 --- a/examples/networkmanager/nm.py +++ b/examples/networkmanager/nm.py @@ -1,7 +1,8 @@ -import logging, sys +import logging +import sys from pprint import pprint -from labgrid import * +from labgrid import Environment, StepReporter # enable debug logging diff --git a/examples/strategy/bareboxrebootstrategy.py b/examples/strategy/bareboxrebootstrategy.py index f909635f6..22bb93489 100644 --- a/examples/strategy/bareboxrebootstrategy.py +++ b/examples/strategy/bareboxrebootstrategy.py @@ -2,11 +2,10 @@ import attr +from labgrid import target_factory, step from labgrid.driver import BareboxDriver, ShellDriver from labgrid.protocol import PowerProtocol -from labgrid.factory import target_factory -from labgrid.step import step -from labgrid.strategy.common import Strategy +from labgrid.strategy import Strategy @attr.s(eq=False) diff --git a/examples/strategy/quartusstrategy.py b/examples/strategy/quartusstrategy.py index 47d346a74..d5f65ccff 100644 --- a/examples/strategy/quartusstrategy.py +++ b/examples/strategy/quartusstrategy.py @@ -2,11 +2,10 @@ import attr +from labgrid import target_factory, step from labgrid.driver import QuartusHPSDriver, SerialDriver -from labgrid.factory import target_factory from labgrid.protocol import PowerProtocol -from labgrid.step import step -from labgrid.strategy.common import Strategy +from labgrid.strategy import Strategy @attr.s(eq=False) diff --git 
a/examples/sysfsgpio/sysfsgpio.py b/examples/sysfsgpio/sysfsgpio.py index 1fc367faf..675398150 100644 --- a/examples/sysfsgpio/sysfsgpio.py +++ b/examples/sysfsgpio/sysfsgpio.py @@ -1,10 +1,10 @@ import sys -import labgrid import logging import time -from labgrid import StepReporter -from labgrid.driver.gpiodriver import GpioDigitalOutputDriver +from labgrid import StepReporter, Target +from labgrid.driver import GpioDigitalOutputDriver +from labgrid.resource import SysfsGPIO # enable debug logging logging.basicConfig( @@ -16,8 +16,8 @@ # show labgrid steps on the console StepReporter.start() -t = labgrid.Target('main') -r = labgrid.resource.base.SysfsGPIO(t, name=None, index=60) +t = Target('main') +r = SysfsGPIO(t, name=None, index=60) d = GpioDigitalOutputDriver(t, name=None) p = t.get_driver("DigitalOutputProtocol") diff --git a/examples/sysfsgpio/sysfsgpio_remote.py b/examples/sysfsgpio/sysfsgpio_remote.py index df7a7ff9e..c99a311f8 100644 --- a/examples/sysfsgpio/sysfsgpio_remote.py +++ b/examples/sysfsgpio/sysfsgpio_remote.py @@ -1,9 +1,8 @@ import sys -import labgrid import logging import time -from labgrid import StepReporter +from labgrid import StepReporter, Environment # enable debug logging logging.basicConfig( @@ -15,7 +14,7 @@ # show labgrid steps on the console StepReporter.start() -e = labgrid.Environment('import-gpio.yaml') +e = Environment('import-gpio.yaml') t = e.get_target() p = t.get_driver("DigitalOutputProtocol") diff --git a/examples/usbpower/examplestrategy.py b/examples/usbpower/examplestrategy.py index accc6b89f..daf733f7a 100644 --- a/examples/usbpower/examplestrategy.py +++ b/examples/usbpower/examplestrategy.py @@ -3,9 +3,8 @@ import attr from labgrid.driver import BareboxDriver, ShellDriver, USBSDMuxDriver -from labgrid.factory import target_factory +from labgrid import step, target_factory from labgrid.protocol import PowerProtocol -from labgrid.step import step from labgrid.strategy import Strategy From a3bc87c59d336477ee494f73112bf489e7550f77 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 16:38:30 +0200 Subject: [PATCH 234/384] examples: use labgrid's StepLogger/basicConfig StepReporter is deprecated, use StepLogger instead. While at it, make use of labgrid's basicConfig(). It provides a very similar format string, so drop all redundant arguments. Also switch from DEBUG to INFO log level for various examples that do not benefit from such a noisy log level. 
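Condensed from the hunks below, the boilerplate each example now starts
with is:

    import logging

    from labgrid.logging import basicConfig, StepLogger

    # enable info logging
    basicConfig(level=logging.INFO)

    # log labgrid steps
    StepLogger.start()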
Signed-off-by: Bastian Krause --- examples/deditec-relais8/deditec.py | 16 ++++++---------- examples/deditec-relais8/deditec_remote.py | 14 +++++--------- examples/library/test.py | 15 ++++++--------- examples/networkmanager/nm.py | 12 ++++-------- examples/sigrok/main.py | 9 +++------ examples/sysfsgpio/sysfsgpio.py | 14 +++++--------- examples/sysfsgpio/sysfsgpio_remote.py | 14 +++++--------- 7 files changed, 34 insertions(+), 60 deletions(-) diff --git a/examples/deditec-relais8/deditec.py b/examples/deditec-relais8/deditec.py index 8dc38abf7..0a47982ca 100644 --- a/examples/deditec-relais8/deditec.py +++ b/examples/deditec-relais8/deditec.py @@ -1,20 +1,16 @@ -import sys import logging import time -from labgrid import Target, StepReporter +from labgrid import Target +from labgrid.logging import basicConfig, StepLogger from labgrid.resource.udev import DeditecRelais8 from labgrid.driver import DeditecRelaisDriver -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) -# show labgrid steps on the console -StepReporter.start() +# log labgrid steps +StepLogger.start() t = Target('main') r = DeditecRelais8(t, name=None, index=1) diff --git a/examples/deditec-relais8/deditec_remote.py b/examples/deditec-relais8/deditec_remote.py index c941060aa..53a860ed3 100644 --- a/examples/deditec-relais8/deditec_remote.py +++ b/examples/deditec-relais8/deditec_remote.py @@ -1,18 +1,14 @@ -import sys import logging import time -from labgrid import Environment, StepReporter +from labgrid import Environment +from labgrid.logging import basicConfig, StepLogger -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) # show labgrid steps on the console -StepReporter.start() +StepLogger.start() e = Environment('import-dedicontrol.yaml') t = e.get_target() diff --git a/examples/library/test.py b/examples/library/test.py index f0b060218..9870e1670 100755 --- a/examples/library/test.py +++ b/examples/library/test.py @@ -4,19 +4,16 @@ import sys import logging -from labgrid import Environment, StepReporter +from labgrid import Environment +from labgrid.logging import basicConfig, StepLogger from labgrid.strategy.bareboxstrategy import Status -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) -# show labgrid steps on the console -StepReporter.start() +# log labgrid steps +StepLogger.start() def run_once(target): s = target.get_driver('BareboxStrategy') diff --git a/examples/networkmanager/nm.py b/examples/networkmanager/nm.py index d5d3943d6..e2e9f8876 100644 --- a/examples/networkmanager/nm.py +++ b/examples/networkmanager/nm.py @@ -1,19 +1,15 @@ import logging -import sys from pprint import pprint -from labgrid import Environment, StepReporter +from labgrid import Environment +from labgrid.logging import basicConfig, StepLogger # enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +basicConfig(level=logging.DEBUG) # show labgrid steps on the console -StepReporter.start() +StepLogger.start() e = Environment('nm.env') diff --git a/examples/sigrok/main.py b/examples/sigrok/main.py index a213232d6..3b107863f 100755 --- 
a/examples/sigrok/main.py +++ b/examples/sigrok/main.py @@ -6,13 +6,10 @@ import logging from labgrid import Environment +from labgrid.logging import basicConfig -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) env = Environment(sys.argv[1]) target = env.get_target('main') diff --git a/examples/sysfsgpio/sysfsgpio.py b/examples/sysfsgpio/sysfsgpio.py index 675398150..494c87eee 100644 --- a/examples/sysfsgpio/sysfsgpio.py +++ b/examples/sysfsgpio/sysfsgpio.py @@ -1,20 +1,16 @@ -import sys import logging import time -from labgrid import StepReporter, Target +from labgrid import Target +from labgrid.logging import basicConfig, StepLogger from labgrid.driver import GpioDigitalOutputDriver from labgrid.resource import SysfsGPIO -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) # show labgrid steps on the console -StepReporter.start() +StepLogger.start() t = Target('main') r = SysfsGPIO(t, name=None, index=60) diff --git a/examples/sysfsgpio/sysfsgpio_remote.py b/examples/sysfsgpio/sysfsgpio_remote.py index c99a311f8..e5225dd48 100644 --- a/examples/sysfsgpio/sysfsgpio_remote.py +++ b/examples/sysfsgpio/sysfsgpio_remote.py @@ -1,18 +1,14 @@ -import sys import logging import time -from labgrid import StepReporter, Environment +from labgrid import Environment +from labgrid.logging import basicConfig, StepLogger -# enable debug logging -logging.basicConfig( - level=logging.DEBUG, - format='%(levelname)7s: %(message)s', - stream=sys.stderr, -) +# enable info logging +basicConfig(level=logging.INFO) # show labgrid steps on the console -StepReporter.start() +StepLogger.start() e = Environment('import-gpio.yaml') t = e.get_target() From 0bf0141bf3ed2a810d7cd3b0f86bf48ecf96035e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 26 Jul 2024 20:58:32 +0200 Subject: [PATCH 235/384] util/yaml: warn on duplicate keys in mappings When adding drivers/resources to an environment config or resources to an exporter config, users can stumble upon duplicate keys. The resulting errors are not obvious: the last key simply overrides any previous entries with the same key. In order to make these config errors more obvious, emit a warning when duplicate keys are detected in mappings.
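A quick way to see the new behavior (a sketch, assuming the module's custom load() helper accepts a string like plain yaml.load() does):

    import warnings

    from labgrid.util.yaml import load

    config = """
    targets:
      main:
        role: foo
      main:
        role: bar
    """

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        load(config)

    # expected output, following the new warning format:
    # <unicode string>: previous entry with duplicate YAML dictionary
    # key 'main' overwritten
    print(caught[0].message)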
Signed-off-by: Bastian Krause --- labgrid/util/yaml.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/labgrid/util/yaml.py b/labgrid/util/yaml.py index 1c70b6999..3c7db7802 100644 --- a/labgrid/util/yaml.py +++ b/labgrid/util/yaml.py @@ -2,6 +2,8 @@ This module contains the custom YAML load and dump functions and associated loader and dumper """ + +import warnings from collections import OrderedDict, UserString from string import Template @@ -16,7 +18,19 @@ class Dumper(yaml.SafeDumper): pass +def _check_duplicate_dict_keys(loader, node): + seen_keys = [] + for key_node, _ in node.value: + key = loader.construct_scalar(key_node) + if key in seen_keys: + warnings.warn( + f"{loader.name}: previous entry with duplicate YAML dictionary key '{key}' overwritten", UserWarning + ) + seen_keys.append(key) + + def _dict_constructor(loader, node): + _check_duplicate_dict_keys(loader, node) return OrderedDict(loader.construct_pairs(node)) From fe8ee04b9689953397f30fd3774496c078d147fb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 17:23:56 +0200 Subject: [PATCH 236/384] dockerfiles/staging/docker-compose: drop obsolete version element Version is now an obsolete element [1]. [1] https://docs.docker.com/compose/compose-file/04-version-and-name/#version-top-level-element-optional Signed-off-by: Bastian Krause --- dockerfiles/staging/docker-compose.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/dockerfiles/staging/docker-compose.yml b/dockerfiles/staging/docker-compose.yml index 5d2b69d0a..cb3547802 100644 --- a/dockerfiles/staging/docker-compose.yml +++ b/dockerfiles/staging/docker-compose.yml @@ -1,4 +1,3 @@ -version: '3.3' services: coordinator: image: "${IMAGE_PREFIX:-docker.io/labgrid/}coordinator" From 27500028ed31723c2b92519c69bf8f85e96d432d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 17:27:48 +0200 Subject: [PATCH 237/384] docker: migrate to Docker compose v2 Docker compose v1 has been deprecated since July 2023 [1]. Now the GitHub Actions Ubuntu runner images have removed it, too [2]. So move to v2. labgrid is not affected by any changes between v1 and v2 other than the compose invocation changing from docker-compose to docker compose.
[1] https://docs.docker.com/compose/migrate/ [2] https://github.com/actions/runner-images/issues/9692 Signed-off-by: Bastian Krause --- .github/workflows/docker.yml | 4 ++-- .github/workflows/reusable-unit-tests-docker.yml | 4 ++-- dockerfiles/README.rst | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index bd111b4eb..71fafa366 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -38,8 +38,8 @@ jobs: - name: Build amd64 docker image and validate run: | ./dockerfiles/build.sh --load - docker-compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client - docker-compose -f dockerfiles/staging/docker-compose.yml down + docker compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client + docker compose -f dockerfiles/staging/docker-compose.yml down docker images - name: Build, tag and push latest image for all platforms run: ./dockerfiles/build.sh --platform ${IMAGE_PLATFORMS} --push diff --git a/.github/workflows/reusable-unit-tests-docker.yml b/.github/workflows/reusable-unit-tests-docker.yml index dc1b58a2d..986d47161 100644 --- a/.github/workflows/reusable-unit-tests-docker.yml +++ b/.github/workflows/reusable-unit-tests-docker.yml @@ -22,8 +22,8 @@ jobs: - name: Build docker images run: | ./dockerfiles/build.sh - docker-compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client || (docker-compose -f dockerfiles/staging/docker-compose.yml logs --timestamps && false) - docker-compose -f dockerfiles/staging/docker-compose.yml down + docker compose -f dockerfiles/staging/docker-compose.yml up --exit-code-from client client || (docker compose -f dockerfiles/staging/docker-compose.yml logs --timestamps && false) + docker compose -f dockerfiles/staging/docker-compose.yml down - name: Show docker images run: | docker images diff --git a/dockerfiles/README.rst b/dockerfiles/README.rst index 3268de03d..8c2e31439 100644 --- a/dockerfiles/README.rst +++ b/dockerfiles/README.rst @@ -141,10 +141,10 @@ client: .. code-block:: bash $ cd dockerfiles/staging - $ CURRENT_UID=$(id -u):$(id -g) docker-compose up -d coordinator exporter dut + $ CURRENT_UID=$(id -u):$(id -g) docker compose up -d coordinator exporter dut To run the smoke test just run the client: .. code-block:: bash - $ docker-compose up client + $ docker compose up client From ed51858fa89f59f8da00b2a9fe63909dde7eae6c Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 23:50:19 +0200 Subject: [PATCH 238/384] workflows/reusable-unit-tests: do not install ncurses-term, drop TERM setting ncurses-term and the TERM setting were added for the ColoredStepReporter functionality. This has been removed with the StepLogger introduction. So drop both. 
Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 16a26f0c6..3bf401292 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -30,7 +30,7 @@ jobs: ${{ runner.os }}-pip- - name: Install system dependencies run: | - sudo apt-get install -yq libow-dev openssh-server openssh-client libsnappy-dev ncurses-term graphviz openocd + sudo apt-get install -yq libow-dev openssh-server openssh-client libsnappy-dev graphviz openocd sudo mkdir -p /var/cache/labgrid/runner && sudo chown runner /var/cache/labgrid/runner - name: Prepare local SSH run: | @@ -56,7 +56,7 @@ jobs: pylint labgrid - name: Test with pytest run: | - TERM=xterm pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner --crossbar-venv crossbar-venv -k "not test_docker_with_daemon" + pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner --crossbar-venv crossbar-venv -k "not test_docker_with_daemon" - name: Build documentation run: | make -C doc clean From 1bce19fd8c91f47707710812c09da5dc1b5aca8e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 15:32:46 +0200 Subject: [PATCH 239/384] remote/client: suggest non-deprecated env var LG_PLACE instead of PLACE Since 06746168 ("remote/client: support env variables in namespace") the PLACE environment variable is deprecated in favor of LG_PLACE. So suggest LG_PLACE rather than PLACE. Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 8b9ef847a..f5842c541 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -410,7 +410,7 @@ async def add_place(self): """Add a place to the coordinator""" name = self.args.place if not name: - raise UserError("missing place name. Set with -p or via env var $PLACE") + raise UserError("missing place name. Set with -p or via env var LG_PLACE") if name in self.places: raise UserError(f"{name} already exists") res = await self.call("org.labgrid.coordinator.add_place", name) From 200048943d1cb7df68bbc70e2cb091f7034656d8 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 8 Aug 2024 09:22:02 +0200 Subject: [PATCH 240/384] resource/udev: use filter_match for HIDRelay This makes it easier to add more VID/PID pairs later. 
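For instance, supporting another relay later becomes a one-line change to the match list (the second pair below is a hypothetical placeholder, not a real device ID):

    def filter_match(self, device):
        match = (device.properties.get('ID_VENDOR_ID'), device.properties.get('ID_MODEL_ID'))

        if match not in [("16c0", "05df"),  # dcttech USBRelay2
                         ("1234", "abcd"),  # hypothetical additional HID relay
                        ]:
            return False

        return super().filter_match(device)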
Signed-off-by: Jan Luebbe --- labgrid/resource/udev.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index d3903eb95..1bb34f951 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -671,10 +671,14 @@ class HIDRelay(USBResource): index = attr.ib(default=1, validator=attr.validators.instance_of(int)) invert = attr.ib(default=False, validator=attr.validators.instance_of(bool)) - def __attrs_post_init__(self): - self.match['ID_VENDOR_ID'] = '16c0' - self.match['ID_MODEL_ID'] = '05df' - super().__attrs_post_init__() + def filter_match(self, device): + match = (device.properties.get('ID_VENDOR_ID'), device.properties.get('ID_MODEL_ID')) + + if match not in [("16c0", "05df"), # dcttech USBRelay2 + ]: + return False + + return super().filter_match(device) @target_factory.reg_resource @attr.s(eq=False) From cb1e88c617fdde7e4cbf0d9c98ee540b2fadd3e5 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 8 Aug 2024 09:42:54 +0200 Subject: [PATCH 241/384] util/agents/usb_hid_relay: run ruff format Signed-off-by: Jan Luebbe --- labgrid/util/agents/usb_hid_relay.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/labgrid/util/agents/usb_hid_relay.py b/labgrid/util/agents/usb_hid_relay.py index f20645774..ffbe51157 100644 --- a/labgrid/util/agents/usb_hid_relay.py +++ b/labgrid/util/agents/usb_hid_relay.py @@ -10,6 +10,7 @@ - Turn digital output on and off """ + import usb.core import usb.util @@ -32,9 +33,9 @@ def set_output(self, number, status): self._dev.ctrl_transfer( usb.util.CTRL_TYPE_CLASS | usb.util.CTRL_RECIPIENT_DEVICE | usb.util.ENDPOINT_OUT, SET_REPORT, - (REPORT_TYPE_FEATURE << 8) | 0, # no report ID + (REPORT_TYPE_FEATURE << 8) | 0, # no report ID 0, - req, # payload + req, # payload ) def get_output(self, number): @@ -42,11 +43,11 @@ def get_output(self, number): resp = self._dev.ctrl_transfer( usb.util.CTRL_TYPE_CLASS | usb.util.CTRL_RECIPIENT_DEVICE | usb.util.ENDPOINT_IN, GET_REPORT, - (REPORT_TYPE_FEATURE << 8) | 0, # no report ID + (REPORT_TYPE_FEATURE << 8) | 0, # no report ID 0, - 8, # size + 8, # size ) - return bool(resp[7] & (1 << (number-1))) + return bool(resp[7] & (1 << (number - 1))) def __del__(self): usb.util.release_interface(self._dev, 0) @@ -63,6 +64,6 @@ def handle_get(busnum, devnum, number): methods = { - 'set': handle_set, - 'get': handle_get, + "set": handle_set, + "get": handle_get, } From e592a376657e3aa77d41667e627561353de981ea Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 8 Aug 2024 09:35:22 +0200 Subject: [PATCH 242/384] util/agents/usb_hid_relay: use VID to detect the protocol Signed-off-by: Jan Luebbe --- labgrid/util/agents/usb_hid_relay.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/labgrid/util/agents/usb_hid_relay.py b/labgrid/util/agents/usb_hid_relay.py index ffbe51157..b32f28477 100644 --- a/labgrid/util/agents/usb_hid_relay.py +++ b/labgrid/util/agents/usb_hid_relay.py @@ -24,10 +24,17 @@ def __init__(self, **args): self._dev = usb.core.find(**args) if self._dev is None: raise ValueError("Device not found") + + if self._dev.idVendor == 0x16C0: + self.set_output = self.set_output_dcttech + self.get_output = self.get_output_dcttech + else: + raise ValueError(f"Unknown vendor/protocol for VID {self._dev.idVendor:x}") + if self._dev.is_kernel_driver_active(0): self._dev.detach_kernel_driver(0) - def set_output(self, number, status): + def set_output_dcttech(self, number, status): 
assert 1 <= number <= 8 req = [0xFF if status else 0xFD, number] self._dev.ctrl_transfer( @@ -38,7 +45,7 @@ def set_output(self, number, status): req, # payload ) - def get_output(self, number): + def get_output_dcttech(self, number): assert 1 <= number <= 8 resp = self._dev.ctrl_transfer( usb.util.CTRL_TYPE_CLASS | usb.util.CTRL_RECIPIENT_DEVICE | usb.util.ENDPOINT_IN, From c9fc5bfdb9daf728f6e5302a767875917907651b Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 8 Aug 2024 11:02:02 +0200 Subject: [PATCH 243/384] labgrid/util/agents/usb_hid_relay: keep the USB device open This avoids device busy errors when accessing the device in a loop. Signed-off-by: Jan Luebbe --- labgrid/util/agents/usb_hid_relay.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/labgrid/util/agents/usb_hid_relay.py b/labgrid/util/agents/usb_hid_relay.py index b32f28477..5a083e81d 100644 --- a/labgrid/util/agents/usb_hid_relay.py +++ b/labgrid/util/agents/usb_hid_relay.py @@ -60,13 +60,22 @@ def __del__(self): usb.util.release_interface(self._dev, 0) +_relays = {} + + +def _get_relay(busnum, devnum): + if (busnum, devnum) not in _relays: + _relays[(busnum, devnum)] = USBHIDRelay(bus=busnum, address=devnum) + return _relays[(busnum, devnum)] + + def handle_set(busnum, devnum, number, status): - relay = USBHIDRelay(bus=busnum, address=devnum) + relay = _get_relay(busnum, devnum) relay.set_output(number, status) def handle_get(busnum, devnum, number): - relay = USBHIDRelay(bus=busnum, address=devnum) + relay = _get_relay(busnum, devnum) return relay.get_output(number) From a9d2d1c45a37e6f4048638b831b70a1a8ee80d31 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 8 Aug 2024 11:02:50 +0200 Subject: [PATCH 244/384] util/agents/usb_hid_relay: add support for the LCTech USB HID relay Signed-off-by: Jan Luebbe --- doc/configuration.rst | 2 +- labgrid/resource/udev.py | 1 + labgrid/util/agents/usb_hid_relay.py | 17 +++++++++++++++++ 3 files changed, 19 insertions(+), 1 deletion(-) diff --git a/doc/configuration.rst b/doc/configuration.rst index 3697dc744..f70bf2b66 100644 --- a/doc/configuration.rst +++ b/doc/configuration.rst @@ -480,7 +480,7 @@ HIDRelay ++++++++ An :any:`HIDRelay` resource describes a single output of an HID protocol based USB relays. -It currently supports the widely used *dcttech USBRelay*. +It currently supports the widely used *dcttech USBRelay* and *lctech LCUS* .. 
code-block:: yaml diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index 1bb34f951..54c6a9fd3 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -675,6 +675,7 @@ def filter_match(self, device): match = (device.properties.get('ID_VENDOR_ID'), device.properties.get('ID_MODEL_ID')) if match not in [("16c0", "05df"), # dcttech USBRelay2 + ("5131", "2007"), # LC-US8 ]: return False diff --git a/labgrid/util/agents/usb_hid_relay.py b/labgrid/util/agents/usb_hid_relay.py index 5a083e81d..9cbcaf2c5 100644 --- a/labgrid/util/agents/usb_hid_relay.py +++ b/labgrid/util/agents/usb_hid_relay.py @@ -28,6 +28,9 @@ def __init__(self, **args): if self._dev.idVendor == 0x16C0: self.set_output = self.set_output_dcttech self.get_output = self.get_output_dcttech + elif self._dev.idVendor == 0x5131: + self.set_output = self.set_output_lcus + self.get_output = self.get_output_lcus else: raise ValueError(f"Unknown vendor/protocol for VID {self._dev.idVendor:x}") @@ -56,6 +59,20 @@ def get_output_dcttech(self, number): ) return bool(resp[7] & (1 << (number - 1))) + def set_output_lcus(self, number, status): + assert 1 <= number <= 8 + ep_in = self._dev[0][(0, 0)][0] + ep_out = self._dev[0][(0, 0)][1] + req = [0xA0, number, 0x01 if status else 0x00, 0x00] + req[3] = sum(req) & 0xFF + ep_out.write(req) + ep_in.read(64) + + def get_output_lcus(self, number): + assert 1 <= number <= 8 + # we have no information on how to read the current value + return False + def __del__(self): usb.util.release_interface(self._dev, 0) From 02a465790f82f3e283320002ec4b1ae32012c93c Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Mon, 12 Aug 2024 11:21:55 +0200 Subject: [PATCH 245/384] CHANGES: more updates for 24.0 release Signed-off-by: Rouven Czerwinski --- CHANGES.rst | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index c8a4d9545..1c77c0304 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -48,6 +48,11 @@ New Features in 24.0 - The pyproject.toml gained a config for `ruff `_. - ``setuptools_scm`` is now used to generate a version file. - labgrid-client console will fallback to telnet if microcom is not available. +- A power backend for tinycontrol.eu IP Power Socket 6G10A v2 was added. +- Labgrid now publishes arm64 docker images. +- Labgrid's YAML parser will now warn when mapping keys are duplicated and thus + overwritten. +- LC USB Relais are now supported. Bug fixes in 24.0 @@ -102,6 +107,14 @@ Bug fixes in 24.0 - The ``ser2net`` version check for YAML configurations in the exporter was fixed. - The exporter forces ``ser2net`` TCP connections for versions >=4.2.0. +- The retrieval of the DTR status for ``SerialPortDigitalOutputDriver`` was + fixed. +- The ``SSHDriver`` keepalive is now correctly stopped when using existing + connections. +- The power backend for raritan devices now supports devices with more than 16 + outlets. +- The ``ExternalConsoleDriver`` now correctly sets the bufsize to zero to + prevent buffering. 
Breaking changes in 24.0 ~~~~~~~~~~~~~~~~~~~~~~~~ From b65340d4153c11ae6d31e238f7170b575a14178e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 12:39:39 +0200 Subject: [PATCH 246/384] CHANGES: add release date to 24.0 Signed-off-by: Bastian Krause --- CHANGES.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGES.rst b/CHANGES.rst index 1c77c0304..885861a1f 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,5 +1,5 @@ -Release 24.0 (unreleased) -------------------------- +Release 24.0 (Released Aug 12, 2024) +------------------------------------ New Features in 24.0 ~~~~~~~~~~~~~~~~~~~~ From 2317d021f397414b932d57ad501e983eb22b4344 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 12:40:28 +0200 Subject: [PATCH 247/384] CHANGES: Add 23.0.x releases from stable-23.0 branch Signed-off-by: Bastian Krause --- CHANGES.rst | 80 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 80 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 885861a1f..d185b909c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -169,6 +169,86 @@ Known issues in 24.0 - Some client commands return 0 even if the command failed. +Release 23.0.6 (Released Apr 16, 2024) +-------------------------------------- + +Bug fixes in 23.0.6 +~~~~~~~~~~~~~~~~~~~ +- In `USBVideoDriver`, use the ``playbin3`` element instead of ``playbin`` to + fix decoding via VA-API for certain webcams on AMD graphics cards. +- Let the `SSHDriver` redirect ``/dev/null`` to stdin on ``run()`` to prevent + unexpected consumption of stdin of the remotely started process. +- Cover more failure scenarios in the exporter and coordinator systemd + services, fix the service startup order, do not buffer journal logs. + +Release 23.0.5 (Released Jan 13, 2024) +-------------------------------------- + +Bug fixes in 23.0.5 +~~~~~~~~~~~~~~~~~~~ +- Fix readthedocs build by specifying Python version and OS. +- Fix several incompatibilities with doc sphinxcontrib-* dependencies having + dropped their explicit Sphinx dependencies, which prevented generation of + labgrid's docs. + +Release 23.0.4 (Released Nov 10, 2023) +-------------------------------------- + +Bug fixes in 23.0.4 +~~~~~~~~~~~~~~~~~~~ +- Fix dockerfiles syntax error that became fatal in a recent docker release. +- Fix ShellDriver's xmodem functionality. +- Pin pylint to prevent incompatibility with pinned pytest-pylint. +- Fix ``labgrid-client console --loop`` on disappearing serial ports (such as + on-board FTDIs). + +Release 23.0.3 (Released Jul 20, 2023) +-------------------------------------- + +Bug fixes in 23.0.3 +~~~~~~~~~~~~~~~~~~~ +- Update to PyYAML 6.0.1 to prevent install errors with Cython>=3.0, see: + https://github.com/yaml/pyyaml/issues/601 + https://github.com/yaml/pyyaml/pull/726#issuecomment-1640397938 + +Release 23.0.2 (Released Jul 04, 2023) +-------------------------------------- + +Bug fixes in 23.0.2 +~~~~~~~~~~~~~~~~~~~ +- Move `SSHDriver`'s control socket tmpdir cleanup after the SSH process + has terminated. Ignore errors on cleanup since it's best effort. +- Add missing class name in ``labgrid-client monitor`` resource output. +- Print USB loader process output if log level does not cover logging it. +- Fix UnboundLocalError in ``atomic_replace()`` used by the coordinator and + ``labgrid-client export`` to write config files. +- Let Config's ``get_tool()`` return the requested tool if it is not found in + the config.
Return the resolved path if it exists, otherwise return the value + as is. Also drop the now obsolete tool fallbacks from the drivers and add + tests. +- Fix `USBSDMuxDevice`/`USBSDWireDevice` udev race condition leading to + outdated control/disk paths. +- Fix `SSHDriver`'s ``explicit_sftp_mode`` option to allow calls to ``put()`` + and ``get()`` multiple times. Also make ``scp()`` respect this option. +- Add compatibility with QEMU >= 6.1.0 to `QEMUDriver`'s ``display`` argument + for the ``egl-headless`` option. + +Release 23.0.1 (Released Apr 26, 2023) +-------------------------------------- + +Bug fixes in 23.0.1 +~~~~~~~~~~~~~~~~~~~ +- The pypi release now uses the labgrid pyserial fork in the form of the + pyserial-labgrid package. This fixes installation with newer versions + of pip. +- Several tests have gained an importorskip() call to skip them if the + module is not available. +- The build-and-release workflow supports building wheels. +- The markers now are restricted to patterns which won't match WARN, + ERROR, INFO and similar log notifiers. +- Fix named SSH lookups in conjunction with an environment file in + labgrid-client. + Release 23.0 (Released Apr 24, 2023) ------------------------------------ From f4793e1cf8c1343bd7babd50d0e5d1ae425fb615 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 12:33:49 +0200 Subject: [PATCH 248/384] debian: add unreleased version 24.1.0 Signed-off-by: Bastian Krause --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 47e263732..51f0f9b90 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +labgrid (24.1.0) UNRELEASED; urgency=low + + * See https://github.com/labgrid-project/labgrid/blob/master/CHANGES.rst + + -- Bastian Krause Tue, 13 Aug 2024 12:23:25 +0200 + labgrid (24.0.0) UNRELEASED; urgency=low * See https://github.com/labgrid-project/labgrid/blob/master/CHANGES.rst From 2099ee2a8545cc85340ed457fba95bfc5db4bdc6 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Thu, 20 Jun 2024 12:36:18 +0200 Subject: [PATCH 249/384] treewide: replace autobahn and crossbar with gRPC Crossbar and autobahn are used to communicate between client and coordinator as well as between exporter and coordinator. Both are unfortunately not very well maintained anymore. The crossbar component was moved to its own virtualenv for quite a while to cope with conflicting dependencies. Python 3.13 support for crossbar is still not available (at least not in a version released on PyPI). That's why labgrid will now move to gRPC. It's a well-maintained RPC framework. As a side effect, the message transfer is more performant and the import times are shorter. gRPC relies on the protocol buffers compiler (protoc) to generate code. Use grpcio-tools 1.62.2 to do that. It's used in Yocto scarthgap and is incompatible with code generated by newer grpcio-tools.
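To give an idea of the new transport, a minimal coordinator query now looks roughly like this (a sketch derived from the client changes below; the address is the default, error handling is omitted):

    import asyncio

    import grpc

    from labgrid.remote.generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc

    async def main():
        # 20408 stays the default coordinator port
        channel = grpc.aio.insecure_channel("127.0.0.1:20408")
        stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(channel)

        # ask the coordinator for all current reservations
        request = labgrid_coordinator_pb2.GetReservationsRequest()
        response = await stub.GetReservations(request)
        for reservation in response.reservations:
            print(reservation)

        await channel.close()

    asyncio.run(main())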
Co-developed-by: Rouven Czerwinski Co-developed-by: Bastian Krause Signed-off-by: Jan Luebbe --- .crossbar/.gitignore | 3 - .crossbar/config-anonymous.yaml | 47 - .github/workflows/reusable-unit-tests.yml | 8 +- contrib/completion/labgrid-client.bash | 6 +- crossbar-requirements.txt | 2 - doc/conf.py | 11 - labgrid/config.py | 4 + labgrid/pytestplugin/fixtures.py | 4 +- labgrid/pytestplugin/hooks.py | 2 +- labgrid/remote/client.py | 563 ++++++----- labgrid/remote/common.py | 242 ++++- labgrid/remote/coordinator.py | 873 ++++++++++-------- labgrid/remote/exporter.py | 274 +++--- labgrid/remote/generated/generate-proto.sh | 4 + .../generated/labgrid_coordinator_pb2.py | 158 ++++ .../generated/labgrid_coordinator_pb2.pyi | 448 +++++++++ .../generated/labgrid_coordinator_pb2_grpc.py | 627 +++++++++++++ labgrid/remote/generated/requirements.in | 3 + labgrid/remote/generated/requirements.txt | 15 + .../remote/generated/update-requirements.sh | 5 + .../remote/proto/labgrid-coordinator.proto | 297 ++++++ labgrid/resource/remote.py | 9 +- labgrid/util/proxy.py | 7 + pyproject.toml | 11 +- tests/conftest.py | 107 +-- tests/{test_crossbar.py => test_client.py} | 132 ++- tests/test_coordinator.py | 167 ++++ tests/test_fixtures.py | 4 +- tests/test_pb2.py | 172 ++++ tests/test_remote.py | 3 - 30 files changed, 3262 insertions(+), 946 deletions(-) delete mode 100644 .crossbar/.gitignore delete mode 100644 .crossbar/config-anonymous.yaml delete mode 100644 crossbar-requirements.txt create mode 100755 labgrid/remote/generated/generate-proto.sh create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.py create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2.pyi create mode 100644 labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py create mode 100644 labgrid/remote/generated/requirements.in create mode 100644 labgrid/remote/generated/requirements.txt create mode 100755 labgrid/remote/generated/update-requirements.sh create mode 100644 labgrid/remote/proto/labgrid-coordinator.proto rename tests/{test_crossbar.py => test_client.py} (80%) create mode 100644 tests/test_coordinator.py create mode 100644 tests/test_pb2.py diff --git a/.crossbar/.gitignore b/.crossbar/.gitignore deleted file mode 100644 index a6c031384..000000000 --- a/.crossbar/.gitignore +++ /dev/null @@ -1,3 +0,0 @@ -key.priv -key.pub -node.pid diff --git a/.crossbar/config-anonymous.yaml b/.crossbar/config-anonymous.yaml deleted file mode 100644 index 8771a5aa1..000000000 --- a/.crossbar/config-anonymous.yaml +++ /dev/null @@ -1,47 +0,0 @@ -version: 2 -workers: -- type: router - realms: - - name: realm1 - roles: - - name: public - permissions: - - uri: '' - match: prefix - allow: - call: true - register: true - publish: true - subscribe: true - disclose: - caller: true - publisher: true - cache: true - transports: - - type: web - endpoint: - type: tcp - port: 20408 - paths: - /: - type: static - directory: ../web - ws: - type: websocket - options: - auto_fragment_size: 65536 - auth: - anonymous: - type: static - role: public -- id: coordinator - type: guest - executable: /path/to/labgrid-venv/bin/python3 - arguments: - - -m - - labgrid.remote.coordinator - options: - workdir: . 
- env: - vars: - WS: ws://localhost:20408/ws diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 3bf401292..15be56078 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -30,7 +30,7 @@ jobs: ${{ runner.os }}-pip- - name: Install system dependencies run: | - sudo apt-get install -yq libow-dev openssh-server openssh-client libsnappy-dev graphviz openocd + sudo apt-get install -yq libow-dev openssh-server openssh-client graphviz openocd sudo mkdir -p /var/cache/labgrid/runner && sudo chown runner /var/cache/labgrid/runner - name: Prepare local SSH run: | @@ -46,17 +46,13 @@ jobs: - name: Install labgrid run: | pip install -e ".[dev]" - - name: Install crossbar in virtualenv - run: | - virtualenv -p python3 crossbar-venv - crossbar-venv/bin/pip install -r crossbar-requirements.txt - name: Lint with pylint run: | pylint --list-msgs-enabled pylint labgrid - name: Test with pytest run: | - pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner --crossbar-venv crossbar-venv -k "not test_docker_with_daemon" + pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" - name: Build documentation run: | make -C doc clean diff --git a/contrib/completion/labgrid-client.bash b/contrib/completion/labgrid-client.bash index 7bc0d8499..81b6883c0 100644 --- a/contrib/completion/labgrid-client.bash +++ b/contrib/completion/labgrid-client.bash @@ -2,14 +2,14 @@ # options top level and subcommands support _labgrid_shared_options="--help" -_labgrid_main_opts_with_value="@(-x|--crossbar|-c|--config|-p|--place|-s|--state|-i|--initial-state|-P|--proxy)" +_labgrid_main_opts_with_value="@(-x|--coordinator|-c|--config|-p|--place|-s|--state|-i|--initial-state|-P|--proxy)" # Parses labgrid-client arguments # Sets arg to subcommand, excluding options and their values. # Sets last_arg_opt_with_value to true if the last argument is an option requiring a value, else # false. 
# Sets base_cmd to the labgrid-client base command up to subcommand and removes trailing -# option requiring a value - useful to call 'labgrid-client complete' with place/crossbar/proxy set +# option requiring a value - useful to call 'labgrid-client complete' with place/coordinator/proxy set # Before calling this function, make sure arg, base_cmd and last_arg_opt_with_value are local _labgrid_parse_args() { @@ -867,7 +867,7 @@ _labgrid_client() case "$cur" in --*) # top level args completion - local options="--crossbar \ + local options="--coordinator \ --config \ --place \ --state \ diff --git a/crossbar-requirements.txt b/crossbar-requirements.txt deleted file mode 100644 index d361d83d9..000000000 --- a/crossbar-requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -crossbar==21.3.1 -autobahn<=22.4.1 diff --git a/doc/conf.py b/doc/conf.py index 139f530f0..cfef4259b 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -182,17 +182,6 @@ 'special-members': True, } autodoc_mock_imports = ['onewire', - 'txaio', - 'autobahn', - 'autobahn.asyncio', - 'autobahn.asyncio.wamp', - 'autobahn.asyncio.websocket', - 'autobahn.wamp', - 'autobahn.wamp.types', - 'autobahn.twisted', - 'autobahn.twisted.wamp', - 'autobahn.wamp.exception', - 'twisted.internet.defer', 'gi', 'gi.repository',] diff --git a/labgrid/config.py b/labgrid/config.py index 7801e05e1..9b09dcb5e 100644 --- a/labgrid/config.py +++ b/labgrid/config.py @@ -4,6 +4,7 @@ """ import os +import warnings from yaml import YAMLError import attr @@ -50,6 +51,9 @@ def __attrs_post_init__(self): f"configuration file '{self.filename}' is invalid: {e}" ) + if self.get_option("crossbar_url", ""): + warnings.warn("Ignored option 'crossbar_url' in config, use 'coordinator_address' instead", UserWarning) + def resolve_path(self, path): """Resolve an absolute path diff --git a/labgrid/pytestplugin/fixtures.py b/labgrid/pytestplugin/fixtures.py index 1bd8d2f2b..f881377c3 100644 --- a/labgrid/pytestplugin/fixtures.py +++ b/labgrid/pytestplugin/fixtures.py @@ -28,8 +28,8 @@ def pytest_addoption(parser): '--lg-coordinator', action='store', dest='lg_coordinator', - metavar='CROSSBAR_URL', - help='labgrid coordinator websocket URL.') + metavar='COORDINATOR_ADDRESS', + help='labgrid coordinator HOST[:PORT].') group.addoption( '--lg-log', action='store', diff --git a/labgrid/pytestplugin/hooks.py b/labgrid/pytestplugin/hooks.py index a701e5ccb..f69507250 100644 --- a/labgrid/pytestplugin/hooks.py +++ b/labgrid/pytestplugin/hooks.py @@ -91,7 +91,7 @@ def pytest_configure(config): if lg_env is not None: env = Environment(config_file=lg_env) if lg_coordinator is not None: - env.config.set_option('crossbar_url', lg_coordinator) + env.config.set_option('coordinator_address', lg_coordinator) config.stash[LABGRID_ENV_KEY] = env processwrapper.enable_logging() diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index f5842c541..a78759fb7 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -15,16 +15,16 @@ import shlex import shutil import json +import itertools from textwrap import indent from socket import gethostname from getpass import getuser from collections import defaultdict, OrderedDict from datetime import datetime from pprint import pformat -import txaio -txaio.use_asyncio() -from autobahn.asyncio.wamp import ApplicationSession +import attr +import grpc from .common import ( ResourceEntry, @@ -34,21 +34,18 @@ ReservationState, TAG_KEY, TAG_VAL, - enable_tcp_nodelay, - monkey_patch_max_msg_payload_size_ws_option, + queue_as_aiter, ) from .. 
import Environment, Target, target_factory from ..exceptions import NoDriverFoundError, NoResourceFoundError, InvalidConfigError +from .generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc from ..resource.remote import RemotePlaceManager, RemotePlace -from ..util import diff_dict, flat_dict, filter_dict, dump, atomic_replace, labgrid_version, Timeout +from ..util import diff_dict, flat_dict, dump, atomic_replace, labgrid_version, Timeout from ..util.proxy import proxymanager from ..util.helper import processwrapper from ..driver import Mode, ExecutionError from ..logging import basicConfig, StepLogger -txaio.config.loop = asyncio.get_event_loop() # pylint: disable=no-member -monkey_patch_max_msg_payload_size_ws_option() - class Error(Exception): pass @@ -66,57 +63,148 @@ class InteractiveCommandError(Error): pass -class ClientSession(ApplicationSession): - """The ClientSession encapsulates all the actions a Client can Invoke on +@attr.s(eq=False) +class ClientSession: + """The ClientSession encapsulates all the actions a Client can invoke on the coordinator.""" + address = attr.ib(validator=attr.validators.instance_of(str)) + loop = attr.ib(validator=attr.validators.instance_of(asyncio.BaseEventLoop)) + env = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Environment))) + role = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + prog = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) + args = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(argparse.Namespace))) + monitor = attr.ib(default=False, validator=attr.validators.instance_of(bool)) + def gethostname(self): return os.environ.get("LG_HOSTNAME", gethostname()) def getuser(self): return os.environ.get("LG_USERNAME", getuser()) - def onConnect(self): + def __attrs_post_init__(self): """Actions which are executed if a connection is successfully opened.""" - self.loop = self.config.extra["loop"] - self.connected = self.config.extra["connected"] - self.args = self.config.extra.get("args") - self.env = self.config.extra.get("env", None) - self.role = self.config.extra.get("role", None) - self.prog = self.config.extra.get("prog", os.path.basename(sys.argv[0])) - self.monitor = self.config.extra.get("monitor", False) - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous", "ticket"], - authid=f"client/{self.gethostname()}/{self.getuser()}", - authextra={"authid": f"client/{self.gethostname()}/{self.getuser()}"}, - ) + self.stopping = asyncio.Event() - def onChallenge(self, challenge): - import warnings + self.channel = grpc.aio.insecure_channel(self.address) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) - warnings.warn("Ticket authentication is deprecated. Please update your coordinator.", DeprecationWarning) - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" + self.out_queue = asyncio.Queue() + self.stream_call = None + self.pump_task = None + self.sync_id = itertools.count(start=1) + self.sync_events = {} - async def onJoin(self, details): - # FIXME race condition? 
- resources = await self.call("org.labgrid.coordinator.get_resources") + async def start(self): + """Starts receiving resource and place updates from the coordinator.""" self.resources = {} - for exporter, groups in resources.items(): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - await self.on_resource_changed(exporter, group_name, resource_name, resource) - - places = await self.call("org.labgrid.coordinator.get_places") self.places = {} - for placename, config in places.items(): - await self.on_place_changed(placename, config) - await self.subscribe(self.on_resource_changed, "org.labgrid.coordinator.resource_changed") - await self.subscribe(self.on_place_changed, "org.labgrid.coordinator.place_changed") - await self.connected(self) + self.pump_task = self.loop.create_task(self.message_pump()) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = labgrid_version() + msg.startup.name = f"{self.gethostname()}/{self.getuser()}" + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_places = True + self.out_queue.put_nowait(msg) + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.subscribe.all_resources = True + self.out_queue.put_nowait(msg) + await self.sync_with_coordinator() + if self.stopping.is_set(): + raise ServerError("Could not connect to coordinator") + + async def stop(self): + """Stops stream for resource and place updates started with ClientSession.start().""" + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if self.stream_call: + self.stream_call.cancel() + try: + await self.pump_task + except asyncio.CancelledError: + pass + self.cancel_pending_syncs() + + async def close(self): + """Closes the channel to the coordinator.""" + await self.channel.close() + + async def sync_with_coordinator(self): + """Wait for coordinator to process all previous messages in stream.""" + identifier = next(self.sync_id) + event = self.sync_events[identifier] = asyncio.Event() + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.sync.id = identifier + logging.debug("sending sync %s", identifier) + self.out_queue.put_nowait(msg) + await event.wait() + if self.stopping.is_set(): + logging.debug("sync %s failed", identifier) + else: + logging.debug("received sync %s", identifier) + return not self.stopping.is_set() + + def cancel_pending_syncs(self): + """Cancel all pending ClientSession.sync_with_coordinator() calls.""" + assert self.stopping.is_set() # only call when something has gone wrong + while True: + try: + identifier, event = self.sync_events.popitem() + logging.debug("cancelling %s %s", identifier, event) + event.set() + except KeyError: + break + + async def message_pump(self): + """Task for receiving resource and place updates.""" + got_message = False + try: + self.stream_call = call = self.stub.ClientStream(queue_as_aiter(self.out_queue)) + async for out_msg in call: + out_msg: labgrid_coordinator_pb2.ClientOutMessage + got_message = True + logging.debug("out_msg from coordinator: %s", out_msg) + for update in out_msg.updates: + update_kind = update.WhichOneof("kind") + if update_kind == "resource": + resource: labgrid_coordinator_pb2.Resource = update.resource + await self.on_resource_changed( + resource.path.exporter_name, + resource.path.group_name, + resource.path.resource_name, + ResourceEntry.data_from_pb2(resource), + ) + elif update_kind == "del_resource": + resource_path: labgrid_coordinator_pb2.Resource.Path = 
update.del_resource + await self.on_resource_changed( + resource_path.exporter_name, resource_path.group_name, resource_path.resource_name, {} + ) + elif update_kind == "place": + place = update.place + await self.on_place_changed(place) + elif update_kind == "del_place": + place_name = update.del_place + await self.on_place_deleted(place_name) + else: + logging.warning("unknown update from coordinator! %s", update_kind) + if out_msg.HasField("sync"): + event = self.sync_events.pop(out_msg.sync.id) + event.set() + except grpc.aio.AioRpcError as e: + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + else: + logging.error("coordinator is unavailable: %s", e.details()) + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + logging.exception("error in coordinator message pump task") + finally: + self.stopping.set() + self.out_queue.put_nowait(None) # let the sender side exit gracefully + self.cancel_pending_syncs() async def on_resource_changed(self, exporter, group_name, resource_name, resource): group = self.resources.setdefault(exporter, {}).setdefault(group_name, {}) @@ -129,44 +217,40 @@ async def on_resource_changed(self, exporter, group_name, resource_name, resourc old = group[resource_name].data group[resource_name].data = resource if self.monitor: - if resource and not old: + if "cls" in resource and not old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} created: {resource}") - elif resource and old: + elif "cls" in resource and old: print(f"Resource {exporter}/{group_name}/{resource['cls']}/{resource_name} changed:") for k, v_old, v_new in diff_dict(flat_dict(old), flat_dict(resource)): print(f" {k}: {v_old} -> {v_new}") else: print(f"Resource {exporter}/{group_name}/???/{resource_name} deleted") - async def on_place_changed(self, name, config): - if not config: - del self.places[name] - if self.monitor: - print(f"Place {name} deleted") - return - config = config.copy() - config["name"] = name - config["matches"] = [ResourceMatch(**match) for match in config["matches"]] - config = filter_dict(config, Place, warn=True) + async def on_place_changed(self, place_pb2: labgrid_coordinator_pb2.Place): + name = place_pb2.name + if name not in self.places: - place = Place(**config) - self.places[name] = place + self.places[name] = Place.from_pb2(place_pb2) if self.monitor: - print(f"Place {name} created: {place}") + print(f"Place {name} created: {place_pb2}") else: place = self.places[name] old = flat_dict(place.asdict()) - place.update(config) + place.update_from_pb2(place_pb2) new = flat_dict(place.asdict()) if self.monitor: print(f"Place {name} changed:") for k, v_old, v_new in diff_dict(old, new): print(f" {k}: {v_old} -> {v_new}") + async def on_place_deleted(self, name: str): + del self.places[name] + if self.monitor: + print(f"Place {name} deleted") + async def do_monitor(self): self.monitor = True - while True: - await asyncio.sleep(3600.0) + await self.stopping.wait() async def complete(self): if self.args.type == "resources": @@ -411,61 +495,62 @@ async def add_place(self): name = self.args.place if not name: raise UserError("missing place name. 
Set with -p or via env var LG_PLACE") - if name in self.places: - raise UserError(f"{name} already exists") - res = await self.call("org.labgrid.coordinator.add_place", name) - if not res: - raise ServerError(f"failed to add place {name}") - return res + + request = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + try: + await self.stub.AddPlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_place(self): """Delete a place from the coordinator""" - pattern = self.args.place - if pattern not in self.places: - raise UserError("deletes require an exact place name") - place = self.places[pattern] - if place.acquired: - raise UserError(f"place {place.name} is not idle (acquired by {place.acquired})") - name = place.name - if not name: - raise UserError("missing place name. Set with -p or via env var $PLACE") - if name not in self.places: - raise UserError(f"{name} does not exist") - res = await self.call("org.labgrid.coordinator.del_place", name) - if not res: - raise ServerError(f"failed to delete place {name}") - return res + place = self.get_idle_place() + request = labgrid_coordinator_pb2.DeletePlaceRequest(name=place.name) + try: + await self.stub.DeletePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_alias(self): """Add an alias for a place on the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias in place.aliases: - raise UserError(f"place {place.name} already has alias {alias}") - res = await self.call("org.labgrid.coordinator.add_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to add alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.AddPlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.AddPlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_alias(self): """Delete an alias for a place from the coordinator""" place = self.get_idle_place() alias = self.args.alias - if alias not in place.aliases: - raise UserError(f"place {place.name} has no alias {alias}") - res = await self.call("org.labgrid.coordinator.del_place_alias", place.name, alias) - if not res: - raise ServerError(f"failed to delete alias {alias} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename=place.name, alias=alias) + + try: + await self.stub.DeletePlaceAlias(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_comment(self): """Set the comment on a place""" place = self.get_place() comment = " ".join(self.args.comment) - res = await self.call("org.labgrid.coordinator.set_place_comment", place.name, comment) - if not res: - raise ServerError(f"failed to set comment {comment} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceCommentRequest(placename=place.name, comment=comment) + + try: + await self.stub.SetPlaceComment(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def set_tags(self): """Set the tags on a place""" @@ -481,10 +566,14 @@ async def set_tags(self): if not TAG_VAL.match(v): raise UserError(f"tag value '{v}' needs to match the rexex '{TAG_VAL.pattern}'") tags[k] = v - res = await 
self.call("org.labgrid.coordinator.set_place_tags", place.name, tags) - if not res: - raise ServerError(f"failed to set tags {' '.join(self.args.tags)} for place {place.name}") - return res + + request = labgrid_coordinator_pb2.SetPlaceTagsRequest(placename=place.name, tags=tags) + + try: + await self.stub.SetPlaceTags(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_match(self): """Add a match for a place, making fuzzy matching available to the @@ -498,9 +587,14 @@ async def add_match(self): if place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' exists, skipping", file=sys.stderr) continue - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def del_match(self): """Delete a match for a place""" @@ -512,9 +606,14 @@ async def del_match(self): raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if not place.hasmatch(pattern.split("/")): print(f"pattern '{pattern}' not found, skipping", file=sys.stderr) - res = await self.call("org.labgrid.coordinator.del_place_match", place.name, pattern) - if not res: - raise ServerError(f"failed to delete match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename=place.name, pattern=pattern) + + try: + await self.stub.DeletePlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) async def add_named_match(self): """Add a named match for a place. 
@@ -527,15 +626,18 @@ async def add_named_match(self): name = self.args.name if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") - if place.hasmatch(pattern.split("/")): - raise UserError(f"pattern '{pattern}' exists") if "*" in pattern: raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") if not name: raise UserError(f"invalid name '{name}'") - res = await self.call("org.labgrid.coordinator.add_place_match", place.name, pattern, name) - if not res: - raise ServerError(f"failed to add match {pattern} for place {place.name}") + + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern, rename=name) + + try: + await self.stub.AddPlaceMatch(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) def check_matches(self, place): resources = [] @@ -558,30 +660,31 @@ async def acquire(self): if not self.args.allow_unmatched: self.check_matches(place) - res = await self.call("org.labgrid.coordinator.acquire_place", place.name) + request = labgrid_coordinator_pb2.AcquirePlaceRequest(placename=place.name) - if res: + try: + await self.stub.AcquirePlace(request) + await self.sync_with_coordinator() print(f"acquired place {place.name}") - return - - # check potential failure causes - for exporter, groups in sorted(self.resources.items()): - for group_name, group in sorted(groups.items()): - for resource_name, resource in sorted(group.items()): - resource_path = (exporter, group_name, resource.cls, resource_name) - if resource.acquired is None: - continue - match = place.getmatch(resource_path) - if match is None: - continue - name = resource_name - if match.rename: - name = match.rename - print( - f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" - ) # pylint: disable=line-too-long + except grpc.aio.AioRpcError as e: + # check potential failure causes + for exporter, groups in sorted(self.resources.items()): + for group_name, group in sorted(groups.items()): + for resource_name, resource in sorted(group.items()): + resource_path = (exporter, group_name, resource.cls, resource_name) + if not resource.acquired: + continue + match = place.getmatch(resource_path) + if match is None: + continue + name = resource_name + if match.rename: + name = match.rename + print( + f"Matching resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}) already acquired by place '{resource.acquired}'" + ) # pylint: disable=line-too-long - raise ServerError(f"failed to acquire place {place.name}") + raise ServerError(e.details()) async def release(self): """Release a previously acquired place""" @@ -595,38 +698,43 @@ async def release(self): f"place {place.name} is acquired by a different user ({place.acquired}), use --kick if you are sure" ) # pylint: disable=line-too-long print(f"warning: kicking user ({place.acquired})") - res = await self.call("org.labgrid.coordinator.release_place", place.name) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"released place {place.name}") async def release_from(self): """Release a place, but only if acquired by a specific 
user""" place = self.get_place() - res = await self.call( - "org.labgrid.coordinator.release_place_from", - place.name, - self.args.acquired, - ) - if not res: - raise ServerError(f"failed to release place {place.name}") + + request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name, fromuser=self.args.acquired) + + try: + await self.stub.ReleasePlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"{self.args.acquired} has released place {place.name}") async def allow(self): """Allow another use access to a previously acquired place""" place = self.get_place() - if not place.acquired: - raise UserError(f"place {place.name} is not acquired") - _, user = place.acquired.split("/") - if user != self.getuser(): - raise UserError(f"place {place.name} is acquired by a different user ({place.acquired})") if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") - res = await self.call("org.labgrid.coordinator.allow_place", place.name, self.args.user) - if not res: - raise ServerError(f"failed to allow {self.args.user} for place {place.name}") + request = labgrid_coordinator_pb2.AllowPlaceRequest(placename=place.name, user=self.args.user) + + try: + await self.stub.AllowPlace(request) + await self.sync_with_coordinator() + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) print(f"allowed {self.args.user} for place {place.name}") @@ -1292,14 +1400,32 @@ def write_image(self): raise UserError(e) async def create_reservation(self): - filters = " ".join(self.args.filters) prio = self.args.prio - res = await self.call("org.labgrid.coordinator.create_reservation", filters, prio=prio) - if res is None: - raise ServerError("failed to create reservation") - ((token, config),) = res.items() # we get a one-item dict - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + + fltr = {} + for pair in self.args.filters: + try: + k, v = pair.split("=") + except ValueError: + raise UserError(f"'{pair}' is not a valid filter (must contain a '=')") + if not TAG_KEY.match(k): + raise UserError(f"Key '{k}' in filter '{pair}' is invalid") + if not TAG_KEY.match(v): + raise UserError(f"Value '{v}' in filter '{pair}' is invalid") + fltr[k] = v + + fltrs = { + "main": labgrid_coordinator_pb2.Reservation.Filter(filter=fltr), + } + + request = labgrid_coordinator_pb2.CreateReservationRequest(filters=fltrs, prio=prio) + + try: + response: labgrid_coordinator_pb2.CreateReservationResponse = await self.stub.CreateReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if self.args.shell: print(f"export LG_TOKEN={res.token}") else: @@ -1311,18 +1437,25 @@ async def create_reservation(self): await self._wait_reservation(res.token, verbose=False) async def cancel_reservation(self): - token = self.args.token - res = await self.call("org.labgrid.coordinator.cancel_reservation", token) - if not res: - raise ServerError(f"failed to cancel reservation {token}") + token: str = self.args.token + + request = labgrid_coordinator_pb2.CancelReservationRequest(token=token) + + try: + await self.stub.CancelReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) - async def _wait_reservation(self, token, verbose=True): + async def _wait_reservation(self, token: str, verbose=True): while True: - config = await 
self.call("org.labgrid.coordinator.poll_reservation", token) - if config is None: - raise ServerError("reservation not found") - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = labgrid_coordinator_pb2.PollReservationRequest(token=token) + + try: + response: labgrid_coordinator_pb2.PollReservationResponse = await self.stub.PollReservation(request) + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + res = Reservation.from_pb2(response.reservation) if verbose: res.show() if res.state is ReservationState.waiting: @@ -1335,10 +1468,15 @@ async def wait_reservation(self): await self._wait_reservation(token) async def print_reservations(self): - reservations = await self.call("org.labgrid.coordinator.get_reservations") - for token, config in sorted(reservations.items(), key=lambda x: (-x[1]["prio"], x[1]["created"])): # pylint: disable=line-too-long - config = filter_dict(config, Reservation, warn=True) - res = Reservation(token=token, **config) + request = labgrid_coordinator_pb2.GetReservationsRequest() + + try: + response: labgrid_coordinator_pb2.GetReservationsResponse = await self.stub.GetReservations(request) + reservations = [Reservation.from_pb2(x) for x in response.reservations] + except grpc.aio.AioRpcError as e: + raise ServerError(e.details()) + + for res in sorted(reservations, key=lambda x: (-x.prio, x.created)): print(f"Reservation '{res.token}':") res.show(level=1) @@ -1378,46 +1516,16 @@ def print_version(self): print(labgrid_version()) -def start_session(url, realm, extra): - from autobahn.asyncio.wamp import ApplicationRunner - +def start_session(address, extra, debug=False): loop = asyncio.get_event_loop() - ready = asyncio.Event() - - async def connected(session): # pylint: disable=unused-argument - ready.set() - - if not extra: - extra = {} - extra["loop"] = loop - extra["connected"] = connected - - session = [None] - - def make(*args, **kwargs): - nonlocal session - session[0] = ClientSession(*args, **kwargs) - return session[0] + if debug: + loop.set_debug(True) - url = proxymanager.get_url(url, default_port=20408) + address = proxymanager.get_grpc_address(address, default_port=20408) - runner = ApplicationRunner(url, realm=realm, extra=extra) - coro = runner.run(make, start_loop=False) - - _, protocol = loop.run_until_complete(coro) - - # there is no other notification when the WAMP connection setup times out, - # so we need to wait for one of these protocol futures to resolve - done, pending = loop.run_until_complete( - asyncio.wait({protocol.is_open, protocol.is_closed}, timeout=30, return_when=asyncio.FIRST_COMPLETED) - ) - if protocol.is_closed in done: - raise Error("connection closed during setup") - if protocol.is_open in pending: - raise Error("connection timed out during setup") - - loop.run_until_complete(ready.wait()) - return session[0] + session = ClientSession(address, loop, **extra) + loop.run_until_complete(session.start()) + return session def find_role_by_place(config, place): @@ -1504,10 +1612,10 @@ def main(): parser = argparse.ArgumentParser() parser.add_argument( "-x", - "--crossbar", - metavar="URL", + "--coordinator", + metavar="ADDRESS", type=str, - help="crossbar websocket URL (default: value from env variable LG_CROSSBAR, otherwise ws://127.0.0.1:20408/ws)", + help="coordinator HOST[:PORT] (default: value from env variable LG_COORDINATOR, otherwise 127.0.0.1:20408)", ) parser.add_argument("-c", "--config", type=str, default=os.environ.get("LG_ENV"), help="config 
file") parser.add_argument("-p", "--place", type=str, default=place, help="place name/alias") @@ -1913,20 +2021,15 @@ def main(): signal.signal(signal.SIGTERM, lambda *_: sys.exit(0)) try: - crossbar_url = args.crossbar or env.config.get_option("crossbar_url") - except (AttributeError, KeyError): - # in case of no env or not set, use LG_CROSSBAR env variable or default - crossbar_url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") - - try: - crossbar_realm = env.config.get_option("crossbar_realm") + coordinator_address = args.coordinator or env.config.get_option("coordinator_address") except (AttributeError, KeyError): - # in case of no env, use LG_CROSSBAR_REALM env variable or default - crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + # in case of no env or not set, use LG_COORDINATOR env variable or default + coordinator_address = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") - logging.debug('Starting session with "%s", realm: "%s"', crossbar_url, crossbar_realm) + logging.debug('Starting session with "%s"', coordinator_address) + session = start_session(coordinator_address, extra, args.debug) + logging.debug("Started session") - session = start_session(crossbar_url, crossbar_realm, extra) try: if asyncio.iscoroutinefunction(args.func): if getattr(args.func, "needs_target", False): @@ -1939,6 +2042,10 @@ def main(): else: args.func(session) finally: + logging.debug("Stopping session") + session.loop.run_until_complete(session.stop()) + session.loop.run_until_complete(session.close()) + logging.debug("Stopping loop") session.loop.close() except (NoResourceFoundError, NoDriverFoundError, InvalidConfigError) as e: if args.debug: @@ -1968,8 +2075,8 @@ def main(): ) # pylint: disable=line-too-long exitcode = 1 - except ConnectionError as e: - print(f"Could not connect to coordinator: {e}", file=sys.stderr) + except ServerError as e: + print(f"Server error: {e}", file=sys.stderr) exitcode = 1 except InteractiveCommandError as e: if args.debug: diff --git a/labgrid/remote/common.py b/labgrid/remote/common.py index 2ea1d2f1a..93b6b22e7 100644 --- a/labgrid/remote/common.py +++ b/labgrid/remote/common.py @@ -1,14 +1,17 @@ -import socket +import asyncio import time import enum import random import re import string +import logging from datetime import datetime from fnmatch import fnmatchcase import attr +from .generated import labgrid_coordinator_pb2 + __all__ = [ "TAG_KEY", "TAG_VAL", @@ -17,19 +20,50 @@ "Place", "ReservationState", "Reservation", - "enable_tcp_nodelay", - "monkey_patch_max_msg_payload_size_ws_option", ] TAG_KEY = re.compile(r"[a-z][a-z0-9_]+") TAG_VAL = re.compile(r"[a-z0-9_]?") +def set_map_from_dict(m, d): + for k, v in d.items(): + assert isinstance(k, str) + if v is None: + m[k].Clear() + elif isinstance(v, bool): + m[k].bool_value = v + elif isinstance(v, int): + if v < 0: + m[k].int_value = v + else: + m[k].uint_value = v + elif isinstance(v, float): + m[k].float_value = v + elif isinstance(v, str): + m[k].string_value = v + else: + raise ValueError(f"cannot translate {repr(v)} to MapValue") + + +def build_dict_from_map(m): + d = {} + for k, v in m.items(): + v: labgrid_coordinator_pb2.MapValue + kind = v.WhichOneof("kind") + if kind is None: + d[k] = None + else: + d[k] = getattr(v, kind) + return d + + @attr.s(eq=False) class ResourceEntry: data = attr.ib() # cls, params def __attrs_post_init__(self): + assert isinstance(self.data, dict) self.data.setdefault("acquired", None) self.data.setdefault("avail", False) @@ -84,6 +118,35 @@ def 
@@ -84,6 +118,35 @@ def release(self):
         # ignore repeated releases
         self.data["acquired"] = None
 
+    def as_pb2(self):
+        msg = labgrid_coordinator_pb2.Resource()
+        msg.cls = self.cls
+        params = self.params.copy()
+        extra = params.pop("extra", {})
+        set_map_from_dict(msg.params, params)
+        set_map_from_dict(msg.extra, extra)
+        if self.acquired is not None:
+            msg.acquired = self.acquired
+        msg.avail = self.avail
+        return msg
+
+    @staticmethod
+    def data_from_pb2(pb2):
+        assert isinstance(pb2, labgrid_coordinator_pb2.Resource)
+        data = {
+            "cls": pb2.cls,
+            "params": build_dict_from_map(pb2.params),
+            "acquired": pb2.acquired or None,
+            "avail": pb2.avail,
+        }
+        data["params"]["extra"] = build_dict_from_map(pb2.extra)
+        return data
+
+    @classmethod
+    def from_pb2(cls, pb2):
+        assert isinstance(pb2, labgrid_coordinator_pb2.Resource)
+        return cls(cls.data_from_pb2(pb2))
+
 
 @attr.s(eq=True, repr=False, str=False)
 # This class requires eq=True, since we put the matches into a list and require
@@ -133,6 +196,26 @@ def ismatch(self, resource_path):
 
         return True
 
+    def as_pb2(self):
+        return labgrid_coordinator_pb2.ResourceMatch(
+            exporter=self.exporter,
+            group=self.group,
+            cls=self.cls,
+            name=self.name,
+            rename=self.rename,
+        )
+
+    @classmethod
+    def from_pb2(cls, pb2):
+        assert isinstance(pb2, labgrid_coordinator_pb2.ResourceMatch)
+        return cls(
+            exporter=pb2.exporter,
+            group=pb2.group,
+            cls=pb2.cls,
+            name=pb2.name if pb2.HasField("name") else None,
+            rename=pb2.rename,
+        )
+
 
 @attr.s(eq=False)
 class Place:
@@ -170,14 +253,19 @@ def asdict(self):
             "reservation": self.reservation,
         }
 
-    def update(self, config):
+    def update_from_pb2(self, place_pb2):
+        # FIXME untangle this...
+        place = Place.from_pb2(place_pb2)
         fields = attr.fields_dict(type(self))
-        for k, v in config.items():
+        for k, v in place.asdict().items():
             assert k in fields
             if k == "name":
                 # we cannot rename places
                 assert v == self.name
                 continue
+            if k == "matches":
+                self.matches = [ResourceMatch.from_pb2(m) for m in place_pb2.matches]
+                continue
             setattr(self, k, v)
 
     def show(self, level=0):
@@ -241,6 +329,56 @@ def unmatched(self, resource_paths):
     def touch(self):
         self.changed = time.time()
 
+    def as_pb2(self):
+        try:
+            acquired_resources = []
+            for resource in self.acquired_resources:
+                assert not isinstance(resource, (tuple, list)), "as_pb2() only implemented for coordinator"
+                assert len(resource.path) == 4
+                path = "/".join(resource.path)
+                acquired_resources.append(path)
+
+            place = labgrid_coordinator_pb2.Place()
+            place.name = self.name
+            place.aliases.extend(self.aliases)
+            place.comment = self.comment
+            place.matches.extend(m.as_pb2() for m in self.matches)
+            place.acquired = self.acquired or ""
+            place.acquired_resources.extend(acquired_resources)
+            place.allowed.extend(self.allowed)
+            place.changed = self.changed
+            place.created = self.created
+            if self.reservation:
+                place.reservation = self.reservation
+            for key, value in self.tags.items():
+                place.tags[key] = value
+            return place
+        except TypeError:
+            logging.exception("failed to convert place %s to protobuf", self)
+            raise
+
+    @classmethod
+    def from_pb2(cls, pb2):
+        assert isinstance(pb2, labgrid_coordinator_pb2.Place)
+        acquired_resources = []
+        for path in pb2.acquired_resources:
+            path = path.split("/")
+            assert len(path) == 4
+            acquired_resources.append(path)
+        return cls(
+            name=pb2.name,
+            aliases=pb2.aliases,
+            comment=pb2.comment,
+            tags=dict(pb2.tags),
+            matches=[ResourceMatch.from_pb2(m) for m in pb2.matches],
+            acquired=pb2.acquired if pb2.HasField("acquired") and pb2.acquired else None,
+            
acquired_resources=acquired_resources, + allowed=pb2.allowed, + created=pb2.created, + changed=pb2.changed, + reservation=pb2.reservation if pb2.HasField("reservation") else None, + ) + class ReservationState(enum.Enum): waiting = 0 @@ -304,44 +442,58 @@ def show(self, level=0): print(indent + f"created: {datetime.fromtimestamp(self.created)}") print(indent + f"timeout: {datetime.fromtimestamp(self.timeout)}") + def as_pb2(self): + res = labgrid_coordinator_pb2.Reservation() + res.owner = self.owner + res.token = self.token + res.state = self.state.value + res.prio = self.prio + for name, fltr in self.filters.items(): + res.filters[name].CopyFrom(labgrid_coordinator_pb2.Reservation.Filter(filter=fltr)) + if self.allocations: + # TODO: refactor to have only one place per filter group + assert len(self.allocations) == 1 + assert "main" in self.allocations + allocation = self.allocations["main"] + assert len(allocation) == 1 + res.allocations.update({"main": allocation[0]}) + res.created = self.created + res.timeout = self.timeout + return res -def enable_tcp_nodelay(session): - """ - asyncio/autobahn does not set TCP_NODELAY by default, so we need to do it - like this for now. - """ - s = session._transport.transport.get_extra_info("socket") - s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, True) - - -def monkey_patch_max_msg_payload_size_ws_option(): - """ - The default maxMessagePayloadSize in autobahn is 1M. For larger setups with a big number of - exported resources, this becomes the limiting factor. - Increase maxMessagePayloadSize in WampWebSocketClientFactory.setProtocolOptions() by monkey - patching it, so autobahn.asyncio.wamp.ApplicationRunner effectively sets the increased value. - - This function must be called before ApplicationRunner is instanciated. 
- """ - from autobahn.asyncio.websocket import WampWebSocketClientFactory - - original_method = WampWebSocketClientFactory.setProtocolOptions - - def set_protocol_options(*args, **kwargs): - new_max_message_payload_size = 10485760 - - # maxMessagePayloadSize given as positional arg - args = list(args) - try: - args[9] = max((args[9], new_max_message_payload_size)) - except IndexError: - pass - - # maxMessagePayloadSize given as kwarg - kwarg_name = "maxMessagePayloadSize" - if kwarg_name in kwargs and kwargs[kwarg_name] is not None: - kwargs[kwarg_name] = max((kwargs[kwarg_name], new_max_message_payload_size)) - - return original_method(*args, **kwargs) - - WampWebSocketClientFactory.setProtocolOptions = set_protocol_options + @classmethod + def from_pb2(cls, pb2: labgrid_coordinator_pb2.Reservation): + filters = {} + for name, fltr_pb2 in pb2.filters.items(): + filters[name] = dict(fltr_pb2.filter) + allocations = {} + for fltr_name, place_name in pb2.allocations.items(): + allocations[fltr_name] = [place_name] + return cls( + owner=pb2.owner, + token=pb2.token, + state=ReservationState(pb2.state), + prio=pb2.prio, + filters=filters, + allocations=allocations, + created=pb2.created, + timeout=pb2.timeout, + ) + + +async def queue_as_aiter(q): + try: + while True: + try: + item = await q.get() + except asyncio.CancelledError: + # gRPC doesn't like to receive exceptions from the request_iterator + return + if item is None: + return + yield item + q.task_done() + logging.debug("sent message %s", item) + except Exception: + logging.exception("error in queue_as_aiter") + raise diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index e3ba8210f..b4d4cf27a 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -1,26 +1,29 @@ -"""The coordinator module coordinates exported resources and clients accessing them.""" - -# pylint: disable=no-member,unused-argument +#!/usr/bin/env python3 +import argparse +import logging import asyncio -import sys import traceback -from collections import defaultdict -from os import environ -from pprint import pprint from enum import Enum from functools import wraps import attr -from autobahn import wamp -from autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession -from autobahn.wamp.types import RegisterOptions - -from .common import * # pylint: disable=wildcard-import +import grpc +from grpc_reflection.v1alpha import reflection + +from .common import ( + ResourceEntry, + ResourceMatch, + Place, + Reservation, + ReservationState, + queue_as_aiter, + TAG_KEY, + TAG_VAL, +) from .scheduler import TagSet, schedule -from ..util import atomic_replace, yaml - - -monkey_patch_max_msg_payload_size_ws_option() +from .generated import labgrid_coordinator_pb2 +from .generated import labgrid_coordinator_pb2_grpc +from ..util import atomic_replace, labgrid_version, yaml class Action(Enum): @@ -34,19 +37,10 @@ class RemoteSession: """class encapsulating a session, used by ExporterSession and ClientSession""" coordinator = attr.ib() - session = attr.ib() - authid = attr.ib() - version = attr.ib(default="unknown", init=False) - - @property - def key(self): - """Key of the session""" - return self.session - - @property - def name(self): - """Name of the session""" - return self.authid.split("/", 1)[1] + peer = attr.ib() + name = attr.ib() + queue = attr.ib() + version = attr.ib() @attr.s(eq=False) @@ -56,26 +50,41 @@ class ExporterSession(RemoteSession): groups = attr.ib(default=attr.Factory(dict), init=False) - def 
set_resource(self, groupname, resourcename, resourcedata): + def set_resource(self, groupname, resourcename, resource): + """This is called when Exporters update resources or when they disconnect.""" + logging.info("set_resource %s %s %s", groupname, resourcename, resource) group = self.groups.setdefault(groupname, {}) old = group.get(resourcename) - if resourcedata and old: - old.update(resourcedata) - new = old - elif resourcedata and not old: - new = group[resourcename] = ResourceImport( - resourcedata, path=(self.name, groupname, resourcedata["cls"], resourcename) + if resource is not None: + new = ResourceImport( + data=ResourceImport.data_from_pb2(resource), path=(self.name, groupname, resource.cls, resourcename) ) - elif not resourcedata and old: - new = None - del group[resourcename] + if old: + old.data.update(new.data) + new = old + else: + group[resourcename] = new else: - assert not resourcedata and not old new = None + try: + del group[resourcename] + except KeyError: + pass - self.coordinator.publish( - "org.labgrid.coordinator.resource_changed", self.name, groupname, resourcename, new.asdict() if new else {} - ) + msg = labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + if new: + update.resource.CopyFrom(new.as_pb2()) + update.resource.path.exporter_name = self.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + else: + update.del_resource.exporter_name = self.name + update.del_resource.group_name = groupname + update.del_resource.resource_name = resourcename + + for client in self.coordinator.clients.values(): + client.queue.put_nowait(msg) if old and new: assert old is new @@ -99,7 +108,38 @@ def get_resources(self): @attr.s(eq=False) class ClientSession(RemoteSession): - pass + def subscribe_places(self): + # send initial places + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + for place in self.coordinator.places.values(): + place: Place + out_msg.updates.add().place.CopyFrom(place.as_pb2()) + self.queue.put_nowait(out_msg) + + def subscribe_resources(self): + # collect initial resources + collected = [] + logging.debug("sending resources to %s", self) + for exporter in self.coordinator.exporters.values(): + logging.debug("sending resources %s", exporter) + exporter: ExporterSession + for groupname, group in exporter.groups.items(): + logging.debug("sending resources %s", groupname) + for resourcename, resource in group.items(): + logging.debug("sending resources %s", resourcename) + resource: ResourceImport + update = labgrid_coordinator_pb2.UpdateResponse() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = exporter.name + update.resource.path.group_name = groupname + update.resource.path.resource_name = resourcename + collected.append(update) + # send batches + while collected: + batch, collected = collected[:100], collected[100:] + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.updates.extend(batch) + self.queue.put_nowait(out_msg) @attr.s(eq=False) @@ -121,135 +161,42 @@ async def wrapper(self, *args, **kwargs): return wrapper -class CoordinatorComponent(ApplicationSession): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - self.lock = asyncio.Lock() +class ExporterCommand: + def __init__(self, request) -> None: + self.request = request + self.response = None + self.completed = asyncio.Event() - @locked - async def onConnect(self): - self.sessions = {} + def complete(self, response) -> None: + self.response = 
response + self.completed.set() + + async def wait(self): + await asyncio.wait_for(self.completed.wait(), 10) + + +class ExporterError(Exception): + pass + + +class Coordinator(labgrid_coordinator_pb2_grpc.CoordinatorServicer): + def __init__(self) -> None: self.places = {} self.reservations = {} self.poll_task = None self.save_scheduled = False + self.lock = asyncio.Lock() + self.exporters: dict[str, ExporterSession] = {} + self.clients: dict[str, ClientSession] = {} self.load() - self.save_later() - - enable_tcp_nodelay(self) - self.join( - self.config.realm, - authmethods=["anonymous"], - authid="coordinator", - authextra={"authid": "coordinator"}, - ) - - @locked - async def onJoin(self, details): - await self.subscribe(self.on_session_join, "wamp.session.on_join") - await self.subscribe(self.on_session_leave, "wamp.session.on_leave") - await self.register( - self.attach, "org.labgrid.coordinator.attach", options=RegisterOptions(details_arg="details") - ) - - # resources - await self.register( - self.set_resource, "org.labgrid.coordinator.set_resource", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_resources, "org.labgrid.coordinator.get_resources") - - # places - await self.register(self.add_place, "org.labgrid.coordinator.add_place") - await self.register(self.del_place, "org.labgrid.coordinator.del_place") - await self.register(self.add_place_alias, "org.labgrid.coordinator.add_place_alias") - await self.register(self.del_place_alias, "org.labgrid.coordinator.del_place_alias") - await self.register(self.set_place_tags, "org.labgrid.coordinator.set_place_tags") - await self.register(self.set_place_comment, "org.labgrid.coordinator.set_place_comment") - await self.register(self.add_place_match, "org.labgrid.coordinator.add_place_match") - await self.register(self.del_place_match, "org.labgrid.coordinator.del_place_match") - await self.register( - self.acquire_place, "org.labgrid.coordinator.acquire_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place, "org.labgrid.coordinator.release_place", options=RegisterOptions(details_arg="details") - ) - await self.register( - self.release_place_from, - "org.labgrid.coordinator.release_place_from", - options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.allow_place, "org.labgrid.coordinator.allow_place", options=RegisterOptions(details_arg="details") - ) - await self.register(self.get_places, "org.labgrid.coordinator.get_places") - - # reservations - await self.register( - self.create_reservation, - "org.labgrid.coordinator.create_reservation", - options=RegisterOptions(details_arg="details"), - ) - await self.register( - self.cancel_reservation, - "org.labgrid.coordinator.cancel_reservation", - ) - await self.register( - self.poll_reservation, - "org.labgrid.coordinator.poll_reservation", - ) - await self.register( - self.get_reservations, - "org.labgrid.coordinator.get_reservations", - ) self.poll_task = asyncio.get_event_loop().create_task(self.poll()) - print("Coordinator ready.") - - @locked - async def onLeave(self, details): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - @locked - async def onDisconnect(self): - await self.save() - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - async def _poll_step(self): # save changes if 
self.save_scheduled: await self.save() - # poll exporters - for session in list(self.sessions.values()): - if isinstance(session, ExporterSession): - fut = self.call(f"org.labgrid.exporter.{session.name}.version") - done, _ = await asyncio.wait([fut], timeout=5) - if not done: - print(f"kicking exporter ({session.key}/{session.name})") - await self.call("wamp.session.kill", session.key, message="timeout detected by coordinator") - print(f"cleaning up exporter ({session.key}/{session.name})") - await self.on_session_leave(session.key) - print(f"removed exporter ({session.key}/{session.name})") - continue - try: - session.version = done.pop().result() - except wamp.exception.ApplicationError as e: - if e.error == "wamp.error.no_such_procedure": - pass # old client - elif e.error == "wamp.error.canceled": - pass # disconnected - elif e.error == "wamp.error.no_such_session": - pass # client has already disconnected - else: - raise # update reservations self.schedule_reservations() @@ -265,9 +212,17 @@ async def poll(self): traceback.print_exc() def save_later(self): + logging.debug("Setting Save-later") self.save_scheduled = True + def _get_resources(self): + result = {} + for session in self.exporters.values(): + result[session.name] = session.get_resources() + return result + async def save(self): + logging.debug("Running Save") self.save_scheduled = False resources = self._get_resources() @@ -278,7 +233,9 @@ async def save(self): places = places.encode() loop = asyncio.get_event_loop() + logging.debug("Awaiting resources") await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) + logging.debug("Awaiting places") await loop.run_in_executor(None, atomic_replace, "places.yaml", places) def load(self): @@ -302,6 +259,59 @@ def load(self): self.places[placename] = place except FileNotFoundError: pass + logging.info("loaded %s place(s)", len(self.places)) + + async def ClientStream(self, request_iterator, context): + peer = context.peer() + logging.info("client connected: %s", peer) + assert peer not in self.clients + out_msg_queue = asyncio.Queue() + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ClientInMessage + logging.debug("client in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind == "sync": + out_msg = labgrid_coordinator_pb2.ClientOutMessage() + out_msg.sync.id = in_msg.sync.id + out_msg_queue.put_nowait(out_msg) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.clients[peer] = ClientSession(self, peer, name, out_msg_queue, version) + logging.debug("Received startup from %s with %s", name, version) + elif kind == "subscribe": + if in_msg.subscribe.all_places: + session.subscribe_places() + if in_msg.subscribe.all_resources: + session.subscribe_resources() + else: + logging.warning("received unknown kind %s from client %s (version %s)", kind, name, version) + logging.debug("client request_task done: %s", context.done()) + except Exception: + logging.exception("error in client message handler") + + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + + try: + async for out_msg in queue_as_aiter(out_msg_queue): + out_msg: labgrid_coordinator_pb2.ClientOutMessage + logging.debug("client output %s", out_msg) + yield out_msg + finally: + try: + session = self.clients.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + + 
runnning_request_task.cancel() + await runnning_request_task + logging.debug("client aborted %s, cancelled: %s", session, context.cancelled()) def _add_default_place(self, name): if name in self.places: @@ -313,6 +323,11 @@ def _add_default_place(self, name): place.matches.append(ResourceMatch(exporter="*", group=name, cls="*")) self.places[name] = place + def get_exporter_by_name(self, name): + for exporter in self.exporters.values(): + if exporter.name == name: + return exporter + async def _update_acquired_places(self, action, resource, callback=True): """Update acquired places when resources are added or removed.""" if action not in [Action.ADD, Action.DEL]: @@ -340,151 +355,178 @@ async def _update_acquired_places(self, action, resource, callback=True): self._publish_place(place) def _publish_place(self, place): - self.publish("org.labgrid.coordinator.place_changed", place.name, place.asdict()) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().place.CopyFrom(place.as_pb2()) + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + def _publish_resource(self, resource: ResourceImport): + msg = labgrid_coordinator_pb2.ClientOutMessage() + update = msg.updates.add() + update.resource.CopyFrom(resource.as_pb2()) + update.resource.path.exporter_name = resource.path[0] + update.resource.path.group_name = resource.path[1] + update.resource.path.resource_name = resource.path[3] + + for client in self.clients.values(): + client.queue.put_nowait(msg) + + async def ExporterStream(self, request_iterator, context): + peer = context.peer() + logging.info("exporter connected: %s", peer) + assert peer not in self.exporters + command_queue = asyncio.Queue() + pending_commands = [] + + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.hello.version = labgrid_version() + yield out_msg + + async def request_task(): + name = None + version = None + try: + async for in_msg in request_iterator: + in_msg: labgrid_coordinator_pb2.ExporterInMessage + logging.debug("exporter in_msg %s", in_msg) + kind = in_msg.WhichOneof("kind") + if kind in "response": + cmd = pending_commands.pop(0) + cmd.complete(in_msg.response) + logging.debug("Command %s is done", cmd) + elif kind == "startup": + version = in_msg.startup.version + name = in_msg.startup.name + session = self.exporters[peer] = ExporterSession(self, peer, name, command_queue, version) + logging.debug("Exporters: %s", self.exporters) + logging.debug("Received startup from %s with %s", name, version) + elif kind == "resource": + logging.debug("Received resource from %s with %s", name, in_msg.resource) + action, resource = session.set_resource( + in_msg.resource.path.group_name, in_msg.resource.path.resource_name, in_msg.resource + ) + if action is Action.ADD: + async with self.lock: + self._add_default_place(in_msg.resource.path.group_name) + if action in (Action.ADD, Action.DEL): + async with self.lock: + await self._update_acquired_places(action, resource) + self.save_later() + else: + logging.warning("received unknown kind %s from exporter %s (version %s)", kind, name, version) - def _publish_resource(self, resource): - self.publish( - "org.labgrid.coordinator.resource_changed", - resource.path[0], # exporter name - resource.path[1], # group name - resource.path[3], # resource name - resource.asdict(), - ) + logging.debug("exporter request_task done: %s", context.done()) + except Exception: + logging.exception("error in exporter message handler") - @locked - async def on_session_join(self, 
session_details): - print("join") - pprint(session_details) - session = session_details["session"] - authid = session_details["authextra"].get("authid") or session_details["authid"] - if authid.startswith("client/"): - session = ClientSession(self, session, authid) - elif authid.startswith("exporter/"): - session = ExporterSession(self, session, authid) - else: - return - self.sessions[session.key] = session + runnning_request_task = asyncio.get_event_loop().create_task(request_task()) - @locked - async def on_session_leave(self, session_id): - print(f"leave ({session_id})") try: - session = self.sessions.pop(session_id) - except KeyError: - return - if isinstance(session, ExporterSession): + async for cmd in queue_as_aiter(command_queue): + logging.debug("exporter cmd %s", cmd) + out_msg = labgrid_coordinator_pb2.ExporterOutMessage() + out_msg.set_acquired_request.CopyFrom(cmd.request) + pending_commands.append(cmd) + yield out_msg + except asyncio.exceptions.CancelledError: + logging.info("exporter disconnected %s", context.peer()) + except Exception: + logging.exception("error in exporter command handler") + finally: + runnning_request_task.cancel() + await runnning_request_task + + try: + session = self.exporters.pop(peer) + except KeyError: + logging.info("Never received startup from peer %s that disconnected", peer) + return + for groupname, group in session.groups.items(): for resourcename in group.copy(): - action, resource = session.set_resource(groupname, resourcename, {}) + action, resource = session.set_resource(groupname, resourcename, None) await self._update_acquired_places(action, resource, callback=False) - self.save_later() - - @locked - async def attach(self, name, details=None): - # TODO check if name is in use - session = self.sessions[details.caller] - session_details = self.sessions[session] - session_details["name"] = name - self.exporters[name] = defaultdict(dict) - - # not @locked because set_resource my be triggered by a acquire() call to - # an exporter, leading to a deadlock on acquire_place() - async def set_resource(self, groupname, resourcename, resourcedata, details=None): - """Called by exporter to create/update/remove resources.""" - session = self.sessions.get(details.caller) - if session is None: - return - assert isinstance(session, ExporterSession) - - groupname = str(groupname) - resourcename = str(resourcename) - # TODO check if acquired - print(details) - pprint(resourcedata) - action, resource = session.set_resource(groupname, resourcename, resourcedata) - if action is Action.ADD: - async with self.lock: - self._add_default_place(groupname) - if action in (Action.ADD, Action.DEL): - async with self.lock: - await self._update_acquired_places(action, resource) - self.save_later() - def _get_resources(self): - result = {} - for session in self.sessions.values(): - if isinstance(session, ExporterSession): - result[session.name] = session.get_resources() - return result - - @locked - async def get_resources(self, details=None): - return self._get_resources() + logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) @locked - async def add_place(self, name, details=None): + async def AddPlace(self, request, context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} already exists") + logging.debug("Adding 
%s", name) place = Place(name) self.places[name] = place self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceResponse() @locked - async def del_place(self, name, details=None): + async def DeletePlace(self, request, context): + name = request.name if not name or not isinstance(name, str): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, "name was not a string") if name not in self.places: - return False + await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Place {name} does not exist") + logging.debug("Deleting %s", name) del self.places[name] - self.publish("org.labgrid.coordinator.place_changed", name, {}) + msg = labgrid_coordinator_pb2.ClientOutMessage() + msg.updates.add().del_place = name + for client in self.clients.values(): + client.queue.put_nowait(msg) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceResponse() @locked - async def add_place_alias(self, placename, alias, details=None): + async def AddPlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.aliases.add(alias) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AddPlaceAliasResponse() @locked - async def del_place_alias(self, placename, alias, details=None): + async def DeletePlaceAlias(self, request, context): + placename = request.placename + alias = request.alias try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") try: place.aliases.remove(alias) except ValueError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Failed to remove {alias} from {placename}") place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.DeletePlaceAliasResponse() @locked - async def set_place_tags(self, placename, tags, details=None): + async def SetPlaceTags(self, request, context): + placename = request.placename + tags = dict(request.tags) try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") assert isinstance(tags, dict) for k, v in tags.items(): assert isinstance(k, str) assert isinstance(v, str) if not TAG_KEY.match(k): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} in {tags} is invalid") if not TAG_VAL.match(v): - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} in {tags} is invalid") for k, v in tags.items(): if not v: try: @@ -496,52 +538,62 @@ async def set_place_tags(self, placename, tags, details=None): place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceTagsResponse() @locked - async def set_place_comment(self, placename, comment, details=None): + async def SetPlaceComment(self, request, context): + placename = request.placename + comment = request.comment try: place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") place.comment = comment place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.SetPlaceCommentResponse() 
@locked
-    async def add_place_match(self, placename, pattern, rename=None, details=None):
+    async def AddPlaceMatch(self, request, context):
+        placename = request.placename
+        pattern = request.pattern
+        rename = request.rename if request.HasField("rename") else None
         try:
             place = self.places[placename]
         except KeyError:
-            return False
-        match = ResourceMatch(*pattern.split("/"), rename=rename)
-        if match in place.matches:
-            return False
-        place.matches.append(match)
+            await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist")
+        rm = ResourceMatch(*pattern.split("/"), rename=rename)
+        if rm in place.matches:
+            await context.abort(grpc.StatusCode.ALREADY_EXISTS, f"Match {rm} already exists")
+        place.matches.append(rm)
         place.touch()
         self._publish_place(place)
         self.save_later()
-        return True
+        return labgrid_coordinator_pb2.AddPlaceMatchResponse()
 
     @locked
-    async def del_place_match(self, placename, pattern, rename=None, details=None):
+    async def DeletePlaceMatch(self, request, context):
+        placename = request.placename
+        pattern = request.pattern
+        rename = request.rename if request.HasField("rename") else None
         try:
             place = self.places[placename]
         except KeyError:
-            return False
-        match = ResourceMatch(*pattern.split("/"), rename=rename)
+            await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist")
+        rm = ResourceMatch(*pattern.split("/"), rename=rename)
         try:
-            place.matches.remove(match)
+            place.matches.remove(rm)
         except ValueError:
-            return False
+            await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Match {rm} does not exist in {placename}")
         place.touch()
         self._publish_place(place)
         self.save_later()
-        return True
+        return labgrid_coordinator_pb2.DeletePlaceMatchResponse()
 
     async def _acquire_resources(self, place, resources):
+        assert self.lock.locked()
+
         resources = resources.copy()  # we may modify the list
 
         # all resources need to be free
         for resource in resources:
@@ -554,12 +606,18 @@ async def _acquire_resources(self, place, resources):
         for resource in resources:
             # this triggers an update from the exporter which is published
             # to the clients
-            await self.call(
-                f"org.labgrid.exporter.{resource.path[0]}.acquire", resource.path[1], resource.path[3], place.name
-            )
+            request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest()
+            request.group_name = resource.path[1]
+            request.resource_name = resource.path[3]
+            request.place_name = place.name
+            cmd = ExporterCommand(request)
+            self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd)
+            await cmd.wait()
+            if not cmd.response.success:
+                raise ExporterError(f"failed to acquire {resource}")
             acquired.append(resource)
-        except:
-            print(f"failed to acquire {resource}", file=sys.stderr)
+        except Exception:
+            logging.exception("failed to acquire %s", resource)
             # cleanup
             await self._release_resources(place, acquired)
             return False
@@ -570,6 +628,8 @@ async def _acquire_resources(self, place, resources):
         return True
 
     async def _release_resources(self, place, resources, callback=True):
+        assert self.lock.locked()
+
         resources = resources.copy()  # we may modify the list
 
         for resource in resources:
@@ -583,37 +643,48 @@ async def _release_resources(self, place, resources, callback=True):
             # this triggers an update from the exporter which is published
             # to the clients
             if callback:
-                await self.call(
-                    f"org.labgrid.exporter.{resource.path[0]}.release", resource.path[1], resource.path[3]
-                )
-        except:
-            print(f"failed to release {resource}", file=sys.stderr)
+                request = 
labgrid_coordinator_pb2.ExporterSetAcquiredRequest() + request.group_name = resource.path[1] + request.resource_name = resource.path[3] + # request.place_name is left unset to indicate release + cmd = ExporterCommand(request) + self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd) + await cmd.wait() + if not cmd.response.success: + raise ExporterError(f"failed to release {resource}") + except (ExporterError, TimeoutError): + logging.exception("failed to release %s", resource) # at leaset try to notify the clients try: self._publish_resource(resource) except: - pass + logging.exception("failed to publish released resource %s", resource) @locked - async def acquire_place(self, name, details=None): - print(details) + async def AcquirePlace(self, request, context): + peer = context.peer() + name = request.placename + try: + username = self.clients[peer].name + except KeyError: + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") + print(request) + try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} does not exist") if place.acquired: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} is already acquired") if place.reservation: res = self.reservations[place.reservation] - if not res.owner == self.sessions[details.caller].name: - return False + if not res.owner == username: + await context.abort(grpc.StatusCode.PERMISSION_DENIED, f"Place {name} was not reserved for {username}") # FIXME use the session object instead? or something else which # survives disconnecting clients? - place.acquired = self.sessions[details.caller].name + place.acquired = username resources = [] - for _, session in sorted(self.sessions.items()): - if not isinstance(session, ExporterSession): - continue + for _, session in sorted(self.exporters.items()): for _, group in sorted(session.groups.items()): for _, resource in sorted(group.items()): if not place.hasmatch(resource.path): @@ -622,23 +693,29 @@ async def acquire_place(self, name, details=None): if not await self._acquire_resources(place, resources): # revert earlier change place.acquired = None - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Failed to acquire resources for place {name}") place.touch() self._publish_place(place) self.save_later() self.schedule_reservations() print(f"{place.name}: place acquired by {place.acquired}") - return True + return labgrid_coordinator_pb2.AcquirePlaceResponse() @locked - async def release_place(self, name, details=None): - print(details) + async def ReleasePlace(self, request, context): + name = request.placename + print(request) + fromuser = request.fromuser if request.HasField("fromuser") else None try: place = self.places[name] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {name} does not exist") if not place.acquired: - return False + if fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {name} is not acquired") + if fromuser and place.acquired != fromuser: + return labgrid_coordinator_pb2.ReleasePlaceResponse() await self._release_resources(place, place.acquired_resources) @@ -649,66 +726,43 @@ async def release_place(self, name, details=None): self.save_later() self.schedule_reservations() print(f"{place.name}: place released") - return True + return 
labgrid_coordinator_pb2.ReleasePlaceResponse() @locked - async def release_place_from(self, name, acquired, details=None): - """ - Release a place, but only if acquired by a specific user - - Note that unlike the release_place API, this function returns True as - long as the specific place is not acquired by the specified user. This - may mean that the place was not acquired at all, is acquired by - another, or was released; which of these states cannot be inferred from - the return code. This is intentional as the purpose of the command is - to validate that the specified user no longer owns the place, and the - exact state is irrelevant as long as that condition is met. - - Returns: - bool: True if the user no longer owns the place, or False if there - was an error that prevented releasing the place - """ + async def AllowPlace(self, request, context): + placename = request.placename + user = request.user + peer = context.peer() try: - place = self.places[name] + username = self.clients[peer].name except KeyError: - return False - if not place.acquired: - return True - if place.acquired != acquired: - return True - - await self._release_resources(place, place.acquired_resources) - - place.acquired = None - place.allowed = set() - place.touch() - self._publish_place(place) - self.save_later() - self.schedule_reservations() - return True - - @locked - async def allow_place(self, name, user, details=None): + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Peer {peer} does not have a valid session") try: - place = self.places[name] + place = self.places[placename] except KeyError: - return False + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Place {placename} does not exist") if not place.acquired: - return False - if not place.acquired == self.sessions[details.caller].name: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired") + if not place.acquired == username: + await context.abort( + grpc.StatusCode.FAILED_PRECONDITION, f"Place {placename} is not acquired by {username}" + ) place.allowed.add(user) place.touch() self._publish_place(place) self.save_later() - return True + return labgrid_coordinator_pb2.AllowPlaceResponse() def _get_places(self): return {k: v.asdict() for k, v in self.places.items()} @locked - async def get_places(self, details=None): - return self._get_places() + async def GetPlaces(self, unused_request, unused_context): + logging.debug("GetPlaces") + try: + return labgrid_coordinator_pb2.GetPlacesResponse(places=[x.as_pb2() for x in self.places.values()]) + except Exception: + logging.exception("error during get places") def schedule_reservations(self): # The primary information is stored in the reservations and the places @@ -816,54 +870,129 @@ def schedule_reservations(self): self._publish_place(place) @locked - async def create_reservation(self, spec, prio=0.0, details=None): - filter_ = {} - for pair in spec.split(): - try: - k, v = pair.split("=") - except ValueError: - return None - if not TAG_KEY.match(k): - return None - if not TAG_VAL.match(v): - return None - filter_[k] = v - - filters = {"main": filter_} # currently, only one group is implemented - - owner = self.sessions[details.caller].name - res = Reservation(owner=owner, prio=prio, filters=filters) + async def CreateReservation(self, request: labgrid_coordinator_pb2.CreateReservationRequest, context): + peer = context.peer() + + fltrs = {} + for name, fltr_pb in request.filters.items(): + if name != "main": + await 
context.abort( + grpc.StatusCode.UNIMPLEMENTED, "Reservations for multiple groups are not implemented yet" + ) + fltr = fltrs[name] = {} + for k, v in fltr_pb.filter.items(): + if not TAG_KEY.match(k): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} is invalid") + if not TAG_VAL.match(v): + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} is invalid") + fltr[k] = v + + owner = self.clients[peer].name + res = Reservation(owner=owner, prio=request.prio, filters=fltrs) self.reservations[res.token] = res self.schedule_reservations() - return {res.token: res.asdict()} + return labgrid_coordinator_pb2.CreateReservationResponse(reservation=res.as_pb2()) @locked - async def cancel_reservation(self, token, details=None): - if not isinstance(token, str): - return False + async def CancelReservation(self, request: labgrid_coordinator_pb2.CancelReservationRequest, context): + token = request.token + if not isinstance(token, str) or not token: + await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Invalid token {token}") if token not in self.reservations: - return False + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") del self.reservations[token] self.schedule_reservations() - return True + return labgrid_coordinator_pb2.CancelReservationResponse() @locked - async def poll_reservation(self, token, details=None): + async def PollReservation(self, request: labgrid_coordinator_pb2.PollReservationRequest, context): + token = request.token try: res = self.reservations[token] except KeyError: - return None + await context.abort(grpc.StatusCode.FAILED_PRECONDITION, f"Reservation {token} does not exist") res.refresh() - return res.asdict() + return labgrid_coordinator_pb2.PollReservationResponse(reservation=res.as_pb2()) @locked - async def get_reservations(self, details=None): - return {k: v.asdict() for k, v in self.reservations.items()} + async def GetReservations(self, request: labgrid_coordinator_pb2.GetReservationsRequest, context): + reservations = [x.as_pb2() for x in self.reservations.values()] + return labgrid_coordinator_pb2.GetReservationsResponse(reservations=reservations) + + +async def serve(listen, cleanup) -> None: + server = grpc.aio.server( + options=[ + ("grpc.keepalive_time_ms", 30000), # Send keepalive ping every 30 seconds + ( + "grpc.keepalive_timeout_ms", + 10000, + ), # Wait 10 seconds for ping ack before considering the connection dead + ("grpc.http2.min_time_between_pings_ms", 15000), # Minimum amount of time between pings + ("grpc.http2.max_pings_without_data", 0), # Allow pings even without active streams + ("grpc.keepalive_permit_without_calls", 1), # Allow keepalive pings even when there are no calls + ], + ) + coordinator = Coordinator() + labgrid_coordinator_pb2_grpc.add_CoordinatorServicer_to_server(coordinator, server) + # enable reflection for use with grpcurl + reflection.enable_server_reflection( + ( + labgrid_coordinator_pb2.DESCRIPTOR.services_by_name["Coordinator"].full_name, + reflection.SERVICE_NAME, + ), + server, + ) + # optionally enable channelz for use with grpcdebug + try: + from grpc_channelz.v1 import channelz + + channelz.add_channelz_servicer(server) + logging.info("Enabled channelz support") + except ImportError: + logging.info("Module grpcio-channelz not available") + + server.add_insecure_port(listen) + logging.debug("Starting server") + await server.start() + + async def server_graceful_shutdown(): + logging.info("Starting graceful shutdown...") + # Shuts down the 
server with 0 seconds of grace period. During the + # grace period, the server won't accept new connections and allow + # existing RPCs to continue within the grace period. + await server.stop(5) + + cleanup.append(server_graceful_shutdown()) + logging.info("Coordinator ready") + await server.wait_for_termination() + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "-l", + "--listen", + metavar="HOST:PORT", + type=str, + default="[::]:20408", + help="coordinator listening host and port", + ) + parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") + + args = parser.parse_args() + + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) + + loop = asyncio.get_event_loop() + cleanup = [] + loop.set_debug(True) + try: + loop.run_until_complete(serve(args.listen, cleanup)) + finally: + if cleanup: + loop.run_until_complete(*cleanup) if __name__ == "__main__": - runner = ApplicationRunner( - url=environ.get("WS", "ws://127.0.0.1:20408/ws"), - realm="realm1", - ) - runner.run(CoordinatorComponent) + main() diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index dde83bb7a..78b8ca606 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -7,7 +7,6 @@ import sys import os import os.path -import time import traceback import shutil import subprocess @@ -15,17 +14,16 @@ from pathlib import Path from typing import Dict, Type from socket import gethostname, getfqdn + import attr -from autobahn.asyncio.wamp import ApplicationRunner, ApplicationSession +import grpc from .config import ResourceConfig -from .common import ResourceEntry, enable_tcp_nodelay, monkey_patch_max_msg_payload_size_ws_option +from .common import ResourceEntry, queue_as_aiter +from .generated import labgrid_coordinator_pb2, labgrid_coordinator_pb2_grpc from ..util import get_free_port, labgrid_version -monkey_patch_max_msg_payload_size_ws_option() - -__version__ = labgrid_version() exports: Dict[str, Type[ResourceEntry]] = {} reexec = False @@ -112,10 +110,10 @@ def start(self): start_params = self._get_start_params() try: self._start(start_params) - except Exception: + except Exception as e: self.broken = "start failed" self.logger.exception("failed to start with %s", start_params) - raise + raise BrokenResourceError("Failed to start resource") from e self.start_params = start_params def stop(self): @@ -773,111 +771,158 @@ def _get_params(self): exports["YKUSHPowerPort"] = YKUSHPowerPortExport -class ExporterSession(ApplicationSession): - def onConnect(self): +class Exporter: + def __init__(self, config) -> None: """Set up internal datastructures on successful connection: - Setup loop, name, authid and address - Join the coordinator as an exporter""" - self.loop = self.config.extra["loop"] - self.name = self.config.extra["name"] - self.hostname = self.config.extra["hostname"] - self.isolated = self.config.extra["isolated"] - self.address = self._transport.transport.get_extra_info("sockname")[0] - self.checkpoint = time.monotonic() + self.config = config + self.loop = asyncio.get_event_loop() + self.name = config["name"] + self.hostname = config["hostname"] + self.isolated = config["isolated"] + + self.channel = grpc.aio.insecure_channel(config["coordinator"]) + self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) + self.out_queue = asyncio.Queue() + self.pump_task = None + self.poll_task = None self.groups = {} - enable_tcp_nodelay(self) - self.join( - self.config.realm, - 
authmethods=["anonymous", "ticket"], - authid=f"exporter/{self.name}", - authextra={"authid": f"exporter/{self.name}"}, - ) - - def onChallenge(self, challenge): - """Function invoked on received challege, returns just a dummy ticket - at the moment, authentication is not supported yet""" - logging.warning("Ticket authentication is deprecated. Please update your coordinator.") - return "dummy-ticket" - - async def onJoin(self, details): - """On successful join: - - export available resources - - bail out if we are unsuccessful - """ - print(details) - - prefix = f"org.labgrid.exporter.{self.name}" - try: - await self.register(self.acquire, f"{prefix}.acquire") - await self.register(self.release, f"{prefix}.release") - await self.register(self.version, f"{prefix}.version") - - config_template_env = { - "env": os.environ, - "isolated": self.isolated, - "hostname": self.hostname, - "name": self.name, - } - resource_config = ResourceConfig(self.config.extra["resources"], config_template_env) - for group_name, group in resource_config.data.items(): - group_name = str(group_name) - for resource_name, params in group.items(): - resource_name = str(resource_name) - if resource_name == "location": - continue - if params is None: - continue - cls = params.pop("cls", resource_name) - - # this may call back to acquire the resource immediately - await self.add_resource(group_name, resource_name, cls, params) - self.checkpoint = time.monotonic() - - except Exception: # pylint: disable=broad-except - traceback.print_exc(file=sys.stderr) - self.loop.stop() - return + async def run(self) -> None: + self.pump_task = self.loop.create_task(self.message_pump()) + self.send_started() + config_template_env = { + "env": os.environ, + "isolated": self.isolated, + "hostname": self.hostname, + "name": self.name, + } + resource_config = ResourceConfig(self.config["resources"], config_template_env) + for group_name, group in resource_config.data.items(): + group_name = str(group_name) + for resource_name, params in group.items(): + resource_name = str(resource_name) + if resource_name == "location": + continue + if params is None: + continue + cls = params.pop("cls", resource_name) + + # this may call back to acquire the resource immediately + await self.add_resource(group_name, resource_name, cls, params) + + # flush queued messages + while not self.pump_task.done(): + try: + await asyncio.wait_for(self.out_queue.join(), timeout=1) + break + except asyncio.TimeoutError: + if self.pump_task.done(): + await self.pump_task + logging.debug("pump task exited, shutting down exporter") + return + + logging.info("creating poll task") self.poll_task = self.loop.create_task(self.poll()) - async def onLeave(self, details): - """Cleanup after leaving the coordinator connection""" - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - super().onLeave(details) - - async def onDisconnect(self): - print("connection lost", file=sys.stderr) - global reexec - reexec = True - if self.poll_task: - self.poll_task.cancel() - await asyncio.wait([self.poll_task]) - await asyncio.sleep(0.5) # give others a chance to clean up - self.loop.stop() + (done, pending) = await asyncio.wait((self.pump_task, self.poll_task), return_when=asyncio.FIRST_COMPLETED) + logging.debug("task(s) %s exited, shutting down exporter", done) + for task in pending: + task.cancel() + + await self.pump_task + await self.poll_task + + def send_started(self): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version =
labgrid_version() + msg.startup.name = self.name + self.out_queue.put_nowait(msg) + + async def message_pump(self): + got_message = False + try: + async for out_message in self.stub.ExporterStream(queue_as_aiter(self.out_queue)): + got_message = True + logging.debug("received message %s", out_message) + kind = out_message.WhichOneof("kind") + if kind == "hello": + logging.info("connected to coordinator version %s", out_message.hello.version) + elif kind == "set_acquired_request": + logging.debug("acquire request") + success = False + reason = None + try: + if out_message.set_acquired_request.place_name: + await self.acquire( + out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + out_message.set_acquired_request.place_name, + ) + else: + await self.release( + out_message.set_acquired_request.group_name, + out_message.set_acquired_request.resource_name, + ) + success = True + except BrokenResourceError as e: + reason = e.args[0] + finally: + in_message = labgrid_coordinator_pb2.ExporterInMessage() + in_message.response.success = success + if reason: + in_message.response.reason = reason + logging.debug("queuing %s", in_message) + self.out_queue.put_nowait(in_message) + logging.debug("queued %s", in_message) + else: + logging.debug("unknown request: %s", kind) + except grpc.aio.AioRpcError as e: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + if e.code() == grpc.StatusCode.UNAVAILABLE: + if got_message: + logging.error("coordinator became unavailable: %s", e.details()) + else: + logging.error("coordinator is unavailable: %s", e.details()) + + global reexec + reexec = True + else: + logging.exception("unexpected grpc error in coordinator message pump task") + except Exception: + self.out_queue.put_nowait(None) # let the sender side exit gracefully + logging.exception("error in coordinator message pump") + + # TODO: only send the command response once the other resource updates have + # left the queue, perhaps via queue join()/task_done(); ideally this would + # be driven by a command from the coordinator async def acquire(self, group_name, resource_name, place_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, {}).get(resource_name) + if resource is None: + logging.error("acquire request for unknown resource %s/%s by %s", group_name, resource_name, place_name) + return + try: resource.acquire(place_name) finally: await self.update_resource(group_name, resource_name) async def release(self, group_name, resource_name): - resource = self.groups[group_name][resource_name] + resource = self.groups.get(group_name, {}).get(resource_name) + if resource is None: + logging.error("release request for unknown resource %s/%s", group_name, resource_name) + return + try: resource.release() finally: await self.update_resource(group_name, resource_name) - async def version(self): - self.checkpoint = time.monotonic() - return __version__ - async def _poll_step(self): for group_name, group in self.groups.items(): for resource_name, resource in group.items(): @@ -904,10 +949,6 @@ async def poll(self): break except Exception: # pylint: disable=broad-except traceback.print_exc(file=sys.stderr) - age = time.monotonic() - self.checkpoint - if age > 300: - print(f"missed checkpoint, exiting (last was {age} seconds ago)", file=sys.stderr) - self.disconnect() async def add_resource(self, group_name, resource_name, cls, params): """Add a resource to the exporter and update status on the coordinator""" @@ -934,10 +975,28 @@ async def
add_resource(self, group_name, resource_name, cls, params): async def update_resource(self, group_name, resource_name): """Update status on the coordinator""" resource = self.groups[group_name][resource_name] - data = resource.asdict() - print(data) - await self.call("org.labgrid.coordinator.set_resource", group_name, resource_name, data) + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.resource.CopyFrom(resource.as_pb2()) + msg.resource.path.group_name = group_name + msg.resource.path.resource_name = resource_name + self.out_queue.put_nowait(msg) + logging.info("queued update for resource %s/%s", group_name, resource_name) + + +async def amain(config) -> None: + exporter = Exporter(config) + await exporter.run() def main(): parser = argparse.ArgumentParser() parser.add_argument( - "-x", - "--crossbar", - metavar="URL", + "-c", + "--coordinator", + metavar="HOST:PORT", type=str, - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="crossbar websocket URL", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), + help="coordinator host and port", ) parser.add_argument( "-n", @@ -979,29 +1028,22 @@ def main(): args = parser.parse_args() - level = "debug" if args.debug else "info" + logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) - extra = { + config = { "name": args.name or gethostname(), "hostname": args.hostname or (getfqdn() if args.fqdn else gethostname()), "resources": args.resources, + "coordinator": args.coordinator, "isolated": args.isolated, } - crossbar_url = args.crossbar - crossbar_realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + print(f"exporter name: {config['name']}") + print(f"exporter hostname: {config['hostname']}") + print(f"resource config file: {config['resources']}") - print(f"crossbar URL: {crossbar_url}") - print(f"crossbar realm: {crossbar_realm}") - print(f"exporter name: {extra['name']}") - print(f"exporter hostname: {extra['hostname']}") - print(f"resource config file: {extra['resources']}") + asyncio.run(amain(config), debug=bool(args.debug)) - extra["loop"] = loop = asyncio.get_event_loop() - if args.debug: - loop.set_debug(True) - runner = ApplicationRunner(url=crossbar_url, realm=crossbar_realm, extra=extra) - runner.run(ExporterSession, log_level=level) if reexec: exit(100) diff --git a/labgrid/remote/generated/generate-proto.sh b/labgrid/remote/generated/generate-proto.sh new file mode 100755 index 000000000..d160b0c74 --- /dev/null +++ b/labgrid/remote/generated/generate-proto.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -ex +python3 -m grpc_tools.protoc -I../proto --python_out=. --pyi_out=. --grpc_python_out=. ../proto/labgrid-coordinator.proto +sed -i "s/import labgrid/from . import labgrid/g" labgrid_coordinator_pb2_grpc.py diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.py b/labgrid/remote/generated/labgrid_coordinator_pb2.py new file mode 100644 index 000000000..37652bff7 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.py @@ -0,0 +1,158 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT!
+# source: labgrid-coordinator.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x19labgrid-coordinator.proto\x12\x07labgrid\"\x8a\x01\n\x0f\x43lientInMessage\x12\x1d\n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12\'\n\tsubscribe\x18\x03 \x01(\x0b\x32\x12.labgrid.SubscribeH\x00\x42\x06\n\x04kind\"\x12\n\x04Sync\x12\n\n\x02id\x18\x01 \x01(\x04\",\n\x0bStartupDone\x12\x0f\n\x07version\x18\x01 \x01(\t\x12\x0c\n\x04name\x18\x02 \x01(\t\"r\n\tSubscribe\x12\x1b\n\x0eis_unsubscribe\x18\x01 \x01(\x08H\x01\x88\x01\x01\x12\x14\n\nall_places\x18\x02 \x01(\x08H\x00\x12\x17\n\rall_resources\x18\x03 \x01(\x08H\x00\x42\x06\n\x04kindB\x11\n\x0f_is_unsubscribe\"g\n\x10\x43lientOutMessage\x12 \n\x04sync\x18\x01 \x01(\x0b\x32\r.labgrid.SyncH\x00\x88\x01\x01\x12(\n\x07updates\x18\x02 \x03(\x0b\x32\x17.labgrid.UpdateResponseB\x07\n\x05_sync\"\xa5\x01\n\x0eUpdateResponse\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12.\n\x0c\x64\x65l_resource\x18\x02 \x01(\x0b\x32\x16.labgrid.Resource.PathH\x00\x12\x1f\n\x05place\x18\x03 \x01(\x0b\x32\x0e.labgrid.PlaceH\x00\x12\x13\n\tdel_place\x18\x04 \x01(\tH\x00\x42\x06\n\x04kind\"\x9a\x01\n\x11\x45xporterInMessage\x12%\n\x08resource\x18\x01 \x01(\x0b\x32\x11.labgrid.ResourceH\x00\x12\'\n\x07startup\x18\x02 \x01(\x0b\x32\x14.labgrid.StartupDoneH\x00\x12-\n\x08response\x18\x03 \x01(\x0b\x32\x19.labgrid.ExporterResponseH\x00\x42\x06\n\x04kind\"\x9e\x03\n\x08Resource\x12$\n\x04path\x18\x01 \x01(\x0b\x32\x16.labgrid.Resource.Path\x12\x0b\n\x03\x63ls\x18\x02 \x01(\t\x12-\n\x06params\x18\x03 \x03(\x0b\x32\x1d.labgrid.Resource.ParamsEntry\x12+\n\x05\x65xtra\x18\x04 \x03(\x0b\x32\x1c.labgrid.Resource.ExtraEntry\x12\x10\n\x08\x61\x63quired\x18\x05 \x01(\t\x12\r\n\x05\x61vail\x18\x06 \x01(\x08\x1a_\n\x04Path\x12\x1a\n\rexporter_name\x18\x01 \x01(\tH\x00\x88\x01\x01\x12\x12\n\ngroup_name\x18\x02 \x01(\t\x12\x15\n\rresource_name\x18\x03 \x01(\tB\x10\n\x0e_exporter_name\x1a@\n\x0bParamsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\x1a?\n\nExtraEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12 \n\x05value\x18\x02 \x01(\x0b\x32\x11.labgrid.MapValue:\x02\x38\x01\"\x82\x01\n\x08MapValue\x12\x14\n\nbool_value\x18\x01 \x01(\x08H\x00\x12\x13\n\tint_value\x18\x02 \x01(\x03H\x00\x12\x14\n\nuint_value\x18\x03 \x01(\x04H\x00\x12\x15\n\x0b\x66loat_value\x18\x04 \x01(\x01H\x00\x12\x16\n\x0cstring_value\x18\x05 \x01(\tH\x00\x42\x06\n\x04kind\"C\n\x10\x45xporterResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x13\n\x06reason\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_reason\"\x18\n\x05Hello\x12\x0f\n\x07version\x18\x01 \x01(\t\"\x82\x01\n\x12\x45xporterOutMessage\x12\x1f\n\x05hello\x18\x01 \x01(\x0b\x32\x0e.labgrid.HelloH\x00\x12\x43\n\x14set_acquired_request\x18\x02 \x01(\x0b\x32#.labgrid.ExporterSetAcquiredRequestH\x00\x42\x06\n\x04kind\"o\n\x1a\x45xporterSetAcquiredRequest\x12\x12\n\ngroup_name\x18\x01 \x01(\t\x12\x15\n\rresource_name\x18\x02 \x01(\t\x12\x17\n\nplace_name\x18\x03 
\x01(\tH\x00\x88\x01\x01\x42\r\n\x0b_place_name\"\x1f\n\x0f\x41\x64\x64PlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x12\n\x10\x41\x64\x64PlaceResponse\"\"\n\x12\x44\x65letePlaceRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x15\n\x13\x44\x65letePlaceResponse\"\x12\n\x10GetPlacesRequest\"3\n\x11GetPlacesResponse\x12\x1e\n\x06places\x18\x01 \x03(\x0b\x32\x0e.labgrid.Place\"\xd2\x02\n\x05Place\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0f\n\x07\x61liases\x18\x02 \x03(\t\x12\x0f\n\x07\x63omment\x18\x03 \x01(\t\x12&\n\x04tags\x18\x04 \x03(\x0b\x32\x18.labgrid.Place.TagsEntry\x12\'\n\x07matches\x18\x05 \x03(\x0b\x32\x16.labgrid.ResourceMatch\x12\x15\n\x08\x61\x63quired\x18\x06 \x01(\tH\x00\x88\x01\x01\x12\x1a\n\x12\x61\x63quired_resources\x18\x07 \x03(\t\x12\x0f\n\x07\x61llowed\x18\x08 \x03(\t\x12\x0f\n\x07\x63reated\x18\t \x01(\x01\x12\x0f\n\x07\x63hanged\x18\n \x01(\x01\x12\x18\n\x0breservation\x18\x0b \x01(\tH\x01\x88\x01\x01\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\x0b\n\t_acquiredB\x0e\n\x0c_reservation\"y\n\rResourceMatch\x12\x10\n\x08\x65xporter\x18\x01 \x01(\t\x12\r\n\x05group\x18\x02 \x01(\t\x12\x0b\n\x03\x63ls\x18\x03 \x01(\t\x12\x11\n\x04name\x18\x04 \x01(\tH\x00\x88\x01\x01\x12\x13\n\x06rename\x18\x05 \x01(\tH\x01\x88\x01\x01\x42\x07\n\x05_nameB\t\n\x07_rename\"8\n\x14\x41\x64\x64PlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x17\n\x15\x41\x64\x64PlaceAliasResponse\";\n\x17\x44\x65letePlaceAliasRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\r\n\x05\x61lias\x18\x02 \x01(\t\"\x1a\n\x18\x44\x65letePlaceAliasResponse\"\x8b\x01\n\x13SetPlaceTagsRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x34\n\x04tags\x18\x02 \x03(\x0b\x32&.labgrid.SetPlaceTagsRequest.TagsEntry\x1a+\n\tTagsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\x16\n\x14SetPlaceTagsResponse\"<\n\x16SetPlaceCommentRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07\x63omment\x18\x02 \x01(\t\"\x19\n\x17SetPlaceCommentResponse\"Z\n\x14\x41\x64\x64PlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x17\n\x15\x41\x64\x64PlaceMatchResponse\"]\n\x17\x44\x65letePlaceMatchRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0f\n\x07pattern\x18\x02 \x01(\t\x12\x13\n\x06rename\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\t\n\x07_rename\"\x1a\n\x18\x44\x65letePlaceMatchResponse\"(\n\x13\x41\x63quirePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\"\x16\n\x14\x41\x63quirePlaceResponse\"L\n\x13ReleasePlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x15\n\x08\x66romuser\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x0b\n\t_fromuser\"\x16\n\x14ReleasePlaceResponse\"4\n\x11\x41llowPlaceRequest\x12\x11\n\tplacename\x18\x01 \x01(\t\x12\x0c\n\x04user\x18\x02 \x01(\t\"\x14\n\x12\x41llowPlaceResponse\"\xb6\x01\n\x18\x43reateReservationRequest\x12?\n\x07\x66ilters\x18\x01 \x03(\x0b\x32..labgrid.CreateReservationRequest.FiltersEntry\x12\x0c\n\x04prio\x18\x02 \x01(\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\"F\n\x19\x43reateReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"\xcd\x03\n\x0bReservation\x12\r\n\x05owner\x18\x01 \x01(\t\x12\r\n\x05token\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\x05\x12\x0c\n\x04prio\x18\x04 \x01(\x01\x12\x32\n\x07\x66ilters\x18\x05 
\x03(\x0b\x32!.labgrid.Reservation.FiltersEntry\x12:\n\x0b\x61llocations\x18\x06 \x03(\x0b\x32%.labgrid.Reservation.AllocationsEntry\x12\x0f\n\x07\x63reated\x18\x07 \x01(\x01\x12\x0f\n\x07timeout\x18\x08 \x01(\x01\x1ap\n\x06\x46ilter\x12\x37\n\x06\x66ilter\x18\x01 \x03(\x0b\x32\'.labgrid.Reservation.Filter.FilterEntry\x1a-\n\x0b\x46ilterEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1aK\n\x0c\x46iltersEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12*\n\x05value\x18\x02 \x01(\x0b\x32\x1b.labgrid.Reservation.Filter:\x02\x38\x01\x1a\x32\n\x10\x41llocationsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\")\n\x18\x43\x61ncelReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"\x1b\n\x19\x43\x61ncelReservationResponse\"\'\n\x16PollReservationRequest\x12\r\n\x05token\x18\x01 \x01(\t\"D\n\x17PollReservationResponse\x12)\n\x0breservation\x18\x01 \x01(\x0b\x32\x14.labgrid.Reservation\"E\n\x17GetReservationsResponse\x12*\n\x0creservations\x18\x01 \x03(\x0b\x32\x14.labgrid.Reservation\"\x18\n\x16GetReservationsRequest2\xd2\x0b\n\x0b\x43oordinator\x12I\n\x0c\x43lientStream\x12\x18.labgrid.ClientInMessage\x1a\x19.labgrid.ClientOutMessage\"\x00(\x01\x30\x01\x12O\n\x0e\x45xporterStream\x12\x1a.labgrid.ExporterInMessage\x1a\x1b.labgrid.ExporterOutMessage\"\x00(\x01\x30\x01\x12\x41\n\x08\x41\x64\x64Place\x12\x18.labgrid.AddPlaceRequest\x1a\x19.labgrid.AddPlaceResponse\"\x00\x12J\n\x0b\x44\x65letePlace\x12\x1b.labgrid.DeletePlaceRequest\x1a\x1c.labgrid.DeletePlaceResponse\"\x00\x12\x44\n\tGetPlaces\x12\x19.labgrid.GetPlacesRequest\x1a\x1a.labgrid.GetPlacesResponse\"\x00\x12P\n\rAddPlaceAlias\x12\x1d.labgrid.AddPlaceAliasRequest\x1a\x1e.labgrid.AddPlaceAliasResponse\"\x00\x12Y\n\x10\x44\x65letePlaceAlias\x12 .labgrid.DeletePlaceAliasRequest\x1a!.labgrid.DeletePlaceAliasResponse\"\x00\x12M\n\x0cSetPlaceTags\x12\x1c.labgrid.SetPlaceTagsRequest\x1a\x1d.labgrid.SetPlaceTagsResponse\"\x00\x12V\n\x0fSetPlaceComment\x12\x1f.labgrid.SetPlaceCommentRequest\x1a .labgrid.SetPlaceCommentResponse\"\x00\x12P\n\rAddPlaceMatch\x12\x1d.labgrid.AddPlaceMatchRequest\x1a\x1e.labgrid.AddPlaceMatchResponse\"\x00\x12Y\n\x10\x44\x65letePlaceMatch\x12 .labgrid.DeletePlaceMatchRequest\x1a!.labgrid.DeletePlaceMatchResponse\"\x00\x12M\n\x0c\x41\x63quirePlace\x12\x1c.labgrid.AcquirePlaceRequest\x1a\x1d.labgrid.AcquirePlaceResponse\"\x00\x12M\n\x0cReleasePlace\x12\x1c.labgrid.ReleasePlaceRequest\x1a\x1d.labgrid.ReleasePlaceResponse\"\x00\x12G\n\nAllowPlace\x12\x1a.labgrid.AllowPlaceRequest\x1a\x1b.labgrid.AllowPlaceResponse\"\x00\x12\\\n\x11\x43reateReservation\x12!.labgrid.CreateReservationRequest\x1a\".labgrid.CreateReservationResponse\"\x00\x12\\\n\x11\x43\x61ncelReservation\x12!.labgrid.CancelReservationRequest\x1a\".labgrid.CancelReservationResponse\"\x00\x12V\n\x0fPollReservation\x12\x1f.labgrid.PollReservationRequest\x1a .labgrid.PollReservationResponse\"\x00\x12V\n\x0fGetReservations\x12\x1f.labgrid.GetReservationsRequest\x1a .labgrid.GetReservationsResponse\"\x00\x62\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'labgrid_coordinator_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_RESOURCE_PARAMSENTRY']._options = None + _globals['_RESOURCE_PARAMSENTRY']._serialized_options = b'8\001' + _globals['_RESOURCE_EXTRAENTRY']._options = None + _globals['_RESOURCE_EXTRAENTRY']._serialized_options = b'8\001' + 
_globals['_PLACE_TAGSENTRY']._options = None + _globals['_PLACE_TAGSENTRY']._serialized_options = b'8\001' + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._options = None + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_options = b'8\001' + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._options = None + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTER_FILTERENTRY']._options = None + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_FILTERSENTRY']._options = None + _globals['_RESERVATION_FILTERSENTRY']._serialized_options = b'8\001' + _globals['_RESERVATION_ALLOCATIONSENTRY']._options = None + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_options = b'8\001' + _globals['_CLIENTINMESSAGE']._serialized_start=39 + _globals['_CLIENTINMESSAGE']._serialized_end=177 + _globals['_SYNC']._serialized_start=179 + _globals['_SYNC']._serialized_end=197 + _globals['_STARTUPDONE']._serialized_start=199 + _globals['_STARTUPDONE']._serialized_end=243 + _globals['_SUBSCRIBE']._serialized_start=245 + _globals['_SUBSCRIBE']._serialized_end=359 + _globals['_CLIENTOUTMESSAGE']._serialized_start=361 + _globals['_CLIENTOUTMESSAGE']._serialized_end=464 + _globals['_UPDATERESPONSE']._serialized_start=467 + _globals['_UPDATERESPONSE']._serialized_end=632 + _globals['_EXPORTERINMESSAGE']._serialized_start=635 + _globals['_EXPORTERINMESSAGE']._serialized_end=789 + _globals['_RESOURCE']._serialized_start=792 + _globals['_RESOURCE']._serialized_end=1206 + _globals['_RESOURCE_PATH']._serialized_start=980 + _globals['_RESOURCE_PATH']._serialized_end=1075 + _globals['_RESOURCE_PARAMSENTRY']._serialized_start=1077 + _globals['_RESOURCE_PARAMSENTRY']._serialized_end=1141 + _globals['_RESOURCE_EXTRAENTRY']._serialized_start=1143 + _globals['_RESOURCE_EXTRAENTRY']._serialized_end=1206 + _globals['_MAPVALUE']._serialized_start=1209 + _globals['_MAPVALUE']._serialized_end=1339 + _globals['_EXPORTERRESPONSE']._serialized_start=1341 + _globals['_EXPORTERRESPONSE']._serialized_end=1408 + _globals['_HELLO']._serialized_start=1410 + _globals['_HELLO']._serialized_end=1434 + _globals['_EXPORTEROUTMESSAGE']._serialized_start=1437 + _globals['_EXPORTEROUTMESSAGE']._serialized_end=1567 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_start=1569 + _globals['_EXPORTERSETACQUIREDREQUEST']._serialized_end=1680 + _globals['_ADDPLACEREQUEST']._serialized_start=1682 + _globals['_ADDPLACEREQUEST']._serialized_end=1713 + _globals['_ADDPLACERESPONSE']._serialized_start=1715 + _globals['_ADDPLACERESPONSE']._serialized_end=1733 + _globals['_DELETEPLACEREQUEST']._serialized_start=1735 + _globals['_DELETEPLACEREQUEST']._serialized_end=1769 + _globals['_DELETEPLACERESPONSE']._serialized_start=1771 + _globals['_DELETEPLACERESPONSE']._serialized_end=1792 + _globals['_GETPLACESREQUEST']._serialized_start=1794 + _globals['_GETPLACESREQUEST']._serialized_end=1812 + _globals['_GETPLACESRESPONSE']._serialized_start=1814 + _globals['_GETPLACESRESPONSE']._serialized_end=1865 + _globals['_PLACE']._serialized_start=1868 + _globals['_PLACE']._serialized_end=2206 + _globals['_PLACE_TAGSENTRY']._serialized_start=2134 + _globals['_PLACE_TAGSENTRY']._serialized_end=2177 + _globals['_RESOURCEMATCH']._serialized_start=2208 + _globals['_RESOURCEMATCH']._serialized_end=2329 + _globals['_ADDPLACEALIASREQUEST']._serialized_start=2331 + _globals['_ADDPLACEALIASREQUEST']._serialized_end=2387 + 
_globals['_ADDPLACEALIASRESPONSE']._serialized_start=2389 + _globals['_ADDPLACEALIASRESPONSE']._serialized_end=2412 + _globals['_DELETEPLACEALIASREQUEST']._serialized_start=2414 + _globals['_DELETEPLACEALIASREQUEST']._serialized_end=2473 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_start=2475 + _globals['_DELETEPLACEALIASRESPONSE']._serialized_end=2501 + _globals['_SETPLACETAGSREQUEST']._serialized_start=2504 + _globals['_SETPLACETAGSREQUEST']._serialized_end=2643 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_start=2134 + _globals['_SETPLACETAGSREQUEST_TAGSENTRY']._serialized_end=2177 + _globals['_SETPLACETAGSRESPONSE']._serialized_start=2645 + _globals['_SETPLACETAGSRESPONSE']._serialized_end=2667 + _globals['_SETPLACECOMMENTREQUEST']._serialized_start=2669 + _globals['_SETPLACECOMMENTREQUEST']._serialized_end=2729 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_start=2731 + _globals['_SETPLACECOMMENTRESPONSE']._serialized_end=2756 + _globals['_ADDPLACEMATCHREQUEST']._serialized_start=2758 + _globals['_ADDPLACEMATCHREQUEST']._serialized_end=2848 + _globals['_ADDPLACEMATCHRESPONSE']._serialized_start=2850 + _globals['_ADDPLACEMATCHRESPONSE']._serialized_end=2873 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_start=2875 + _globals['_DELETEPLACEMATCHREQUEST']._serialized_end=2968 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_start=2970 + _globals['_DELETEPLACEMATCHRESPONSE']._serialized_end=2996 + _globals['_ACQUIREPLACEREQUEST']._serialized_start=2998 + _globals['_ACQUIREPLACEREQUEST']._serialized_end=3038 + _globals['_ACQUIREPLACERESPONSE']._serialized_start=3040 + _globals['_ACQUIREPLACERESPONSE']._serialized_end=3062 + _globals['_RELEASEPLACEREQUEST']._serialized_start=3064 + _globals['_RELEASEPLACEREQUEST']._serialized_end=3140 + _globals['_RELEASEPLACERESPONSE']._serialized_start=3142 + _globals['_RELEASEPLACERESPONSE']._serialized_end=3164 + _globals['_ALLOWPLACEREQUEST']._serialized_start=3166 + _globals['_ALLOWPLACEREQUEST']._serialized_end=3218 + _globals['_ALLOWPLACERESPONSE']._serialized_start=3220 + _globals['_ALLOWPLACERESPONSE']._serialized_end=3240 + _globals['_CREATERESERVATIONREQUEST']._serialized_start=3243 + _globals['_CREATERESERVATIONREQUEST']._serialized_end=3425 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_start=3350 + _globals['_CREATERESERVATIONREQUEST_FILTERSENTRY']._serialized_end=3425 + _globals['_CREATERESERVATIONRESPONSE']._serialized_start=3427 + _globals['_CREATERESERVATIONRESPONSE']._serialized_end=3497 + _globals['_RESERVATION']._serialized_start=3500 + _globals['_RESERVATION']._serialized_end=3961 + _globals['_RESERVATION_FILTER']._serialized_start=3720 + _globals['_RESERVATION_FILTER']._serialized_end=3832 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_start=3787 + _globals['_RESERVATION_FILTER_FILTERENTRY']._serialized_end=3832 + _globals['_RESERVATION_FILTERSENTRY']._serialized_start=3350 + _globals['_RESERVATION_FILTERSENTRY']._serialized_end=3425 + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_start=3911 + _globals['_RESERVATION_ALLOCATIONSENTRY']._serialized_end=3961 + _globals['_CANCELRESERVATIONREQUEST']._serialized_start=3963 + _globals['_CANCELRESERVATIONREQUEST']._serialized_end=4004 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_start=4006 + _globals['_CANCELRESERVATIONRESPONSE']._serialized_end=4033 + _globals['_POLLRESERVATIONREQUEST']._serialized_start=4035 + _globals['_POLLRESERVATIONREQUEST']._serialized_end=4074 + 
_globals['_POLLRESERVATIONRESPONSE']._serialized_start=4076 + _globals['_POLLRESERVATIONRESPONSE']._serialized_end=4144 + _globals['_GETRESERVATIONSRESPONSE']._serialized_start=4146 + _globals['_GETRESERVATIONSRESPONSE']._serialized_end=4215 + _globals['_GETRESERVATIONSREQUEST']._serialized_start=4217 + _globals['_GETRESERVATIONSREQUEST']._serialized_end=4241 + _globals['_COORDINATOR']._serialized_start=4244 + _globals['_COORDINATOR']._serialized_end=5734 +# @@protoc_insertion_point(module_scope) diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2.pyi b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi new file mode 100644 index 000000000..366f4e438 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2.pyi @@ -0,0 +1,448 @@ +from google.protobuf.internal import containers as _containers +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from typing import ClassVar as _ClassVar, Iterable as _Iterable, Mapping as _Mapping, Optional as _Optional, Union as _Union + +DESCRIPTOR: _descriptor.FileDescriptor + +class ClientInMessage(_message.Message): + __slots__ = ("sync", "startup", "subscribe") + SYNC_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + SUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + sync: Sync + startup: StartupDone + subscribe: Subscribe + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., subscribe: _Optional[_Union[Subscribe, _Mapping]] = ...) -> None: ... + +class Sync(_message.Message): + __slots__ = ("id",) + ID_FIELD_NUMBER: _ClassVar[int] + id: int + def __init__(self, id: _Optional[int] = ...) -> None: ... + +class StartupDone(_message.Message): + __slots__ = ("version", "name") + VERSION_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + version: str + name: str + def __init__(self, version: _Optional[str] = ..., name: _Optional[str] = ...) -> None: ... + +class Subscribe(_message.Message): + __slots__ = ("is_unsubscribe", "all_places", "all_resources") + IS_UNSUBSCRIBE_FIELD_NUMBER: _ClassVar[int] + ALL_PLACES_FIELD_NUMBER: _ClassVar[int] + ALL_RESOURCES_FIELD_NUMBER: _ClassVar[int] + is_unsubscribe: bool + all_places: bool + all_resources: bool + def __init__(self, is_unsubscribe: bool = ..., all_places: bool = ..., all_resources: bool = ...) -> None: ... + +class ClientOutMessage(_message.Message): + __slots__ = ("sync", "updates") + SYNC_FIELD_NUMBER: _ClassVar[int] + UPDATES_FIELD_NUMBER: _ClassVar[int] + sync: Sync + updates: _containers.RepeatedCompositeFieldContainer[UpdateResponse] + def __init__(self, sync: _Optional[_Union[Sync, _Mapping]] = ..., updates: _Optional[_Iterable[_Union[UpdateResponse, _Mapping]]] = ...) -> None: ... + +class UpdateResponse(_message.Message): + __slots__ = ("resource", "del_resource", "place", "del_place") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + DEL_RESOURCE_FIELD_NUMBER: _ClassVar[int] + PLACE_FIELD_NUMBER: _ClassVar[int] + DEL_PLACE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + del_resource: Resource.Path + place: Place + del_place: str + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., del_resource: _Optional[_Union[Resource.Path, _Mapping]] = ..., place: _Optional[_Union[Place, _Mapping]] = ..., del_place: _Optional[str] = ...) -> None: ... 
+ +class ExporterInMessage(_message.Message): + __slots__ = ("resource", "startup", "response") + RESOURCE_FIELD_NUMBER: _ClassVar[int] + STARTUP_FIELD_NUMBER: _ClassVar[int] + RESPONSE_FIELD_NUMBER: _ClassVar[int] + resource: Resource + startup: StartupDone + response: ExporterResponse + def __init__(self, resource: _Optional[_Union[Resource, _Mapping]] = ..., startup: _Optional[_Union[StartupDone, _Mapping]] = ..., response: _Optional[_Union[ExporterResponse, _Mapping]] = ...) -> None: ... + +class Resource(_message.Message): + __slots__ = ("path", "cls", "params", "extra", "acquired", "avail") + class Path(_message.Message): + __slots__ = ("exporter_name", "group_name", "resource_name") + EXPORTER_NAME_FIELD_NUMBER: _ClassVar[int] + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + exporter_name: str + group_name: str + resource_name: str + def __init__(self, exporter_name: _Optional[str] = ..., group_name: _Optional[str] = ..., resource_name: _Optional[str] = ...) -> None: ... + class ParamsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... + class ExtraEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: MapValue + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[MapValue, _Mapping]] = ...) -> None: ... + PATH_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + PARAMS_FIELD_NUMBER: _ClassVar[int] + EXTRA_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + AVAIL_FIELD_NUMBER: _ClassVar[int] + path: Resource.Path + cls: str + params: _containers.MessageMap[str, MapValue] + extra: _containers.MessageMap[str, MapValue] + acquired: str + avail: bool + def __init__(self, path: _Optional[_Union[Resource.Path, _Mapping]] = ..., cls: _Optional[str] = ..., params: _Optional[_Mapping[str, MapValue]] = ..., extra: _Optional[_Mapping[str, MapValue]] = ..., acquired: _Optional[str] = ..., avail: bool = ...) -> None: ... + +class MapValue(_message.Message): + __slots__ = ("bool_value", "int_value", "uint_value", "float_value", "string_value") + BOOL_VALUE_FIELD_NUMBER: _ClassVar[int] + INT_VALUE_FIELD_NUMBER: _ClassVar[int] + UINT_VALUE_FIELD_NUMBER: _ClassVar[int] + FLOAT_VALUE_FIELD_NUMBER: _ClassVar[int] + STRING_VALUE_FIELD_NUMBER: _ClassVar[int] + bool_value: bool + int_value: int + uint_value: int + float_value: float + string_value: str + def __init__(self, bool_value: bool = ..., int_value: _Optional[int] = ..., uint_value: _Optional[int] = ..., float_value: _Optional[float] = ..., string_value: _Optional[str] = ...) -> None: ... + +class ExporterResponse(_message.Message): + __slots__ = ("success", "reason") + SUCCESS_FIELD_NUMBER: _ClassVar[int] + REASON_FIELD_NUMBER: _ClassVar[int] + success: bool + reason: str + def __init__(self, success: bool = ..., reason: _Optional[str] = ...) -> None: ... + +class Hello(_message.Message): + __slots__ = ("version",) + VERSION_FIELD_NUMBER: _ClassVar[int] + version: str + def __init__(self, version: _Optional[str] = ...) -> None: ... 
+ +class ExporterOutMessage(_message.Message): + __slots__ = ("hello", "set_acquired_request") + HELLO_FIELD_NUMBER: _ClassVar[int] + SET_ACQUIRED_REQUEST_FIELD_NUMBER: _ClassVar[int] + hello: Hello + set_acquired_request: ExporterSetAcquiredRequest + def __init__(self, hello: _Optional[_Union[Hello, _Mapping]] = ..., set_acquired_request: _Optional[_Union[ExporterSetAcquiredRequest, _Mapping]] = ...) -> None: ... + +class ExporterSetAcquiredRequest(_message.Message): + __slots__ = ("group_name", "resource_name", "place_name") + GROUP_NAME_FIELD_NUMBER: _ClassVar[int] + RESOURCE_NAME_FIELD_NUMBER: _ClassVar[int] + PLACE_NAME_FIELD_NUMBER: _ClassVar[int] + group_name: str + resource_name: str + place_name: str + def __init__(self, group_name: _Optional[str] = ..., resource_name: _Optional[str] = ..., place_name: _Optional[str] = ...) -> None: ... + +class AddPlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class AddPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceRequest(_message.Message): + __slots__ = ("name",) + NAME_FIELD_NUMBER: _ClassVar[int] + name: str + def __init__(self, name: _Optional[str] = ...) -> None: ... + +class DeletePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class GetPlacesResponse(_message.Message): + __slots__ = ("places",) + PLACES_FIELD_NUMBER: _ClassVar[int] + places: _containers.RepeatedCompositeFieldContainer[Place] + def __init__(self, places: _Optional[_Iterable[_Union[Place, _Mapping]]] = ...) -> None: ... + +class Place(_message.Message): + __slots__ = ("name", "aliases", "comment", "tags", "matches", "acquired", "acquired_resources", "allowed", "created", "changed", "reservation") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + NAME_FIELD_NUMBER: _ClassVar[int] + ALIASES_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + MATCHES_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_FIELD_NUMBER: _ClassVar[int] + ACQUIRED_RESOURCES_FIELD_NUMBER: _ClassVar[int] + ALLOWED_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + CHANGED_FIELD_NUMBER: _ClassVar[int] + RESERVATION_FIELD_NUMBER: _ClassVar[int] + name: str + aliases: _containers.RepeatedScalarFieldContainer[str] + comment: str + tags: _containers.ScalarMap[str, str] + matches: _containers.RepeatedCompositeFieldContainer[ResourceMatch] + acquired: str + acquired_resources: _containers.RepeatedScalarFieldContainer[str] + allowed: _containers.RepeatedScalarFieldContainer[str] + created: float + changed: float + reservation: str + def __init__(self, name: _Optional[str] = ..., aliases: _Optional[_Iterable[str]] = ..., comment: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ..., matches: _Optional[_Iterable[_Union[ResourceMatch, _Mapping]]] = ..., acquired: _Optional[str] = ..., acquired_resources: _Optional[_Iterable[str]] = ..., allowed: _Optional[_Iterable[str]] = ..., created: _Optional[float] = ..., changed: _Optional[float] = ..., reservation: _Optional[str] = ...) -> None: ... 
+ +class ResourceMatch(_message.Message): + __slots__ = ("exporter", "group", "cls", "name", "rename") + EXPORTER_FIELD_NUMBER: _ClassVar[int] + GROUP_FIELD_NUMBER: _ClassVar[int] + CLS_FIELD_NUMBER: _ClassVar[int] + NAME_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + exporter: str + group: str + cls: str + name: str + rename: str + def __init__(self, exporter: _Optional[str] = ..., group: _Optional[str] = ..., cls: _Optional[str] = ..., name: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class AddPlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceAliasRequest(_message.Message): + __slots__ = ("placename", "alias") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + ALIAS_FIELD_NUMBER: _ClassVar[int] + placename: str + alias: str + def __init__(self, placename: _Optional[str] = ..., alias: _Optional[str] = ...) -> None: ... + +class DeletePlaceAliasResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceTagsRequest(_message.Message): + __slots__ = ("placename", "tags") + class TagsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + PLACENAME_FIELD_NUMBER: _ClassVar[int] + TAGS_FIELD_NUMBER: _ClassVar[int] + placename: str + tags: _containers.ScalarMap[str, str] + def __init__(self, placename: _Optional[str] = ..., tags: _Optional[_Mapping[str, str]] = ...) -> None: ... + +class SetPlaceTagsResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class SetPlaceCommentRequest(_message.Message): + __slots__ = ("placename", "comment") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + COMMENT_FIELD_NUMBER: _ClassVar[int] + placename: str + comment: str + def __init__(self, placename: _Optional[str] = ..., comment: _Optional[str] = ...) -> None: ... + +class SetPlaceCommentResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AddPlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class AddPlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class DeletePlaceMatchRequest(_message.Message): + __slots__ = ("placename", "pattern", "rename") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + PATTERN_FIELD_NUMBER: _ClassVar[int] + RENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + pattern: str + rename: str + def __init__(self, placename: _Optional[str] = ..., pattern: _Optional[str] = ..., rename: _Optional[str] = ...) -> None: ... + +class DeletePlaceMatchResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... 
+ +class AcquirePlaceRequest(_message.Message): + __slots__ = ("placename",) + PLACENAME_FIELD_NUMBER: _ClassVar[int] + placename: str + def __init__(self, placename: _Optional[str] = ...) -> None: ... + +class AcquirePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class ReleasePlaceRequest(_message.Message): + __slots__ = ("placename", "fromuser") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + FROMUSER_FIELD_NUMBER: _ClassVar[int] + placename: str + fromuser: str + def __init__(self, placename: _Optional[str] = ..., fromuser: _Optional[str] = ...) -> None: ... + +class ReleasePlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class AllowPlaceRequest(_message.Message): + __slots__ = ("placename", "user") + PLACENAME_FIELD_NUMBER: _ClassVar[int] + USER_FIELD_NUMBER: _ClassVar[int] + placename: str + user: str + def __init__(self, placename: _Optional[str] = ..., user: _Optional[str] = ...) -> None: ... + +class AllowPlaceResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class CreateReservationRequest(_message.Message): + __slots__ = ("filters", "prio") + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + FILTERS_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + filters: _containers.MessageMap[str, Reservation.Filter] + prio: float + def __init__(self, filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., prio: _Optional[float] = ...) -> None: ... + +class CreateReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... + +class Reservation(_message.Message): + __slots__ = ("owner", "token", "state", "prio", "filters", "allocations", "created", "timeout") + class Filter(_message.Message): + __slots__ = ("filter",) + class FilterEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... + FILTER_FIELD_NUMBER: _ClassVar[int] + filter: _containers.ScalarMap[str, str] + def __init__(self, filter: _Optional[_Mapping[str, str]] = ...) -> None: ... + class FiltersEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: Reservation.Filter + def __init__(self, key: _Optional[str] = ..., value: _Optional[_Union[Reservation.Filter, _Mapping]] = ...) -> None: ... + class AllocationsEntry(_message.Message): + __slots__ = ("key", "value") + KEY_FIELD_NUMBER: _ClassVar[int] + VALUE_FIELD_NUMBER: _ClassVar[int] + key: str + value: str + def __init__(self, key: _Optional[str] = ..., value: _Optional[str] = ...) -> None: ... 
+ OWNER_FIELD_NUMBER: _ClassVar[int] + TOKEN_FIELD_NUMBER: _ClassVar[int] + STATE_FIELD_NUMBER: _ClassVar[int] + PRIO_FIELD_NUMBER: _ClassVar[int] + FILTERS_FIELD_NUMBER: _ClassVar[int] + ALLOCATIONS_FIELD_NUMBER: _ClassVar[int] + CREATED_FIELD_NUMBER: _ClassVar[int] + TIMEOUT_FIELD_NUMBER: _ClassVar[int] + owner: str + token: str + state: int + prio: float + filters: _containers.MessageMap[str, Reservation.Filter] + allocations: _containers.ScalarMap[str, str] + created: float + timeout: float + def __init__(self, owner: _Optional[str] = ..., token: _Optional[str] = ..., state: _Optional[int] = ..., prio: _Optional[float] = ..., filters: _Optional[_Mapping[str, Reservation.Filter]] = ..., allocations: _Optional[_Mapping[str, str]] = ..., created: _Optional[float] = ..., timeout: _Optional[float] = ...) -> None: ... + +class CancelReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class CancelReservationResponse(_message.Message): + __slots__ = () + def __init__(self) -> None: ... + +class PollReservationRequest(_message.Message): + __slots__ = ("token",) + TOKEN_FIELD_NUMBER: _ClassVar[int] + token: str + def __init__(self, token: _Optional[str] = ...) -> None: ... + +class PollReservationResponse(_message.Message): + __slots__ = ("reservation",) + RESERVATION_FIELD_NUMBER: _ClassVar[int] + reservation: Reservation + def __init__(self, reservation: _Optional[_Union[Reservation, _Mapping]] = ...) -> None: ... + +class GetReservationsResponse(_message.Message): + __slots__ = ("reservations",) + RESERVATIONS_FIELD_NUMBER: _ClassVar[int] + reservations: _containers.RepeatedCompositeFieldContainer[Reservation] + def __init__(self, reservations: _Optional[_Iterable[_Union[Reservation, _Mapping]]] = ...) -> None: ... + +class GetReservationsRequest(_message.Message): + __slots__ = () + def __init__(self) -> None: ... diff --git a/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py new file mode 100644 index 000000000..debfb24f2 --- /dev/null +++ b/labgrid/remote/generated/labgrid_coordinator_pb2_grpc.py @@ -0,0 +1,627 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from . import labgrid_coordinator_pb2 as labgrid__coordinator__pb2 + + +class CoordinatorStub(object): + """Missing associated documentation comment in .proto file.""" + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.ClientStream = channel.stream_stream( + '/labgrid.Coordinator/ClientStream', + request_serializer=labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ClientOutMessage.FromString, + ) + self.ExporterStream = channel.stream_stream( + '/labgrid.Coordinator/ExporterStream', + request_serializer=labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ExporterOutMessage.FromString, + ) + self.AddPlace = channel.unary_unary( + '/labgrid.Coordinator/AddPlace', + request_serializer=labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceResponse.FromString, + ) + self.DeletePlace = channel.unary_unary( + '/labgrid.Coordinator/DeletePlace', + request_serializer=labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + ) + self.GetPlaces = channel.unary_unary( + '/labgrid.Coordinator/GetPlaces', + request_serializer=labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetPlacesResponse.FromString, + ) + self.AddPlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceAlias', + request_serializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + ) + self.DeletePlaceAlias = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceAlias', + request_serializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + ) + self.SetPlaceTags = channel.unary_unary( + '/labgrid.Coordinator/SetPlaceTags', + request_serializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + ) + self.SetPlaceComment = channel.unary_unary( + '/labgrid.Coordinator/SetPlaceComment', + request_serializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + ) + self.AddPlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/AddPlaceMatch', + request_serializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + ) + self.DeletePlaceMatch = channel.unary_unary( + '/labgrid.Coordinator/DeletePlaceMatch', + request_serializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + ) + self.AcquirePlace = channel.unary_unary( + '/labgrid.Coordinator/AcquirePlace', + request_serializer=labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + ) + self.ReleasePlace = channel.unary_unary( + '/labgrid.Coordinator/ReleasePlace', + request_serializer=labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + ) + self.AllowPlace = channel.unary_unary( + '/labgrid.Coordinator/AllowPlace', + request_serializer=labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + 
response_deserializer=labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + ) + self.CreateReservation = channel.unary_unary( + '/labgrid.Coordinator/CreateReservation', + request_serializer=labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CreateReservationResponse.FromString, + ) + self.CancelReservation = channel.unary_unary( + '/labgrid.Coordinator/CancelReservation', + request_serializer=labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.CancelReservationResponse.FromString, + ) + self.PollReservation = channel.unary_unary( + '/labgrid.Coordinator/PollReservation', + request_serializer=labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.PollReservationResponse.FromString, + ) + self.GetReservations = channel.unary_unary( + '/labgrid.Coordinator/GetReservations', + request_serializer=labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + response_deserializer=labgrid__coordinator__pb2.GetReservationsResponse.FromString, + ) + + +class CoordinatorServicer(object): + """Missing associated documentation comment in .proto file.""" + + def ClientStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ExporterStream(self, request_iterator, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetPlaces(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceAlias(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceTags(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SetPlaceComment(self, request, context): + """Missing associated documentation comment in .proto file.""" + 
context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AddPlaceMatch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DeletePlaceMatch(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AcquirePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def ReleasePlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def AllowPlace(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CreateReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def CancelReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def PollReservation(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GetReservations(self, request, context): + """Missing associated documentation comment in .proto file.""" + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CoordinatorServicer_to_server(servicer, server): + rpc_method_handlers = { + 'ClientStream': grpc.stream_stream_rpc_method_handler( + servicer.ClientStream, + request_deserializer=labgrid__coordinator__pb2.ClientInMessage.FromString, + response_serializer=labgrid__coordinator__pb2.ClientOutMessage.SerializeToString, + ), + 'ExporterStream': grpc.stream_stream_rpc_method_handler( + servicer.ExporterStream, + request_deserializer=labgrid__coordinator__pb2.ExporterInMessage.FromString, + response_serializer=labgrid__coordinator__pb2.ExporterOutMessage.SerializeToString, + ), + 'AddPlace': grpc.unary_unary_rpc_method_handler( + servicer.AddPlace, + request_deserializer=labgrid__coordinator__pb2.AddPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceResponse.SerializeToString, + ), + 'DeletePlace': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlace, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceRequest.FromString, + 
response_serializer=labgrid__coordinator__pb2.DeletePlaceResponse.SerializeToString, + ), + 'GetPlaces': grpc.unary_unary_rpc_method_handler( + servicer.GetPlaces, + request_deserializer=labgrid__coordinator__pb2.GetPlacesRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetPlacesResponse.SerializeToString, + ), + 'AddPlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceAlias, + request_deserializer=labgrid__coordinator__pb2.AddPlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceAliasResponse.SerializeToString, + ), + 'DeletePlaceAlias': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceAlias, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceAliasRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceAliasResponse.SerializeToString, + ), + 'SetPlaceTags': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceTags, + request_deserializer=labgrid__coordinator__pb2.SetPlaceTagsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceTagsResponse.SerializeToString, + ), + 'SetPlaceComment': grpc.unary_unary_rpc_method_handler( + servicer.SetPlaceComment, + request_deserializer=labgrid__coordinator__pb2.SetPlaceCommentRequest.FromString, + response_serializer=labgrid__coordinator__pb2.SetPlaceCommentResponse.SerializeToString, + ), + 'AddPlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.AddPlaceMatch, + request_deserializer=labgrid__coordinator__pb2.AddPlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AddPlaceMatchResponse.SerializeToString, + ), + 'DeletePlaceMatch': grpc.unary_unary_rpc_method_handler( + servicer.DeletePlaceMatch, + request_deserializer=labgrid__coordinator__pb2.DeletePlaceMatchRequest.FromString, + response_serializer=labgrid__coordinator__pb2.DeletePlaceMatchResponse.SerializeToString, + ), + 'AcquirePlace': grpc.unary_unary_rpc_method_handler( + servicer.AcquirePlace, + request_deserializer=labgrid__coordinator__pb2.AcquirePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AcquirePlaceResponse.SerializeToString, + ), + 'ReleasePlace': grpc.unary_unary_rpc_method_handler( + servicer.ReleasePlace, + request_deserializer=labgrid__coordinator__pb2.ReleasePlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.ReleasePlaceResponse.SerializeToString, + ), + 'AllowPlace': grpc.unary_unary_rpc_method_handler( + servicer.AllowPlace, + request_deserializer=labgrid__coordinator__pb2.AllowPlaceRequest.FromString, + response_serializer=labgrid__coordinator__pb2.AllowPlaceResponse.SerializeToString, + ), + 'CreateReservation': grpc.unary_unary_rpc_method_handler( + servicer.CreateReservation, + request_deserializer=labgrid__coordinator__pb2.CreateReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.CreateReservationResponse.SerializeToString, + ), + 'CancelReservation': grpc.unary_unary_rpc_method_handler( + servicer.CancelReservation, + request_deserializer=labgrid__coordinator__pb2.CancelReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.CancelReservationResponse.SerializeToString, + ), + 'PollReservation': grpc.unary_unary_rpc_method_handler( + servicer.PollReservation, + request_deserializer=labgrid__coordinator__pb2.PollReservationRequest.FromString, + response_serializer=labgrid__coordinator__pb2.PollReservationResponse.SerializeToString, + ), + 'GetReservations': grpc.unary_unary_rpc_method_handler( + 
servicer.GetReservations, + request_deserializer=labgrid__coordinator__pb2.GetReservationsRequest.FromString, + response_serializer=labgrid__coordinator__pb2.GetReservationsResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'labgrid.Coordinator', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class Coordinator(object): + """Missing associated documentation comment in .proto file.""" + + @staticmethod + def ClientStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/labgrid.Coordinator/ClientStream', + labgrid__coordinator__pb2.ClientInMessage.SerializeToString, + labgrid__coordinator__pb2.ClientOutMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ExporterStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/labgrid.Coordinator/ExporterStream', + labgrid__coordinator__pb2.ExporterInMessage.SerializeToString, + labgrid__coordinator__pb2.ExporterOutMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AddPlace', + labgrid__coordinator__pb2.AddPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlace', + labgrid__coordinator__pb2.DeletePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetPlaces(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/GetPlaces', + labgrid__coordinator__pb2.GetPlacesRequest.SerializeToString, + labgrid__coordinator__pb2.GetPlacesResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, 
'/labgrid.Coordinator/AddPlaceAlias', + labgrid__coordinator__pb2.AddPlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceAliasResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlaceAlias(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlaceAlias', + labgrid__coordinator__pb2.DeletePlaceAliasRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceAliasResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SetPlaceTags(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/SetPlaceTags', + labgrid__coordinator__pb2.SetPlaceTagsRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceTagsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SetPlaceComment(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/SetPlaceComment', + labgrid__coordinator__pb2.SetPlaceCommentRequest.SerializeToString, + labgrid__coordinator__pb2.SetPlaceCommentResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AddPlaceMatch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AddPlaceMatch', + labgrid__coordinator__pb2.AddPlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.AddPlaceMatchResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DeletePlaceMatch(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/DeletePlaceMatch', + labgrid__coordinator__pb2.DeletePlaceMatchRequest.SerializeToString, + labgrid__coordinator__pb2.DeletePlaceMatchResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AcquirePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AcquirePlace', + labgrid__coordinator__pb2.AcquirePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AcquirePlaceResponse.FromString, + options, channel_credentials, + insecure, 
call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def ReleasePlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/ReleasePlace', + labgrid__coordinator__pb2.ReleasePlaceRequest.SerializeToString, + labgrid__coordinator__pb2.ReleasePlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def AllowPlace(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/AllowPlace', + labgrid__coordinator__pb2.AllowPlaceRequest.SerializeToString, + labgrid__coordinator__pb2.AllowPlaceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CreateReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/CreateReservation', + labgrid__coordinator__pb2.CreateReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CreateReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def CancelReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/CancelReservation', + labgrid__coordinator__pb2.CancelReservationRequest.SerializeToString, + labgrid__coordinator__pb2.CancelReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def PollReservation(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/PollReservation', + labgrid__coordinator__pb2.PollReservationRequest.SerializeToString, + labgrid__coordinator__pb2.PollReservationResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GetReservations(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/labgrid.Coordinator/GetReservations', + labgrid__coordinator__pb2.GetReservationsRequest.SerializeToString, + labgrid__coordinator__pb2.GetReservationsResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/labgrid/remote/generated/requirements.in b/labgrid/remote/generated/requirements.in new file mode 100644 index 
000000000..c43218a5c --- /dev/null +++ b/labgrid/remote/generated/requirements.in @@ -0,0 +1,3 @@ +# use ./update-requirements.sh to update requirements.txt +grpcio-tools==1.62.2 + diff --git a/labgrid/remote/generated/requirements.txt b/labgrid/remote/generated/requirements.txt new file mode 100644 index 000000000..580b2389d --- /dev/null +++ b/labgrid/remote/generated/requirements.txt @@ -0,0 +1,15 @@ +# +# This file is autogenerated by pip-compile with Python 3.11 +# by the following command: +# +# pip-compile requirements.in +# +grpcio==1.64.1 + # via grpcio-tools +grpcio-tools==1.62.2 + # via -r requirements.in +protobuf==4.25.3 + # via grpcio-tools + +# The following packages are considered to be unsafe in a requirements file: +# setuptools diff --git a/labgrid/remote/generated/update-requirements.sh b/labgrid/remote/generated/update-requirements.sh new file mode 100755 index 000000000..f828ed573 --- /dev/null +++ b/labgrid/remote/generated/update-requirements.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +set -ex + +pipx run --spec pip-tools pip-compile requirements.in -U | tee requirements.txt diff --git a/labgrid/remote/proto/labgrid-coordinator.proto b/labgrid/remote/proto/labgrid-coordinator.proto new file mode 100644 index 000000000..e0585f7e1 --- /dev/null +++ b/labgrid/remote/proto/labgrid-coordinator.proto @@ -0,0 +1,297 @@ +syntax = "proto3"; + +package labgrid; + +service Coordinator { + rpc ClientStream(stream ClientInMessage) returns (stream ClientOutMessage) {} + + rpc ExporterStream(stream ExporterInMessage) returns (stream ExporterOutMessage) {} + + rpc AddPlace(AddPlaceRequest) returns (AddPlaceResponse) {} + + rpc DeletePlace(DeletePlaceRequest) returns (DeletePlaceResponse) {} + + rpc GetPlaces(GetPlacesRequest) returns (GetPlacesResponse) {} + + rpc AddPlaceAlias(AddPlaceAliasRequest) returns (AddPlaceAliasResponse) {} + + rpc DeletePlaceAlias(DeletePlaceAliasRequest) returns (DeletePlaceAliasResponse) {} + + rpc SetPlaceTags(SetPlaceTagsRequest) returns (SetPlaceTagsResponse) {} + + rpc SetPlaceComment(SetPlaceCommentRequest) returns (SetPlaceCommentResponse) {} + + rpc AddPlaceMatch(AddPlaceMatchRequest) returns (AddPlaceMatchResponse) {} + + rpc DeletePlaceMatch(DeletePlaceMatchRequest) returns (DeletePlaceMatchResponse) {} + + rpc AcquirePlace(AcquirePlaceRequest) returns (AcquirePlaceResponse) {} + + rpc ReleasePlace(ReleasePlaceRequest) returns (ReleasePlaceResponse) {} + + rpc AllowPlace(AllowPlaceRequest) returns (AllowPlaceResponse) {} + + rpc CreateReservation(CreateReservationRequest) returns (CreateReservationResponse) {} + + rpc CancelReservation(CancelReservationRequest) returns (CancelReservationResponse) {} + + rpc PollReservation(PollReservationRequest) returns (PollReservationResponse) {} + + rpc GetReservations(GetReservationsRequest) returns (GetReservationsResponse) {} +} + +message ClientInMessage { + oneof kind { + Sync sync = 1; + StartupDone startup = 2; + Subscribe subscribe = 3; + }; +}; + +message Sync { + uint64 id = 1; +}; + +message StartupDone { + string version = 1; + string name = 2; +}; + +message Subscribe { + optional bool is_unsubscribe = 1; + oneof kind { + bool all_places = 2; + bool all_resources = 3; + } +}; + +message ClientOutMessage { + optional Sync sync = 1; + repeated UpdateResponse updates = 2; +}; + +message UpdateResponse { + oneof kind { + Resource resource = 1; + Resource.Path del_resource = 2; + Place place = 3; + string del_place = 4; + }; +}; + +message ExporterInMessage { + oneof kind { + Resource resource = 1; + 
StartupDone startup = 2; + ExporterResponse response = 3; + }; +}; + +message Resource { + message Path { + optional string exporter_name = 1; + string group_name = 2; + string resource_name = 3; + } + Path path = 1; + string cls = 2; + map<string, MapValue> params = 3; + map<string, MapValue> extra = 4; + string acquired = 5; + bool avail = 6; +}; + +message MapValue { + oneof kind { + bool bool_value = 1; + int64 int_value = 2; + uint64 uint_value = 3; + double float_value = 4; + string string_value = 5; + // FIXME do we need arrays? + } +}; + +message ExporterResponse { + bool success = 1; + optional string reason = 2; +}; + +message Hello { + string version = 1; +} + +message ExporterOutMessage { + oneof kind { + Hello hello = 1; + ExporterSetAcquiredRequest set_acquired_request = 2; + }; +}; + +message ExporterSetAcquiredRequest { + string group_name = 1; + string resource_name = 2; + optional string place_name = 3; +}; + +message AddPlaceRequest { + string name = 1; +}; + +message AddPlaceResponse { +}; + +message DeletePlaceRequest { + string name = 1; +}; + +message DeletePlaceResponse { +}; + +message GetPlacesRequest { +}; + +message GetPlacesResponse { + repeated Place places = 1; +} + +message Place { + string name = 1; + repeated string aliases = 2; + string comment = 3; + map<string, string> tags = 4; + repeated ResourceMatch matches = 5; + optional string acquired = 6; + repeated string acquired_resources = 7; + repeated string allowed = 8; + double created = 9; + double changed = 10; + optional string reservation = 11; +}; + +message ResourceMatch { + string exporter = 1; + string group = 2; + string cls = 3; + optional string name = 4; + optional string rename = 5; +}; + +message AddPlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message AddPlaceAliasResponse { +}; + +message DeletePlaceAliasRequest { + string placename = 1; + string alias = 2; +}; + +message DeletePlaceAliasResponse { +}; + +message SetPlaceTagsRequest { + string placename = 1; + map<string, string> tags = 2; +}; + +message SetPlaceTagsResponse { +}; + +message SetPlaceCommentRequest { + string placename = 1; + string comment = 2; +}; + +message SetPlaceCommentResponse { +}; + +message AddPlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message AddPlaceMatchResponse { +}; + +message DeletePlaceMatchRequest { + string placename = 1; + string pattern = 2; + optional string rename = 3; +}; + +message DeletePlaceMatchResponse { +}; + +message AcquirePlaceRequest { + string placename = 1; +}; + +message AcquirePlaceResponse { +}; + +message ReleasePlaceRequest { + string placename = 1; + optional string fromuser = 2; +}; + +message ReleasePlaceResponse { +}; + +message AllowPlaceRequest { + string placename = 1; + string user = 2; +}; + +message AllowPlaceResponse { +}; + + +message CreateReservationRequest { + map<string, Reservation.Filter> filters = 1; + double prio = 2; +}; + +message CreateReservationResponse { + Reservation reservation = 1; +}; + +message Reservation { + message Filter { + map<string, string> filter = 1; + } + string owner = 1; + string token = 2; + int32 state = 3; + double prio = 4; + map<string, Filter> filters = 5; + map<string, string> allocations = 6; + double created = 7; + double timeout = 8; +}; + +message CancelReservationRequest { + string token = 1; +}; + +message CancelReservationResponse { +}; + +message PollReservationRequest { + string token = 1; +}; + +message PollReservationResponse { + Reservation reservation = 1; +}; + +message GetReservationsResponse { + repeated Reservation reservations = 1; +}; + +message GetReservationsRequest { 
+}; diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index ad116382d..1b8256ef0 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -11,7 +11,6 @@ class RemotePlaceManager(ResourceManager): def __attrs_post_init__(self): super().__attrs_post_init__() self.url = None - self.realm = None self.loop = None self.session = None self.ready = None @@ -23,7 +22,7 @@ def _start(self): from ..remote.client import start_session try: - self.session = start_session(self.url, self.realm, {'env': self.env}) + self.session = start_session(self.url, {'env': self.env}) except ConnectionRefusedError as e: raise ConnectionRefusedError(f"Could not connect to coordinator {self.url}") \ from e @@ -39,12 +38,10 @@ def on_resource_added(self, resource): # be the same). if not self.session: self.env = remote_place.target.env - self.url = os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws") - self.realm = os.environ.get("LG_CROSSBAR_REALM", "realm1") + self.url = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") if self.env: config = self.env.config - self.url = config.get_option('crossbar_url', self.url) - self.realm = config.get_option('crossbar_realm', self.realm) + self.url = config.get_option("coordinator_address", self.url) self._start() place = self.session.get_place(remote_place.name) # pylint: disable=no-member resource_entries = self.session.get_target_resources(place) # pylint: disable=no-member diff --git a/labgrid/util/proxy.py b/labgrid/util/proxy.py index a5740e8fd..d489d89a5 100644 --- a/labgrid/util/proxy.py +++ b/labgrid/util/proxy.py @@ -95,6 +95,13 @@ def get_url(cls, url, *, default_port=None): return urlunsplit(s) + @classmethod + def get_grpc_address(cls, address, *, default_port=None): + url = f"//{address}" + url = proxymanager.get_url(url, default_port=default_port) + address = url.lstrip("/") + return address + @classmethod def get_command(cls, res, host, port, ifname=None): """get argument list to start a proxy process connected to the target""" diff --git a/pyproject.toml b/pyproject.toml index d84b7b571..590fcb7c7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -33,7 +33,9 @@ classifiers = [ dependencies = [ "ansicolors>=1.1.8", "attrs>=21.4.0", - "autobahn>=21.3.1", + "grpcio>=1.64.1, <2.0.0", + "grpcio-reflection>=1.64.1, <2.0.0", + "protobuf>=5.27.0", "jinja2>=3.0.2", "pexpect>=4.8.0", "pyserial-labgrid>=3.4.0.1", @@ -118,11 +120,15 @@ dev = [ # additional dev dependencies "psutil>=5.8.0", + "pytest-benchmark>=4.0.0", "pytest-cov>=3.0.0", "pytest-dependency>=0.5.1", "pytest-isort>=2.0.0", "pytest-mock>=3.6.1", "pylint>=3.0.0", + + # GRPC Channelz support + "grpcio-channelz>=1.64.1, <2.0.0", ] [project.scripts] @@ -130,6 +136,7 @@ labgrid-autoinstall = "labgrid.autoinstall.main:main" labgrid-client = "labgrid.remote.client:main" labgrid-exporter = "labgrid.remote.exporter:main" labgrid-suggest = "labgrid.resource.suggest:main" +labgrid-coordinator = "labgrid.remote.coordinator:main" # the following makes a plugin available to pytest [project.entry-points.pytest11] @@ -145,6 +152,7 @@ packages = [ "labgrid.protocol", "labgrid.pytestplugin", "labgrid.remote", + "labgrid.remote.generated", "labgrid.resource", "labgrid.strategy", "labgrid.util", @@ -211,6 +219,7 @@ enable = [ generated-members = [ "labgrid.driver.*", "labgrid.strategy.*", + "labgrid_coordinator_pb2", ] signature-mutators = ["labgrid.step.step"] diff --git a/tests/conftest.py b/tests/conftest.py index 50bcad1a0..164914c31 100644 --- a/tests/conftest.py +++ 
b/tests/conftest.py @@ -1,12 +1,10 @@ import logging -from pathlib import Path from signal import SIGTERM import sys import threading import pytest import pexpect -import yaml from labgrid import Target from labgrid.driver import SerialDriver @@ -100,56 +98,24 @@ def serial_driver_no_name(target, serial_port, mocker): return s @pytest.fixture(scope='function') -def crossbar_config(tmpdir, pytestconfig): - crossbar_config = '.crossbar/config-anonymous.yaml' - - pytestconfig.rootdir.join(crossbar_config).copy(tmpdir.mkdir('.crossbar')) - crossbar_config = tmpdir.join(crossbar_config) - - # crossbar runs labgrid's coordinator component as a guest, record its coverage - if pytestconfig.pluginmanager.get_plugin('pytest_cov'): - with open(crossbar_config, 'r+') as stream: - conf = yaml.safe_load(stream) - - for worker in conf['workers']: - if worker['type'] == 'guest': - worker['executable'] = 'coverage' - worker['arguments'].insert(0, 'run') - worker['arguments'].insert(1, '--parallel-mode') - # pytest-cov combines coverage files in root dir automatically, so copy it there - coverage_data = pytestconfig.rootdir.join('.coverage') - worker['arguments'].insert(2, f'--data-file={coverage_data}') - - stream.seek(0) - yaml.safe_dump(conf, stream) - - return crossbar_config - -@pytest.fixture(scope='function') -def crossbar(tmpdir, pytestconfig, crossbar_config): - crossbar_venv = Path(pytestconfig.getoption("--crossbar-venv")) - if not crossbar_venv.is_absolute(): - crossbar_venv = pytestconfig.rootdir / crossbar_venv - crossbar_bin = crossbar_venv / "bin/crossbar" +def coordinator(tmpdir): spawn = pexpect.spawn( - f'{crossbar_bin} start --color false --logformat none --config {crossbar_config}', - logfile=Prefixer(sys.stdout.buffer, 'crossbar'), + 'labgrid-coordinator', + logfile=Prefixer(sys.stdout.buffer, 'coordinator'), cwd=str(tmpdir)) try: - spawn.expect('Realm .* started') - spawn.expect('Guest .* started') spawn.expect('Coordinator ready') except: - print(f"crossbar startup failed with {spawn.before}") + print(f"coordinator startup failed with {spawn.before}") raise - reader = threading.Thread(target=keep_reading, name='crossbar-reader', args=(spawn,), daemon=True) + reader = threading.Thread(target=keep_reading, name='coordinator-reader', args=(spawn,), daemon=True) reader.start() yield spawn # let coverage write its data: # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination - print("stopping crossbar") + print("stopping coordinator") spawn.kill(SIGTERM) spawn.expect(pexpect.EOF) spawn.wait() @@ -157,7 +123,15 @@ def crossbar(tmpdir, pytestconfig, crossbar_config): reader.join() @pytest.fixture(scope='function') -def exporter(tmpdir, crossbar): +def exporter(tmpdir, coordinator, start_exporter): + yield start_exporter() + + +@pytest.fixture(scope='function') +def start_exporter(tmpdir, coordinator): + spawns = [] + readers = [] + p = tmpdir.join("exports.yaml") p.write( """ @@ -177,22 +151,33 @@ def exporter(tmpdir, crossbar): username: "root" """ ) - spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=str(tmpdir)) - try: - spawn.expect('exporter/testhost') - except: - print(f"exporter startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name='exporter-reader', args=(spawn,), daemon=True) - reader.start() - yield spawn - print("stopping exporter") - spawn.close(force=True) - assert not spawn.isalive() - reader.join() + 
+ def _start_exporter(): + spawn = pexpect.spawn( + f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=str(tmpdir)) + try: + spawn.expect('exporter name: testhost') + spawn.expect('connected to exporter') + except: + print(f"exporter startup failed with {spawn.before}") + raise + reader = threading.Thread(target=keep_reading, name=f'exporter-reader-{spawn.pid}', args=(spawn,), daemon=True) + reader.start() + + spawns.append(spawn) + readers.append(reader) + + return spawn + + yield _start_exporter + + for spawn, reader in zip(spawns, readers): + print(f"stopping exporter pid={spawn.pid}") + spawn.close(force=True) + assert not spawn.isalive() + reader.join() def pytest_addoption(parser): parser.addoption("--sigrok-usb", action="store_true", @@ -201,8 +186,6 @@ def pytest_addoption(parser): help="Run SSHManager tests against localhost") parser.addoption("--ssh-username", default=None, help="SSH username to use for SSHDriver testing") - parser.addoption("--crossbar-venv", default=None, - help="Path to separate virtualenv with crossbar installed") def pytest_configure(config): # register an additional marker @@ -213,7 +196,7 @@ def pytest_configure(config): config.addinivalue_line("markers", "sshusername: test SSHDriver against Localhost") config.addinivalue_line("markers", - "crossbar: test against local crossbar") + "coordinator: test against local coordinator") def pytest_runtest_setup(item): envmarker = item.get_closest_marker("sigrokusb") @@ -228,7 +211,3 @@ def pytest_runtest_setup(item): if envmarker is not None: if item.config.getoption("--ssh-username") is None: pytest.skip("SSHDriver tests against localhost not enabled (enable with --ssh-username )") - envmarker = item.get_closest_marker("crossbar") - if envmarker is not None: - if item.config.getoption("--crossbar-venv") is None: - pytest.skip("No path to crossbar virtualenv given (set with --crossbar-venv )") diff --git a/tests/test_crossbar.py b/tests/test_client.py similarity index 80% rename from tests/test_crossbar.py rename to tests/test_client.py index a1db0eeeb..d82c0a8ce 100644 --- a/tests/test_crossbar.py +++ b/tests/test_client.py @@ -1,6 +1,5 @@ import os import re -import sys import time import pytest @@ -8,8 +7,6 @@ psutil = pytest.importorskip("psutil") -pytestmark = pytest.mark.crossbar - def suspend_tree(pid): main = psutil.Process(pid) main.suspend() @@ -22,12 +19,11 @@ def resume_tree(pid): for child in main.children(recursive=True): child.resume() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_startup(crossbar): +def test_startup(coordinator): pass @pytest.fixture(scope='function') -def place(crossbar): +def place(coordinator): with pexpect.spawn('python -m labgrid.remote.client -p test create') as spawn: spawn.expect(pexpect.EOF) spawn.close() @@ -65,26 +61,24 @@ def place_acquire(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() def test_connect_error(): - with pexpect.spawn('python -m labgrid.remote.client -x ws://127.0.0.1:20409/ws places') as spawn: + with pexpect.spawn('python -m labgrid.remote.client -x 127.0.0.1:20409 places') as spawn: spawn.expect("Could not connect to coordinator") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_connect_timeout(crossbar): - 
suspend_tree(crossbar.pid) +def test_connect_timeout(coordinator): + suspend_tree(coordinator.pid) try: with pexpect.spawn('python -m labgrid.remote.client places') as spawn: - spawn.expect("connection closed during setup") + spawn.expect("connection attempt timed out before receiving SETTINGS frame") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() finally: - resume_tree(crossbar.pid) + resume_tree(coordinator.pid) pass -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_show(place): with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: spawn.expect("Place 'test':") @@ -92,7 +86,6 @@ def test_place_show(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_alias(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-alias foo') as spawn: spawn.expect(pexpect.EOF) @@ -104,7 +97,6 @@ def test_place_alias(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_comment(place): with pexpect.spawn('python -m labgrid.remote.client -p test set-comment my comment') as spawn: spawn.expect(pexpect.EOF) @@ -118,7 +110,6 @@ def test_place_comment(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "e1/g1/r1" "e2/g2/*"') as spawn: spawn.expect(pexpect.EOF) @@ -137,7 +128,6 @@ def test_place_match(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_match_duplicates(place): # first given match should succeed, second should be skipped matches = ( @@ -158,7 +148,6 @@ def test_place_match_duplicates(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire(place): with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: spawn.expect(pexpect.EOF) @@ -176,7 +165,6 @@ def test_place_acquire(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_enforce(place): with pexpect.spawn('python -m labgrid.remote.client -p test add-match does/not/exist') as spawn: spawn.expect(pexpect.EOF) @@ -200,7 +188,6 @@ def test_place_acquire_enforce(place): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_acquire_broken(place, exporter): with pexpect.spawn('python -m labgrid.remote.client -p test add-match "*/Broken/*"') as spawn: spawn.expect(pexpect.EOF) @@ -208,7 +195,7 @@ def test_place_acquire_broken(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as 
spawn: - spawn.expect('failed to acquire place test') + spawn.expect('Failed to acquire resources for place test') spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() @@ -220,7 +207,6 @@ def test_place_acquire_broken(place, exporter): print(spawn.before.decode()) assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_place_release_from(monkeypatch, place, exporter): user = "test-user" host = "test-host" @@ -267,23 +253,20 @@ def test_place_release_from(monkeypatch, place, exporter): before = spawn.before.decode("utf-8").strip() assert user not in before and not host in before, before -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_add_no_name(crossbar): +def test_place_add_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client create') as spawn: spawn.expect("missing place name") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") -def test_place_del_no_name(crossbar): +def test_place_del_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: - spawn.expect("deletes require an exact place name") + spawn.expect("name was not a string") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target(place_acquire, tmpdir): from labgrid.environment import Environment p = tmpdir.join("config.yaml") @@ -304,7 +287,6 @@ def test_remoteplace_target(place_acquire, tmpdir): remote_place = t.get_resource("RemotePlace") assert remote_place.tags == {"board": "bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_remoteplace_target_without_env(request, place_acquire): from labgrid import Target from labgrid.resource import RemotePlace @@ -313,7 +295,6 @@ def test_remoteplace_target_without_env(request, place_acquire): remote_place = RemotePlace(t, name="test") assert remote_place.tags == {"board": "bar"} -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_resource_conflict(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test2 create') as spawn: spawn.expect(pexpect.EOF) @@ -335,7 +316,6 @@ def test_resource_conflict(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation(place_acquire, tmpdir): with pexpect.spawn('python -m labgrid.remote.client reserve --shell board=bar name=test') as spawn: spawn.expect(pexpect.EOF) @@ -413,7 +393,93 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") +def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_exporter): + user = "test-user" + host = "test-host" + monkeypatch.setenv("LG_USERNAME", user) + monkeypatch.setenv("LG_HOSTNAME", 
host) + + exporter = start_exporter() + + # add resource match + with pexpect.spawn('python -m labgrid.remote.client -p test add-match testhost/Testport/NetworkSerialPort') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + # make sure matching resource is found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"acquired: None" in spawn.before + assert b"Matching resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': None," in spawn.before + + # lock place (and its resources) + with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': 'test'," in spawn.before + + # stop exporter + exporter.close() + assert not exporter.isalive() + + # start exporter again + exporter = start_exporter() + + # make sure matching resource is still found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert f"acquired: {host}/{user}" in spawn.before.decode("utf-8") + assert b"Acquired resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + # release place + with pexpect.spawn('python -m labgrid.remote.client -p test release') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test -v resources') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': None," in spawn.before + + # make sure matching resource is still found + with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + assert b"acquired: None" in spawn.before + assert b"Matching resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort/NetworkSerialPort)" in spawn.before + + # place should now be acquirable again + with pexpect.spawn('python -m labgrid.remote.client -p test acquire') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + with pexpect.spawn('python -m labgrid.remote.client -p test release') as spawn: + spawn.expect(pexpect.EOF) + spawn.close() + assert spawn.exitstatus == 0, spawn.before.strip() + + def test_exporter_timeout(place, exporter): with pexpect.spawn('python -m labgrid.remote.client resources') as spawn: spawn.expect(pexpect.EOF) @@ -451,7 
+517,6 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_reservation_custom_config(place, exporter, tmpdir): p = tmpdir.join("config.yaml") p.write( @@ -489,7 +554,6 @@ def test_reservation_custom_config(place, exporter, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -@pytest.mark.xfail(sys.version_info >= (3, 12), reason="latest crossbar release incompatible with python3.12+") def test_same_name_resources(place, exporter, tmpdir): with pexpect.spawn('python -m labgrid.remote.client -p test add-named-match "testhost/Many/NetworkService" "samename"') as spawn: spawn.expect(pexpect.EOF) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py new file mode 100644 index 000000000..27c04969b --- /dev/null +++ b/tests/test_coordinator.py @@ -0,0 +1,167 @@ +import pytest + +import grpc +import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +psutil = pytest.importorskip("psutil") + +@pytest.fixture(scope='function') +def channel_stub(): + import queue + queue = queue.Queue() + + channel = grpc.insecure_channel("127.0.0.1:20408") + stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(channel) + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ClientInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testclient" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + stream = stub.ClientStream(generate_startup(queue)) + yield stub + channel.close() + +@pytest.fixture(scope='function') +def coordinator_place(channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + return channel_stub + +def test_startup(coordinator): + pass + +def test_coordinator_add_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_del_place(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + place = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + res = channel_stub.DeletePlace(place) + assert res, f"There was an error: {res}" + +def test_coordinator_get_places(coordinator, channel_stub): + name = "test" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + name = "test2" + place = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + res = channel_stub.AddPlace(place) + assert res, f"There was an error: {res}" + + request = labgrid_coordinator_pb2.GetPlacesRequest() + res = channel_stub.GetPlaces(request) + + from labgrid.remote.common import Place + places = set() + names = set() + for pb2 in res.places: + place = Place.from_pb2(pb2) + places.add(place) + names.add(place.name) + + assert len(places) == 2, f"Returned places not two: {places}" + assert set(names) == {"test", "test2"}, f"There was an error: {res}" + +def test_coordinator_exporter_session(coordinator, channel_stub): + import 
queue + queue = queue.Queue() + + def generate_startup(queue): + msg = labgrid_coordinator_pb2.ExporterInMessage() + msg.startup.version = "2.0.0" + msg.startup.name = "testporter" + messages = [ + msg + ] + for msg in messages: + yield msg + while True: + msg = queue.get() + yield msg + queue.task_done() + + coordinator = channel_stub.ExporterStream(generate_startup(queue), wait_for_ready=True) + +def test_coordinator_place_acquire(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_acquire_release(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.ReleasePlace(labgrid_coordinator_pb2.ReleasePlaceRequest(placename="test")) + assert res + +def test_coordinator_place_add_alias(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_add_remove_alias(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceAlias(labgrid_coordinator_pb2.AddPlaceAliasRequest(placename="test", alias="testalias")) + assert res + res = stub.DeletePlaceAlias(labgrid_coordinator_pb2.DeletePlaceAliasRequest(placename="test", alias="testalias")) + assert res + +def test_coordinator_place_set_tags(coordinator, coordinator_place): + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags={"one": "two"})) + assert res + +def test_coordinator_place_set_comment(coordinator, coordinator_place): + stub = coordinator_place + res = stub.SetPlaceComment(labgrid_coordinator_pb2.SetPlaceCommentRequest(placename="test", comment="testcomment")) + assert res + +def test_coordinator_place_add_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_add_delete_match(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AddPlaceMatch(labgrid_coordinator_pb2.AddPlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + res = stub.DeletePlaceMatch(labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename="test", pattern="this/test/pattern")) + assert res + +def test_coordinator_place_allow(coordinator, coordinator_place): + stub = coordinator_place + res = stub.AcquirePlace(labgrid_coordinator_pb2.AcquirePlaceRequest(placename="test")) + assert res + res = stub.AllowPlace(labgrid_coordinator_pb2.AllowPlaceRequest(placename="test", user="othertest")) + assert res + +def test_coordinator_create_reservation(coordinator, coordinator_place): + tags = {"board": "test"} + stub = coordinator_place + res = stub.SetPlaceTags(labgrid_coordinator_pb2.SetPlaceTagsRequest(placename="test", tags=tags)) + assert res + res = stub.CreateReservation(labgrid_coordinator_pb2.CreateReservationRequest(filters={ + "main": labgrid_coordinator_pb2.Reservation.Filter(filter={"board": "test"}), + }, prio=1.0)) + assert res + res: labgrid_coordinator_pb2.CreateReservationResponse + assert len(res.reservation.token) > 0 diff --git a/tests/test_fixtures.py b/tests/test_fixtures.py index a1cd7600c..4ed9fba39 100644 --- a/tests/test_fixtures.py +++ 
b/tests/test_fixtures.py @@ -72,12 +72,12 @@ def test_env_with_junit(short_env, short_test, tmpdir): def test_help(short_test): with pexpect.spawn(f'pytest --help {short_test}') as spawn: spawn.expect(pexpect.EOF) - assert b'--lg-coordinator=CROSSBAR_URL' in spawn.before + assert b'--lg-coordinator=COORDINATOR_ADDRESS' in spawn.before spawn.close() assert spawn.exitstatus == 0 def test_help_coordinator(short_test): - with pexpect.spawn(f'pytest --lg-coordinator=ws://127.0.0.1:20408/ws --help {short_test}') as spawn: + with pexpect.spawn(f'pytest --lg-coordinator=127.0.0.1:20408 --help {short_test}') as spawn: spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus == 0 diff --git a/tests/test_pb2.py b/tests/test_pb2.py new file mode 100644 index 000000000..d1340ab45 --- /dev/null +++ b/tests/test_pb2.py @@ -0,0 +1,172 @@ +from labgrid.remote.common import Place, ResourceMatch, Reservation, set_map_from_dict, build_dict_from_map +import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 + +def test_place_as_pb2(): + place = Place(name="testing-place") + pb2 = place.as_pb2() + assert pb2.name == "testing-place" + assert pb2.created == place.created + assert pb2.changed == place.changed + +def test_place_from_pb2(): + place_start = Place(name="testing-place", comment="such-comment") + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.name == place_start.name + assert place_new.comment == place_start.comment + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + assert place_new.acquired == place_start.acquired + assert place_new.acquired_resources == place_start.acquired_resources + assert place_new.allowed == place_start.allowed + assert place_new.created == place_start.created + assert place_new.changed == place_start.changed + assert place_new.reservation == place_start.reservation + +def test_from_pb2_tags(): + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + assert pb2.tags is not None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.tags == tags + +def test_from_pb2_matches(): + rm = ResourceMatch("such", "test", "match") + place_start = Place(name="testing-place", matches=[rm]) + pb2 = place_start.as_pb2() + assert pb2.name == "testing-place", f"PB2 has wrong name: {pb2}" + assert pb2.tags is not None, f"PB2 has no tags field: {pb2}" + place_new = Place.from_pb2(pb2) + assert place_new.name == "testing-place" + assert place_new.tags == place_start.tags + assert place_new.matches == place_start.matches + +def test_from_pb2_tags_deepcopy(): + # Used by the RemotePlaceManager + tags = {"some": "test", "more": "values"} + place_start = Place(name="testing-place", tags=tags) + pb2 = place_start.as_pb2() + place_new = Place.from_pb2(pb2) + import copy + tags_copy = copy.deepcopy(place_new.tags) + +def test_place_as_pb2_copy_with_match(): + tags = {"some": "test", "more": "values"} + # Used by the RemotePlaceManager + place_start = Place(name="testing-place", tags=tags, comment="Hello", aliases={"some": "alias"}, matches=[ResourceMatch("testporter","somegroup","someclass")]) + out = labgrid_coordinator_pb2.ClientOutMessage() + + update_response = 
labgrid_coordinator_pb2.UpdateResponse() + update_response.place.CopyFrom(place_start.as_pb2()) + + out.updates.append(update_response) + +def test_match_as_from_pb2(): + rms = ResourceMatch("*", "somegroup", "someclass") + pb2 = rms.as_pb2() + assert pb2 + rme = ResourceMatch.from_pb2(pb2) + assert rms == rme + +def test_reservation_as_pb2(): + reservation = Reservation( + owner="test", + filters={ + "main": {"some": "filter"}, + }, + allocations={ + "main": ["the-place"], + }, + ) + pb2 = reservation.as_pb2() + assert pb2.owner == "test" + assert pb2.token == reservation.token + assert pb2.state == reservation.state.value + assert pb2.filters["main"].filter == {"some": "filter"} + assert pb2.created == reservation.created + assert pb2.timeout == reservation.timeout + +def test_reservation_as_from_pb2(): + resold = Reservation( + owner="test", + filters={ + "main": {"some": "filter"}, + }, + allocations={ + "main": ["the-place"], + }, + ) + pb2 = resold.as_pb2() + assert pb2.owner == resold.owner + assert pb2.token == resold.token + assert pb2.state == resold.state.value + assert pb2.filters["main"].filter == {"some": "filter"} + assert pb2.created == resold.created + assert pb2.timeout == resold.timeout + + resnew = Reservation.from_pb2(pb2) + + assert resnew.owner == resold.owner + assert resnew.token == resold.token + assert resnew.state == resold.state + assert resnew.filters["main"] == resold.filters["main"] + assert resnew.created == resold.created + assert resnew.timeout == resold.timeout + +def test_resource_dict(): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + assert params == decoded + +def test_map_serialize(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + def run(): + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + benchmark(run) + +def test_map_deser(benchmark): + params = { + 'host': 'foo', + 'model_id': 2277, + 'vendor_id': 1133, + 'path': None, + } + + resource = labgrid_coordinator_pb2.Resource() + set_map_from_dict(resource.params, params) + bm = resource.SerializeToString() + + def run(): + resource = labgrid_coordinator_pb2.Resource() + resource.ParseFromString(bm) + decoded = build_dict_from_map(resource.params) + + benchmark(run) diff --git a/tests/test_remote.py b/tests/test_remote.py index 4f803b043..80f54430e 100644 --- a/tests/test_remote.py +++ b/tests/test_remote.py @@ -1,8 +1,5 @@ -import pytest import pexpect -pytest.importorskip('autobahn') - def test_client_help(): with pexpect.spawn('python -m labgrid.remote.client --help') as spawn: spawn.expect('usage') From d7ac3e7faff061758e82d4bf5ac1ca7f6ff637a9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 23:05:57 +0200 Subject: [PATCH 250/384] remote/exporter: make coordinator port default to 20408 The coordinator binds to port 20408 by default. The client tries to connect to that port if no port is specified via --coordinator/-x, LG_COORDINATOR or via coordinator_address option in the environment config. Only the exporter does not yet default to 20408. 
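For reference, a bare "host[:port]" address can be probed for an explicit port by parsing it as a scheme-relative URL with urllib.parse.urlsplit; this is the check the fix below relies on (a minimal sketch, not part of the patch):

  from urllib.parse import urlsplit

  # urlsplit() only populates .netloc/.port for scheme-relative URLs,
  # so prefix the bare address with "//" before parsing
  assert urlsplit("//192.168.0.1").port is None         # no port given
  assert urlsplit("//192.168.0.1:20408").port == 20408  # explicit port
  assert urlsplit("//[::1]:20408").port == 20408        # IPv6 literals work, too
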
Change that by appending the default port 20408 to the address given via --coordinator/-c or LG_COORDINATOR if no port was specified. Closes #1429 Signed-off-by: Bastian Krause --- labgrid/remote/exporter.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 78b8ca606..019d39f96 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -10,6 +10,7 @@ import traceback import shutil import subprocess +from urllib.parse import urlsplit import warnings from pathlib import Path from typing import Dict, Type @@ -782,6 +783,10 @@ def __init__(self, config) -> None: self.hostname = config["hostname"] self.isolated = config["isolated"] + # default to port 20408 if not specified + if urlsplit(f"//{config['coordinator']}").port is None: + config["coordinator"] += ":20408" + self.channel = grpc.aio.insecure_channel(config["coordinator"]) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() From db9a326830277891453fb23f297511c1c42e2771 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:42:53 +0200 Subject: [PATCH 251/384] remote/coordinator: catch exceptions from poll steps separately If one of these fails, the others should not be skipped. Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index b4d4cf27a..336b6409f 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -195,10 +195,16 @@ def __init__(self) -> None: async def _poll_step(self): # save changes - if self.save_scheduled: - await self.save() + try: + if self.save_scheduled: + await self.save() + except Exception: # pylint: disable=broad-except + traceback.print_exc() # update reservations - self.schedule_reservations() + try: + self.schedule_reservations() + except Exception: # pylint: disable=broad-except + traceback.print_exc() async def poll(self): loop = asyncio.get_event_loop() From 829acab0647205af970298ba7fd65101c25842a9 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 12:47:53 +0200 Subject: [PATCH 252/384] remote/coordinator: simplify _update_acquired_places() _update_acquired_places() is only called when resources are added by the exporter or when they are removed after it has disconnected. So we never want to call back to the exporter for removed resources. 
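To illustrate what the dropped callback flag controlled, the release path behaves roughly like this (a simplified sketch, not the verbatim coordinator code; _notify_exporter() is a stand-in name for the ExporterSetAcquiredRequest round trip):

  async def _release_resources(self, place, resources, callback=True):
      for resource in resources:
          # local bookkeeping happens unconditionally
          place.acquired_resources.remove(resource)
          if callback:
              # ask the owning exporter to mark the resource as free again;
              # pointless for removed resources, whose exporter is already gone
              await self._notify_exporter(resource, place_name=None)
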
Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 336b6409f..d825a576c 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -334,7 +334,7 @@ def get_exporter_by_name(self, name): if exporter.name == name: return exporter - async def _update_acquired_places(self, action, resource, callback=True): + async def _update_acquired_places(self, action, resource): """Update acquired places when resources are added or removed.""" if action not in [Action.ADD, Action.DEL]: return # currently nothing needed for Action.UPD @@ -357,7 +357,9 @@ async def _update_acquired_places(self, action, resource, callback=True): self._publish_place(place) else: for place in places: - await self._release_resources(place, [resource], callback=callback) + # resources only disappear when exporters disconnect, so we + # can't call back to the exporter + await self._release_resources(place, [resource], callback=False) self._publish_place(place) def _publish_place(self, place): @@ -452,7 +454,7 @@ async def request_task(): for groupname, group in session.groups.items(): for resourcename in group.copy(): action, resource = session.set_resource(groupname, resourcename, None) - await self._update_acquired_places(action, resource, callback=False) + await self._update_acquired_places(action, resource) logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) From 2ca13299d429b27c5e2c60e74df92dbc3ed02b87 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 15:17:28 +0200 Subject: [PATCH 253/384] remote/coordinator: split out code to acquire a resource on the exporter In future commits, more code will call the newly introduced _acquire_resource() method. 
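For context, the ExporterCommand round-trip used below follows a simple
queue-plus-event pattern; a standalone sketch with simplified names,
not the actual implementation:

    import asyncio

    class Command:
        def __init__(self, request):
            self.request = request
            self.response = None
            self.completed = asyncio.Event()

        async def wait(self):
            # awaited by the code that queued the command
            await self.completed.wait()

        def complete(self, response):
            # called by the task that receives the peer's answer
            self.response = response
            self.completed.set()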
Signed-off-by: Jan Luebbe
---
 labgrid/remote/coordinator.py | 27 ++++++++++++++++-----------
 1 file changed, 16 insertions(+), 11 deletions(-)

diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py
index d825a576c..80a9a6832 100644
--- a/labgrid/remote/coordinator.py
+++ b/labgrid/remote/coordinator.py
@@ -599,6 +599,21 @@ async def DeletePlaceMatch(self, request, context):
         self.save_later()
         return labgrid_coordinator_pb2.DeletePlaceMatchResponse()
 
+    async def _acquire_resource(self, place, resource):
+        assert self.lock.locked()
+
+        # this triggers an update from the exporter which is published
+        # to the clients
+        request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest()
+        request.group_name = resource.path[1]
+        request.resource_name = resource.path[3]
+        request.place_name = place.name
+        cmd = ExporterCommand(request)
+        self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd)
+        await cmd.wait()
+        if not cmd.response.success:
+            raise ExporterError("failed to acquire {resource}")
+
     async def _acquire_resources(self, place, resources):
         assert self.lock.locked()
 
@@ -612,17 +627,7 @@ async def _acquire_resources(self, place, resources):
         acquired = []
         try:
             for resource in resources:
-                # this triggers an update from the exporter which is published
-                # to the clients
-                request = labgrid_coordinator_pb2.ExporterSetAcquiredRequest()
-                request.group_name = resource.path[1]
-                request.resource_name = resource.path[3]
-                request.place_name = place.name
-                cmd = ExporterCommand(request)
-                self.get_exporter_by_name(resource.path[0]).queue.put_nowait(cmd)
-                await cmd.wait()
-                if not cmd.response.success:
-                    raise ExporterError("failed to acquire {resource}")
+                await self._acquire_resource(place, resource)
                 acquired.append(resource)
         except Exception:
             logging.exception("failed to acquire %s", resource)

From 33126fa59e6687ff5ef7a3dc8915755f91094266 Mon Sep 17 00:00:00 2001
From: Jan Luebbe
Date: Wed, 3 Jul 2024 15:40:06 +0200
Subject: [PATCH 254/384] remote/client: explicitly handle orphaned resources in the client

In cases where an exporter used by an acquired place disconnects, the
place still references those orphaned resources. If the exporter
reconnects, they are reacquired by the coordinator automatically.
Signed-off-by: Jan Luebbe
---
 labgrid/remote/client.py | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py
index a78759fb7..8ce447d7a 100755
--- a/labgrid/remote/client.py
+++ b/labgrid/remote/client.py
@@ -472,7 +472,11 @@ async def print_place(self):
                 name = resource_name
                 if match.rename:
                     name = match.rename
-                resource = self.resources[exporter][group_name][resource_name]
+                try:
+                    resource = self.resources[exporter][group_name][resource_name]
+                except KeyError:
+                    print(f"Orphaned resource '{name}' ({exporter}/{group_name}/{cls}/{resource_name})")
+                    continue
                 print(f"Acquired resource '{name}' ({exporter}/{group_name}/{resource.cls}/{resource_name}):")  # pylint: disable=line-too-long
                 print(indent(pformat(resource.asdict()), prefix="  "))
                 assert resource.cls == cls
@@ -747,7 +751,11 @@ def get_target_resources(self, place):
             name = resource_name
             if match.rename:
                 name = match.rename
-            resources[(name, cls)] = self.resources[exporter][group_name][resource_name]
+            try:
+                resources[(name, cls)] = self.resources[exporter][group_name][resource_name]
+            except KeyError:
+                raise ServerError(f"place {place} has an orphaned resource (exporter {exporter} disconnected?)")
+
         return resources
 
     def get_target_config(self, place):

From fc7b8022b64b77c83bb123964c78971657cfca60 Mon Sep 17 00:00:00 2001
From: Jan Luebbe
Date: Wed, 3 Jul 2024 15:44:13 +0200
Subject: [PATCH 255/384] remote/coordinator: refactor handling of orphaned resources

If an exporter disconnects, corresponding resources that are acquired
become orphaned. If the exporter reconnects, the resources are not
locked. Requiring the user to unlock and lock the corresponding place
again is inconvenient and would change the previous behavior.

Prior to this commit, reacquiring did not work due to a logic error:
ExporterCommand.complete() and ExporterCommand.wait() are both called
in ExporterStream.request_task(). The blocking wait() prevents further
processing of exporter messages. That also means responses for
ExporterSetAcquiredRequest are not handled anymore. This leads to a
state where resources cannot be acquired/released by their place
anymore. Ultimately, this leads to an inconsistent state requiring a
coordinator restart.

Refactor handling of these orphaned resources to solve this.
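A contrived, runnable sketch of that logic error, i.e. awaiting a reply
inside the only task that processes replies:

    import asyncio

    async def main():
        queue = asyncio.Queue()
        done = asyncio.Event()

        async def dispatcher():
            # one task handles both requests and responses
            while True:
                msg = await queue.get()
                if msg == "request":
                    # BUG: waiting here starves this very task, so the
                    # queued "response" below is never processed
                    await asyncio.wait_for(done.wait(), timeout=1)
                elif msg == "response":
                    done.set()

        queue.put_nowait("request")
        queue.put_nowait("response")
        try:
            await dispatcher()
        except asyncio.TimeoutError:
            print("deadlock: response was queued but never handled")

    asyncio.run(main())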
Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 97 ++++++++++++++++++++++------------- 1 file changed, 61 insertions(+), 36 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 80a9a6832..961ea4ea9 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -54,7 +54,7 @@ def set_resource(self, groupname, resourcename, resource): """This is called when Exporters update resources or when they disconnect.""" logging.info("set_resource %s %s %s", groupname, resourcename, resource) group = self.groups.setdefault(groupname, {}) - old = group.get(resourcename) + old: ResourceImport = group.get(resourcename) if resource is not None: new = ResourceImport( data=ResourceImport.data_from_pb2(resource), path=(self.name, groupname, resource.cls, resourcename) @@ -66,6 +66,8 @@ def set_resource(self, groupname, resourcename, resource): group[resourcename] = new else: new = None + if old.acquired: + old.orphaned = True try: del group[resourcename] except KeyError: @@ -150,6 +152,7 @@ class ResourceImport(ResourceEntry): """ path = attr.ib(kw_only=True, validator=attr.validators.instance_of(tuple)) + orphaned = attr.ib(init=False, default=False, validator=attr.validators.instance_of(bool)) def locked(func): @@ -181,7 +184,7 @@ class ExporterError(Exception): class Coordinator(labgrid_coordinator_pb2_grpc.CoordinatorServicer): def __init__(self) -> None: - self.places = {} + self.places: dict[str, Place] = {} self.reservations = {} self.poll_task = None self.save_scheduled = False @@ -200,6 +203,12 @@ async def _poll_step(self): await self.save() except Exception: # pylint: disable=broad-except traceback.print_exc() + # try to re-acquire orphaned resources + try: + async with self.lock: + await self._reacquire_orphaned_resources() + except Exception: # pylint: disable=broad-except + traceback.print_exc() # update reservations try: self.schedule_reservations() @@ -334,34 +343,6 @@ def get_exporter_by_name(self, name): if exporter.name == name: return exporter - async def _update_acquired_places(self, action, resource): - """Update acquired places when resources are added or removed.""" - if action not in [Action.ADD, Action.DEL]: - return # currently nothing needed for Action.UPD - - # collect affected places - places = [] - for place in self.places.values(): - if not place.acquired: - continue - if not place.hasmatch(resource.path): - continue - places.append(place) - - if action is Action.ADD: - # only add if there is no conflict - if len(places) != 1: - return - place = places[0] - await self._acquire_resources(place, [resource]) - self._publish_place(place) - else: - for place in places: - # resources only disappear when exporters disconnect, so we - # can't call back to the exporter - await self._release_resources(place, [resource], callback=False) - self._publish_place(place) - def _publish_place(self, place): msg = labgrid_coordinator_pb2.ClientOutMessage() msg.updates.add().place.CopyFrom(place.as_pb2()) @@ -411,15 +392,12 @@ async def request_task(): logging.debug("Received startup from %s with %s", name, version) elif kind == "resource": logging.debug("Received resource from %s with %s", name, in_msg.resource) - action, resource = session.set_resource( + action, _ = session.set_resource( in_msg.resource.path.group_name, in_msg.resource.path.resource_name, in_msg.resource ) if action is Action.ADD: async with self.lock: self._add_default_place(in_msg.resource.path.group_name) - if action in (Action.ADD, Action.DEL): - 
async with self.lock: - await self._update_acquired_places(action, resource) self.save_later() else: logging.warning("received unknown kind %s from exporter %s (version %s)", kind, name, version) @@ -453,8 +431,7 @@ async def request_task(): for groupname, group in session.groups.items(): for resourcename in group.copy(): - action, resource = session.set_resource(groupname, resourcename, None) - await self._update_acquired_places(action, resource) + session.set_resource(groupname, resourcename, None) logging.debug("exporter aborted %s, cancelled: %s", context.peer(), context.cancelled()) @@ -652,6 +629,8 @@ async def _release_resources(self, place, resources, callback=True): pass for resource in resources: + if resource.orphaned: + continue try: # this triggers an update from the exporter which is published # to the clients @@ -673,6 +652,48 @@ async def _release_resources(self, place, resources, callback=True): except: logging.exception("failed to publish released resource %s", resource) + async def _reacquire_orphaned_resources(self): + assert self.lock.locked() + + for place in self.places.values(): + changed = False + + for idx, resource in enumerate(place.acquired_resources): + if not resource.orphaned: + continue + + # is the exporter connected again? + exporter = self.get_exporter_by_name(resource.path[0]) + if not exporter: + continue + + # does the resource exist again? + try: + new_resource = exporter.groups[resource.path[1]][resource.path[3]] + except KeyError: + continue + + if new_resource.acquired: + # this should only happen when resources become broken + logging.debug("ignoring acquired/broken resource %s for place %s", new_resource, place.name) + continue + + try: + await self._acquire_resource(place, new_resource) + place.acquired_resources[idx] = new_resource + except Exception: + logging.exception( + "failed to reacquire orphaned resource %s for place %s", new_resource, place.name + ) + break + + logging.info("reacquired orphaned resource %s for place %s", new_resource, place.name) + changed = True + + if changed: + self._publish_place(place) + self.save_later() + @locked async def AcquirePlace(self, request, context): peer = context.peer() @@ -693,6 +714,10 @@ async def AcquirePlace(self, request, context): res = self.reservations[place.reservation] if not res.owner == username: await context.abort(grpc.StatusCode.PERMISSION_DENIED, f"Place {name} was not reserved for {username}") + + # First try to reacquire orphaned resources to avoid conflicts. + await self._reacquire_orphaned_resources() + # FIXME use the session object instead? or something else which # survives disconnecting clients? place.acquired = username From bef539e94f0e9023b1654ae324362a1714274c07 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 18:04:38 +0200 Subject: [PATCH 256/384] remote: rework grpc timeout configuration It's not really clear how keepalive_timeout_ms and the ping_timeout_ms experiment should interact, so we set them both. 
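The same option tuples can be experimented with in isolation using the
synchronous API; address and values here are purely illustrative:

    import grpc

    # keepalive knobs are plain (name, value) channel options
    channel = grpc.insecure_channel(
        "127.0.0.1:20408",
        options=[
            ("grpc.keepalive_time_ms", 7500),
            ("grpc.keepalive_timeout_ms", 10000),
            ("grpc.http2.ping_timeout_ms", 10000),
        ],
    )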
Signed-off-by: Jan Luebbe --- labgrid/remote/client.py | 16 +++++++++++++++- labgrid/remote/coordinator.py | 23 +++++++++++++---------- labgrid/remote/exporter.py | 16 +++++++++++++++- 3 files changed, 43 insertions(+), 12 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 8ce447d7a..3541aacb5 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -86,7 +86,21 @@ def __attrs_post_init__(self): """Actions which are executed if a connection is successfully opened.""" self.stopping = asyncio.Event() - self.channel = grpc.aio.insecure_channel(self.address) + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. + channel_options = [ + ("grpc.keepalive_time_ms", 7500), # 7.5 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 10000), # 10 seconds + ("grpc.http2.max_pings_without_data", 0), # no limit + ] + + self.channel = grpc.aio.insecure_channel( + target=self.address, + options=channel_options, + ) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 961ea4ea9..47c58296b 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -959,17 +959,20 @@ async def GetReservations(self, request: labgrid_coordinator_pb2.GetReservations async def serve(listen, cleanup) -> None: + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. + channel_options = [ + ("grpc.keepalive_time_ms", 10000), # 10 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 15000), # 15 seconds + ("grpc.http2.min_ping_interval_without_data_ms", 5000), + ("grpc.http2.max_pings_without_data", 0), # no limit + ("grpc.keepalive_permit_without_calls", 1), # allow keepalive pings even when there are no calls + ] server = grpc.aio.server( - options=[ - ("grpc.keepalive_time_ms", 30000), # Send keepalive ping every 30 seconds - ( - "grpc.keepalive_timeout_ms", - 10000, - ), # Wait 10 seconds for ping ack before considering the connection dead - ("grpc.http2.min_time_between_pings_ms", 15000), # Minimum amount of time between pings - ("grpc.http2.max_pings_without_data", 0), # Allow pings even without active streams - ("grpc.keepalive_permit_without_calls", 1), # Allow keepalive pings even when there are no calls - ], + options=channel_options, ) coordinator = Coordinator() labgrid_coordinator_pb2_grpc.add_CoordinatorServicer_to_server(coordinator, server) diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 019d39f96..8187c4e14 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -783,11 +783,25 @@ def __init__(self, config) -> None: self.hostname = config["hostname"] self.isolated = config["isolated"] + # It seems since https://github.com/grpc/grpc/pull/34647, the + # ping_timeout_ms default of 60 seconds overrides keepalive_timeout_ms, + # so set it as well. + # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. 
+ channel_options = [ + ("grpc.keepalive_time_ms", 7500), # 7.5 seconds + ("grpc.keepalive_timeout_ms", 10000), # 10 seconds + ("grpc.http2.ping_timeout_ms", 10000), # 10 seconds + ("grpc.http2.max_pings_without_data", 0), # no limit + ] + # default to port 20408 if not specified if urlsplit(f"//{config['coordinator']}").port is None: config["coordinator"] += ":20408" - self.channel = grpc.aio.insecure_channel(config["coordinator"]) + self.channel = grpc.aio.insecure_channel( + target=config["coordinator"], + options=channel_options, + ) self.stub = labgrid_coordinator_pb2_grpc.CoordinatorStub(self.channel) self.out_queue = asyncio.Queue() self.pump_task = None From f32d41833eff192287def83516e4492b8441df44 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Wed, 3 Jul 2024 19:50:11 +0200 Subject: [PATCH 257/384] remote/coordinator: disable so_reuseport We only want to run one instance of the coordinator, so enabling so_reuseport can lead to confusing situations where multiple coordinator are running in parallel. Signed-off-by: Jan Luebbe --- labgrid/remote/coordinator.py | 1 + 1 file changed, 1 insertion(+) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 47c58296b..f7d97b86a 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -964,6 +964,7 @@ async def serve(listen, cleanup) -> None: # so set it as well. # Use GRPC_VERBOSITY=DEBUG GRPC_TRACE=http_keepalive for debugging. channel_options = [ + ("grpc.so_reuseport", 0), # no load balancing ("grpc.keepalive_time_ms", 10000), # 10 seconds ("grpc.keepalive_timeout_ms", 10000), # 10 seconds ("grpc.http2.ping_timeout_ms", 15000), # 15 seconds From 9ad28c334a468603f7fb6815c1792d0bc615935b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 14:47:48 +0200 Subject: [PATCH 258/384] remote/coordinator: store event loop in loop attribute Instead of using asyncio.get_event_loop() in various places in the Coordinator class, store it in an attribute called loop. 
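A minimal sketch of the pattern (class name made up; a later patch in
this series switches the lookup itself to asyncio.get_running_loop()):

    import asyncio

    class Service:
        def __init__(self):
            # look the loop up once and keep it ...
            self.loop = asyncio.get_event_loop()

        async def save(self, data):
            # ... instead of calling asyncio.get_event_loop() again here
            await self.loop.run_in_executor(None, print, data)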
Signed-off-by: Bastian Krause --- labgrid/remote/coordinator.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index f7d97b86a..635c7c5c9 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -194,7 +194,8 @@ def __init__(self) -> None: self.clients: dict[str, ClientSession] = {} self.load() - self.poll_task = asyncio.get_event_loop().create_task(self.poll()) + self.loop = asyncio.get_event_loop() + self.poll_task = self.loop.create_task(self.poll()) async def _poll_step(self): # save changes @@ -216,8 +217,7 @@ async def _poll_step(self): traceback.print_exc() async def poll(self): - loop = asyncio.get_event_loop() - while not loop.is_closed(): + while not self.loop.is_closed(): try: await asyncio.sleep(15.0) await self._poll_step() @@ -247,11 +247,10 @@ async def save(self): places = yaml.dump(places) places = places.encode() - loop = asyncio.get_event_loop() logging.debug("Awaiting resources") - await loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) + await self.loop.run_in_executor(None, atomic_replace, "resources.yaml", resources) logging.debug("Awaiting places") - await loop.run_in_executor(None, atomic_replace, "places.yaml", places) + await self.loop.run_in_executor(None, atomic_replace, "places.yaml", places) def load(self): try: @@ -310,7 +309,7 @@ async def request_task(): except Exception: logging.exception("error in client message handler") - runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + runnning_request_task = self.loop.create_task(request_task()) try: async for out_msg in queue_as_aiter(out_msg_queue): @@ -406,7 +405,7 @@ async def request_task(): except Exception: logging.exception("error in exporter message handler") - runnning_request_task = asyncio.get_event_loop().create_task(request_task()) + runnning_request_task = self.loop.create_task(request_task()) try: async for cmd in queue_as_aiter(command_queue): From 1bd0f99a3a6235050317820dd6b25c421a500406 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 15:14:17 +0200 Subject: [PATCH 259/384] remote/client: drop error check already done by argparser Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 3541aacb5..cec377c54 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -646,8 +646,6 @@ async def add_named_match(self): raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") if "*" in pattern: raise UserError(f"invalid pattern '{pattern}' ('*' not allowed for named matches)") - if not name: - raise UserError(f"invalid name '{name}'") request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=place.name, pattern=pattern, rename=name) From 5679fa579352b91113de0f3a04db1603e9c828c2 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 15:53:58 +0200 Subject: [PATCH 260/384] remote/client: replace manual place checks with helper method calls Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 5 +---- tests/test_client.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index cec377c54..28488fbc8 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -669,10 +669,7 @@ def check_matches(self, place): async def acquire(self): """Acquire a place, marking 
it unavailable for other clients""" - place = self.get_place() - if place.acquired: - raise UserError(f"place {place.name} is already acquired by {place.acquired}") - + place = self.get_idle_place() if not self.args.allow_unmatched: self.check_matches(place) diff --git a/tests/test_client.py b/tests/test_client.py index d82c0a8ce..14b855d1e 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -262,7 +262,7 @@ def test_place_add_no_name(coordinator): def test_place_del_no_name(coordinator): with pexpect.spawn('python -m labgrid.remote.client delete') as spawn: - spawn.expect("name was not a string") + spawn.expect("place pattern not specified") spawn.expect(pexpect.EOF) spawn.close() assert spawn.exitstatus != 0, spawn.before.strip() From 42305915e79722e2c59914ba8d3a504afef8d8cb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 14:10:47 +0200 Subject: [PATCH 261/384] remote/client: drop redundant checks These checks are already performed by get_idle_place(). Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 6 ------ 1 file changed, 6 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 28488fbc8..bd3526c17 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -597,8 +597,6 @@ async def add_match(self): """Add a match for a place, making fuzzy matching available to the client""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") @@ -617,8 +615,6 @@ async def add_match(self): async def del_match(self): """Delete a match for a place""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") for pattern in self.args.patterns: if not 2 <= pattern.count("/") <= 3: raise UserError(f"invalid pattern format '{pattern}' (use 'exporter/group/cls/name')") @@ -638,8 +634,6 @@ async def add_named_match(self): Fuzzy matching is not allowed to avoid accidental names conflicts.""" place = self.get_idle_place() - if place.acquired: - raise UserError(f"can not change acquired place {place.name}") pattern = self.args.pattern name = self.args.name if not 2 <= pattern.count("/") <= 3: From 624667c39740d9183ff7383e0fc9d1da8f47e8fb Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 23 Jul 2024 14:12:02 +0200 Subject: [PATCH 262/384] remote/client: be more explicit about expected place acquired state in allow()/release_from() Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index bd3526c17..63fdfa7af 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -719,6 +719,8 @@ async def release(self): async def release_from(self): """Release a place, but only if acquired by a specific user""" place = self.get_place() + if not place.acquired: + raise UserError(f"place {place.name} is not acquired") request = labgrid_coordinator_pb2.ReleasePlaceRequest(placename=place.name, fromuser=self.args.acquired) @@ -732,7 +734,7 @@ async def release_from(self): async def allow(self): """Allow another use access to a previously acquired place""" - place = self.get_place() + place = self.get_acquired_place() if "/" not in self.args.user: raise UserError(f"user {self.args.user} must be in / format") request = 
labgrid_coordinator_pb2.AllowPlaceRequest(placename=place.name, user=self.args.user) From da95c4446416205a26bf30d3d3979f6c9a7a0ad5 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 15:18:16 +0200 Subject: [PATCH 263/384] remote: use more explicit event loop handling Calling asyncio.get_event_loop() with no current event loop is deprecated since Python 3.10 and will be an error in some future Python release [1]. Whenever we don't expect to run with an event loop, create one explicitly. In coroutine and callbacks from asynchronous code, use the more explicit asyncio.get_running_loop() to get the loop. Note that this does not work in labgrid.resources.ethernetport.EthernetPortManager: This code is usually not called in coroutines and callbacks from asynchronous code, so asyncio.get_running_loop() does not work there. So stick to asyncio.get_event_loop() there and just expect to be called with a running event loop (which is the non-deprecated use case for this function). Users that do not have an event loop running will see a justified DeprecationWarning with Python >= 3.12 and an error in some future Python version. [1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop Signed-off-by: Bastian Krause --- labgrid/remote/coordinator.py | 6 ++++-- labgrid/remote/exporter.py | 5 ++++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 635c7c5c9..97ed16d26 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -194,7 +194,7 @@ def __init__(self) -> None: self.clients: dict[str, ClientSession] = {} self.load() - self.loop = asyncio.get_event_loop() + self.loop = asyncio.get_running_loop() self.poll_task = self.loop.create_task(self.poll()) async def _poll_step(self): @@ -1025,7 +1025,9 @@ def main(): logging.basicConfig(level=logging.DEBUG if args.debug else logging.INFO) - loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + cleanup = [] loop.set_debug(True) try: diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py index 8187c4e14..86a261c92 100755 --- a/labgrid/remote/exporter.py +++ b/labgrid/remote/exporter.py @@ -778,7 +778,7 @@ def __init__(self, config) -> None: - Setup loop, name, authid and address - Join the coordinator as an exporter""" self.config = config - self.loop = asyncio.get_event_loop() + self.loop = asyncio.get_running_loop() self.name = config["name"] self.hostname = config["hostname"] self.isolated = config["isolated"] @@ -1061,6 +1061,9 @@ def main(): print(f"exporter hostname: {config['hostname']}") print(f"resource config file: {config['resources']}") + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + asyncio.run(amain(config), debug=bool(args.debug)) if reexec: From 0e1e4fbaccec3c1c27b6314bdb3ab3eafa86e490 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 1 Aug 2024 15:18:57 +0200 Subject: [PATCH 264/384] remote/client: rework event loop handling in start_session() Calling asyncio.get_event_loop() with no current event loop is deprecated since Python 3.10 and will be an error in some future Python release [1]. Using it in labgrid.remote.client.start_session() causes errors in IPython when using a RemotePlace: In [1]: from labgrid.resource.remote import RemotePlace ...: from labgrid import Target ...: ...: target = Target("example") ...: RemotePlace(target, name="example-place") [...] 
RuntimeError: There is no current event loop in thread 'MainThread'. For labgrid.remote.client.start_session() there is no reliable way of retrieving the thread's event loop without being called from an async context (which we cannot assume here). Instead of using asyncio.get_event_loop(), use a new helper function ensure_event_loop() that returns the first available loop instance from: - externally provided event loop - stashed event loop - OS thread's running event loop (when called from async code) - new event loop The returned loop is stashed for future calls. See also [2] for a similar approach. start_session() now accepts a new optional argument "loop" for providing an external event loop. [1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop [2] https://github.com/jupyter/jupyter_core/pull/387 Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 46 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 43 insertions(+), 3 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 63fdfa7af..6924090f5 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -4,6 +4,7 @@ import argparse import asyncio import contextlib +from contextvars import ContextVar import enum import os import pathlib @@ -1529,8 +1530,45 @@ def print_version(self): print(labgrid_version()) -def start_session(address, extra, debug=False): - loop = asyncio.get_event_loop() +_loop: ContextVar["asyncio.AbstractEventLoop | None"] = ContextVar("_loop", default=None) + + +def ensure_event_loop(external_loop=None): + """Get the event loop for this thread, or create a new event loop.""" + # get stashed loop + loop = _loop.get() + + # ignore closed stashed loop + if loop and loop.is_closed(): + loop = None + + if external_loop: + # if a loop is stashed, expect it to be the same as the external one + if loop: + assert loop is external_loop + _loop.set(external_loop) + return external_loop + + # return stashed loop + if loop: + return loop + + try: + # if called from async code, try to get current's thread loop + loop = asyncio.get_running_loop() + except RuntimeError: + # no previous, external or running loop found, create a new one + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + # stash it + _loop.set(loop) + return loop + + +def start_session(address, extra, debug=False, loop=None): + loop = ensure_event_loop(loop) + if debug: loop.set_debug(True) @@ -2040,7 +2078,9 @@ def main(): coordinator_address = os.environ.get("LG_COORDINATOR", "127.0.0.1:20408") logging.debug('Starting session with "%s"', coordinator_address) - session = start_session(coordinator_address, extra, args.debug) + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + session = start_session(coordinator_address, extra=extra, debug=args.debug, loop=loop) logging.debug("Started session") try: From 545d240ec3bd48916c1119c0f4b88b4b9ac6551d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:54:47 +0200 Subject: [PATCH 265/384] remote/client: provide default for ClientSession.env to make it optional The Environment was always optional. Before users of ClientSession had to pass an explicit None for this attribute. While we're changing the ClientSession for gRPC anyway, let's make env really optional. This again allows us to make start_session()'s extra argument optional, too. It is used to pass extra arguments to the ClientSession, which means it can be an empty dictionary now. 
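A sketch of the minimal embedding this enables; the coordinator address
is for illustration only:

    from labgrid.remote.client import start_session

    # env and extra now default sensibly, so a bare address suffices
    session = start_session("127.0.0.1:20408")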
Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 6924090f5..731e81950 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -71,7 +71,7 @@ class ClientSession: address = attr.ib(validator=attr.validators.instance_of(str)) loop = attr.ib(validator=attr.validators.instance_of(asyncio.BaseEventLoop)) - env = attr.ib(validator=attr.validators.optional(attr.validators.instance_of(Environment))) + env = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(Environment))) role = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) prog = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(str))) args = attr.ib(default=None, validator=attr.validators.optional(attr.validators.instance_of(argparse.Namespace))) @@ -1566,9 +1566,12 @@ def ensure_event_loop(external_loop=None): return loop -def start_session(address, extra, debug=False, loop=None): +def start_session(address, extra=None, debug=False, loop=None): loop = ensure_event_loop(loop) + if extra is None: + extra = {} + if debug: loop.set_debug(True) From 37a92b5a86371dab28c3e5ef93146af0cf13ecb7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:55:40 +0200 Subject: [PATCH 266/384] remote: make start_session() args more explicit During the move to gRPC, start_session()'s arguments changed. Since this is one of the functions used from outside of labgrid, add typing hints and force the kwargs to be passed with names. This should make users aware of the changes, so their code can be adapted. Signed-off-by: Bastian Krause --- labgrid/remote/client.py | 15 ++++++++++++++- labgrid/resource/remote.py | 2 +- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/labgrid/remote/client.py b/labgrid/remote/client.py index 731e81950..5ab4f0683 100755 --- a/labgrid/remote/client.py +++ b/labgrid/remote/client.py @@ -23,6 +23,7 @@ from collections import defaultdict, OrderedDict from datetime import datetime from pprint import pformat +from typing import Any, Dict import attr import grpc @@ -1566,7 +1567,19 @@ def ensure_event_loop(external_loop=None): return loop -def start_session(address, extra=None, debug=False, loop=None): +def start_session( + address: str, *, extra: Dict[str, Any] = None, debug: bool = False, loop: "asyncio.AbstractEventLoop | None" = None +): + """ + Starts a ClientSession. 
+ + Args: + address: coordinator address as HOST[:PORT], PORT defaults to 20408 + extra: additional kwargs for ClientSession + debug: set debug mode of the event loop + loop: explicit event loop to use (otherwise a previously stashed loop, + if retrievable the current thread's loop or a new loop is used) + """ loop = ensure_event_loop(loop) if extra is None: diff --git a/labgrid/resource/remote.py b/labgrid/resource/remote.py index 1b8256ef0..b8adb2524 100644 --- a/labgrid/resource/remote.py +++ b/labgrid/resource/remote.py @@ -22,7 +22,7 @@ def _start(self): from ..remote.client import start_session try: - self.session = start_session(self.url, {'env': self.env}) + self.session = start_session(self.url, extra={'env': self.env}) except ConnectionRefusedError as e: raise ConnectionRefusedError(f"Could not connect to coordinator {self.url}") \ from e From 606af353d5621c0bd0a7f19bdac7182f318acb32 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 31 Jul 2024 18:30:34 +0200 Subject: [PATCH 267/384] tests/test_ethernetport: create and set event loop for test Calling asyncio.get_event_loop() with no current event loop is deprecated since Python 3.10 and will be an error in some future Python release [1]. SNMPEthernetPort expects a running event loop. So create and set one. [1] https://docs.python.org/3/library/asyncio-eventloop.html#asyncio.get_event_loop Signed-off-by: Bastian Krause --- tests/test_ethernetport.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/tests/test_ethernetport.py b/tests/test_ethernetport.py index bea4b8ad7..55dbac6b8 100644 --- a/tests/test_ethernetport.py +++ b/tests/test_ethernetport.py @@ -1,6 +1,15 @@ +import asyncio + from labgrid.resource import SNMPEthernetPort def test_instance(target): - s = SNMPEthernetPort(target, 'port-1', switch='dummy-switch', interface='1') - assert (isinstance(s, SNMPEthernetPort)) + # SNMPEthernetPort should be called with a running event loop + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + + try: + s = SNMPEthernetPort(target, 'port-1', switch='dummy-switch', interface='1') + assert (isinstance(s, SNMPEthernetPort)) + finally: + loop.close() From cb9ecde9e03737ed90cf3432099ede738b63cb5e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 2 Jul 2024 11:15:41 +0200 Subject: [PATCH 268/384] tests/conftest: turn exporter fixture into class This allows stopping and starting the exporter during a test. More functionality will be moved into the class in future commits. 
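A sketch of what the class-based fixture enables in a test (test name
made up):

    def test_exporter_restart(exporter):
        # bounce the exporter mid-test via the fixture object
        exporter.stop()
        exporter.start()
        assert exporter.isalive()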
Signed-off-by: Bastian Krause --- tests/conftest.py | 93 +++++++++++++++++++++++++++----------------- tests/test_client.py | 13 ++----- 2 files changed, 61 insertions(+), 45 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 164914c31..b120782f6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -60,6 +60,54 @@ def __getattr__(self, name): return getattr(self.__wrapped, name) +class Exporter: + def __init__(self, config, cwd): + self.cwd = str(cwd) + self.config = config + self.spawn = None + self.reader = None + + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + f'{sys.executable} -m labgrid.remote.exporter --name testhost {self.config}', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=self.cwd) + try: + self.spawn.expect('exporter name: testhost') + self.spawn.expect('connected to exporter') + except Exception as e: + raise Exception(f"exporter startup failed with {self.spawn.before}") from e + + self.reader = threading.Thread( + target=keep_reading, + name=f'exporter-reader-{self.pid}', + args=(self.spawn,), daemon=True) + self.reader.start() + + def stop(self): + logging.info("stopping exporter pid=%s", self.spawn.pid) + self.spawn.close(force=True) + assert not self.spawn.isalive() + self.reader.join() + + self.spawn = None + self.reader = None + + def isalive(self): + return self.spawn.isalive() + + @property + def exitstatus(self): + return self.spawn.exitstatus + + @property + def pid(self): + return self.spawn.pid + + @pytest.fixture(scope='function') def target(): return Target('Test') @@ -123,16 +171,9 @@ def coordinator(tmpdir): reader.join() @pytest.fixture(scope='function') -def exporter(tmpdir, coordinator, start_exporter): - yield start_exporter() - - -@pytest.fixture(scope='function') -def start_exporter(tmpdir, coordinator): - spawns = [] - readers = [] - - p = tmpdir.join("exports.yaml") +def exporter(tmpdir, coordinator): + config = "exports.yaml" + p = tmpdir.join(config) p.write( """ Testport: @@ -152,32 +193,12 @@ def start_exporter(tmpdir, coordinator): """ ) - def _start_exporter(): - spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost exports.yaml', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=str(tmpdir)) - try: - spawn.expect('exporter name: testhost') - spawn.expect('connected to exporter') - except: - print(f"exporter startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name=f'exporter-reader-{spawn.pid}', args=(spawn,), daemon=True) - reader.start() - - spawns.append(spawn) - readers.append(reader) - - return spawn - - yield _start_exporter - - for spawn, reader in zip(spawns, readers): - print(f"stopping exporter pid={spawn.pid}") - spawn.close(force=True) - assert not spawn.isalive() - reader.join() + exporter = Exporter(config, tmpdir) + exporter.start() + + yield exporter + + exporter.stop() def pytest_addoption(parser): parser.addoption("--sigrok-usb", action="store_true", diff --git a/tests/test_client.py b/tests/test_client.py index 14b855d1e..8f397f56f 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -393,14 +393,12 @@ def test_reservation(place_acquire, tmpdir): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() -def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_exporter): +def test_resource_acquired_state_on_exporter_restart(monkeypatch, place, exporter): user = "test-user" host = "test-host" 
monkeypatch.setenv("LG_USERNAME", user) monkeypatch.setenv("LG_HOSTNAME", host) - exporter = start_exporter() - # add resource match with pexpect.spawn('python -m labgrid.remote.client -p test add-match testhost/Testport/NetworkSerialPort') as spawn: spawn.expect(pexpect.EOF) @@ -433,12 +431,9 @@ def test_resource_acquired_state_on_exporter_restart(monkeypatch, place,start_ex assert spawn.exitstatus == 0, spawn.before.strip() assert b"Resource 'NetworkSerialPort' (testhost/Testport/NetworkSerialPort[/NetworkSerialPort]):\r\n {'acquired': 'test'," in spawn.before - # stop exporter - exporter.close() - assert not exporter.isalive() - - # start exporter again - exporter = start_exporter() + # restart exporter + exporter.stop() + exporter.start() # make sure matching resource is still found with pexpect.spawn('python -m labgrid.remote.client -p test show') as spawn: From 373874bac00d817fbde68f354096dbc8cfb4c199 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:21:43 +0200 Subject: [PATCH 269/384] tests/conftest: move common Exporter functionality into generic LabgridComponent Most functionality of this class is not exporter- or coordinator-specific, so move the common parts into a common class. Both Exporter (and a future) Coordinator class will inherhit it. Signed-off-by: Bastian Krause --- tests/conftest.py | 91 ++++++++++++++++++++++++++++------------------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index b120782f6..497eb2ee6 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -21,20 +21,6 @@ def curses_init(): except ModuleNotFoundError: logging.warning("curses module not found, not setting up a default terminal – tests may fail") -def keep_reading(spawn): - "The output from background processes must be read to avoid blocking them." - while spawn.isalive(): - try: - data = spawn.read_nonblocking(size=1024, timeout=0.1) - if not data: - return - except pexpect.TIMEOUT: - continue - except pexpect.EOF: - return - except OSError: - return - class Prefixer: def __init__(self, wrapped, prefix): @@ -60,40 +46,51 @@ def __getattr__(self, name): return getattr(self.__wrapped, name) -class Exporter: - def __init__(self, config, cwd): +class LabgridComponent: + def __init__(self, cwd): self.cwd = str(cwd) - self.config = config self.spawn = None self.reader = None - def start(self): - assert self.spawn is None - assert self.reader is None + def stop(self): + logging.info("stopping {self.__class__.__name__} pid=%s", self.spawn.pid) + + # let coverage write its data: + # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination + self.spawn.kill(SIGTERM) + if not self.spawn.closed: + self.spawn.expect(pexpect.EOF) + self.spawn.wait() + assert not self.spawn.isalive() - self.spawn = pexpect.spawn( - f'{sys.executable} -m labgrid.remote.exporter --name testhost {self.config}', - logfile=Prefixer(sys.stdout.buffer, 'exporter'), - cwd=self.cwd) - try: - self.spawn.expect('exporter name: testhost') - self.spawn.expect('connected to exporter') - except Exception as e: - raise Exception(f"exporter startup failed with {self.spawn.before}") from e + self.spawn = None + self.stop_reader() + + @staticmethod + def keep_reading(spawn): + "The output from background processes must be read to avoid blocking them." 
+ while spawn.isalive(): + try: + data = spawn.read_nonblocking(size=1024, timeout=0.1) + if not data: + return + except pexpect.TIMEOUT: + continue + except pexpect.EOF: + return + except OSError: + return + def start_reader(self): self.reader = threading.Thread( - target=keep_reading, - name=f'exporter-reader-{self.pid}', + target=LabgridComponent.keep_reading, + name=f'{self.__class__.__name__}-reader-{self.pid}', args=(self.spawn,), daemon=True) self.reader.start() - def stop(self): - logging.info("stopping exporter pid=%s", self.spawn.pid) - self.spawn.close(force=True) - assert not self.spawn.isalive() + def stop_reader(self): self.reader.join() - self.spawn = None self.reader = None def isalive(self): @@ -108,6 +105,28 @@ def pid(self): return self.spawn.pid +class Exporter(LabgridComponent): + def __init__(self, config, cwd): + super().__init__(cwd) + self.config = config + + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + f'labgrid-exporter --name testhost {self.config}', + logfile=Prefixer(sys.stdout.buffer, 'exporter'), + cwd=self.cwd) + try: + self.spawn.expect('exporter name: testhost') + self.spawn.expect('connected to exporter') + except Exception as e: + raise Exception(f"exporter startup failed with {self.spawn.before}") from e + + self.start_reader() + + @pytest.fixture(scope='function') def target(): return Target('Test') From 15da271b45a890abe33c226c0ed804e837e64117 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:22:48 +0200 Subject: [PATCH 270/384] tests/conftest: make coordinator fixture use a helper class Coordinator The same was previously implemented for the exporter. Since we need the same functionality also for the coordinator (along with suspend_tree()/resume_tree() functionality to be moved in a future commit), let's refactor it now. 
Signed-off-by: Bastian Krause --- tests/conftest.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 497eb2ee6..08f630c59 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -127,6 +127,23 @@ def start(self): self.start_reader() +class Coordinator(LabgridComponent): + def start(self): + assert self.spawn is None + assert self.reader is None + + self.spawn = pexpect.spawn( + 'labgrid-coordinator', + logfile=Prefixer(sys.stdout.buffer, 'coordinator'), + cwd=self.cwd) + try: + self.spawn.expect('Coordinator ready') + except Exception as e: + raise Exception(f"coordinator startup failed with {self.spawn.before}") from e + + self.start_reader() + + @pytest.fixture(scope='function') def target(): return Target('Test') @@ -166,28 +183,12 @@ def serial_driver_no_name(target, serial_port, mocker): @pytest.fixture(scope='function') def coordinator(tmpdir): + coordinator = Coordinator(tmpdir) + coordinator.start() - spawn = pexpect.spawn( - 'labgrid-coordinator', - logfile=Prefixer(sys.stdout.buffer, 'coordinator'), - cwd=str(tmpdir)) - try: - spawn.expect('Coordinator ready') - except: - print(f"coordinator startup failed with {spawn.before}") - raise - reader = threading.Thread(target=keep_reading, name='coordinator-reader', args=(spawn,), daemon=True) - reader.start() - yield spawn - - # let coverage write its data: - # https://coverage.readthedocs.io/en/latest/subprocess.html#process-termination - print("stopping coordinator") - spawn.kill(SIGTERM) - spawn.expect(pexpect.EOF) - spawn.wait() - - reader.join() + yield coordinator + + coordinator.stop() @pytest.fixture(scope='function') def exporter(tmpdir, coordinator): From 5b2776d0e699a81abaafab8ab12af9c4299c79af Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 16:24:02 +0200 Subject: [PATCH 271/384] tests/conftest: move suspend_tree/resume_tree into LabgridComponent Previously only tests/test_client.py could use the suspend_tree() and resume_tree() functionality. Since this is also useful for exporter + coordinator tests, move it into the generic LabgridComponent class. 
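A sketch of the resulting test pattern (test name made up, body
abbreviated):

    def test_client_timeout(coordinator):
        # freeze the whole process tree, check timeout behavior,
        # then always thaw it again
        coordinator.suspend_tree()
        try:
            ...  # e.g. expect a client command to time out
        finally:
            coordinator.resume_tree()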
Signed-off-by: Bastian Krause --- tests/conftest.py | 14 ++++++++++++++ tests/test_client.py | 22 ++++------------------ tests/test_coordinator.py | 2 -- 3 files changed, 18 insertions(+), 20 deletions(-) diff --git a/tests/conftest.py b/tests/conftest.py index 08f630c59..b56b212fa 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -11,6 +11,8 @@ from labgrid.resource import RawSerialPort, NetworkSerialPort from labgrid.driver.fake import FakeConsoleDriver +psutil = pytest.importorskip("psutil") + @pytest.fixture(scope="session") def curses_init(): """ curses only reads the terminfo DB once on the first import, so make @@ -104,6 +106,18 @@ def exitstatus(self): def pid(self): return self.spawn.pid + def suspend_tree(self): + main = psutil.Process(self.pid) + main.suspend() + for child in main.children(recursive=True): + child.suspend() + + def resume_tree(self): + main = psutil.Process(self.pid) + main.resume() + for child in main.children(recursive=True): + child.resume() + class Exporter(LabgridComponent): def __init__(self, config, cwd): diff --git a/tests/test_client.py b/tests/test_client.py index 8f397f56f..5a1fc1a74 100644 --- a/tests/test_client.py +++ b/tests/test_client.py @@ -5,20 +5,6 @@ import pytest import pexpect -psutil = pytest.importorskip("psutil") - -def suspend_tree(pid): - main = psutil.Process(pid) - main.suspend() - for child in main.children(recursive=True): - child.suspend() - -def resume_tree(pid): - main = psutil.Process(pid) - main.resume() - for child in main.children(recursive=True): - child.resume() - def test_startup(coordinator): pass @@ -68,7 +54,7 @@ def test_connect_error(): assert spawn.exitstatus == 1, spawn.before.strip() def test_connect_timeout(coordinator): - suspend_tree(coordinator.pid) + coordinator.suspend_tree() try: with pexpect.spawn('python -m labgrid.remote.client places') as spawn: spawn.expect("connection attempt timed out before receiving SETTINGS frame") @@ -76,7 +62,7 @@ def test_connect_timeout(coordinator): spawn.close() assert spawn.exitstatus == 1, spawn.before.strip() finally: - resume_tree(coordinator.pid) + coordinator.resume_tree() pass def test_place_show(place): @@ -488,7 +474,7 @@ def test_exporter_timeout(place, exporter): spawn.close() assert spawn.exitstatus == 0, spawn.before.strip() - suspend_tree(exporter.pid) + exporter.suspend_tree() try: time.sleep(30) @@ -499,7 +485,7 @@ def test_exporter_timeout(place, exporter): assert spawn.exitstatus == 0, spawn.before.strip() assert b'/Testport/NetworkSerialPort' not in spawn.before finally: - resume_tree(exporter.pid) + exporter.resume_tree() # the exporter should quit by itself now time.sleep(5) diff --git a/tests/test_coordinator.py b/tests/test_coordinator.py index 27c04969b..dbbefa33a 100644 --- a/tests/test_coordinator.py +++ b/tests/test_coordinator.py @@ -4,8 +4,6 @@ import labgrid.remote.generated.labgrid_coordinator_pb2_grpc as labgrid_coordinator_pb2_grpc import labgrid.remote.generated.labgrid_coordinator_pb2 as labgrid_coordinator_pb2 -psutil = pytest.importorskip("psutil") - @pytest.fixture(scope='function') def channel_stub(): import queue From df8df522b152d81ed95bebd224901a71553bbdce Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 17 Jul 2024 12:59:21 +0200 Subject: [PATCH 272/384] tests/test_remote: add test_exporter_start_coordinator_unreachable Previously the exporter had blocking issues when the coordinator was not available. Add a test to prevent future regressions. 
Signed-off-by: Bastian Krause
---
 tests/test_remote.py | 20 ++++++++++++++++++++
 1 file changed, 20 insertions(+)

diff --git a/tests/test_remote.py b/tests/test_remote.py
index 80f54430e..b07b5b170 100644
--- a/tests/test_remote.py
+++ b/tests/test_remote.py
@@ -15,3 +15,23 @@ def test_exporter_help():
     spawn.close()
     assert spawn.exitstatus == 0
     assert spawn.signalstatus is None
+
+def test_exporter_start_coordinator_unreachable(monkeypatch, tmpdir):
+    monkeypatch.setenv("LG_COORDINATOR", "coordinator.invalid")
+
+    config = "exports.yaml"
+    p = tmpdir.join(config)
+    p.write(
+        """
+        Testport:
+          NetworkSerialPort:
+            host: 'localhost'
+            port: 4000
+        """
+    )
+
+    with pexpect.spawn(f"python -m labgrid.remote.exporter {config}", cwd=tmpdir) as spawn:
+        spawn.expect("coordinator is unavailable", timeout=10)
+        spawn.expect(pexpect.EOF)
+    spawn.close()
+    assert spawn.exitstatus == 100, spawn.before

From 8462aebc046c401bcab4ee167b19fb20aa4dbd8e Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 17 Jul 2024 16:24:56 +0200
Subject: [PATCH 273/384] tests/test_remote: add test_exporter_coordinator_becomes_unreachable

A previous commit added a test for exporter startup with an unreachable
coordinator. Now also add a test simulating a disappearing coordinator
during operation.

The exporter should notice the coordinator disappearing and should exit
with exitcode 100. This way systemd can try restarting the exporter
regularly until the coordinator is available again.

Signed-off-by: Bastian Krause
---
 tests/test_remote.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/tests/test_remote.py b/tests/test_remote.py
index b07b5b170..ca09908ec 100644
--- a/tests/test_remote.py
+++ b/tests/test_remote.py
@@ -35,3 +35,12 @@ def test_exporter_start_coordinator_unreachable(monkeypatch, tmpdir):
     spawn.expect(pexpect.EOF)
     spawn.close()
     assert spawn.exitstatus == 100, spawn.before
+
+def test_exporter_coordinator_becomes_unreachable(coordinator, exporter):
+    coordinator.suspend_tree()
+
+    exporter.spawn.expect(pexpect.EOF, timeout=30)
+    exporter.spawn.close()
+    assert exporter.exitstatus == 100
+
+    coordinator.resume_tree()

From 61516b6c28361726424fde2c31142250b247924a Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Wed, 7 Aug 2024 15:48:59 +0200
Subject: [PATCH 274/384] contrib/systemd: update service for new versions of coordinator/exporter

Signed-off-by: Rouven Czerwinski
Signed-off-by: Bastian Krause
---
 contrib/systemd/labgrid-coordinator.service | 6 +++---
 contrib/systemd/labgrid-exporter.service    | 2 +-
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/contrib/systemd/labgrid-coordinator.service b/contrib/systemd/labgrid-coordinator.service
index 2dc5d117e..c701038c6 100644
--- a/contrib/systemd/labgrid-coordinator.service
+++ b/contrib/systemd/labgrid-coordinator.service
@@ -4,12 +4,12 @@ After=network.target
 
 [Service]
 Environment="PYTHONUNBUFFERED=1"
-# labgrid's .crossbar/config-anonymous.yaml serves as an example
-ExecStart=/path/to/labgrid-coordinator/venv/bin/crossbar start --logformat=syslogd --cbdir /var/lib/labgrid-coordinator --config /etc/labgrid/coordinator.yaml
-ExecStop=/usr/bin/labgrid-coordinator stop --cbdir /var/lib/labgrid-coordinator
+ExecStart=/path/to/labgrid/venv/bin/labgrid-coordinator
 Restart=on-failure
 DynamicUser=yes
 StateDirectory=labgrid-coordinator
+# Set WorkingDirectory to StateDirectory, this works in DynamicUser mode since symlinks are created
+WorkingDirectory=%S/labgrid-coordinator
 
 [Install]
 WantedBy=multi-user.target
diff --git
a/contrib/systemd/labgrid-exporter.service b/contrib/systemd/labgrid-exporter.service index a896aeeae..10cbfff26 100644 --- a/contrib/systemd/labgrid-exporter.service +++ b/contrib/systemd/labgrid-exporter.service @@ -5,7 +5,7 @@ Wants=network-online.target [Service] Environment="PYTHONUNBUFFERED=1" -# Should contain LG_CROSSBAR configuration +# Should contain LG_COORDINATOR configuration EnvironmentFile=-/etc/environment ExecStart=/path/to/labgrid/venv/bin/labgrid-exporter /etc/labgrid/exporter.yaml Restart=on-failure From 394864eecad91d3465cad799dda82d84e6a6ac7e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 15:58:17 +0200 Subject: [PATCH 275/384] contrib/coordinator-statsd: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/coordinator-statsd.py | 45 +++++++++++++++++------------------ 1 file changed, 22 insertions(+), 23 deletions(-) diff --git a/contrib/coordinator-statsd.py b/contrib/coordinator-statsd.py index cdf6c6c73..f8ea0254b 100755 --- a/contrib/coordinator-statsd.py +++ b/contrib/coordinator-statsd.py @@ -42,12 +42,14 @@ import sys import argparse -import statsd import os -import labgrid.remote.client import time import asyncio -import txaio + +from labgrid.remote.client import start_session, Error +from labgrid.remote.generated import labgrid_coordinator_pb2 +from labgrid.remote.common import Reservation +import statsd def inc_gauge(gauges, key): @@ -56,12 +58,13 @@ def inc_gauge(gauges, key): async def report_reservations(session, tags, gauges): - reservations = await session.call("org.labgrid.coordinator.get_reservations") + request = labgrid_coordinator_pb2.GetReservationsRequest() - for token, config in reservations.items(): - state = config["state"] + response = await session.stub.GetReservations(request) + reservations = [Reservation.from_pb2(x) for x in response.reservations] - groups = config.get("filters", {}) + for reservation in reservations: + groups = reservation.filters if not groups: groups = {"": {}} @@ -72,7 +75,7 @@ async def report_reservations(session, tags, gauges): ".".join( ["reservations", group_name] + [group.get(t, "") for t in tags] - + [state] + + [reservation.state.name] ), ) @@ -94,10 +97,10 @@ def main(): ) parser.add_argument( "-x", - "--crossbar", - metavar="URL", - help="Crossbar URL for the coordinator", - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), + "--coordinator", + metavar="ADDRESS", + help="Coordinator address as HOST[:PORT]. 
Default is %(default)s", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), ) parser.add_argument( "--period", @@ -142,8 +145,8 @@ def main(): args = parser.parse_args() - txaio.use_asyncio() - txaio.config.loop = asyncio.get_event_loop() + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) statsd_client = None gauges = {} @@ -175,22 +178,18 @@ def main(): next_time = time.monotonic() + args.period try: - extra = {} - session = labgrid.remote.client.start_session( - args.crossbar, - os.environ.get("LG_CROSSBAR_REALM", "realm1"), - extra, - ) + session = start_session(args.coordinator, loop=loop) try: - session.loop.run_until_complete( + loop.run_until_complete( asyncio.gather( report_places(session, args.tags, gauges), report_reservations(session, args.tags, gauges), ) ) finally: - session.leave() - except labgrid.remote.client.Error as e: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) + except Error as e: print(f"Error communicating with labgrid: {e}") continue From b589897a56b491c168774561160f06d227a97382 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 16:08:39 +0200 Subject: [PATCH 276/384] contrib/labgrid-webapp: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/README.rst | 6 +++--- contrib/labgrid-webapp | 23 ++++++++++++++++------- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/contrib/README.rst b/contrib/README.rst index 5e537d850..d770cdebd 100644 --- a/contrib/README.rst +++ b/contrib/README.rst @@ -13,14 +13,14 @@ Quick Start $ source venv/bin/activate venv $ pip install -r contrib/requirements-webapp.txt venv $ ./contrib/labgrid-webapp --help - usage: labgrid-webapp [-h] [--crossbar URL] [--port PORT] [--proxy PROXY] + usage: labgrid-webapp [-h] [--coordinator ADDRESS] [--port PORT] [--proxy PROXY] Labgrid webapp options: -h, --help show this help message and exit - --crossbar URL, -x URL - Crossbar websocket URL (default: ws://127.0.0.1:20408/ws) + --coordinator ADDRESS, -x ADDRESS + Coordinator address as HOST[:PORT] (default: 127.0.0.1:20408) --port PORT Port to serve on --proxy PROXY, -P PROXY diff --git a/contrib/labgrid-webapp b/contrib/labgrid-webapp index bd4a22178..e78976ad1 100755 --- a/contrib/labgrid-webapp +++ b/contrib/labgrid-webapp @@ -1,5 +1,6 @@ #!/usr/bin/env python3 import argparse +import asyncio import logging import os import sys @@ -118,11 +119,11 @@ def main(): formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - '--crossbar', + '--coordinator', '-x', metavar='URL', - default=os.environ.get('LG_CROSSBAR', 'ws://127.0.0.1:20408/ws'), - help='Crossbar websocket URL (default: %(default)s)', + default=os.environ.get('LG_COORDINATOR', '127.0.0.1:20408'), + help='Coordinator address as HOST[:PORT] (default: %(default)s)', ) parser.add_argument('--port', type=int, default=8800, help='Port to serve on') parser.add_argument('--proxy', '-P', help='Proxy connections via given ssh host') @@ -132,16 +133,20 @@ def main(): if args.proxy: proxymanager.force_proxy(args.proxy) + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + try: session = start_session( - args.crossbar, os.environ.get('LG_CROSSBAR_REALM', 'realm1'), {}, + args.coordinator, + loop=loop, ) except ConnectionRefusedError: - logger.fatal('Unable to connect to labgrid crossbar') + logger.fatal('Unable to connect to labgrid coordinator') return server = uvicorn.Server(config=uvicorn.Config( - loop=session.loop, + loop=loop, host='0.0.0.0', port=args.port, 
app=app, @@ -153,7 +158,11 @@ def main(): if route.path not in reserved_routes: logger.info(f' - {route.path}') - session.loop.run_until_complete(server.serve()) + try: + loop.run_until_complete(server.serve()) + finally: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) if __name__ == '__main__': From 5a49429b7a246292365d156ec95258f0d4b87365 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 2 Aug 2024 16:46:15 +0200 Subject: [PATCH 277/384] contrib/sync-places: migrate to gRPC Signed-off-by: Bastian Krause --- contrib/sync-places.py | 52 ++++++++++++++++++++++++++++-------------- 1 file changed, 35 insertions(+), 17 deletions(-) diff --git a/contrib/sync-places.py b/contrib/sync-places.py index d3d74f86d..94ea15734 100755 --- a/contrib/sync-places.py +++ b/contrib/sync-places.py @@ -15,8 +15,10 @@ # limitations under the License. import argparse +import asyncio from contextlib import contextmanager from labgrid.remote.client import start_session +from labgrid.remote.generated import labgrid_coordinator_pb2 from labgrid.util.proxy import proxymanager import os import sys @@ -51,14 +53,20 @@ async def do_sync(session, args): for name in remove_places: print(f"Removing place {name}") if not args.dry_run: - await session.call("org.labgrid.coordinator.del_place", name) + request = labgrid_coordinator_pb2.DeletePlaceRequest(name=name) + await session.stub.DeletePlace(request) + await session.sync_with_coordinator() + changed = True for name in config["places"]: if not name in seen_places: print(f"Adding place {name}") if not args.dry_run: - await session.call("org.labgrid.coordinator.add_place", name) + request = labgrid_coordinator_pb2.AddPlaceRequest(name=name) + await session.stub.AddPlace(request) + await session.sync_with_coordinator() + changed = True for name in config["places"]: @@ -89,9 +97,10 @@ async def do_sync(session, args): else: print(f"Deleting match '{match}' for place {name}") if not args.dry_run: - await session.call( - "org.labgrid.coordinator.del_place_match", name, match, rename - ) + request = labgrid_coordinator_pb2.DeletePlaceMatchRequest(placename=name, pattern=match) + await session.stub.DeletePlaceMatch(request) + await session.sync_with_coordinator() + changed = True for m in matches: @@ -103,9 +112,9 @@ async def do_sync(session, args): print(f"Adding match '{match}' for place {name}") if not args.dry_run: - await session.call( - "org.labgrid.coordinator.add_place_match", name, match, rename - ) + request = labgrid_coordinator_pb2.AddPlaceMatchRequest(placename=name, pattern=match, rename=rename) + await session.stub.AddPlaceMatch(request) + await session.sync_with_coordinator() changed = True tags = config["places"][name].get("tags", {}).copy() @@ -131,9 +140,10 @@ async def do_sync(session, args): tags[k] = "" if not args.dry_run: - await session.call( - "org.labgrid.coordinator.set_place_tags", name, tags - ) + request = labgrid_coordinator_pb2.SetPlaceTagsRequest(placename=name, tags=tags) + await session.stub.SetPlaceTags(request) + await session.sync_with_coordinator() + changed = True async def do_dump(session, args): @@ -174,11 +184,11 @@ async def do_dump(session, args): formatter_class=argparse.RawDescriptionHelpFormatter, ) parser.add_argument( - "--crossbar", + "--coordinator", "-x", - metavar="URL", - default=os.environ.get("LG_CROSSBAR", "ws://127.0.0.1:20408/ws"), - help="Crossbar websocket URL (default: %(default)s)", + metavar="ADDRESS", + default=os.environ.get("LG_COORDINATOR", "127.0.0.1:20408"), + 
help="Coordinator address as HOST[:PORT] (default: %(default)s)", ) parser.add_argument("--proxy", "-P", help="Proxy connections via given ssh host") @@ -219,11 +229,19 @@ async def do_dump(session, args): if args.proxy: proxymanager.force_proxy(args.proxy) + loop = asyncio.new_event_loop() + asyncio.set_event_loop(loop) + session = start_session( - args.crossbar, os.environ.get("LG_CROSSBAR_REALM", "realm1"), {} + args.coordinator, + loop=loop, ) - return session.loop.run_until_complete(args.func(session, args)) + try: + return loop.run_until_complete(args.func(session, args)) + finally: + loop.run_until_complete(session.stop()) + loop.run_until_complete(session.close()) if __name__ == "__main__": From 13969df4fade306021028a71c3adf9e9abd1fccd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 7 Aug 2024 15:26:38 +0200 Subject: [PATCH 278/384] examples: migrate to gRPC Signed-off-by: Bastian Krause --- examples/deditec-relais8/import-dedicontrol.yaml | 2 +- examples/networkmanager/nm.env | 2 +- examples/sysfsgpio/import-gpio.yaml | 2 +- examples/usbpower/README.rst | 6 ++---- 4 files changed, 5 insertions(+), 7 deletions(-) diff --git a/examples/deditec-relais8/import-dedicontrol.yaml b/examples/deditec-relais8/import-dedicontrol.yaml index 0e8607962..9af4776e9 100644 --- a/examples/deditec-relais8/import-dedicontrol.yaml +++ b/examples/deditec-relais8/import-dedicontrol.yaml @@ -6,4 +6,4 @@ targets: drivers: DeditecRelaisDriver: {} options: - crossbar_url: 'ws://labgrid:20408/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/networkmanager/nm.env b/examples/networkmanager/nm.env index c96ef21da..be767f2fb 100644 --- a/examples/networkmanager/nm.env +++ b/examples/networkmanager/nm.env @@ -6,4 +6,4 @@ targets: drivers: NetworkInterfaceDriver: {} options: - crossbar_url: 'ws://labgrid/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/sysfsgpio/import-gpio.yaml b/examples/sysfsgpio/import-gpio.yaml index 76c9c285f..4ba7b223f 100644 --- a/examples/sysfsgpio/import-gpio.yaml +++ b/examples/sysfsgpio/import-gpio.yaml @@ -6,4 +6,4 @@ targets: drivers: GpioDigitalOutputDriver: {} options: - crossbar_url: 'ws://labgrid:20408/ws' + coordinator_address: 'labgrid:20408' diff --git a/examples/usbpower/README.rst b/examples/usbpower/README.rst index 552f933c8..f427d42d3 100644 --- a/examples/usbpower/README.rst +++ b/examples/usbpower/README.rst @@ -40,8 +40,6 @@ on port 3. Software Setup -------------- -The following expects that labgrid is installed in the -active virtualenv and crossbar is installed into a separate virtualenv. The ``uhubctl`` and ``usbsdmux`` tools need to be installed on the system. Library Example @@ -116,9 +114,9 @@ Remote Setup ------------ To access resources remotely, you first need to start the coordinator:: - $ crossbar-venv/bin/crossbar start --logformat none --config config-anonymous.yaml + $ labgrid-coordinator [...] - Coordinator ready. 
+ Coordinator ready Then, you need to start the exporter:: $ labgrid-exporter exports.yaml From c613f1e55e08e12da4ceff3d6b9e797b97a0c97f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 11:43:42 +0200 Subject: [PATCH 279/384] dockerfiles: migrate to gRPC Signed-off-by: Bastian Krause --- .gitignore | 4 +-- dockerfiles/Dockerfile | 27 +++++++------------ dockerfiles/README.rst | 20 +++++++------- .../places_example.yaml | 0 dockerfiles/staging/docker-compose.yml | 6 ++--- 5 files changed, 24 insertions(+), 33 deletions(-) rename dockerfiles/staging/{crossbar => coordinator}/places_example.yaml (100%) diff --git a/.gitignore b/.gitignore index cc74652b4..dad0e7c90 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,6 @@ /.pytest_cache/ /htmlcov/ /labgrid/_version.py -/dockerfiles/staging/crossbar/* -!/dockerfiles/staging/crossbar/places_example.yaml +/dockerfiles/staging/coordinator/* +!/dockerfiles/staging/coordinator/places_example.yaml /.idea diff --git a/dockerfiles/Dockerfile b/dockerfiles/Dockerfile index 1b749fe4f..15fd5eb1f 100644 --- a/dockerfiles/Dockerfile +++ b/dockerfiles/Dockerfile @@ -1,4 +1,5 @@ FROM debian:bookworm-slim AS labgrid-base +ARG VERSION LABEL maintainer="eha@deif.com" @@ -8,10 +9,12 @@ COPY ./ /opt/labgrid/ RUN set -e ;\ apt update -q=2 ;\ - apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential libsnappy-dev ;\ + apt install -q=2 --yes --no-install-recommends python3 python3-dev python3-pip python3-setuptools git build-essential ;\ pip3 install --break-system-packages -U pip;\ apt clean ;\ - rm -rf /var/lib/apt/lists/* + rm -rf /var/lib/apt/lists/* ;\ + cd /opt/labgrid ;\ + SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . # # Client @@ -20,9 +23,7 @@ FROM labgrid-base AS labgrid-client ARG VERSION RUN set -e ;\ - cd /opt/labgrid ;\ pip3 install --break-system-packages yq ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ apt update -q=2 ;\ apt install -q=2 --yes --no-install-recommends microcom openssh-client rsync jq qemu-system qemu-utils ;\ apt clean ;\ @@ -36,21 +37,13 @@ CMD ["/bin/bash"] FROM labgrid-base AS labgrid-coordinator ARG VERSION -ENV CROSSBAR_DIR=/opt/crossbar - -RUN set -e ;\ - cd /opt/labgrid ;\ - pip3 install --break-system-packages virtualenv ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . ;\ - virtualenv -p python3 crossbar-venv ;\ - crossbar-venv/bin/pip3 install --break-system-packages -r crossbar-requirements.txt ;\ - sed -i "s#^ executable: .*\$# executable: python3#" .crossbar/config-anonymous.yaml - -VOLUME /opt/crossbar +VOLUME /opt/coordinator EXPOSE 20408 -CMD ["/opt/labgrid/crossbar-venv/bin/crossbar", "start", "--config", "/opt/labgrid/.crossbar/config-anonymous.yaml"] +WORKDIR /opt/coordinator + +CMD ["/usr/local/bin/labgrid-coordinator"] # # Exporter @@ -61,8 +54,6 @@ ARG VERSION COPY dockerfiles/exporter/entrypoint.sh /entrypoint.sh RUN set -e ;\ - cd /opt/labgrid ;\ - SETUPTOOLS_SCM_PRETEND_VERSION="$VERSION" pip3 install --break-system-packages --no-cache-dir . 
;\
     apt update -q=2 ;\
     apt install -q=2 --yes --no-install-recommends ser2net ;\
     apt clean ;\

diff --git a/dockerfiles/README.rst b/dockerfiles/README.rst
index 8c2e31439..b0f8ff150 100644
--- a/dockerfiles/README.rst
+++ b/dockerfiles/README.rst
@@ -5,7 +5,7 @@ This folder contains Dockerfile's for building Docker images for the
 3 different components of a Labgrid distributed infrastructure.
 
 - **labgrid-coordinator**
-  An image for with crossbar which can be used to run
+  An image which can be used to run
   a Labgrid coordinator instance.
 - **labgrid-client**
   An image with the Labgrid client tools and pytest integration.
@@ -64,18 +64,18 @@ No policy or configuration is done.
 labgrid-coordinator usage
 ~~~~~~~~~~~~~~~~~~~~~~~~~
 
-The labgrid-coordinator comes with a preconfigured Crossbar.io server.
+The labgrid-coordinator image can be used to run a coordinator instance.
 
-It listens to port 20408,
+It listens on port 20408,
 so you probably want to publish that
 so you can talk to the coordinator.
 
-State is written to ``/opt/crossbar``.
+State is written to ``/opt/coordinator``.
 You might want to bind a volume to that
-so you can restart the service without loosing state.
+so you can restart the service without losing state.
 
 .. code-block:: bash
 
-   $ docker run -t -p 20408:20408 -v $HOME/crossbar:/opt/crossbar \
+   $ docker run -t -p 20408:20408 -v $HOME/coordinator:/opt/coordinator \
        docker.io/labgrid/coordinator
 
@@ -85,18 +85,18 @@ labgrid-client usage
 
 The labgrid-client image can be used to run ``labgrid-client`` and ``pytest``
 commands.
 
 For example listing available places registered at coordinator at
-ws://192.168.1.42:20408/ws
+192.168.1.42:20408
 
 .. code-block:: bash
 
-   $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \
+   $ docker run -e LG_COORDINATOR=192.168.1.42:20408 docker.io/labgrid/client \
        labgrid-client places
 
 Or running all pytest/labgrid tests at current directory:
 
 .. code-block:: bash
 
-   $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws docker.io/labgrid/client \
+   $ docker run -e LG_COORDINATOR=192.168.1.42:20408 docker.io/labgrid/client \
        pytest
 
@@ -113,7 +113,7 @@
 Start it with something like:
 
 ..
code-block:: bash - $ docker run -e LG_CROSSBAR=ws://192.168.1.42:20408/ws \ + $ docker run -e LG_COORDINATOR=192.168.1.42:20408 \ -v $HOME/exporter-conf:/opt/conf \ docker.io/labgrid/exporter diff --git a/dockerfiles/staging/crossbar/places_example.yaml b/dockerfiles/staging/coordinator/places_example.yaml similarity index 100% rename from dockerfiles/staging/crossbar/places_example.yaml rename to dockerfiles/staging/coordinator/places_example.yaml diff --git a/dockerfiles/staging/docker-compose.yml b/dockerfiles/staging/docker-compose.yml index cb3547802..b78c648b6 100644 --- a/dockerfiles/staging/docker-compose.yml +++ b/dockerfiles/staging/docker-compose.yml @@ -2,11 +2,11 @@ services: coordinator: image: "${IMAGE_PREFIX:-docker.io/labgrid/}coordinator" volumes: - - "./crossbar:/home/root/crossbar" + - "./coordinator:/home/root/coordinator" tty: true network_mode: "host" - command: bash -c "cp /home/root/crossbar/places_example.yaml /opt/crossbar/places.yaml && - /opt/labgrid/crossbar-venv/bin/crossbar start --config /opt/labgrid/.crossbar/config-anonymous.yaml" + command: bash -c "cp /home/root/coordinator/places_example.yaml /opt/coordinator/places.yaml && + /usr/local/bin/labgrid-coordinator" client: image: "${IMAGE_PREFIX:-docker.io/labgrid/}client" volumes: From 81f5801cdbaebba8e78735a3639656e82e6f6875 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 18:20:49 +0200 Subject: [PATCH 280/384] man: update for gRPC Signed-off-by: Bastian Krause Signed-off-by: Rouven Czerwinski --- debian/labgrid.manpages | 1 + man/Makefile | 4 +- man/labgrid-client.1 | 14 +++---- man/labgrid-client.rst | 17 +++------ man/labgrid-coordinator.1 | 70 +++++++++++++++++++++++++++++++++++ man/labgrid-coordinator.rst | 49 ++++++++++++++++++++++++ man/labgrid-device-config.5 | 12 ++---- man/labgrid-device-config.rst | 12 ++---- man/labgrid-exporter.1 | 14 +++---- man/labgrid-exporter.rst | 17 +++------ 10 files changed, 152 insertions(+), 58 deletions(-) create mode 100644 man/labgrid-coordinator.1 create mode 100644 man/labgrid-coordinator.rst diff --git a/debian/labgrid.manpages b/debian/labgrid.manpages index eb6e10245..b72c77540 100644 --- a/debian/labgrid.manpages +++ b/debian/labgrid.manpages @@ -1,4 +1,5 @@ man/labgrid-client.1 +man/labgrid-coordinator.1 man/labgrid-exporter.1 man/labgrid-suggest.1 man/labgrid-device-config.5 diff --git a/man/Makefile b/man/Makefile index a31910b9f..4c1c1accc 100644 --- a/man/Makefile +++ b/man/Makefile @@ -1,5 +1,5 @@ -PAGES := labgrid-client.1 labgrid-exporter.1 labgrid-device-config.5 labgrid-pytest.7 labgrid-suggest.1 -COMPRESSED := labgrid-client.1.gz labgrid-exporter.1.gz labgrid-device-config.5.gz labgrid-pytest.7.gz labgrid-suggest.1.gz +PAGES := labgrid-client.1 labgrid-coordinator.1 labgrid-exporter.1 labgrid-device-config.5 labgrid-pytest.7 labgrid-suggest.1 +COMPRESSED := labgrid-client.1.gz labgrid-coordinator.1.gz labgrid-exporter.1.gz labgrid-device-config.5.gz labgrid-pytest.7.gz labgrid-suggest.1.gz %.1 %.5 %.7: %.rst rst2man.py $< >$@.tmp diff --git a/man/labgrid-client.1 b/man/labgrid-client.1 index c5772ae0b..52db032db 100644 --- a/man/labgrid-client.1 +++ b/man/labgrid-client.1 @@ -51,8 +51,8 @@ display command line help .BI \-p \ PLACE\fR,\fB \ \-\-place \ PLACE specify the place to operate on .TP -.BI \-x \ URL\fR,\fB \ \-\-crossbar \ URL -the crossbar url of the coordinator, defaults to \fBws://127.0.0.1:20408/ws\fP +.BI \-x \ ADDRESS\fR,\fB \ \-\-coordinator \ ADDRESS +coordinator \fBHOST[:PORT]\fP to connect to, defaults to 
\fB127.0.0.1:20408\fP .TP .BI \-c \ CONFIG\fR,\fB \ \-\-config \ CONFIG set the configuration file @@ -103,14 +103,10 @@ A desired state must be set using \fBLG_STATE\fP or \fB\-s\fP/\fB\-\-state\fP\&. .sp This variable can be used to specify the configuration file to use without using the \fB\-\-config\fP option, the \fB\-\-config\fP option overrides it. -.SS LG_CROSSBAR +.SS LG_COORDINATOR .sp -This variable can be used to set the default crossbar URL (instead of using the -\fB\-x\fP option). -.SS LG_CROSSBAR_REALM -.sp -This variable can be used to set the default crossbar realm to use instead of -\fBrealm1\fP\&. +This variable can be used to set the default coordinator in the format +\fBHOST[:PORT]\fP (instead of using the \fB\-x\fP option). .SS LG_PROXY .sp This variable can be used to specify a SSH proxy hostname which should be used diff --git a/man/labgrid-client.rst b/man/labgrid-client.rst index 27259bfed..43b76f663 100644 --- a/man/labgrid-client.rst +++ b/man/labgrid-client.rst @@ -38,8 +38,8 @@ OPTIONS display command line help -p PLACE, --place PLACE specify the place to operate on --x URL, --crossbar URL - the crossbar url of the coordinator, defaults to ``ws://127.0.0.1:20408/ws`` +-x ADDRESS, --coordinator ADDRESS + coordinator ``HOST[:PORT]`` to connect to, defaults to ``127.0.0.1:20408`` -c CONFIG, --config CONFIG set the configuration file -s STATE, --state STATE @@ -91,15 +91,10 @@ LG_ENV This variable can be used to specify the configuration file to use without using the ``--config`` option, the ``--config`` option overrides it. -LG_CROSSBAR -~~~~~~~~~~~ -This variable can be used to set the default crossbar URL (instead of using the -``-x`` option). - -LG_CROSSBAR_REALM -~~~~~~~~~~~~~~~~~ -This variable can be used to set the default crossbar realm to use instead of -``realm1``. +LG_COORDINATOR +~~~~~~~~~~~~~~ +This variable can be used to set the default coordinator in the format +``HOST[:PORT]`` (instead of using the ``-x`` option). LG_PROXY ~~~~~~~~ diff --git a/man/labgrid-coordinator.1 b/man/labgrid-coordinator.1 new file mode 100644 index 000000000..22673a065 --- /dev/null +++ b/man/labgrid-coordinator.1 @@ -0,0 +1,70 @@ +.\" Man page generated from reStructuredText. +. +. +.nr rst2man-indent-level 0 +. +.de1 rstReportMargin +\\$1 \\n[an-margin] +level \\n[rst2man-indent-level] +level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] +- +\\n[rst2man-indent0] +\\n[rst2man-indent1] +\\n[rst2man-indent2] +.. +.de1 INDENT +.\" .rstReportMargin pre: +. RS \\$1 +. nr rst2man-indent\\n[rst2man-indent-level] \\n[an-margin] +. nr rst2man-indent-level +1 +.\" .rstReportMargin post: +.. +.de UNINDENT +. RE +.\" indent \\n[an-margin] +.\" old: \\n[rst2man-indent\\n[rst2man-indent-level]] +.nr rst2man-indent-level -1 +.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] +.in \\n[rst2man-indent\\n[rst2man-indent-level]]u +.. +.TH "LABGRID-COORDINATOR" 1 "2024-08-06" "0.0.1" "embedded testing" +.SH NAME +labgrid-coordinator \- labgrid-coordinator managing labgrid resources and places +.SH SYNOPSIS +.sp +\fBlabgrid\-coordinator\fP \fB\-\-help\fP +.SH DESCRIPTION +.sp +Labgrid is a scalable infrastructure and test architecture for embedded (linux) +systems. +.sp +This is the man page for the coordinator. Clients and exporters connect to the +coordinator to publish resources, manage place configuration and handle mutual +exclusion. 
+.SH OPTIONS +.INDENT 0.0 +.TP +.B \-h\fP,\fB \-\-help +display command line help +.TP +.BI \-l \ ADDRESS\fR,\fB \ \-\-listen \ ADDRESS +make coordinator listen on host and port +.TP +.B \-d\fP,\fB \-\-debug +enable debug mode +.UNINDENT +.SH SEE ALSO +.sp +\fBlabgrid\-client\fP(1), \fBlabgrid\-exporter\fP(1) +.SH AUTHOR +Rouven Czerwinski + +Organization: Labgrid-Project +.SH COPYRIGHT +Copyright (C) 2016-2024 Pengutronix. This library is free software; +you can redistribute it and/or modify it under the terms of the GNU +Lesser General Public License as published by the Free Software +Foundation; either version 2.1 of the License, or (at your option) +any later version. +.\" Generated by docutils manpage writer. +. diff --git a/man/labgrid-coordinator.rst b/man/labgrid-coordinator.rst new file mode 100644 index 000000000..786059196 --- /dev/null +++ b/man/labgrid-coordinator.rst @@ -0,0 +1,49 @@ +===================== + labgrid-coordinator +===================== + +labgrid-coordinator managing labgrid resources and places +========================================================= + + +:Author: Rouven Czerwinski +:organization: Labgrid-Project +:Date: 2024-08-06 +:Copyright: Copyright (C) 2016-2024 Pengutronix. This library is free software; + you can redistribute it and/or modify it under the terms of the GNU + Lesser General Public License as published by the Free Software + Foundation; either version 2.1 of the License, or (at your option) + any later version. +:Version: 0.0.1 +:Manual section: 1 +:Manual group: embedded testing + + + +SYNOPSIS +-------- + +``labgrid-coordinator`` ``--help`` + +DESCRIPTION +----------- +Labgrid is a scalable infrastructure and test architecture for embedded (linux) +systems. + +This is the man page for the coordinator. Clients and exporters connect to the +coordinator to publish resources, manage place configuration and handle mutual +exclusion. + +OPTIONS +------- +-h, --help + display command line help +-l ADDRESS, --listen ADDRESS + make coordinator listen on host and port +-d, --debug + enable debug mode + +SEE ALSO +-------- + +``labgrid-client``\(1), ``labgrid-exporter``\(1) diff --git a/man/labgrid-device-config.5 b/man/labgrid-device-config.5 index 13943611c..f6de7a954 100644 --- a/man/labgrid-device-config.5 +++ b/man/labgrid-device-config.5 @@ -52,17 +52,13 @@ For a list of available resources and drivers refer to \fI\%https://labgrid.readthedocs.io/en/latest/configuration.html\fP\&. .SH OPTIONS .sp -The \fBoptions:\fP top key configures various options such as the crossbar_url. +The \fBoptions:\fP top key configures various options such as the coordinator_address. .SS OPTIONS KEYS .INDENT 0.0 .TP -.B \fBcrossbar_url\fP -takes as parameter the URL of the crossbar (coordinator) to connect to. -Defaults to \(aqws://127.0.0.1:20408\(aq. -.TP -.B \fBcrossbar_realm\fP -takes as parameter the realm of the crossbar (coordinator) to connect to. -Defaults to \(aqrealm1\(aq. +.B \fBcoordinator_address\fP +takes as parameter the coordinator \fBHOST[:PORT]\fP to connect to. +Defaults to \fB127.0.0.1:20408\fP\&. .UNINDENT .SH IMAGES .sp diff --git a/man/labgrid-device-config.rst b/man/labgrid-device-config.rst index 216a657bd..ba0156830 100644 --- a/man/labgrid-device-config.rst +++ b/man/labgrid-device-config.rst @@ -47,18 +47,14 @@ https://labgrid.readthedocs.io/en/latest/configuration.html. OPTIONS ------- -The ``options:`` top key configures various options such as the crossbar_url. 
+The ``options:`` top key configures various options such as the coordinator_address. OPTIONS KEYS ~~~~~~~~~~~~ -``crossbar_url`` - takes as parameter the URL of the crossbar (coordinator) to connect to. - Defaults to 'ws://127.0.0.1:20408'. - -``crossbar_realm`` - takes as parameter the realm of the crossbar (coordinator) to connect to. - Defaults to 'realm1'. +``coordinator_address`` + takes as parameter the coordinator ``HOST[:PORT]`` to connect to. + Defaults to ``127.0.0.1:20408``. .. _labgrid-device-config-images: diff --git a/man/labgrid-exporter.1 b/man/labgrid-exporter.1 index faf836daa..66d1d69e0 100644 --- a/man/labgrid-exporter.1 +++ b/man/labgrid-exporter.1 @@ -47,8 +47,8 @@ USB devices and various other controllers. .B \-h\fP,\fB \-\-help display command line help .TP -.B \-x\fP,\fB \-\-crossbar -the crossbar url of the coordinator +.B \-x\fP,\fB \-\-coordinator +coordinator \fBHOST[:PORT]\fP to connect to, defaults to \fB127.0.0.1:20408\fP .TP .B \-i\fP,\fB \-\-isolated enable isolated mode (always request SSH forwards) @@ -100,14 +100,10 @@ for more information. .SH ENVIRONMENT VARIABLES .sp The following environment variable can be used to configure labgrid\-exporter. -.SS LG_CROSSBAR +.SS LG_COORDINATOR .sp -This variable can be used to set the default crossbar URL (instead of using the -\fB\-x\fP option). -.SS LG_CROSSBAR_REALM -.sp -This variable can be used to set the default crossbar realm to use instead of -\fBrealm1\fP\&. +This variable can be used to set the default coordinator in the format +\fBHOST[:PORT]\fP (instead of using the \fB\-x\fP option). .SH EXAMPLES .sp Start the exporter with the configuration file \fImy\-config.yaml\fP: diff --git a/man/labgrid-exporter.rst b/man/labgrid-exporter.rst index f43754ca1..bf3ec50d0 100644 --- a/man/labgrid-exporter.rst +++ b/man/labgrid-exporter.rst @@ -38,8 +38,8 @@ OPTIONS ------- -h, --help display command line help --x, --crossbar - the crossbar url of the coordinator +-x, --coordinator + coordinator ``HOST[:PORT]`` to connect to, defaults to ``127.0.0.1:20408`` -i, --isolated enable isolated mode (always request SSH forwards) -n, --name @@ -92,15 +92,10 @@ ENVIRONMENT VARIABLES --------------------- The following environment variable can be used to configure labgrid-exporter. -LG_CROSSBAR -~~~~~~~~~~~ -This variable can be used to set the default crossbar URL (instead of using the -``-x`` option). - -LG_CROSSBAR_REALM -~~~~~~~~~~~~~~~~~ -This variable can be used to set the default crossbar realm to use instead of -``realm1``. +LG_COORDINATOR +~~~~~~~~~~~~~~ +This variable can be used to set the default coordinator in the format +``HOST[:PORT]`` (instead of using the ``-x`` option). 
EXAMPLES -------- From e0167aeebc8119a01673a5276ecfdaa402af9f3f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 8 Aug 2024 18:22:12 +0200 Subject: [PATCH 281/384] doc: update for gRPC Signed-off-by: Bastian Krause Signed-off-by: Rouven Czerwinski --- doc/RELEASE.rst | 6 +--- doc/development.rst | 2 +- doc/getting_started.rst | 63 ++++------------------------------------- doc/man.rst | 1 + doc/man/coordinator.rst | 2 ++ doc/overview.rst | 13 ++++----- doc/usage.rst | 8 +++--- 7 files changed, 20 insertions(+), 75 deletions(-) create mode 100644 doc/man/coordinator.rst diff --git a/doc/RELEASE.rst b/doc/RELEASE.rst index da1b2f41d..886746ba5 100644 --- a/doc/RELEASE.rst +++ b/doc/RELEASE.rst @@ -66,10 +66,6 @@ Test the upload by using pypi dev as a download source :: - virtualenv -p python3 labgrid-crossbar-release- - labgrid-crossbar-release-/bin/pip install --upgrade pip - labgrid-crossbar-release-/bin/pip install -r crossbar-requirements.txt - virtualenv -p python3 labgrid-release- source labgrid-release-/bin/activate pip install --upgrade pip setuptools wheel @@ -80,7 +76,7 @@ And optionally run the tests: :: pip install ".[dev]" - pytest tests --crossbar-venv labgrid-crossbar-release- + pytest tests 7. Upload to pypi ================= diff --git a/doc/development.rst b/doc/development.rst index 8b0351076..5a58abb52 100644 --- a/doc/development.rst +++ b/doc/development.rst @@ -23,7 +23,7 @@ Install required dependencies: .. code-block:: bash - sudo apt install python3-dev libow-dev libsnappy-dev + sudo apt install python3-dev libow-dev Install labgrid with development dependencies into the virtualenv in editable mode: diff --git a/doc/getting_started.rst b/doc/getting_started.rst index 7d1990bb5..c27d6b161 100644 --- a/doc/getting_started.rst +++ b/doc/getting_started.rst @@ -62,7 +62,7 @@ Test your installation by running: .. code-block:: bash labgrid-venv $ labgrid-client --help - usage: labgrid-client [-h] [-x URL] [-c CONFIG] [-p PLACE] [-d] COMMAND ... + usage: labgrid-client [-h] [-x ADDRESS] [-c CONFIG] [-p PLACE] [-d] COMMAND ... ... If the help for labgrid-client does not show up, open an `Issue @@ -170,58 +170,11 @@ exporter, and learn how to access the exporter via the client. Coordinator ~~~~~~~~~~~ -To start the coordinator, we will download the labgrid repository, create an -extra virtualenv and install the dependencies: +We can simply start the coordinator: .. code-block:: bash - $ sudo apt install libsnappy-dev - $ git clone https://github.com/labgrid-project/labgrid - $ cd labgrid - $ virtualenv -p python3 crossbar-venv - $ crossbar-venv/bin/pip install --upgrade pip - $ crossbar-venv/bin/pip install -r crossbar-requirements.txt - $ virtualenv -p python3 labgrid-venv - $ source labgrid-venv/bin/activate - labgrid-venv $ pip install --upgrade pip - labgrid-venv $ pip install . - -All necessary dependencies should be installed now. - -Copy and customize the crossbar config file ``.crossbar/config-anonymous.yaml`` -for your use case: - -.. code-block:: bash - - labgrid-venv $ cp .crossbar/config-anonymous.yaml .crossbar/my-config.yaml - -.. note:: crossbar is a network messaging framework for building distributed - applications, which labgrid plugs into. - -The path to the Python interpreter in the labgrid-venv needs to be configured -in crossbar's config, either manually or with the labgrid-venv being active -via: - -.. code-block:: bash - - labgrid-venv $ sed -i "s#^ executable: .*\$# executable: ${VIRTUAL_ENV}/bin/python3#" .crossbar/my-config.yaml - -.. 
note:: For long running deployments a different ``workdir`` and port may be
-   used.
-   The crossbar config should reside in a ``.crossbar`` directory in the
-   ``workdir`` in this case.
-   For an example systemd service file, see
-   :ref:`remote-getting-started-systemd-files`.
-
-Now we can finally start the coordinator inside the repository:
-
-.. code-block:: bash
-
-   $ crossbar-venv/bin/crossbar start --config my-config.yaml
-
-.. note:: If --config is specified as a relative path, the config is expected
-          in a .crossbar subdirectory (as is the case in the labgrid
-          repository).
+   labgrid-venv $ labgrid-coordinator
 
 Exporter
 ~~~~~~~~
@@ -375,25 +328,19 @@ Follow these instructions to install the systemd files on your machine(s):
    installation paths of your distribution.
 #. Adapt the ``ExecStart`` paths of the service files to the respective Python
    virtual environments of the coordinator and exporter.
-#. Create the coordinator configuration file referenced in the ``ExecStart``
-   option of the :file:`labgrid-coordinator.service` file by using
-   :file:`.crossbar/config-anonymous.yaml` as a starting point. You most likely
-   want to make sure that the ``workdir`` option matches the path given via the
-   ``--cbdir`` option in the service file; see
-   :ref:`remote-getting-started-coordinator` for further information.
 #. Adjust the ``SupplementaryGroups`` option in the
    :file:`labgrid-exporter.service` file to your distribution so that the
    exporter gains read and write access on TTY devices (for ``ser2net``); most
    often, these groups are called ``dialout``, ``plugdev`` or ``tty``.
    Depending on your udev configuration, you may need multiple groups.
-#. Set the coordinator URL the exporter should connect to by overriding the
+#. Set the coordinator address the exporter should connect to by overriding the
   exporter service file; i.e. execute ``systemctl edit labgrid-exporter.service``
   and add the following snippet:
 
   .. code-block::
 
      [Service]
-     Environment="LG_CROSSBAR=ws://<host>:<port>/ws"
+     Environment="LG_COORDINATOR=<host>[:<port>]"
 
 #. Create the ``labgrid`` user and group:

diff --git a/doc/man.rst b/doc/man.rst
index 0523f301a..e6d6f6360 100644
--- a/doc/man.rst
+++ b/doc/man.rst
@@ -5,3 +5,4 @@ Manual Pages
    man/client
    man/device-config
    man/exporter
+   man/coordinator

diff --git a/doc/man/coordinator.rst b/doc/man/coordinator.rst
new file mode 100644
index 000000000..c1c7afbd7
--- /dev/null
+++ b/doc/man/coordinator.rst
@@ -0,0 +1,2 @@
+.. _labgrid-coordinator:
+.. include:: ../../man/labgrid-coordinator.rst

diff --git a/doc/overview.rst b/doc/overview.rst
index 8152406bf..8d69d85f8 100644
--- a/doc/overview.rst
+++ b/doc/overview.rst
@@ -210,7 +210,7 @@ labgrid contains components for accessing resources which are not directly
 accessible on the local machine.
 The main parts of this are:
 
-labgrid-coordinator (crossbar component)
+labgrid-coordinator
   Clients and exporters connect to the coordinator to publish resources,
   manage place configuration and handle mutual exclusion.
 
@@ -227,9 +227,8 @@ RemotePlace (managed resource)
   When used in a `Target`, the RemotePlace expands to the resources configured
   for the named places.
 
-These components communicate over the `WAMP <https://wamp-proto.org/>`_
-implementation `Autobahn <https://crossbar.io/autobahn/>`_ and the `Crossbar
-<https://crossbar.io/>`_ WAMP router.
+These components communicate over `gRPC <https://grpc.io/>`_. The coordinator
+acts as a gRPC server to which client and exporter connect.
 
 The following sections describe the responsibilities of each component. See
 :ref:`remote-usage` for usage information.
@@ -239,8 +238,8 @@ The following sections describe the responsibilities of each component. See Coordinator ~~~~~~~~~~~ -The `Coordinator` is implemented as a Crossbar component and is started by the -router. +The `Coordinator` is implemented as a gRPC server and is started as a separate +process. It provides separate RPC methods for the exporters and clients. The coordinator keeps a list of all resources for clients and @@ -387,7 +386,7 @@ variable needs to be set to the remote host which should tunnel the connection to the coordinator. The client then forwards all network traffic - client-to-coordinator and client-to-exporter - through SSH, via their respective proxies. This means that with :code:`LG_PROXY` and -:code:`LG_CROSSBAR` labgrid can be used fully remotely with only a SSH +:code:`LG_COORDINATOR` labgrid can be used fully remotely with only a SSH connection as a requirement. .. note:: diff --git a/doc/usage.rst b/doc/usage.rst index 4caff26bc..9d423d50b 100644 --- a/doc/usage.rst +++ b/doc/usage.rst @@ -437,10 +437,10 @@ Other labgrid-related pytest plugin options are: Specify a labgrid environment config file. This is equivalent to labgrid-client's ``-c``/``--config``. -``--lg-coordinator=CROSSBAR_URL`` - Specify labgrid coordinator websocket URL. - Defaults to ``ws://127.0.0.1:20408/ws``. - This is equivalent to labgrid-client's ``-x``/``--crossbar``. +``--lg-coordinator=COORDINATOR_ADDRESS`` + Specify labgrid coordinator gRPC address as ``HOST[:PORT]``. + Defaults to ``127.0.0.1:20408``. + This is equivalent to labgrid-client's ``-x``/``--coordinator``. ``--lg-log=[path to logfiles]`` Path to store console log file. From 045634b25f1e489b9513f9d0ce64edaee5250e7b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 22 Jul 2024 11:53:08 +0200 Subject: [PATCH 282/384] github/workflows/reusable-unit-tests: skip pylint for python3.8 pylint finds false positives for Python 3.8: labgrid/remote/coordinator.py:188:21: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) labgrid/remote/coordinator.py:194:24: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) labgrid/remote/coordinator.py:195:22: E1136: Value 'dict' is unsubscriptable (unsubscriptable-object) Since Python 3.8 reaches EOL in October 2024 anyways, let's just skip pylint for this version until then. Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 15be56078..852cd1a26 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -47,6 +47,7 @@ jobs: run: | pip install -e ".[dev]" - name: Lint with pylint + if: inputs.python-version != '3.8' run: | pylint --list-msgs-enabled pylint labgrid From bf4379c9f2e01ba844a70d4be60b972110f770fa Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 7 Aug 2024 14:11:38 +0200 Subject: [PATCH 283/384] pyproject: advertise python3.12 compatibility The latest crossbar release was not compatible with python3.12. Now that the crossbar dependency is gone, we can finally advertise python3.12 compatibility. 
Closes #1260 Signed-off-by: Bastian Krause --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index 590fcb7c7..b06d84ea4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)", ] dependencies = [ @@ -226,7 +227,7 @@ signature-mutators = ["labgrid.step.step"] [tool.tox] legacy_tox_ini = """ [tox] -envlist = py38, py39, py310, py311 +envlist = py38, py39, py310, py311, py312 isolated_build = true [testenv] From 0ea9fe597db806f77f9c8b7dc1b9f46a9d1fe034 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:38:34 +0200 Subject: [PATCH 284/384] debian: install labgrid-coordinator Signed-off-by: Bastian Krause --- debian/labgrid-coordinator | 3 +++ debian/labgrid.install | 1 + 2 files changed, 4 insertions(+) create mode 100755 debian/labgrid-coordinator diff --git a/debian/labgrid-coordinator b/debian/labgrid-coordinator new file mode 100755 index 000000000..54a34440b --- /dev/null +++ b/debian/labgrid-coordinator @@ -0,0 +1,3 @@ +#!/bin/sh + +exec /opt/venvs/labgrid/bin/labgrid-coordinator "$@" diff --git a/debian/labgrid.install b/debian/labgrid.install index cfea6dd96..2a1d9725d 100755 --- a/debian/labgrid.install +++ b/debian/labgrid.install @@ -1,6 +1,7 @@ #!/usr/bin/dh-exec debian/labgrid.yaml /etc debian/labgrid-client /usr/bin +debian/labgrid-coordinator /usr/bin debian/labgrid-exporter /usr/bin debian/labgrid-pytest /usr/bin debian/labgrid-suggest /usr/bin From 1a406bcb95de8e21f935dafc0993c502d092ab47 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:39:10 +0200 Subject: [PATCH 285/384] debian: numpy is no longer an indirect dependency With the autobahn/crossbar dependencies removed, numpy is no longer an indirect dependency of labgrid. Signed-off-by: Bastian Krause --- debian/rules | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/rules b/debian/rules index 7c1532c6c..cd09b0c5d 100755 --- a/debian/rules +++ b/debian/rules @@ -13,4 +13,4 @@ override_dh_virtualenv: --upgrade-pip \ --extras deb \ --extra-pip-arg='--no-binary' \ - --extra-pip-arg='cffi,numpy' + --extra-pip-arg='cffi' From ab8f79a64e0266fcac79616917e843b1e1068ef1 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 11:34:10 +0200 Subject: [PATCH 286/384] CHANGES: add gRPC migration to new 24.1 release Signed-off-by: Bastian Krause --- CHANGES.rst | 51 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index d185b909c..b2c4ce84c 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -1,3 +1,54 @@ +Release 24.1 (Unreleased) +------------------------- +As announced +`before `_, +this is the first release using gRPC instead of crossbar/autobahn for +communication between client/exporter and coordinator. + +Crossbar/autobahn are unfortunately not very well maintained anymore. The +crossbar component was moved to its own virtualenv to cope with the high number +of dependencies leading to conflicts. Support for Python 3.13 is still not +available in a crossbar release on PyPI. + +That's why labgrid moves to gRPC with this release. gRPC is a well maintained +RPC framework with a lot of users. 
As a side effect, the message transfer is +more performant and the import times are shorter. + +New Features in 24.1 +~~~~~~~~~~~~~~~~~~~~ +- All components can be installed into the same virtualenv again. + +Bug fixes in 24.1 +~~~~~~~~~~~~~~~~~ + +FIXME + +Breaking changes in 24.1 +~~~~~~~~~~~~~~~~~~~~~~~~ +Maintaining support for both crossbar/autobahn as well as gRPC in labgrid would +be a lot of effort due to the different architectures of those frameworks. +Therefore, a hard migration to gRPC is deemed the lesser issue. + +Due to the migration, 24.1 includes the following breaking changes: + +- The labgrid environment config option ``crossbar_url`` was renamed to + ``coordinator_address``. The environment variable ``LG_CROSSBAR`` was renamed + to ``LG_COORDINATOR``. +- The labgrid environment config option ``crossbar_realm`` is now obsolete as + well as the environment variable ``LG_CROSSBAR_REALM``. +- The coordinator is available as ``labgrid-coordinator`` (instead of + ``crossbar start``). No additional configuration file is required. +- The systemd services in ``contrib/systemd/`` were updated. + +Other breaking changes include: + +FIXME + +Known issues in 24.1 +~~~~~~~~~~~~~~~~~~~~ + +FIXME + Release 24.0 (Released Aug 12, 2024) ------------------------------------ From 00e13b7760d5c6fd86bd617892eee462d3df7398 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Wed, 14 Aug 2024 11:41:14 +0200 Subject: [PATCH 287/384] github/workflows/scheduled-unit-tests: drop scheduled docker tests on stable-23.0 Adding the stable branches here is problematic: GitHub uses the workflow definitions from the default branch (master). The workflow might not match the stable branch. So drop it here. As a replacement, the current stable branch is tested on a labgrid fork that has the stable branch set as its default branch, so the workflow is maintained in the same branch as the code. Signed-off-by: Bastian Krause --- .github/workflows/scheduled-unit-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index 096e7aee6..87b4474db 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -21,7 +21,7 @@ jobs: strategy: fail-fast: false matrix: - branch: ['master', 'stable-23.0'] + branch: ['master'] uses: ./.github/workflows/reusable-unit-tests-docker.yml with: branch: ${{ matrix.branch }} From 5a212eb8309b05df8375abb29e1fa41a71aa6a4e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 14:25:01 +0200 Subject: [PATCH 288/384] doc/RELEASE: update release instructions Signed-off-by: Bastian Krause --- doc/RELEASE.rst | 112 +++++++++++++++++------------------------------- 1 file changed, 40 insertions(+), 72 deletions(-) diff --git a/doc/RELEASE.rst b/doc/RELEASE.rst index 886746ba5..2bf33fcc9 100644 --- a/doc/RELEASE.rst +++ b/doc/RELEASE.rst @@ -1,97 +1,65 @@ Step by step guide to releasing a new labgrid version. +labgrid follows the `calver `_ versioning scheme +``YY.MINOR[.MICRO]``. +The ``MINOR`` number starts at 0. +The ``MICRO`` number for stable releases starts at 1. -0. Preparations -=============== -Clean the `dist/` directory: - -.. code-block:: bash - - rm dist/* - -Check your commit mail and name: - -.. code-block:: bash +1. 
Check for relevant PRs that need a merge +=========================================== - git config --get user.name - git config --get user.email +- `Milestones `_ +- `Fixes `_ +- `Fixes for stable `_ -1. Update CHANGES.rst +2. Update CHANGES.rst ===================== Update the `CHANGES.rst` file. -Ensure that no incompatiblities are unlisted and that all major features are +Ensure that no incompatibilities are unlisted and that all major features are described in a separate section. It's best to compare against the git log. -2. Bump Version Number -====================== +Add new sections including the version number for the release in `CHANGES.rst` +(if not already done). +Set the release date. -Bump the version number in `CHANGES.rst`. +If you are bumping the ``MINOR`` number, import the changes from the latest stable +branch and add a new (unreleased) section for the next release. +Also add a new section into ``debian/changelog``. -3. Create a signed Tag -====================== - -Create a signed tag of the new release. -Your PGP-key has to be available on the computer. - -.. code-block:: bash - - git tag -s - -4. Create sdist +3. Create a tag =============== -Run the following command: - -:: +Wait for the CI to succeed on the commit you are about to tag. - pip install build - python -m build --sdist +Now create a (signed) tag of the new release. +If it should be signed (``-s``), your PGP-key has to be available on the +computer. +The release tag should start with a lower case ``v``, e.g. ``v24.0`` or +``v24.0.1``. -The sdist file will be available in the `dist/` directory. - -5. Test upload to pypi dev -========================== - -Test the upload by using twine to upload to pypi test service - -:: - - twine upload --repository-url https://test.pypi.org/legacy/ dist/* - -6. Test download from pypi dev -============================== - -Test the upload by using pypi dev as a download source - -:: - - virtualenv -p python3 labgrid-release- - source labgrid-release-/bin/activate - pip install --upgrade pip setuptools wheel - pip install --index-url https://test.pypi.org/simple/ labgrid - -And optionally run the tests: - -:: +.. code-block:: bash - pip install ".[dev]" - pytest tests + git tag -s $VERSION -7. Upload to pypi -================= +If you're happy with it, push it: -Upload the tested dist file to pypi. +.. code-block:: bash -:: + git push upstream $VERSION - twine upload dist/* +The CI should take care of the rest. +Make sure it succeeds and the new release is available on PyPi. -8. Upload the signed tag -======================== +4. Draft a release +================== -Upload the signed tag to the upstream repository +On GitHub, draft a new release, add the changes in Markdown format and create a +discussion for the release: +https://github.com/labgrid-project/labgrid/releases/new -:: +5. Create new stable branch +=========================== - git push upstream +If you are bumping the ``MINOR`` number, push a new stable branch +``stable-YY.MINOR`` based on the release tag. 
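[Editor's note: the ``vYY.MINOR[.MICRO]`` tag convention described in the
release instructions above can be checked mechanically before pushing. The
following is a minimal illustrative sketch in Python — the regex and helper
name are ours, not part of labgrid or of this patch series:]

.. code-block:: python

    import re

    # calver scheme from doc/RELEASE.rst: a leading lower case "v",
    # then YY.MINOR with an optional .MICRO, e.g. v24.0 or v24.0.1.
    RELEASE_TAG_RE = re.compile(r"v\d{2}\.\d+(?:\.\d+)?")

    def is_release_tag(tag: str) -> bool:
        """Return True if tag matches the vYY.MINOR[.MICRO] convention."""
        return RELEASE_TAG_RE.fullmatch(tag) is not None

    assert is_release_tag("v24.0")      # MINOR release
    assert is_release_tag("v24.0.1")    # MICRO (stable) release
    assert not is_release_tag("24.0")   # missing the leading lower case "v"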
From 2f5d3aeb018e96534f90d976a252db7155d16ed4 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 15:26:17 +0200 Subject: [PATCH 289/384] pyproject.toml: add ruff to dev extra Signed-off-by: Bastian Krause --- pyproject.toml | 1 + 1 file changed, 1 insertion(+) diff --git a/pyproject.toml b/pyproject.toml index b06d84ea4..f95f66300 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -127,6 +127,7 @@ dev = [ "pytest-isort>=2.0.0", "pytest-mock>=3.6.1", "pylint>=3.0.0", + "ruff>=0.5.7", # GRPC Channelz support "grpcio-channelz>=1.64.1, <2.0.0", From abcb4211d0ec43fdf823c7070c9bc11b68e6d79b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 13 Aug 2024 15:19:50 +0200 Subject: [PATCH 290/384] github/workflows/reusable-unit-tests: enforce ruff format on labgrid.remote The labgrid.remote module was formatted with ruff in #1426. The gRPC changes are formatted correctly. Enforce formatting on the module now, but exclude generated code. We can enforce formatting of more modules over time. Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 3 +++ pyproject.toml | 1 + 2 files changed, 4 insertions(+) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 852cd1a26..6a6eef797 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -51,6 +51,9 @@ jobs: run: | pylint --list-msgs-enabled pylint labgrid + - name: Format with ruff + run: | + ruff format --check --diff labgrid/remote/ - name: Test with pytest run: | pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" diff --git a/pyproject.toml b/pyproject.toml index f95f66300..6eab8df10 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -256,6 +256,7 @@ exclude = [ "venv", "envs", "dist", + "labgrid/remote/generated", ] [tool.ruff.lint] From 288a0d0a9bbc01d0b9f9a47102abcb2013a908fe Mon Sep 17 00:00:00 2001 From: Rainer Poisel Date: Fri, 30 Aug 2024 08:51:45 +0200 Subject: [PATCH 291/384] fix: match interface regex in ShellDriver Fix regex to correctly match network interfaces with dots and/or dashes in the name Previously, the regex for extracting the interface from the default route was not capable of matching network interfaces containing dots ('.') and/or dashes ('-') in their names. This commit improves the regex by allowing such interfaces to be correctly identified. For example, interfaces like 'br-lan.42' and 'eth0-1' are now correctly matched by the updated regex. 
Sample output of the `ip -4 route list default` command: default via 192.168.42.255 dev br-lan.42 src 192.168.42.1 Signed-off-by: Rainer Poisel --- labgrid/driver/shelldriver.py | 2 +- tests/test_shelldriver.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 1 deletion(-) diff --git a/labgrid/driver/shelldriver.py b/labgrid/driver/shelldriver.py index c9766387e..6cff6f1ea 100644 --- a/labgrid/driver/shelldriver.py +++ b/labgrid/driver/shelldriver.py @@ -567,7 +567,7 @@ def get_default_interface_device_name(self, version=4): regex = r"""default\s+via # leading strings \s+\S+ # IP address - \s+dev\s+(\w+) # interface""" + \s+dev\s+([\w\.-]+) # interface""" default_route = self._run_check(f"ip -{version} route list default") matches = re.findall(regex, "\n".join(default_route), re.X) diff --git a/tests/test_shelldriver.py b/tests/test_shelldriver.py index f0c4eecc3..89dc0ea01 100644 --- a/tests/test_shelldriver.py +++ b/tests/test_shelldriver.py @@ -48,6 +48,18 @@ def test_run_with_timeout(self, target_with_fakeconsole, mocker): res = d.run("test") assert res == (['success'], [], 0) + def test_default_interface_device_name(self, target_with_fakeconsole, mocker): + fake_default_route_show = "default via 10.0.2.2 dev br-lan src 10.0.2.15" + + t = target_with_fakeconsole + d = ShellDriver(t, "shell", prompt="dummy", login_prompt="dummy", username="dummy") + d.on_activate = mocker.MagicMock() + d = t.get_driver("ShellDriver") + d._run = mocker.MagicMock(return_value=([fake_default_route_show], [], 0)) + + res = d.get_default_interface_device_name() + assert res == "br-lan" + def test_get_ip_addresses(self, target_with_fakeconsole, mocker): fake_ip_addr_show = r""" 18: br-lan.42 inet 192.168.42.1/24 brd 192.168.42.255 scope global br-lan.42\ valid_lft forever preferred_lft forever @@ -62,3 +74,26 @@ def test_get_ip_addresses(self, target_with_fakeconsole, mocker): res = d.get_ip_addresses("br-lan.42") assert res[0] == IPv4Interface("192.168.42.1/24") + + def test_get_ip_addresses_default(self, target_with_fakeconsole, mocker): + t = target_with_fakeconsole + d = ShellDriver(t, "shell", prompt="dummy", login_prompt="dummy", username="dummy") + d.on_activate = mocker.MagicMock() + d = t.get_driver("ShellDriver") + d._run = mocker.MagicMock() + d._run.side_effect = [ + (["default via 192.168.42.255 dev br-lan.42 src 192.168.42.1"], [], 0), + ( + [ + r""" +18: br-lan.42 inet 192.168.42.1/24 brd 192.168.42.255 scope global br-lan.42\ valid_lft forever preferred_lft forever +18: br-lan.42 inet6 fe80::9683:c4ff:fea6:fb6b/64 scope link \ valid_lft forever preferred_lft forever +""" + ], + [], + 0, + ), + ] + + res = d.get_ip_addresses() + assert res[0] == IPv4Interface("192.168.42.1/24") From 965e5e01c63b7095bc47faec5c7937fad2bc6a45 Mon Sep 17 00:00:00 2001 From: Maciej Grela Date: Tue, 3 Sep 2024 12:04:39 +0200 Subject: [PATCH 292/384] remote/coordinator: Fix typo to allow for reservations matching multiple tags Signed-off-by: Maciej Grela --- labgrid/remote/coordinator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/remote/coordinator.py b/labgrid/remote/coordinator.py index 97ed16d26..930f3306c 100644 --- a/labgrid/remote/coordinator.py +++ b/labgrid/remote/coordinator.py @@ -922,7 +922,7 @@ async def CreateReservation(self, request: labgrid_coordinator_pb2.CreateReserva await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Key {k} is invalid") if not TAG_VAL.match(v): await context.abort(grpc.StatusCode.INVALID_ARGUMENT, f"Value {v} is 
invalid") - fltr[k] = v + fltr[k] = v owner = self.clients[peer].name res = Reservation(owner=owner, prio=request.prio, filters=fltrs) From a9295fca1f5e605073b7754604ec1cc1d01766c1 Mon Sep 17 00:00:00 2001 From: Michael Tretter Date: Wed, 11 Sep 2024 16:28:57 +0200 Subject: [PATCH 293/384] driver/usbvideodriver: reduce latency of GStreamer pipeline The playbin3 uses a urisourcebin to open and read the uri. The urisourcebin detects the fd as a stream uri and internally crates a multiqueue with a default buffer duration of 5 s and a high watermark of 0.60 to buffer the stream. This adds 3 s latency to the video stream. If the source signals that is it a live source, the urisourcebin would avoid buffering of the stream. Unfortunately, the fdsrc doesn't signal that the data is live, and doesn't have a property to change this. Use buffer-duration to set the size of the queue for buffering to 0 to avoid this additional latency. The queue could be completely removed by setting "buffering" in the flags property of playbin3 to 0, which would be equal to the behavior of a live source, but the changed flags are more difficult to understand than the buffer duration. --- labgrid/driver/usbvideodriver.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/labgrid/driver/usbvideodriver.py b/labgrid/driver/usbvideodriver.py index 15fad97ed..5a604d570 100644 --- a/labgrid/driver/usbvideodriver.py +++ b/labgrid/driver/usbvideodriver.py @@ -131,7 +131,7 @@ def stream(self, caps_hint=None, controls=None): tx_cmd = self.video.command_prefix + ["gst-launch-1.0", "-q"] tx_cmd += pipeline.split() rx_cmd = ["gst-launch-1.0"] - rx_cmd += "playbin3 uri=fd://0".split() + rx_cmd += "playbin3 buffer-duration=0 uri=fd://0".split() tx = subprocess.Popen( tx_cmd, From a3501a2ba510ab09f3cc24390670bb194d053929 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 12:33:52 +0200 Subject: [PATCH 294/384] github: install setuptools_scm via apt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Installing setuptools_scm via pip system-wide is deprecated. With ubuntu 24.04 this becomes an error: pip install --upgrade setuptools pip install setuptools_scm WARNING: apt does not have a stable CLI interface. Use with caution in scripts. Reading package lists... Building dependency tree... Reading state information... python3-pip is already the newest version (24.0+dfsg-1ubuntu1). 0 upgraded, 0 newly installed, 0 to remove and 15 not upgraded. error: externally-managed-environment × This environment is externally managed ╰─> To install Python packages system-wide, try apt install python3-xyz, where xyz is the package you are trying to install. If you wish to install a non-Debian-packaged Python package, create a virtual environment using python3 -m venv path/to/venv. Then use path/to/venv/bin/python and path/to/venv/bin/pip. Make sure you have python3-full installed. If you wish to install a non-Debian packaged Python application, it may be easiest to use pipx install xyz, which will manage a virtual environment for you. Make sure you have pipx installed. See /usr/share/doc/python3.12/README.venv for more information. note: If you believe this is a mistake, please contact your Python installation or OS distribution provider. You can override this, at the risk of breaking your Python installation or OS, by passing --break-system-packages. hint: See PEP 668 for the detailed specification. 
labgrid does not rely on a special version of setuptools_scm anyway, so simply install the corresponding Ubuntu package. Signed-off-by: Bastian Krause --- .github/workflows/docker.yml | 4 +--- .github/workflows/reusable-unit-tests-docker.yml | 4 +--- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 71fafa366..02a291024 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -21,9 +21,7 @@ jobs: - uses: actions/checkout@v4 - name: Install system dependencies run: | - sudo apt install -yq python3-pip - pip install --upgrade setuptools - pip install setuptools_scm + sudo apt install -yq python3-pip python3-setuptools-scm - name: Set up QEMU uses: docker/setup-qemu-action@v3 with: diff --git a/.github/workflows/reusable-unit-tests-docker.yml b/.github/workflows/reusable-unit-tests-docker.yml index 986d47161..e6ffca238 100644 --- a/.github/workflows/reusable-unit-tests-docker.yml +++ b/.github/workflows/reusable-unit-tests-docker.yml @@ -16,9 +16,7 @@ jobs: ref: ${{ inputs.branch }} - name: Install system dependencies run: | - sudo apt install -yq python3-pip - pip install --upgrade setuptools - pip install setuptools_scm + sudo apt install -yq python3-pip python3-setuptools-scm - name: Build docker images run: | ./dockerfiles/build.sh From b762cc9ed8415830c287038758031bb95003f746 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 12:20:56 +0200 Subject: [PATCH 295/384] github: bump ubuntu runner images to 24.04 Pinning the runner images to a release prevents unexpected breakages when a new LTS is released. We follow the releases, so let's keep this a manual step. Signed-off-by: Bastian Krause --- .github/workflows/docker.yml | 2 +- .github/workflows/reusable-unit-tests-docker.yml | 2 +- .github/workflows/reusable-unit-tests.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 02a291024..5fbfec049 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -16,7 +16,7 @@ env: jobs: docker: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 - name: Install system dependencies diff --git a/.github/workflows/reusable-unit-tests-docker.yml b/.github/workflows/reusable-unit-tests-docker.yml index e6ffca238..947c413cb 100644 --- a/.github/workflows/reusable-unit-tests-docker.yml +++ b/.github/workflows/reusable-unit-tests-docker.yml @@ -9,7 +9,7 @@ on: jobs: docker: - runs-on: ubuntu-latest + runs-on: ubuntu-24.04 steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 6a6eef797..28848417c 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -12,7 +12,7 @@ on: jobs: build: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 continue-on-error: false steps: - uses: actions/checkout@v4 From d377f0a27282dd09efead2cb8fd17f3b6682a8a0 Mon Sep 17 00:00:00 2001 From: Rouven Czerwinski Date: Fri, 20 Sep 2024 12:50:25 +0200 Subject: [PATCH 296/384] README: add getting started section Add links to the youtube tutorial series and getting started section of the documentation on readthedocs. 
Signed-off-by: Rouven Czerwinski Signed-off-by: Bastian Krause --- README.rst | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/README.rst b/README.rst index 7d3935f70..2ae9aafda 100644 --- a/README.rst +++ b/README.rst @@ -10,6 +10,8 @@ Labgrid is an embedded board control python library with a focus on testing, dev and general automation. It includes a remote control layer to control boards connected to other hosts. +* `Getting started <#Getting-started>`_ + * `Purpose and Features <#purpose-and-features>`_ * `Documentation <#documentation>`_ @@ -24,6 +26,15 @@ It includes a remote control layer to control boards connected to other hosts. * `Install Development State <#install-development-state>`_ +Getting started +--------------- +There is a tutorial series on the Pengutronix Youtube channel you can follow to +get started: `Labgrid Tutorial Playlist +`_. + +The other starting point is in the `documentation +`__. + Purpose and Features -------------------- The idea behind labgrid is to create an abstraction of the hardware control From de127469e59ccb9832eea415e067e3366cf16829 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 8 Oct 2024 16:16:30 +0200 Subject: [PATCH 297/384] man: rename rst2man.py -> rst2man sphinx_rtd_theme 3.0.0 added "support for docutils >0.18, <0.22." [1]. Prior releases limited docutils to <0.21. docutils 0.21 added "entry points (without the .py extension) instead of installing the rst2*.py front end tools in the binary PATH." [2] So change the rst2man call from rst2man.py to rst2man in the man Makefile. [1] https://sphinx-rtd-theme.readthedocs.io/en/stable/changelog.html [2] https://docutils.sourceforge.io/RELEASE-NOTES.html#release-0-21-2024-04-09 Signed-off-by: Bastian Krause --- man/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/man/Makefile b/man/Makefile index 4c1c1accc..83189ca5c 100644 --- a/man/Makefile +++ b/man/Makefile @@ -2,7 +2,7 @@ PAGES := labgrid-client.1 labgrid-coordinator.1 labgrid-exporter.1 labgrid-devic COMPRESSED := labgrid-client.1.gz labgrid-coordinator.1.gz labgrid-exporter.1.gz labgrid-device-config.5.gz labgrid-pytest.7.gz labgrid-suggest.1.gz %.1 %.5 %.7: %.rst - rst2man.py $< >$@.tmp + rst2man $< >$@.tmp mv -f $@.tmp $@ %.gz : % From 9d96b69c59dbb97eb8a85b502222c29ff1f1fb97 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Tue, 8 Oct 2024 15:49:18 +0200 Subject: [PATCH 298/384] doc/conf: drop obsolete html_theme_path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since sphinx_rtd_theme 3.0.0rc1, "html_theme_path" is obsolete: "Raise a warning when defining html_theme_path. This was an old config that’s not required anymore." [1] Fixes warnings such as: sphinx.errors.SphinxWarning: Calling get_html_theme_path is deprecated. If you are calling it to define html_theme_path, you are safe to remove that code. Fixes labgrid's CI which is building the docs with -W, treating warnings as errors. [1] https://sphinx-rtd-theme.readthedocs.io/en/stable/changelog.html Signed-off-by: Bastian Krause --- doc/conf.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/doc/conf.py b/doc/conf.py index cfef4259b..2fcea3267 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -95,9 +95,6 @@ # html_theme = 'sphinx_rtd_theme' -# Set correct html_path for rtd theme: -html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - # Theme options are theme-specific and customize the look and feel of a theme # further. 
For a list of options available for each theme, see the
# documentation.

From 3ef1d2ddb1b12403388cd59e053540a51cb71840 Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Tue, 8 Oct 2024 17:28:57 +0200
Subject: [PATCH 299/384] man: regenerate man pages with docutils 0.21

Signed-off-by: Bastian Krause
---
 man/labgrid-client.1 | 26 +++++---------
 man/labgrid-coordinator.1 | 2 +-
 man/labgrid-device-config.5 | 72 ++++++++++++++++---------------------
 man/labgrid-exporter.1 | 16 ++++-----
 man/labgrid-pytest.7 | 10 +++---
 man/labgrid-suggest.1 | 8 ++---
 6 files changed, 54 insertions(+), 80 deletions(-)

diff --git a/man/labgrid-client.1 b/man/labgrid-client.1
index 52db032db..150e7be3c 100644
--- a/man/labgrid-client.1
+++ b/man/labgrid-client.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "LABGRID-CLIENT" 1 "2017-04-15" "0.0.1" "embedded testing"
+.TH "LABGRID-CLIENT" "1" "2017-04-15" "0.0.1" "embedded testing"
.SH NAME
labgrid-client \- labgrid-client interface to control boards
.SH SYNOPSIS
@@ -247,11 +247,9 @@ To retrieve a list of places run:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-client places
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.sp
@@ -260,11 +258,9 @@ the \fBacquire command\fP and passing the placename as a \-p parameter:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-client \-p acquire
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.sp
@@ -272,11 +268,9 @@ Open a console to the acquired place:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-client \-p console
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.sp
@@ -284,11 +278,9 @@ Add all resources with the group \(dqexample\-group\(dq to the place example\-pl
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-client \-p example\-place add\-match */example\-group/*/*
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH SEE ALSO

diff --git a/man/labgrid-coordinator.1 b/man/labgrid-coordinator.1
index 22673a065..a134cea9a 100644
--- a/man/labgrid-coordinator.1
+++ b/man/labgrid-coordinator.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "LABGRID-COORDINATOR" 1 "2024-08-06" "0.0.1" "embedded testing"
+.TH "LABGRID-COORDINATOR" "1" "2024-08-06" "0.0.1" "embedded testing"
.SH NAME
labgrid-coordinator \- labgrid-coordinator managing labgrid resources and places
.SH SYNOPSIS

diff --git a/man/labgrid-device-config.5 b/man/labgrid-device-config.5
index f6de7a954..fd9e05c2e 100644
--- a/man/labgrid-device-config.5
+++ b/man/labgrid-device-config.5
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "LABGRID-DEVICE-CONFIG" 5 "2017-04-15" "0.0.1" "embedded testing"
+.TH "LABGRID-DEVICE-CONFIG" "5" "2017-04-15" "0.0.1" "embedded testing"
.SH NAME
labgrid-device-config \- labgrid test configuration files
.SH SYNOPSIS
@@ -49,7 +49,7 @@ important, since they are parsed as an ordered dictionary and may
depend on a previous driver.
.sp
For a list of available resources and drivers refer to
-\fI\%https://labgrid.readthedocs.io/en/latest/configuration.html\fP\&.
+<https://labgrid.readthedocs.io/en/latest/configuration.html>\&.
.SH OPTIONS
.sp
The \fBoptions:\fP top key configures various options such as the
coordinator_address.
@@ -75,13 +75,11 @@ Two configured images, one for the root filesystem, one for the bootloader:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
images:
  root: \(dqplatform\-v7a/images/root.img\(dq
  boot: \(dqplatform\-v7a/images/barebox.img\(dq
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH TOOLS
.sp
The \fBtools:\fP top key provides paths to binaries such as fastboot.
@@ -92,91 +90,89 @@ The \fBtools:\fP top key provides paths to binaries such as fastboot.
.INDENT 0.0
.TP
.B \fBdfu\-util\fP
Path to the dfu\-util binary, used by the DFUDriver.
-See: \fI\%https://dfu\-util.sourceforge.net/\fP
+See: <https://dfu\-util.sourceforge.net/>
.TP
.B \fBdpcmd\fP
Path to the dpcmd binary, used by the DediprogFlashDriver.
-See: \fI\%https://github.com/DediProgSW/SF100Linux\fP
+See: <https://github.com/DediProgSW/SF100Linux>
.TP
.B \fBfastboot\fP
Path to the fastboot binary, used by the AndroidFastbootDriver.
-See: \fI\%https://developer.android.com/studio/releases/platform\-tools\fP
+See: <https://developer.android.com/studio/releases/platform\-tools>
.TP
.B \fBflashrom\fP
Path to the flashrom binary, used by the FlashromDriver.
-See: \fI\%https://www.flashrom.org/\fP
+See: <https://www.flashrom.org/>
.TP
.B \fBimx_usb\fP
Path to the imx_usb binary, used by the BDIMXUSBDriver.
-See: \fI\%https://github.com/boundarydevices/imx_usb_loader\fP
+See: <https://github.com/boundarydevices/imx_usb_loader>
.TP
.B \fBimx\-usb\-loader\fP
Path to the imx\-usb\-loader binary, used by the IMXUSBDriver.
-See: \fI\%https://git.pengutronix.de/cgit/barebox/tree/scripts/imx/imx\-usb\-loader.c\fP
+See: <https://git.pengutronix.de/cgit/barebox/tree/scripts/imx/imx\-usb\-loader.c>
.TP
.B \fBjtagconfig\fP
Path to the jtagconfig binary, used by the QuartusHPSDriver.
-See: \fI\%https://www.intel.com/content/www/us/en/docs/programmable/683689/current/jtagconfig.html\fP
+See: <https://www.intel.com/content/www/us/en/docs/programmable/683689/current/jtagconfig.html>
.TP
.B \fBmxs\-usb\-loader\fP
Path to the mxs\-usb\-loader binary, used by the MXSUSBDriver.
-See: \fI\%https://git.pengutronix.de/cgit/barebox/tree/scripts/mxs\-usb\-loader.c?h=v2017.03.0\fP
+See: <https://git.pengutronix.de/cgit/barebox/tree/scripts/mxs\-usb\-loader.c?h=v2017.03.0>
.TP
.B \fBopenocd\fP
Path to the openocd binary, used by the OpenOCDDriver.
-See: \fI\%https://openocd.org/\fP
+See: <https://openocd.org/>
.TP
.B \fBquartus_hps\fP
Path to the quartus_hps binary, used by the QuartusHPSDriver.
-See: \fI\%https://www.intel.com/content/www/us/en/docs/programmable/683039/22\-3/hps\-flash\-programmer.html\fP
+See: <https://www.intel.com/content/www/us/en/docs/programmable/683039/22\-3/hps\-flash\-programmer.html>
.TP
.B \fBrk\-usb\-loader\fP
Path to the rk\-usb\-loader binary, used by the RKUSBDriver.
-See: \fI\%https://git.pengutronix.de/cgit/barebox/tree/scripts/rk\-usb\-loader.c\fP
+See: <https://git.pengutronix.de/cgit/barebox/tree/scripts/rk\-usb\-loader.c>
.TP
.B \fBsd\-mux\-ctrl\fP
Path to the sd\-mux\-ctrl binary, used by the USBSDWireDriver.
-See: \fI\%https://git.tizen.org/cgit/tools/testlab/sd\-mux/\fP
+See: <https://git.tizen.org/cgit/tools/testlab/sd\-mux/>
.TP
.B \fBsispmctl\fP
Path to the sispmctl binary, used by the SiSPMPowerDriver.
-See: \fI\%https://sispmctl.sourceforge.net/\fP
+See: <https://sispmctl.sourceforge.net/>
.TP
.B \fBuhubctl\fP
Path to the uhubctl binary, used by the USBPowerDriver.
-See: \fI\%https://github.com/mvp/uhubctl\fP
+See: <https://github.com/mvp/uhubctl>
.TP
.B \fBusbmuxctl\fP
Path to the usbmuxctl tool, used by the LXAUSBMuxDriver.
-\fI\%https://github.com/linux\-automation/usbmuxctl\fP
+<https://github.com/linux\-automation/usbmuxctl>
.TP
.B \fBusbsdmux\fP
Path to the usbsdmux tool, used by the USBSDMuxDriver.
-See: \fI\%https://github.com/linux\-automation/usbsdmux\fP
+See: <https://github.com/linux\-automation/usbsdmux>
.TP
.B \fBuuu\-loader\fP
Path to the uuu\-loader binary, used by the UUUDriver.
-See: \fI\%https://github.com/nxp\-imx/mfgtools\fP
+See: <https://github.com/nxp\-imx/mfgtools>
.TP
.B \fBykushcmd\fP
Path to the ykushcmd binary, used by the YKUSHPowerDriver.
-See: \fI\%https://github.com/Yepkit/ykush\fP
+See: <https://github.com/Yepkit/ykush>
.UNINDENT
.sp
The QEMUDriver expects a custom key set via its \fBqemu_bin\fP argument.
-See \fI\%https://www.qemu.org/\fP
+See <https://www.qemu.org/>
.SS TOOLS EXAMPLE
.sp
Configure the tool path for \fBimx\-usb\-loader\fP:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
tools:
  imx\-usb\-loader: \(dq/opt/labgrid\-helper/imx\-usb\-loader\(dq
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH IMPORTS
@@ -192,12 +188,10 @@ Import a local \fImyfunctions.py\fP file:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
imports:
  \- myfunctions.py
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH EXAMPLES
@@ -207,8 +201,7 @@ A sample configuration with one \fImain\fP target, accessible via SerialPort
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
targets:
  main:
    resources:
@@ -220,8 +213,7 @@ targets:
      prompt: \(aqroot@\ew+:[^ ]+ \(aq
      login_prompt: \(aq login: \(aq
      username: \(aqroot\(aq
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.sp
@@ -231,8 +223,7 @@ in the loaded local python file:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
targets:
  main:
    resources:
@@ -250,8 +241,7 @@ tools:
  imx\-usb\-loader: \(dq/opt/lg\-tools/imx\-usb\-loader\(dq
imports:
  \- mystrategy.py
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH SEE ALSO

diff --git a/man/labgrid-exporter.1 b/man/labgrid-exporter.1
index 66d1d69e0..3e46193b6 100644
--- a/man/labgrid-exporter.1
+++ b/man/labgrid-exporter.1
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "LABGRID-EXPORTER" 1 "2017-04-15" "0.0.1" "embedded testing"
+.TH "LABGRID-EXPORTER" "1" "2017-04-15" "0.0.1" "embedded testing"
.SH NAME
labgrid-exporter \- labgrid-exporter interface to control boards
.SH SYNOPSIS
@@ -95,7 +95,7 @@ explicitly set.
.sp
The exporter uses a YAML configuration file which defines groups
of related resources.
-See <\fI\%https://labgrid.readthedocs.io/en/latest/configuration.html#exporter\-configuration\fP>
+See <https://labgrid.readthedocs.io/en/latest/configuration.html#exporter\-configuration>
for more information.
.SH ENVIRONMENT VARIABLES
.sp
@@ -110,11 +110,9 @@ Start the exporter with the configuration file \fImy\-config.yaml\fP:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-exporter my\-config.yaml
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.sp
@@ -122,11 +120,9 @@ Same as above, but with name \fBmyname\fP:
.INDENT 0.0
.INDENT 3.5
.sp
-.nf
-.ft C
+.EX
$ labgrid\-exporter \-n myname my\-config.yaml
-.ft P
-.fi
+.EE
.UNINDENT
.UNINDENT
.SH SEE ALSO

diff --git a/man/labgrid-pytest.7 b/man/labgrid-pytest.7
index f23eae478..40837d252 100644
--- a/man/labgrid-pytest.7
+++ b/man/labgrid-pytest.7
@@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]]
.\" new: \\n[rst2man-indent\\n[rst2man-indent-level]]
.in \\n[rst2man-indent\\n[rst2man-indent-level]]u
..
-.TH "LABGRID-PYTEST" 7 "2017-04-15" "0.0.1" "embedded testing"
+.TH "LABGRID-PYTEST" "7" "2017-04-15" "0.0.1" "embedded testing"
.SH NAME
labgrid-pytest \- labgrid-pytest labgrid integration for pytest
.SH SYNOPSIS
@@ -42,18 +42,16 @@ The labgrid plugin parses the supplied configuration yaml file as
described in \fBlabgrid\-device\-config\fP(5) and allows the usage of the
target and environment fixtures.
The complete documentation is available at
-\fI\%https://labgrid.readthedocs.io/en/latest/usage.html#pytest\-plugin\fP\&.
+<https://labgrid.readthedocs.io/en/latest/usage.html#pytest\-plugin>\&.
.SH EXAMPLES .sp Start tests with \fBmyconfig.yaml\fP and directory \fBtests\fP: .INDENT 0.0 .INDENT 3.5 .sp -.nf -.ft C +.EX $ pytest \-\-lg\-env myconfig.yaml tests -.ft P -.fi +.EE .UNINDENT .UNINDENT .SH SEE ALSO diff --git a/man/labgrid-suggest.1 b/man/labgrid-suggest.1 index 51efbae5e..f4f45a390 100644 --- a/man/labgrid-suggest.1 +++ b/man/labgrid-suggest.1 @@ -27,7 +27,7 @@ level margin: \\n[rst2man-indent\\n[rst2man-indent-level]] .\" new: \\n[rst2man-indent\\n[rst2man-indent-level]] .in \\n[rst2man-indent\\n[rst2man-indent-level]]u .. -.TH "LABGRID-SUGGEST" 1 "2021-05-20" "0.0.1" "embedded testing" +.TH "LABGRID-SUGGEST" "1" "2021-05-20" "0.0.1" "embedded testing" .SH NAME labgrid-suggest \- labgrid-suggest generator for YAML config files .SH SYNOPSIS @@ -58,8 +58,7 @@ in the USB\-serial converter, \fBlabgrid\-suggest\fP shows two alternatives: .INDENT 0.0 .INDENT 3.5 .sp -.nf -.ft C +.EX === added device === USBSerialPort for /devices/pci0000:00/0000:00:01.3/0000:02:00.0/usb1/1\-3/1\-3.1/1\-3.1:1.0/ttyUSB0/tty/ttyUSB0 === device properties === @@ -78,8 +77,7 @@ USBSerialPort: match: ID_SERIAL_SHORT: P\-00\-03564 \-\-\- -.ft P -.fi +.EE .UNINDENT .UNINDENT .SH SEE ALSO From 51205ed9c84a7799f0adebf673e073fd24f9f5db Mon Sep 17 00:00:00 2001 From: Nicolas VINCENT Date: Thu, 10 Oct 2024 08:21:58 +0200 Subject: [PATCH 300/384] udev: Adds model id for stlink-v2 and v3 Update list of USBDebugger to match stlink v2 and more stlink v3 Signed-off-by: Nicolas VINCENT --- labgrid/resource/udev.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py index 54c6a9fd3..4b3c84d91 100644 --- a/labgrid/resource/udev.py +++ b/labgrid/resource/udev.py @@ -710,7 +710,9 @@ def filter_match(self, device): if match not in [("0403", "6010"), # FT2232C/D/H Dual UART/FIFO IC ("0403", "6014"), # FT232HL/Q + ("0483", "3748"), # STLINK-V2 ("0483", "374b"), # STLINK-V3 + ("0483", "374e"), # STLINK-V3 ("0483", "374f"), # STLINK-V3 ("15ba", "0003"), # Olimex ARM-USB-OCD ("15ba", "002b"), # Olimex ARM-USB-OCD-H From c15f98b15941f17d15b97db5f712be821edfce4f Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 10 Oct 2024 12:10:05 +0200 Subject: [PATCH 301/384] pyproject.toml: drop Python 3.8 support Signed-off-by: Bastian Krause --- pyproject.toml | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6eab8df10..cbe943ca4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -16,7 +16,7 @@ authors = [ description = "embedded systems control library for development, testing and installation" readme = "README.rst" license = { file="LICENSE" } -requires-python = ">=3.8" +requires-python = ">=3.9" classifiers = [ "Intended Audience :: Developers", "Development Status :: 5 - Production/Stable", @@ -24,7 +24,6 @@ classifiers = [ "Topic :: Software Development :: Testing", "Framework :: Pytest", "Programming Language :: Python :: 3 :: Only", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", @@ -228,7 +227,7 @@ signature-mutators = ["labgrid.step.step"] [tool.tox] legacy_tox_ini = """ [tox] -envlist = py38, py39, py310, py311, py312 +envlist = py39, py310, py311, py312 isolated_build = true [testenv] From 76c21cf27c50e4a4931020687e1565795351d9aa Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 10 Oct 2024 12:10:20 +0200 Subject: [PATCH 302/384] github: drop Python 3.8 support Signed-off-by: 
Bastian Krause --- .github/workflows/push-pr-unit-tests.yml | 2 +- .github/workflows/reusable-unit-tests.yml | 1 - .github/workflows/scheduled-unit-tests.yml | 2 +- 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/.github/workflows/push-pr-unit-tests.yml b/.github/workflows/push-pr-unit-tests.yml index 4eb35620d..ca44774fa 100644 --- a/.github/workflows/push-pr-unit-tests.yml +++ b/.github/workflows/push-pr-unit-tests.yml @@ -8,7 +8,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] uses: ./.github/workflows/reusable-unit-tests.yml with: python-version: ${{ matrix.python-version }} diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 6a6eef797..921fd3114 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -47,7 +47,6 @@ jobs: run: | pip install -e ".[dev]" - name: Lint with pylint - if: inputs.python-version != '3.8' run: | pylint --list-msgs-enabled pylint labgrid diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index 87b4474db..edd55096b 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.8', '3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12'] branch: ['master'] uses: ./.github/workflows/reusable-unit-tests.yml with: From 8d68d8ba6dc8be8e8ebacdc17e7024cf2047dd8d Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 10 Oct 2024 12:19:11 +0200 Subject: [PATCH 303/384] CHANGES: drop Python 3.8 support Signed-off-by: Bastian Krause --- CHANGES.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGES.rst b/CHANGES.rst index b2c4ce84c..9b4ac18b6 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -42,7 +42,7 @@ Due to the migration, 24.1 includes the following breaking changes: Other breaking changes include: -FIXME +- Support for Python 3.8 was dropped. Known issues in 24.1 ~~~~~~~~~~~~~~~~~~~~ From b0812609dfab9dc7c405f827b49d87155eb3c1fd Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Sat, 28 Sep 2024 18:09:19 +0200 Subject: [PATCH 304/384] treewide: drop pylint disable=no-self-use comments pylint's 'no-self-use' check was moved to an optional extension, see [1]. [1] https://pylint.readthedocs.io/en/latest/whatsnew/2/2.14/summary.html#removed-checkers. 
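For reference, the check can still be enabled explicitly as an optional
extension in projects that want it (illustration only, assuming
pylint >= 2.14 where the check moved; not part of this change):

```
pylint --load-plugins=pylint.extensions.no_self_use labgrid
```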
Signed-off-by: Bastian Krause
---
 labgrid/remote/exporter.py | 4 ++--
 labgrid/resource/udev.py | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/labgrid/remote/exporter.py b/labgrid/remote/exporter.py
index 86a261c92..8f0f77243 100755
--- a/labgrid/remote/exporter.py
+++ b/labgrid/remote/exporter.py
@@ -92,10 +92,10 @@ def broken(self, reason):
         self.data["acquired"] = ""
         self.logger.error("marked as broken: %s", reason)
 
-    def _get_start_params(self):  # pylint: disable=no-self-use
+    def _get_start_params(self):
         return {}
 
-    def _get_params(self):  # pylint: disable=no-self-use
+    def _get_params(self):
         return {}
 
     def _start(self, start_params):

diff --git a/labgrid/resource/udev.py b/labgrid/resource/udev.py
index 54c6a9fd3..22c801a34 100644
--- a/labgrid/resource/udev.py
+++ b/labgrid/resource/udev.py
@@ -60,7 +60,7 @@ def __attrs_post_init__(self):
         self.match.setdefault('SUBSYSTEM', 'usb')
         super().__attrs_post_init__()
 
-    def filter_match(self, device):  # pylint: disable=unused-argument,no-self-use
+    def filter_match(self, device):  # pylint: disable=unused-argument
         return True
 
     def suggest_match(self, device):

From 67266deacf5d085c24ab70efc65ad2b84415989a Mon Sep 17 00:00:00 2001
From: Bastian Krause
Date: Sat, 28 Sep 2024 17:53:05 +0200
Subject: [PATCH 305/384] pyproject.toml: limit pysnmp's pyasn1 dependency to
 <0.6.0

pysnmp depends on pyasn1. `pyasn1.compat.octets` was removed in pyasn1
0.6.1 [1] leading to ModuleNotFoundErrors in labgrid's "eaton" and
"poe_mib" power backends:

_______________ TestNetworkPowerDriver.test_import_backend_eaton _______________

self =

    def test_import_backend_eaton(self):
        pytest.importorskip("pysnmp")
>       import labgrid.driver.power.eaton

tests/test_powerdriver.py:295:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
labgrid/driver/power/eaton.py:2: in <module>
    from ...util.snmp import SimpleSNMP
labgrid/util/snmp.py:1: in <module>
    from pysnmp import hlapi
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/hlapi/__init__.py:7: in <module>
    from pysnmp.proto.rfc1902 import *
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/rfc1902.py:8: in <module>
    from pysnmp.proto import rfc1155, error
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/rfc1155.py:10: in <module>
    from pysnmp.proto import error
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/error.py:9: in <module>
    from pysnmp import debug
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    #
    # This file is part of pysnmp software.
    #
    # Copyright (c) 2005-2019, Ilya Etingof
    # License: https://www.pysnmp.com/pysnmp/license.html
    #
    import logging
>   from pyasn1.compat.octets import octs2ints
E   ModuleNotFoundError: No module named 'pyasn1.compat.octets'

/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/debug.py:8: ModuleNotFoundError
______________ TestNetworkPowerDriver.test_import_backend_poe_mib ______________

self =

    def test_import_backend_poe_mib(self):
        pytest.importorskip("pysnmp")
>       import labgrid.driver.power.poe_mib

tests/test_powerdriver.py:307:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
labgrid/driver/power/poe_mib.py:4: in <module>
    from ...util.snmp import SimpleSNMP
labgrid/util/snmp.py:1: in <module>
    from pysnmp import hlapi
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/hlapi/__init__.py:7: in <module>
    from pysnmp.proto.rfc1902 import *
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/rfc1902.py:8: in <module>
    from pysnmp.proto import rfc1155, error
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/rfc1155.py:10: in <module>
    from pysnmp.proto import error
/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/proto/error.py:9: in <module>
    from pysnmp import debug
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

    #
    # This file is part of pysnmp software.
    #
    # Copyright (c) 2005-2019, Ilya Etingof
    # License: https://www.pysnmp.com/pysnmp/license.html
    #
    import logging
>   from pyasn1.compat.octets import octs2ints
E   ModuleNotFoundError: No module named 'pyasn1.compat.octets'

/opt/hostedtoolcache/Python/3.12.5/x64/lib/python3.12/site-packages/pysnmp/debug.py:8: ModuleNotFoundError

The issue is documented upstream [2]. [3] limited the pysnmp version to
<6. pysnmp 6.1.4, 6.2.6, and 7.1.0 are not affected [4].

Limit compatible pyasn1 versions to <0.6.1 until [5] switches labgrid to
pysnmp's asyncio API, thereby dropping the upper bound introduced by [3].

While at it, switch from "pysnmp-lextudio" to "pysnmp". The original
author of pysnmp passed away and the lextudio folks took over
maintenance. While the request to take over the pysnmp PyPi project was
pending, the maintained fork was called pysnmp-lextudio (see #1186,
aa2549c). Now that the migration is complete, let's move back to the
original package name.
See: [6] [1] https://github.com/pyasn1/pyasn1/releases/tag/v0.6.1 [2] https://github.com/pyasn1/pyasn1/issues/76 [3] https://github.com/labgrid-project/labgrid/pull/1332 [4] https://github.com/lextudio/pysnmp/issues/113#issuecomment-2342699222 [5] https://github.com/labgrid-project/labgrid/pull/1497 [6] https://github.com/etingof/pysnmp/issues/429 Signed-off-by: Bastian Krause --- pyproject.toml | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 6eab8df10..2ff2f9960 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -69,7 +69,10 @@ pyvisa = [ "pyvisa>=1.11.3", "PyVISA-py>=0.5.2", ] -snmp = ["pysnmp-lextudio>=4.4.12, <6"] +snmp = [ + "pysnmp>=4.4.12, <6", + "pyasn1<0.6.1", +] vxi11 = ["python-vxi11>=0.9"] xena = ["xenavalkyrie>=3.0.1"] deb = [ @@ -80,7 +83,8 @@ deb = [ "onewire>=0.2", # labgrid[snmp] - "pysnmp-lextudio>=4.4.12, <6", + "pysnmp>=4.4.12, <6", + "pyasn1<0.6.1", ] dev = [ # references to other optional dependency groups @@ -114,7 +118,8 @@ dev = [ "PyVISA-py>=0.5.2", # labgrid[snmp] - "pysnmp-lextudio>=4.4.12, <6", + "pysnmp>=4.4.12, <6", + "pyasn1<0.6.1", # labgrid[vxi11] "python-vxi11>=0.9", From ea40a2232335eb72cbffd80f5c7828f0a74c305b Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 16:47:46 +0200 Subject: [PATCH 306/384] helpers/labgrid-raw-interface: run ruff format Signed-off-by: Bastian Krause --- helpers/labgrid-raw-interface | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/helpers/labgrid-raw-interface b/helpers/labgrid-raw-interface index ad54dcf10..6e51278b0 100755 --- a/helpers/labgrid-raw-interface +++ b/helpers/labgrid-raw-interface @@ -55,13 +55,13 @@ def main(program, ifname, count): if program == "tcpreplay": args.append(f"--intf1={ifname}") - args.append('-') + args.append("-") if program == "tcpdump": args.append("-n") args.append(f"--interface={ifname}") args.append("-w") - args.append('-') + args.append("-") if count: args.append("-c") @@ -75,22 +75,17 @@ def main(program, ifname, count): if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument( - '-d', - '--debug', - action='store_true', - default=False, - help="enable debug mode" - ) - parser.add_argument('program', type=str, help='program to run, either tcpreplay or tcpdump') - parser.add_argument('interface', type=str, help='interface name') - parser.add_argument('count', nargs="?", type=int, default=None, help='amount of frames to capture while recording') + parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") + parser.add_argument("program", type=str, help="program to run, either tcpreplay or tcpdump") + parser.add_argument("interface", type=str, help="interface name") + parser.add_argument("count", nargs="?", type=int, default=None, help="amount of frames to capture while recording") args = parser.parse_args() try: main(args.program, args.interface, args.count) except Exception as e: # pylint: disable=broad-except if args.debug: import traceback + traceback.print_exc(file=sys.stderr) print(f"ERROR: {e}", file=sys.stderr) exit(1) From c1aec37e2e3c90fb0598a2ad951e6b9847f2e0c9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 16:15:35 +0200 Subject: [PATCH 307/384] helpers/labgrid-raw-interface: introduce subparsers per program, pass options Adding a subparser per program keeps the code cleaner, especially when introducing ethtool commands in the future. 
To keep up with various options, pass the args namespace instead of adding kwargs for each new setting. Signed-off-by: Bastian Krause --- helpers/labgrid-raw-interface | 40 +++++++++++++++++++++-------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/helpers/labgrid-raw-interface b/helpers/labgrid-raw-interface index 6e51278b0..9dc0a0dfc 100755 --- a/helpers/labgrid-raw-interface +++ b/helpers/labgrid-raw-interface @@ -32,18 +32,18 @@ def get_denylist(): return denylist -def main(program, ifname, count): - if not ifname: +def main(program, options): + if not options.ifname: raise ValueError("Empty interface name.") - if any((c == "/" or c.isspace()) for c in ifname): - raise ValueError(f"Interface name '{ifname}' contains invalid characters.") - if len(ifname) > 16: - raise ValueError(f"Interface name '{ifname}' is too long.") + if any((c == "/" or c.isspace()) for c in options.ifname): + raise ValueError(f"Interface name '{options.ifname}' contains invalid characters.") + if len(options.ifname) > 16: + raise ValueError(f"Interface name '{options.ifname}' is too long.") denylist = get_denylist() - if ifname in denylist: - raise ValueError(f"Interface name '{ifname}' is denied in denylist.") + if options.ifname in denylist: + raise ValueError(f"Interface name '{options.ifname}' is denied in denylist.") programs = ["tcpreplay", "tcpdump"] if program not in programs: @@ -54,18 +54,18 @@ def main(program, ifname, count): ] if program == "tcpreplay": - args.append(f"--intf1={ifname}") + args.append(f"--intf1={options.ifname}") args.append("-") if program == "tcpdump": args.append("-n") - args.append(f"--interface={ifname}") + args.append(f"--interface={options.ifname}") args.append("-w") args.append("-") - if count: + if options.count: args.append("-c") - args.append(str(count)) + args.append(str(options.count)) try: os.execvp(args[0], args) @@ -76,12 +76,20 @@ def main(program, ifname, count): if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument("-d", "--debug", action="store_true", default=False, help="enable debug mode") - parser.add_argument("program", type=str, help="program to run, either tcpreplay or tcpdump") - parser.add_argument("interface", type=str, help="interface name") - parser.add_argument("count", nargs="?", type=int, default=None, help="amount of frames to capture while recording") + subparsers = parser.add_subparsers(dest="program", help="program to run") + + # tcpdump + tcpdump_parser = subparsers.add_parser("tcpdump") + tcpdump_parser.add_argument("ifname", type=str, help="interface name") + tcpdump_parser.add_argument("count", type=int, default=None, help="amount of frames to capture while recording") + + # tcpreplay + tcpreplay_parser = subparsers.add_parser("tcpreplay") + tcpreplay_parser.add_argument("ifname", type=str, help="interface name") + args = parser.parse_args() try: - main(args.program, args.interface, args.count) + main(args.program, args) except Exception as e: # pylint: disable=broad-except if args.debug: import traceback From f31f3dacc4084679056f49df68d0e947e74d0598 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 17:59:17 +0200 Subject: [PATCH 308/384] driver/rawnetworkinterfacedriver: run ruff format Signed-off-by: Bastian Krause --- labgrid/driver/rawnetworkinterfacedriver.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/labgrid/driver/rawnetworkinterfacedriver.py b/labgrid/driver/rawnetworkinterfacedriver.py index 3be80960f..5396e572d 100644 --- 
a/labgrid/driver/rawnetworkinterfacedriver.py
+++ b/labgrid/driver/rawnetworkinterfacedriver.py
@@ -170,12 +170,7 @@ def get_statistics(self):
         """
         Returns basic interface statistics of bound network interface resource.
         """
-        cmd = self.iface.command_prefix + [
-            "ip",
-            "--json",
-            "-stats", "-stats",
-            "link", "show",
-            self.iface.ifname]
+        cmd = self.iface.command_prefix + ["ip", "--json", "-stats", "-stats", "link", "show", self.iface.ifname]
         output = processwrapper.check_output(cmd)
         return json.loads(output)[0]

From 4f6fe09f60be1341dabd6925b624ff89ca9870be Mon Sep 17 00:00:00 2001
From: Joshua Watt
Date: Fri, 19 Jan 2024 13:45:17 -0700
Subject: [PATCH 309/384] driver/rawnetworkinterfacedriver: move timeout to
 tcpdump

Reworks the way that timeouts are handled so that instead of terminating
the process in stop_record, tcpdump will exit after a set time. This
will allow for live streaming of packets in a future patch.

Signed-off-by: Joshua Watt
Signed-off-by: Bastian Krause
---
 helpers/labgrid-raw-interface | 11 +++++++++++
 labgrid/driver/rawnetworkinterfacedriver.py | 17 +++++++++++------
 2 files changed, 22 insertions(+), 6 deletions(-)

diff --git a/helpers/labgrid-raw-interface b/helpers/labgrid-raw-interface
index 9dc0a0dfc..97797c843 100755
--- a/helpers/labgrid-raw-interface
+++ b/helpers/labgrid-raw-interface
@@ -67,6 +67,14 @@ def main(program, options):
             args.append("-c")
             args.append(str(options.count))
 
+        if options.timeout:
+            # The timeout is implemented by specifying the number of seconds before rotating the
+            # dump file, but limiting the number of files to 1
+            args.append("-G")
+            args.append(str(options.timeout))
+            args.append("-W")
+            args.append("1")
+
     try:
         os.execvp(args[0], args)
     except FileNotFoundError as e:
@@ -82,6 +90,9 @@ if __name__ == "__main__":
     tcpdump_parser = subparsers.add_parser("tcpdump")
     tcpdump_parser.add_argument("ifname", type=str, help="interface name")
     tcpdump_parser.add_argument("count", type=int, default=None, help="amount of frames to capture while recording")
+    tcpdump_parser.add_argument(
+        "--timeout", type=int, default=None, help="Amount of time to capture while recording. 0 means capture forever"
+    )
 
     # tcpreplay
     tcpreplay_parser = subparsers.add_parser("tcpreplay")

diff --git a/labgrid/driver/rawnetworkinterfacedriver.py b/labgrid/driver/rawnetworkinterfacedriver.py
index 5396e572d..62fad1af8 100644
--- a/labgrid/driver/rawnetworkinterfacedriver.py
+++ b/labgrid/driver/rawnetworkinterfacedriver.py
@@ -53,14 +53,15 @@ def _stop(self, proc, *, timeout=None):
         )
 
     @Driver.check_active
-    @step(args=["filename", "count"])
-    def start_record(self, filename, *, count=None):
+    @step(args=["filename", "count", "timeout"])
+    def start_record(self, filename, *, count=None, timeout=None):
         """
         Starts tcpdump on bound network interface resource.
         Args:
             filename (str): name of a file to record to
             count (int): optional, exit after receiving this many number of packets
+            timeout (int): optional, number of seconds to capture packets before tcpdump exits
 
         Returns:
             Popen object of tcpdump process
         """
@@ -69,6 +70,9 @@
         cmd = ["tcpdump", self.iface.ifname]
         if count is not None:
             cmd.append(str(count))
+        if timeout is not None:
+            cmd.append("--timeout")
+            cmd.append(str(timeout))
         cmd = self._wrap_command(cmd)
         with open(filename, "wb") as outdata:
             self._record_handle = subprocess.Popen(cmd, stdout=outdata, stderr=subprocess.PIPE)
@@ -99,15 +103,16 @@ def record(self, filename, *, count=None, timeout=None):
         Args:
             filename (str): name of a file to record to
             count (int): optional, exit after receiving this many number of packets
-            timeout (int): optional, maximum number of seconds to wait for the tcpdump process to
-                terminate
+            timeout (int): optional, number of seconds to capture packets before tcpdump exits
+        Returns:
+            Popen object of tcpdump process.
         """
         assert count or timeout
 
         try:
-            yield self.start_record(filename, count=count)
+            yield self.start_record(filename, count=count, timeout=timeout)
         finally:
-            self.stop_record(timeout=timeout)
+            self.stop_record()

From dcfa087398bfeeb484c987527c9cad45def5fa23 Mon Sep 17 00:00:00 2001
From: Joshua Watt
Date: Fri, 19 Jan 2024 13:47:41 -0700
Subject: [PATCH 310/384] driver/rawnetworkinterfacedriver: implement live
 streaming

Adds support for live streaming of captured packets such that they can
be processed in real time by tests instead of needing to capture for a
set period of time and do post-processing.

As an example:

```python
import dpkt

drv = target.get_driver("RawNetworkInterfaceDriver")
with drv.record(None, timeout=60) as p:
    pcap = dpkt.pcap.Reader(p.stdout)
    for timestamp, buf in pcap:
        eth = dpkt.ethernet.Ethernet(buf)
        ....
```

Signed-off-by: Joshua Watt
Signed-off-by: Bastian Krause
---
 helpers/labgrid-raw-interface | 4 ++++
 labgrid/driver/rawnetworkinterfacedriver.py | 20 ++++++++++++++------
 2 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/helpers/labgrid-raw-interface b/helpers/labgrid-raw-interface
index 97797c843..8aa4c9eac 100755
--- a/helpers/labgrid-raw-interface
+++ b/helpers/labgrid-raw-interface
@@ -60,6 +60,10 @@ def main(program, options):
     if program == "tcpdump":
         args.append("-n")
         args.append(f"--interface={options.ifname}")
+        # Write out each packet as it is received
+        args.append("--packet-buffered")
+        # Capture complete packets (for compatibility with older tcpdump versions)
+        args.append("--snapshot-length=0")
         args.append("-w")
         args.append("-")
 
diff --git a/labgrid/driver/rawnetworkinterfacedriver.py b/labgrid/driver/rawnetworkinterfacedriver.py
index 62fad1af8..7fafa6e08 100644
--- a/labgrid/driver/rawnetworkinterfacedriver.py
+++ b/labgrid/driver/rawnetworkinterfacedriver.py
@@ -59,7 +59,7 @@ def start_record(self, filename, *, count=None, timeout=None):
         Starts tcpdump on bound network interface resource.
Args: - filename (str): name of a file to record to + filename (str): name of a file to record to, or None to record to stdout count (int): optional, exit after receiving this many number of packets timeout (int): optional, number of seconds to capture packets before tcpdump exits Returns: @@ -74,8 +74,11 @@ def start_record(self, filename, *, count=None, timeout=None): cmd.append("--timeout") cmd.append(str(timeout)) cmd = self._wrap_command(cmd) - with open(filename, "wb") as outdata: - self._record_handle = subprocess.Popen(cmd, stdout=outdata, stderr=subprocess.PIPE) + if filename is None: + self._record_handle = subprocess.Popen(cmd, stdout=subprocess.PIPE) + else: + with open(filename, "wb") as outdata: + self._record_handle = subprocess.Popen(cmd, stdout=outdata, stderr=subprocess.PIPE) return self._record_handle @Driver.check_active @@ -90,6 +93,11 @@ def stop_record(self, *, timeout=None): """ try: self._stop(self._record_handle, timeout=timeout) + except subprocess.TimeoutExpired: + # If live streaming packets, there is no reason to wait for tcpdump + # to finish, so expect a timeout if piping to stdout + if self._record_handle.stdout is None: + raise finally: self._record_handle = None @@ -101,18 +109,18 @@ def record(self, filename, *, count=None, timeout=None): Either count or timeout must be specified. Args: - filename (str): name of a file to record to + filename (str): name of a file to record to, or None to live stream packets count (int): optional, exit after receiving this many number of packets timeout (int): optional, number of seconds to capture packets before tcpdump exits Returns: - Popen object of tcpdump process. + Popen object of tcpdump process. If filename is None, packets can be read from stdout """ assert count or timeout try: yield self.start_record(filename, count=count, timeout=timeout) finally: - self.stop_record() + self.stop_record(timeout=0 if filename is None else None) @Driver.check_active @step(args=["filename"]) From b6c9e4e98fe106d07302c190bcd8627e01804a70 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 14:34:10 +0200 Subject: [PATCH 311/384] github: update codecov/codecov-action v3 -> v4 GitHub emits warnings like: The following actions use a deprecated Node.js version and will be forced to run on node20: codecov/codecov-action@v3. For more info: https://github.blog/changelog/2024-03-07-github-actions-all-actions-will-run-on-node20-instead-of-node16-by-default/ The codecov upload hasn't worked reliably in a while. This should fix it. Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 1222500cf..b232bf63e 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -67,4 +67,4 @@ jobs: # check README.rst separately pip install rstcheck rstcheck --ignore-languages=bash --report-level=WARNING README.rst - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 From e2ecd3e969d241dbddadeedb486407a5a71d6f6e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Thu, 10 Oct 2024 12:53:59 +0200 Subject: [PATCH 312/384] pyproject.toml: use recursive dependencies Instead of repeating the dependencies in the "deb" and "dev" extras, switch to recursively referencing the relevant extras. This has worked in pip for a while. 
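As an illustration (hypothetical invocation, not part of this change),
installing the "deb" extra with a reasonably recent pip now resolves the
referenced extras transitively:

```
# resolves to labgrid plus the modbus, onewire and snmp extras
pip install "labgrid[deb]"
```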
Signed-off-by: Bastian Krause --- pyproject.toml | 49 ++----------------------------------------------- 1 file changed, 2 insertions(+), 47 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index 3c34ec0cf..c9ce540a1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -74,54 +74,9 @@ snmp = [ ] vxi11 = ["python-vxi11>=0.9"] xena = ["xenavalkyrie>=3.0.1"] -deb = [ - # labgrid[modbus] - "pyModbusTCP>=0.1.10", - - # labgrid[onewire] - "onewire>=0.2", - - # labgrid[snmp] - "pysnmp>=4.4.12, <6", - "pyasn1<0.6.1", -] +deb = ["labgrid[modbus,onewire,snmp]"] dev = [ - # references to other optional dependency groups - # labgrid[doc] - "sphinx_rtd_theme>=1.0.0", - "Sphinx>=2.0.0", - - # labgrid[docker] - "docker>=5.0.2", - - # labgrid[graph] - "graphviz>=0.17.0", - - # labgrid[kasa] - "python-kasa>=0.4.0", - - # labgrid[modbus] - "pyModbusTCP>=0.1.10", - - # labgrid[modbusrtu] - "minimalmodbus>=1.0.2", - - # labgrid[mqtt] - "paho-mqtt>=2.0.0", - - # labgrid[onewire] - "onewire>=0.2", - - # labgrid[pyvisa] - "pyvisa>=1.11.3", - "PyVISA-py>=0.5.2", - - # labgrid[snmp] - "pysnmp>=4.4.12, <6", - "pyasn1<0.6.1", - - # labgrid[vxi11] - "python-vxi11>=0.9", + "labgrid[doc,docker,graph,kasa,modbus,modbusrtu,mqtt,onewire,pyvisa,snmp,vxi11]", # additional dev dependencies "psutil>=5.8.0", From 7ab624019225e272c91dcaed9df48daadb602b6b Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Tue, 15 Oct 2024 14:56:31 +0200 Subject: [PATCH 313/384] util/agents/usb_hid_relay: fix concurrent access Since c9fc5bfdb9da, it was no longer possible to use the same USB relay device from multiple labgrid processes, as the USB device was kept open and claimed. To fix this, we use a context manager which first claims the USB interface (with retry while busy) and releases it after the transaction. With this fix, multiple processes can toggle outputs in a busy loop without causing 'USBError(16, 'Resource busy')' failures. 
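The resulting usage pattern looks like this (simplified sketch of the
code in the diff below; claim, use, release per transaction):

```python
def set_output(self, number, status):
    # claim interface 0 first, retrying briefly while another process holds it
    with self._claimed():
        self._set_output(number, status)
    # the interface is released again here, so other labgrid
    # processes can access the same relay device
```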
Fixes: c9fc5bfdb9da ("labgrid/util/agents/usb_hid_relay: keep the USB device open") Signed-off-by: Jan Luebbe --- labgrid/util/agents/usb_hid_relay.py | 46 ++++++++++++++++++++++------ 1 file changed, 36 insertions(+), 10 deletions(-) diff --git a/labgrid/util/agents/usb_hid_relay.py b/labgrid/util/agents/usb_hid_relay.py index 9cbcaf2c5..bf09a59d8 100644 --- a/labgrid/util/agents/usb_hid_relay.py +++ b/labgrid/util/agents/usb_hid_relay.py @@ -11,6 +11,10 @@ - Turn digital output on and off """ +import errno +from contextlib import contextmanager +from time import monotonic, sleep + import usb.core import usb.util @@ -26,18 +30,35 @@ def __init__(self, **args): raise ValueError("Device not found") if self._dev.idVendor == 0x16C0: - self.set_output = self.set_output_dcttech - self.get_output = self.get_output_dcttech + self._set_output = self._set_output_dcttech + self._get_output = self._get_output_dcttech elif self._dev.idVendor == 0x5131: - self.set_output = self.set_output_lcus - self.get_output = self.get_output_lcus + self._set_output = self._set_output_lcus + self._get_output = self._get_output_lcus else: raise ValueError(f"Unknown vendor/protocol for VID {self._dev.idVendor:x}") if self._dev.is_kernel_driver_active(0): self._dev.detach_kernel_driver(0) - def set_output_dcttech(self, number, status): + @contextmanager + def _claimed(self): + timeout = monotonic() + 1.0 + while True: + try: + usb.util.claim_interface(self._dev, 0) + break + except usb.core.USBError as e: + if monotonic() > timeout: + raise e + if e.errno == errno.EBUSY: + sleep(0.01) + else: + raise e + yield + usb.util.release_interface(self._dev, 0) + + def _set_output_dcttech(self, number, status): assert 1 <= number <= 8 req = [0xFF if status else 0xFD, number] self._dev.ctrl_transfer( @@ -48,7 +69,7 @@ def set_output_dcttech(self, number, status): req, # payload ) - def get_output_dcttech(self, number): + def _get_output_dcttech(self, number): assert 1 <= number <= 8 resp = self._dev.ctrl_transfer( usb.util.CTRL_TYPE_CLASS | usb.util.CTRL_RECIPIENT_DEVICE | usb.util.ENDPOINT_IN, @@ -59,7 +80,7 @@ def get_output_dcttech(self, number): ) return bool(resp[7] & (1 << (number - 1))) - def set_output_lcus(self, number, status): + def _set_output_lcus(self, number, status): assert 1 <= number <= 8 ep_in = self._dev[0][(0, 0)][0] ep_out = self._dev[0][(0, 0)][1] @@ -68,13 +89,18 @@ def set_output_lcus(self, number, status): ep_out.write(req) ep_in.read(64) - def get_output_lcus(self, number): + def _get_output_lcus(self, number): assert 1 <= number <= 8 # we have no information on how to read the current value return False - def __del__(self): - usb.util.release_interface(self._dev, 0) + def set_output(self, number, status): + with self._claimed(): + self._set_output(number, status) + + def get_output(self, number): + with self._claimed(): + self._get_output(number) _relays = {} From 809b2867b2b8525268aab4493239134fd1ce1811 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 13:47:22 +0200 Subject: [PATCH 314/384] labgrid/driver/power: deprecate siglent backend See https://github.com/labgrid-project/labgrid/issues/1507 for more details. 
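Note that CPython hides DeprecationWarnings outside of tests by default;
as a general Python hint (not specific to this change), the new warning
can be surfaced with e.g.:

```python
import warnings

warnings.simplefilter("default", DeprecationWarning)  # print each DeprecationWarning once per location
```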
Signed-off-by: Bastian Krause --- CHANGES.rst | 4 ++++ labgrid/driver/power/siglent.py | 12 ++++++++++++ 2 files changed, 16 insertions(+) diff --git a/CHANGES.rst b/CHANGES.rst index 9b4ac18b6..1cdb602e7 100644 --- a/CHANGES.rst +++ b/CHANGES.rst @@ -43,6 +43,10 @@ Due to the migration, 24.1 includes the following breaking changes: Other breaking changes include: - Support for Python 3.8 was dropped. +- The siglent power backend is deprecated because it uses the no longer + maintained vxi11 module which again uses the deprecated (and in Python 3.13 + removed) xdrlib. See + `issue #1507 `_. Known issues in 24.1 ~~~~~~~~~~~~~~~~~~~~ diff --git a/labgrid/driver/power/siglent.py b/labgrid/driver/power/siglent.py index bf92ee16d..d06e7725e 100644 --- a/labgrid/driver/power/siglent.py +++ b/labgrid/driver/power/siglent.py @@ -1,9 +1,16 @@ """ tested with Siglent SPD3303X-E, and should be compatible with all SPD3000X series modules""" +import warnings + import vxi11 def power_set(host, port, index, value): + warnings.warn( + "siglent power backend uses vxi11 module using deprecated xdrlib module, see https://github.com/labgrid-project/labgrid/issues/1507", + DeprecationWarning, + ) + assert port is None index = int(index) assert 1 <= index <= 2 @@ -13,6 +20,11 @@ def power_set(host, port, index, value): def power_get(host, port, index): + warnings.warn( + "siglent power backend uses vxi11 module using deprecated xdrlib module, see https://github.com/labgrid-project/labgrid/issues/1507", + DeprecationWarning, + ) + assert port is None index = int(index) assert 1 <= index <= 2 From c9651ef2577cbbb2756362b9a9f7a5b182079e70 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 18 Oct 2024 19:11:08 +0200 Subject: [PATCH 315/384] github/workflows: set the codecov token The codecov-action v4 requires a token. Signed-off-by: Jan Luebbe --- .github/workflows/reusable-unit-tests.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index b232bf63e..d638c71a1 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -68,3 +68,5 @@ jobs: pip install rstcheck rstcheck --ignore-languages=bash --report-level=WARNING README.rst - uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} From 6784f20c67cd8117c30d22777c9566b348d70fac Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 18 Oct 2024 19:14:36 +0200 Subject: [PATCH 316/384] github/workflows: upload coverage directly after testing Also add a name to the step. 
Signed-off-by: Jan Luebbe --- .github/workflows/reusable-unit-tests.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index d638c71a1..e907182b4 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -56,6 +56,10 @@ jobs: - name: Test with pytest run: | pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" + - name: Upload coverage to Codecov + uses: codecov/codecov-action@v4 + with: + token: ${{ secrets.CODECOV_TOKEN }} - name: Build documentation run: | make -C doc clean @@ -67,6 +71,3 @@ jobs: # check README.rst separately pip install rstcheck rstcheck --ignore-languages=bash --report-level=WARNING README.rst - - uses: codecov/codecov-action@v4 - with: - token: ${{ secrets.CODECOV_TOKEN }} From c9119f29a702d23e1b9370691d56b12467afa47a Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 18 Oct 2024 19:43:56 +0200 Subject: [PATCH 317/384] github/workflows: pass CODECOV_TOKEN to reusable workflow Signed-off-by: Jan Luebbe --- .github/workflows/push-pr-unit-tests.yml | 2 ++ .github/workflows/reusable-unit-tests.yml | 3 +++ .github/workflows/scheduled-unit-tests.yml | 2 ++ 3 files changed, 7 insertions(+) diff --git a/.github/workflows/push-pr-unit-tests.yml b/.github/workflows/push-pr-unit-tests.yml index ca44774fa..4923ad254 100644 --- a/.github/workflows/push-pr-unit-tests.yml +++ b/.github/workflows/push-pr-unit-tests.yml @@ -10,6 +10,8 @@ jobs: matrix: python-version: ['3.9', '3.10', '3.11', '3.12'] uses: ./.github/workflows/reusable-unit-tests.yml + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: python-version: ${{ matrix.python-version }} push-pr-unit-tests-docker: diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index e907182b4..619a9c1e1 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -9,6 +9,9 @@ on: branch: type: string required: false + secrets: + CODECOV_TOKEN: + required: false jobs: build: diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index edd55096b..ea4524cbe 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -13,6 +13,8 @@ jobs: python-version: ['3.9', '3.10', '3.11', '3.12'] branch: ['master'] uses: ./.github/workflows/reusable-unit-tests.yml + secrets: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} with: python-version: ${{ matrix.python-version }} branch: ${{ matrix.branch }} From c00e74a9b61dcf505e66329290fd3560e6f3fe90 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 11:25:48 +0200 Subject: [PATCH 318/384] github: add tests for Python 3.13 Signed-off-by: Bastian Krause --- .github/workflows/push-pr-unit-tests.yml | 2 +- .github/workflows/scheduled-unit-tests.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/push-pr-unit-tests.yml b/.github/workflows/push-pr-unit-tests.yml index 4923ad254..f8dec40f4 100644 --- a/.github/workflows/push-pr-unit-tests.yml +++ b/.github/workflows/push-pr-unit-tests.yml @@ -8,7 +8,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] uses: ./.github/workflows/reusable-unit-tests.yml secrets: CODECOV_TOKEN: ${{ 
secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/scheduled-unit-tests.yml b/.github/workflows/scheduled-unit-tests.yml index ea4524cbe..a01ee7e89 100644 --- a/.github/workflows/scheduled-unit-tests.yml +++ b/.github/workflows/scheduled-unit-tests.yml @@ -10,7 +10,7 @@ jobs: strategy: fail-fast: false matrix: - python-version: ['3.9', '3.10', '3.11', '3.12'] + python-version: ['3.9', '3.10', '3.11', '3.12', '3.13'] branch: ['master'] uses: ./.github/workflows/reusable-unit-tests.yml secrets: From ebe45fc3099302e0f4386c255a3f3ae0b3917a7e Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 11:59:24 +0200 Subject: [PATCH 319/384] pyproject: advertise Python 3.13 compatibility Signed-off-by: Bastian Krause --- pyproject.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index c9ce540a1..f19952943 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,7 @@ classifiers = [ "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", "License :: OSI Approved :: GNU Lesser General Public License v2 or later (LGPLv2+)", ] dependencies = [ @@ -187,7 +188,7 @@ signature-mutators = ["labgrid.step.step"] [tool.tox] legacy_tox_ini = """ [tox] -envlist = py39, py310, py311, py312 +envlist = py39, py310, py311, py312, py313 isolated_build = true [testenv] From dc5a9f7945bd0969e036a8cc1b5d9ea1e7a05848 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 12:57:55 +0200 Subject: [PATCH 320/384] github: show extra test summary The extra test summary shows additional information such as why a test was skipped. This is helpful to compare test runs on different Python versions and git states. Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 619a9c1e1..e68a53d03 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -58,7 +58,7 @@ jobs: ruff format --check --diff labgrid/remote/ - name: Test with pytest run: | - pytest --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" + pytest -r a --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" - name: Upload coverage to Codecov uses: codecov/codecov-action@v4 with: From bf9bf13c951941d56651f957267c64eaa0a2543c Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Mon, 30 Sep 2024 13:46:42 +0200 Subject: [PATCH 321/384] doc: mock vxi11 module On Python 3.13, the vxi11 module import fails: autodoc: failed to import module 'siglent' from module 'labgrid.driver.power'; the following exception was raised: No module named 'xdrlib' It uses xdrlib which was dropped from Python's standard library with Python 3.13. See https://github.com/labgrid-project/labgrid/issues/1507 for more details. For the time being, mock the vxi11 module to make the doc builds work on Python 3.13. 
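Conceptually, autodoc_mock_imports lets `import vxi11` succeed during
the docs build without the real package being importable, roughly
equivalent to the following sketch (Sphinx uses its own mock type
internally, this is only an illustration):

```python
import sys
from unittest import mock

sys.modules["vxi11"] = mock.MagicMock()  # stand-in module, any attribute access succeeds

import labgrid.driver.power.siglent  # imports fine despite the missing xdrlib
```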
Signed-off-by: Bastian Krause --- doc/conf.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/conf.py b/doc/conf.py index 2fcea3267..1196c76bb 100644 --- a/doc/conf.py +++ b/doc/conf.py @@ -180,7 +180,8 @@ } autodoc_mock_imports = ['onewire', 'gi', - 'gi.repository',] + 'gi.repository', + 'vxi11'] # -- Options for autosection ---------------------------------------------- autosectionlabel_prefix_document = True From 24ad7ef101fecef09321b4df226deebebfc3e5d2 Mon Sep 17 00:00:00 2001 From: Jan Luebbe Date: Fri, 18 Oct 2024 23:39:45 +0200 Subject: [PATCH 322/384] github/workflows: set the python version as a codecov flag This allows codecov to show the coverage separately for each tested Python version. Signed-off-by: Jan Luebbe --- .github/workflows/reusable-unit-tests.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 619a9c1e1..1a44b3fb3 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -63,6 +63,7 @@ jobs: uses: codecov/codecov-action@v4 with: token: ${{ secrets.CODECOV_TOKEN }} + flags: ${{ inputs.python-version }} - name: Build documentation run: | make -C doc clean From 0025a9a0ce865008b5cbb5f365a630dc5cd11c43 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 16:59:11 +0200 Subject: [PATCH 323/384] pyproject.toml: drop ruff excludes redundant to gitignore, extend excludes instead ruff's "exclude" documentation [1] reads: "Note that you'll typically want to use extend-exclude to modify the excluded paths." Do that. The default for the "respect-gitignore" setting [2] is "true", so we can safely drop entries that are already specified there. [1] https://docs.astral.sh/ruff/settings/#exclude [2] https://docs.astral.sh/ruff/settings/#respect-gitignore Signed-off-by: Bastian Krause --- pyproject.toml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index f19952943..64686167c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -206,16 +206,11 @@ commands = pylint -f colorized labgrid [tool.ruff] line-length = 119 -exclude = [ "__pycache__", "labgrid.egg-info", +extend-exclude = [ ".pybuild", - "build", "debian", "env", - "venv", "envs", - "dist", "labgrid/remote/generated", ] From 8576744611124f3251f70c2c60409f3d13fffff9 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 17:00:43 +0200 Subject: [PATCH 324/384] pyproject.toml: maintain ruff include centrally The default value of the "include" setting is ["*.py", "*.pyi", "*.ipynb", "**/pyproject.toml"] [1]. We're only interested in pyproject.toml and *.py files, so add pyproject.toml and the pre-formatted labgrid.remote module (previously specified in the CI configuration only).
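As a sketch, the resulting central configuration (mirroring the diff below) limits ruff to exactly these paths:

    [tool.ruff]
    include = [
        "**/pyproject.toml",
        "labgrid/remote/**/*.py",
    ]

This also lets CI call plain "ruff format --check --diff" without repeating the path list.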
[1] https://docs.astral.sh/ruff/settings/#include Signed-off-by: Bastian Krause --- .github/workflows/reusable-unit-tests.yml | 2 +- pyproject.toml | 4 ++++ 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/reusable-unit-tests.yml b/.github/workflows/reusable-unit-tests.yml index 064b7f1b1..a415034a3 100644 --- a/.github/workflows/reusable-unit-tests.yml +++ b/.github/workflows/reusable-unit-tests.yml @@ -55,7 +55,7 @@ jobs: pylint labgrid - name: Format with ruff run: | - ruff format --check --diff labgrid/remote/ + ruff format --check --diff - name: Test with pytest run: | pytest -r a --cov-config .coveragerc --cov=labgrid --local-sshmanager --ssh-username runner -k "not test_docker_with_daemon" diff --git a/pyproject.toml b/pyproject.toml index 64686167c..546d33595 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -213,6 +213,10 @@ extend-exclude = [ "envs", "labgrid/remote/generated", ] +include = [ + "**/pyproject.toml", + "labgrid/remote/**/*.py", +] [tool.ruff.lint] select = ["B", "E", "F", "I", "SIM", "UP"] From 55c2c3a6904ccf83c4f1f517d461cc06aac950f7 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 17:03:35 +0200 Subject: [PATCH 325/384] labgrid/protocol: run ruff format Signed-off-by: Bastian Krause --- labgrid/protocol/infoprotocol.py | 2 +- labgrid/protocol/resetprotocol.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/labgrid/protocol/infoprotocol.py b/labgrid/protocol/infoprotocol.py index c95e1411c..e266c3121 100644 --- a/labgrid/protocol/infoprotocol.py +++ b/labgrid/protocol/infoprotocol.py @@ -5,7 +5,7 @@ class InfoProtocol(abc.ABC): """Abstract class providing the InfoProtocol interface""" @abc.abstractmethod - def get_ip(self, interface: str = 'eth0'): + def get_ip(self, interface: str = "eth0"): """Implementations should return the IP address for the supplied interface.""" raise NotImplementedError diff --git a/labgrid/protocol/resetprotocol.py b/labgrid/protocol/resetprotocol.py index f7a6fe388..6dc838706 100644 --- a/labgrid/protocol/resetprotocol.py +++ b/labgrid/protocol/resetprotocol.py @@ -1,5 +1,6 @@ import abc + class ResetProtocol(abc.ABC): @abc.abstractmethod def reset(self): From fb2cfba44dc2f9b97e737e3072ee85b2cafcc590 Mon Sep 17 00:00:00 2001 From: Bastian Krause Date: Fri, 11 Oct 2024 17:32:07 +0200 Subject: [PATCH 326/384] examples: run ruff format Signed-off-by: Bastian Krause --- examples/barebox/conftest.py | 4 +- examples/barebox/test_barebox.py | 6 +- examples/barebox/test_bootchooser.py | 6 +- examples/barebox/test_sleep.py | 4 +- examples/barebox/test_state.py | 4 +- examples/barebox/test_watchdog.py | 6 +- examples/deditec-relais8/deditec.py | 2 +- examples/deditec-relais8/deditec_remote.py | 2 +- examples/docker/conftest.py | 8 +- examples/docker/test_shell.py | 6 +- examples/library/test.py | 12 +-- examples/modbusrtu/conftest.py | 4 +- examples/network-test/pkg-replay-record.py | 1 + examples/networkmanager/nm.py | 77 +++++++++---------- examples/power/power_example.py | 2 +- examples/pyvisa/pyvisa_example.py | 8 +- .../qemu-networking/test_qemu_networking.py | 1 + examples/remote/test_barebox.py | 8 +- examples/shell/conftest.py | 4 +- examples/shell/test_hwclock.py | 8 +- examples/shell/test_memory.py | 10 +-- examples/shell/test_rt.py | 10 +-- examples/shell/test_shell.py | 6 +- examples/sigrok/main.py | 2 +- examples/strategy/bareboxrebootstrategy.py | 6 +- examples/strategy/quartusstrategy.py | 7 +- examples/strategy/test_barebox_strategy.py | 22 +++--- 
examples/strategy/test_uboot_strategy.py | 22 +++--- examples/sysfsgpio/sysfsgpio.py | 2 +- examples/sysfsgpio/sysfsgpio_remote.py | 2 +- examples/usb/test_usb_mxs.py | 5 +- examples/usb/test_usb_storage.py | 5 +- examples/usbpower/cycle.py | 5 +- examples/usbpower/examplestrategy.py | 9 +-- examples/usbpower/test_example.py | 14 ++-- examples/usbsdmux/test_sdmux.py | 6 +- 36 files changed, 153 insertions(+), 153 deletions(-) diff --git a/examples/barebox/conftest.py b/examples/barebox/conftest.py index 151cacb1b..f0f6af199 100644 --- a/examples/barebox/conftest.py +++ b/examples/barebox/conftest.py @@ -1,8 +1,8 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def command(target): - barebox = target.get_driver('CommandProtocol') + barebox = target.get_driver("CommandProtocol") target.activate(barebox) return barebox diff --git a/examples/barebox/test_barebox.py b/examples/barebox/test_barebox.py index 271ac17ff..b9ebf9c4b 100644 --- a/examples/barebox/test_barebox.py +++ b/examples/barebox/test_barebox.py @@ -1,11 +1,11 @@ def test_barebox(command): - stdout, stderr, returncode = command.run('version') + stdout, stderr, returncode = command.run("version") assert returncode == 0 assert stdout assert not stderr - assert 'barebox' in '\n'.join(stdout) + assert "barebox" in "\n".join(stdout) - stdout, stderr, returncode = command.run('false') + stdout, stderr, returncode = command.run("false") assert returncode == 1 assert not stdout assert not stderr diff --git a/examples/barebox/test_bootchooser.py b/examples/barebox/test_bootchooser.py index 57e68e2e6..76498b3b4 100644 --- a/examples/barebox/test_bootchooser.py +++ b/examples/barebox/test_bootchooser.py @@ -2,10 +2,10 @@ def test_bootchooser(command): - stdout, stderr, returncode = command.run('bootchooser -i') + stdout, stderr, returncode = command.run("bootchooser -i") if returncode == 127: pytest.skip("bootchooser command not available") assert returncode == 0 assert not stderr - assert stdout[0].startswith('Good targets') - assert stdout[1] != 'none' + assert stdout[0].startswith("Good targets") + assert stdout[1] != "none" diff --git a/examples/barebox/test_sleep.py b/examples/barebox/test_sleep.py index 1df4ce66e..b7f654243 100644 --- a/examples/barebox/test_sleep.py +++ b/examples/barebox/test_sleep.py @@ -6,14 +6,14 @@ def test_sleep(command): # measure the round-trip-time timestamp = monotonic() - stdout, stderr, returncode = command.run('true') + stdout, stderr, returncode = command.run("true") elapsed_true = monotonic() - timestamp assert returncode == 0 assert not stdout assert not stderr timestamp = monotonic() - stdout, stderr, returncode = command.run('sleep 1') + stdout, stderr, returncode = command.run("sleep 1") elapsed_sleep = monotonic() - timestamp assert returncode == 0 assert not stdout diff --git a/examples/barebox/test_state.py b/examples/barebox/test_state.py index 13e397dcf..49415cdcf 100644 --- a/examples/barebox/test_state.py +++ b/examples/barebox/test_state.py @@ -2,10 +2,10 @@ def test_state(command): - stdout, stderr, returncode = command.run('state') + stdout, stderr, returncode = command.run("state") if returncode == 127: pytest.skip("state command not available") assert returncode == 0 assert not stderr - assert stdout[0] == 'registered state instances:' + assert stdout[0] == "registered state instances:" assert len(stdout) > 1 diff --git a/examples/barebox/test_watchdog.py b/examples/barebox/test_watchdog.py index 43b75183b..2d0b58a86 100644 --- 
a/examples/barebox/test_watchdog.py +++ b/examples/barebox/test_watchdog.py @@ -2,7 +2,7 @@ def test_watchdog(command): - stdout, stderr, returncode = command.run('wd 1') + stdout, stderr, returncode = command.run("wd 1") if returncode == 127: pytest.skip("wd command not available") assert returncode == 0 @@ -11,6 +11,6 @@ def test_watchdog(command): command._await_prompt() - stdout = command.run_check('echo ${global.system.reset}') + stdout = command.run_check("echo ${global.system.reset}") assert len(stdout) == 1 - assert stdout[0] == 'WDG' + assert stdout[0] == "WDG" diff --git a/examples/deditec-relais8/deditec.py b/examples/deditec-relais8/deditec.py index 0a47982ca..b007994de 100644 --- a/examples/deditec-relais8/deditec.py +++ b/examples/deditec-relais8/deditec.py @@ -12,7 +12,7 @@ # log labgrid steps StepLogger.start() -t = Target('main') +t = Target("main") r = DeditecRelais8(t, name=None, index=1) d = DeditecRelaisDriver(t, name=None) diff --git a/examples/deditec-relais8/deditec_remote.py b/examples/deditec-relais8/deditec_remote.py index 53a860ed3..c0e45e385 100644 --- a/examples/deditec-relais8/deditec_remote.py +++ b/examples/deditec-relais8/deditec_remote.py @@ -10,7 +10,7 @@ # show labgrid steps on the console StepLogger.start() -e = Environment('import-dedicontrol.yaml') +e = Environment("import-dedicontrol.yaml") t = e.get_target() p = t.get_driver("DigitalOutputProtocol") diff --git a/examples/docker/conftest.py b/examples/docker/conftest.py index 6c407bf4e..46bacd6de 100644 --- a/examples/docker/conftest.py +++ b/examples/docker/conftest.py @@ -1,9 +1,9 @@ import pytest -@pytest.fixture(scope='session') + +@pytest.fixture(scope="session") def command(target): - strategy = target.get_driver('DockerStrategy') + strategy = target.get_driver("DockerStrategy") strategy.transition("accessible") - shell = target.get_driver('CommandProtocol') + shell = target.get_driver("CommandProtocol") return shell - diff --git a/examples/docker/test_shell.py b/examples/docker/test_shell.py index 50b9b7d5d..450af06bb 100644 --- a/examples/docker/test_shell.py +++ b/examples/docker/test_shell.py @@ -1,11 +1,11 @@ def test_shell(command): - stdout, stderr, returncode = command.run('cat /proc/version') + stdout, stderr, returncode = command.run("cat /proc/version") assert returncode == 0 assert len(stdout) > 0 assert len(stderr) == 0 - assert 'Linux' in stdout[0] + assert "Linux" in stdout[0] - stdout, stderr, returncode = command.run('false') + stdout, stderr, returncode = command.run("false") assert returncode != 0 assert len(stdout) == 0 assert len(stderr) == 0 diff --git a/examples/library/test.py b/examples/library/test.py index 9870e1670..e2851eaed 100755 --- a/examples/library/test.py +++ b/examples/library/test.py @@ -15,15 +15,17 @@ # log labgrid steps StepLogger.start() + def run_once(target): - s = target.get_driver('BareboxStrategy') + s = target.get_driver("BareboxStrategy") s.status = Status.unknown # force a power-cycle - s.transition('barebox') - cmd = target['CommandProtocol'] - cmd.run_check('test -e /dev/nand0') + s.transition("barebox") + cmd = target["CommandProtocol"] + cmd.run_check("test -e /dev/nand0") target.deactivate(cmd) + env = Environment(sys.argv[1]) -target = env.get_target('main') +target = env.get_target("main") while True: run_once(target) diff --git a/examples/modbusrtu/conftest.py b/examples/modbusrtu/conftest.py index 4d28384ff..cf9696a98 100644 --- a/examples/modbusrtu/conftest.py +++ b/examples/modbusrtu/conftest.py @@ -1,7 +1,7 @@ import pytest 
-@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def instrument(target): - _modbus = target.get_driver('ModbusRTUDriver') + _modbus = target.get_driver("ModbusRTUDriver") return _modbus diff --git a/examples/network-test/pkg-replay-record.py b/examples/network-test/pkg-replay-record.py index 124991ca0..6cbcf3d3f 100755 --- a/examples/network-test/pkg-replay-record.py +++ b/examples/network-test/pkg-replay-record.py @@ -11,6 +11,7 @@ from labgrid import Environment from labgrid.logging import basicConfig, StepLogger + def generate_frame(): frame = Ether(dst="11:22:33:44:55:66", src="66:55:44:33:22:11", type=0x9000) padding = "\x00" * (conf.min_pkt_size - len(frame)) diff --git a/examples/networkmanager/nm.py b/examples/networkmanager/nm.py index e2e9f8876..505364d50 100644 --- a/examples/networkmanager/nm.py +++ b/examples/networkmanager/nm.py @@ -12,72 +12,71 @@ StepLogger.start() -e = Environment('nm.env') +e = Environment("nm.env") t = e.get_target() -d = t.get_driver('NetworkInterfaceDriver') +d = t.get_driver("NetworkInterfaceDriver") # based on https://developer.gnome.org/NetworkManager/stable/ch01.html, but adapted to python dicts s_client = { - 'connection': { - 'type': "802-11-wireless", + "connection": { + "type": "802-11-wireless", }, - '802-11-wireless': { - 'mode': "infrastructure", - 'ssid': "local-rpi", + "802-11-wireless": { + "mode": "infrastructure", + "ssid": "local-rpi", }, - '802-11-wireless-security': { - 'key-mgmt': "wpa-psk", - 'psk': "obMinwyurArc5", + "802-11-wireless-security": { + "key-mgmt": "wpa-psk", + "psk": "obMinwyurArc5", }, - 'ipv4': { - 'method': "auto", - 'ignore-auto-dns': True, - 'ignore-auto-routes': True, - 'never-default': True, + "ipv4": { + "method": "auto", + "ignore-auto-dns": True, + "ignore-auto-routes": True, + "never-default": True, }, - 'ipv6': { - 'method': "link-local", + "ipv6": { + "method": "link-local", }, } s_ap = { - 'connection': { - 'type': "802-11-wireless", + "connection": { + "type": "802-11-wireless", }, - '802-11-wireless': { - 'mode': "ap", - 'ssid': "local-rpi", + "802-11-wireless": { + "mode": "ap", + "ssid": "local-rpi", }, - '802-11-wireless-security': { - 'key-mgmt': "wpa-psk", - 'psk': "obMinwyurArc5", + "802-11-wireless-security": { + "key-mgmt": "wpa-psk", + "psk": "obMinwyurArc5", }, - 'ipv4': { + "ipv4": { #'method': "auto", #'method': "link-local", - 'method': "shared", - 'addresses': ["172.16.0.2/29"], + "method": "shared", + "addresses": ["172.16.0.2/29"], }, - 'ipv6': { - 'method': "link-local", + "ipv6": { + "method": "link-local", }, } d.disable() -d.wait_state('disconnected') +d.wait_state("disconnected") print("access points after scan") pprint(d.get_access_points()) d.configure(s_ap) -d.wait_state('activated') +d.wait_state("activated") print("settings in AP mode") pprint(d.get_settings()) print("state in AP mode") pprint(d.get_state()) -#d.configure(s_client) -#d.wait_state('activated') -#print("settings in client mode") -#pprint(d.get_settings()) -#print("state in client mode") -#pprint(d.get_state()) - +# d.configure(s_client) +# d.wait_state('activated') +# print("settings in client mode") +# pprint(d.get_settings()) +# print("state in client mode") +# pprint(d.get_state()) diff --git a/examples/power/power_example.py b/examples/power/power_example.py index 82a8ac6f5..4fc5fed68 100644 --- a/examples/power/power_example.py +++ b/examples/power/power_example.py @@ -3,7 +3,7 @@ @pytest.fixture() def pdu(target): - return target.get_driver('NetworkPowerDriver') + return 
target.get_driver("NetworkPowerDriver") def test_something(pdu): diff --git a/examples/pyvisa/pyvisa_example.py b/examples/pyvisa/pyvisa_example.py index 16743ef40..da37ca052 100644 --- a/examples/pyvisa/pyvisa_example.py +++ b/examples/pyvisa/pyvisa_example.py @@ -3,13 +3,13 @@ @pytest.fixture() def signal_generator(target): - return target.get_driver('PyVISADriver').get_session() + return target.get_driver("PyVISADriver").get_session() def test_with_signal_generator_example(signal_generator): - signal_generator.write('*RST') + signal_generator.write("*RST") # Setup channel 1 - signal_generator.write('C1:BSWV WVTP,SQUARE,HLEV,5,LLEV,0,DUTY,50') + signal_generator.write("C1:BSWV WVTP,SQUARE,HLEV,5,LLEV,0,DUTY,50") # Switch on channel 1 - signal_generator.write('C1:OUTP ON,LOAD,HZ,PLRT,NOR') + signal_generator.write("C1:OUTP ON,LOAD,HZ,PLRT,NOR") diff --git a/examples/qemu-networking/test_qemu_networking.py b/examples/qemu-networking/test_qemu_networking.py index 8f256128b..c1cc971b3 100644 --- a/examples/qemu-networking/test_qemu_networking.py +++ b/examples/qemu-networking/test_qemu_networking.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. + def test_shell(shell_command): shell_command.run("true") diff --git a/examples/remote/test_barebox.py b/examples/remote/test_barebox.py index e8ae6319d..4564c7a94 100644 --- a/examples/remote/test_barebox.py +++ b/examples/remote/test_barebox.py @@ -1,14 +1,14 @@ def test_target(target): - barebox = target.get_driver('CommandProtocol') + barebox = target.get_driver("CommandProtocol") target.activate(barebox) - stdout, stderr, returncode = barebox.run('version') + stdout, stderr, returncode = barebox.run("version") assert returncode == 0 assert stdout assert not stderr - assert 'barebox' in '\n'.join(stdout) + assert "barebox" in "\n".join(stdout) - stdout, stderr, returncode = barebox.run('false') + stdout, stderr, returncode = barebox.run("false") assert returncode == 1 assert not stdout assert not stderr diff --git a/examples/shell/conftest.py b/examples/shell/conftest.py index 22aecbdcf..c4ab03d57 100644 --- a/examples/shell/conftest.py +++ b/examples/shell/conftest.py @@ -1,8 +1,8 @@ import pytest -@pytest.fixture(scope='session') +@pytest.fixture(scope="session") def command(target): - shell = target.get_driver('CommandProtocol') + shell = target.get_driver("CommandProtocol") target.activate(shell) return shell diff --git a/examples/shell/test_hwclock.py b/examples/shell/test_hwclock.py index c774d0ee2..843c6fc83 100644 --- a/examples/shell/test_hwclock.py +++ b/examples/shell/test_hwclock.py @@ -3,7 +3,7 @@ def test_hwclock_rate(command): """Test that the hardware clock rate is not too inaccurate.""" - result = command.run_check('hwclock -c | head -n 3') + result = command.run_check("hwclock -c | head -n 3") hw_time, sys_time, freq_offset_ppm, tick = result[-1].strip().split() assert abs(int(freq_offset_ppm)) < 1000 @@ -15,11 +15,11 @@ def test_hwclock_value(command): """ def get_time(): - result = command.run_check('hwclock --utc --show')[0].strip() - return datetime.strptime(result, '%Y-%m-%d %H:%M:%S.%f+0:00') + result = command.run_check("hwclock --utc --show")[0].strip() + return datetime.strptime(result, "%Y-%m-%d %H:%M:%S.%f+0:00") def set_time(time): - time = time.strftime('%Y-%m-%d %H:%M:%S.%f+0:00') + time = time.strftime("%Y-%m-%d %H:%M:%S.%f+0:00") command.run_check(f'hwclock --utc --set --date "{time}"') offset = abs((get_time() - 
datetime.utcnow()).total_seconds()) diff --git a/examples/shell/test_memory.py b/examples/shell/test_memory.py index c4c8841bc..7c6477b2c 100644 --- a/examples/shell/test_memory.py +++ b/examples/shell/test_memory.py @@ -8,26 +8,26 @@ def test_memory_mbw(command): """Test memcopy bandwidth""" try: - command.run_check('which mbw') + command.run_check("which mbw") except ExecutionError: pytest.skip("mbw missing") - result = command.run_check('mbw -qt0 8M') + result = command.run_check("mbw -qt0 8M") result = result[-1].strip() pattern = r"AVG\s+.*Copy:\s+(?P\S+)\s+MiB/s" - bw, = map(float, re.fullmatch(pattern, result).groups()) + (bw,) = map(float, re.fullmatch(pattern, result).groups()) assert bw > 40 # > 40 MiB/second def test_memory_memtester_short(command): """Test RAM for errors""" try: - command.run_check('which memtester') + command.run_check("which memtester") except ExecutionError: pytest.skip("memtester missing") - result = command.run_check('memtester 128k 1 | tail -n 1') + result = command.run_check("memtester 128k 1 | tail -n 1") result = result[-1].strip() assert result == "Done." diff --git a/examples/shell/test_rt.py b/examples/shell/test_rt.py index 85824ea62..b2fa1b96f 100644 --- a/examples/shell/test_rt.py +++ b/examples/shell/test_rt.py @@ -8,11 +8,11 @@ def test_rt_cyclictest_short(command): """Test a basic cyclictest run""" try: - command.run_check('which cyclictest') + command.run_check("which cyclictest") except ExecutionError: pytest.skip("cyclictest missing") - result = command.run_check('cyclictest -SN -D 5 -q') + result = command.run_check("cyclictest -SN -D 5 -q") result = result[-1].strip() pattern = r"Min:\s+(?P\w+)\s+Act:\s+\w+\s+Avg:\s+(?P\w+)\s+Max:\s+(?P\w+)" @@ -25,13 +25,13 @@ def test_rt_cyclictest_short(command): def test_rt_hackbench_short(command): """Test a basic hackbench run""" try: - command.run_check('which hackbench') + command.run_check("which hackbench") except ExecutionError: pytest.skip("hackbench missing") - result = command.run_check('hackbench -f 10') + result = command.run_check("hackbench -f 10") result = result[-1].strip() pattern = r"Time:\s+(?P