diff --git a/commonl/__init__.py b/commonl/__init__.py index 6cd8c6dd..7c977358 100755 --- a/commonl/__init__.py +++ b/commonl/__init__.py @@ -209,6 +209,41 @@ def retval(self): return None +def processes_guess(factor: int): + """Very simple parallelization count adjuster + + This is meant to be passed to something such as + *concurrent.futures.ProcessPoolExecutor*: + + >>> parallelization_factor = -4 + >>> processes = commonl.processes_guess(parallelization_factor) + >>> concurrent.futures.ProcessPoolExecutor(processes) + + :param int factor: parallelization factor: + + - positive: absolute number of threads to use; use *1* to + serialize. + + - 0: get the best value for a CPU intensive workload; this + returns the number of CPUs in the system. + + - < 0: get the best value for an IO intensive workload, where we + can do N IO operations / CPU. + + """ + if factor == 0: + factor = int(os.environ.get("THREADS_GUESS", 0)) + if factor > 0: + return factor + if factor == 0: + # factor for a CPU intensive workload + return multiprocessing.cpu_count() + # factor is negative; the absolute value is the intensiveness of + # the IO vs CPU, so how many IO operations we can run in parallel per CPU + return -factor * multiprocessing.cpu_count() + + + class Process(fork_c): # COMPAT pass @@ -3024,6 +3059,8 @@ def cmdline_str_to_value(value): :returns: value as int, float, bool or string """ + if not value: + return value if value.startswith("i:"): return int(value[2:]) if value.startswith("f:"): diff --git a/tcf b/tcf index eec0347a..5ef23b09 100755 --- a/tcf +++ b/tcf @@ -1,33 +1,46 @@ #! /usr/bin/env python3 # -# Copyright (c) 2017 Intel Corporation +# Copyright (c) 2014-23 Intel Corporation # # SPDX-License-Identifier: Apache-2.0 # +# This tool provides command line access to: # -# * FIXME: Make the cookie loading thing a context, so even in the exit -# path we save +# - discover / login / manage ttbd servers and users of said servers # -# * join ttb_client and tcf, move all functions in ttb_client to -# rest_tb, as they are target broker specific +# - manage targets exported by ttbd, so you can power cycle them, +# access the console, press buttons, ... (so using the ttbd REST +# HTTP API from the command line). +# +# - other more advanced functions, like getting cookies, etc +# +# This file is just a multiplexor of subcommands +# +# It initializes the command line parser, which has a bunch of common +# command line options, and then starts importing submodules, running +# for each their CLI setup, which creates subcommands. +# +# We are currently transitioning CLI initialization from tcfl/target_ext_*.py +# (which are API modules) to tcfl/ui_cli_*.py, so that what deals with +# the CLI is clearly separated from what is actually API. +# +# When all that is done, the arguments are parsed, then the config files, +# and whatever subcommand was specified in the command line is run. # - import argparse -import argcomplete -import collections import copy -import getpass import inspect -import json +import logging import os import platform import re -import requests import shutil import sys import tempfile -import traceback + +import argcomplete +import requests import commonl import tcfl @@ -37,20 +50,6 @@ import tcfl.config import tcfl._install import tcfl.ui_cli -# I bet there is a better way to do this...but we need the symbol to -# be in the logging module so that it is not included in the "function -# that called this" by the logging's internals. -# For debugging, levels are D2: 9, D3: 8, D4:7 ...
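Going back to the commonl change at the top of this patch: a minimal usage sketch for the new commonl.processes_guess() helper, sizing a process pool for an IO-bound workload the same way tcfl.servers.run_fn_on_each_server() does further below. fetch_one() and the URL list are hypothetical and only illustrate the call; the sketch assumes the commonl package from this tree is importable.

import concurrent.futures

import commonl

def fetch_one(url):
    # hypothetical IO-bound worker (e.g. one HTTP request per server)
    return len(url)

if __name__ == "__main__":
    urls = [ "https://server1", "https://server2", "https://server3" ]
    # factor -4: IO intensive workload, allow four operations in flight
    # per CPU; never spawn more processes than there is work to do
    processes = min(len(urls), commonl.processes_guess(-4))
    with concurrent.futures.ProcessPoolExecutor(processes) as executor:
        results = list(executor.map(fetch_one, urls))
        print(results)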
-import logging -setattr(logging, "logc", logging.root.critical) -setattr(logging, "logx", logging.root.exception) -setattr(logging, "loge", logging.root.error) -setattr(logging, "logw", logging.root.warning) -setattr(logging, "logi", logging.root.info) -setattr(logging, "logd", logging.root.debug) -setattr(logging, "logdl", logging.root.log) -from logging import logc, loge, logx, logw, logi, logd, logdl - commonl.logging_short_level_names() def join_args_for_make_shell_command(args): @@ -101,189 +100,13 @@ servers: {len(tcfl.ttb_client.rest_target_brokers)}""") -def _target_get(args): - # pure target get w/o going through the cache - - if args.projection: - data = { 'projections': json.dumps(args.projection) } - else: - data = None - rtb, rt = ttb_client._rest_target_find_by_id(args.target) - r = rtb.send_request("GET", "targets/" + rt['id'], data = data, - raw = True) - # Keep the order -- even if json spec doesn't contemplate it, we - # use it so the client can tell (if they want) the order in which - # for example, power rail components are defined in interfaces.power - rt = json.loads(r.text, object_pairs_hook = collections.OrderedDict) - print(json.dumps(rt, skipkeys = True, indent = 4)) - -def _target_patch(args): - # set data - data = collections.OrderedDict() # respect user's order - for data_s in args.data: - if not "=" in data_s: - raise AssertionError( - "data specification has to be in the format KEY=JSON-DATA;" - " got (%s) %s" % (type(data_s), data_s)) - k, v = data_s.split("=", 1) - data[k] = v - rtb, _rt = ttb_client._rest_target_find_by_id(args.target) - if data: - rtb.send_request("PATCH", "targets/" + args.target, data = data) - else: - # JSON from stdin - rtb.send_request("PATCH", "targets/" + args.target, - json = json.loads(sys.stdin.read())) - -def _target_property_set(args): - with tcfl.msgid_c("cmdline"): - target = tcfl.tc.target_c.create_from_cmdline_args(args) - value = args.value - if not value: - pass # used to unset a value - elif value.startswith("i:"): - value = int(value.split(":", 1)[1]) - elif value.startswith("f:"): - value = float(value.split(":", 1)[1]) - elif value.startswith("b:"): - val = value.split(":", 1)[1] - if val.lower() == "true": - value = True - elif val.lower() == "false": - value = False - else: - raise ValueError("value %s: bad boolean '%s' (true or false)" - % (value, val)) - elif value.startswith("s:"): - # string that might start with s: or empty - value = value.split(":", 1)[1] - # FIXME: use commonl.cmdline_str_to_value() - target.property_set(args.property, value) - -def _target_property_get(args): - with tcfl.msgid_c("cmdline"): - target = tcfl.tc.target_c.create_from_cmdline_args(args) - r = target.property_get(args.property) - if r: # print nothing if None - print(r) - - -def _target_disable(args): - with tcfl.msgid_c("cmdline"): - for target_name in args.target: - target = tcfl.tc.target_c.create_from_cmdline_args( - args, target_name = target_name) - target.disable(args.reason) - -def _target_enable(args): - with tcfl.msgid_c("cmdline"): - for target_name in args.target: - target = tcfl.tc.target_c.create_from_cmdline_args( - args, target_name = target_name) - target.enable() - - - -def _healthcheck(target, args): - - if args.interfaces == []: - # no interface list give; scan the list of interfaces the - # target exposes, starting with "power" (always) - args.interfaces.append("power") - - # list extensions/interfaces w/ healthcheck - for attr, value in target.__dict__.items(): - if isinstance(value, 
tcfl.tc.target_extension_c) \ - and hasattr(value, "_healthcheck") \ - and attr != "power": # we did this first - args.interfaces.append(attr) - - - for interface_name in args.interfaces: - interface = getattr(target, interface_name, None) - if interface == None: - target.report_blck("%s: non-existing interface" % interface_name) - continue - if not isinstance(interface, tcfl.tc.target_extension_c): - target.report_blck( - "%s: interface not a real interface (type %s)" - % (interface_name, type(interface))) - continue - - if interface == "power" and not hasattr(target, "power"): - target.report_info("WARNING: No power control interface") - - target.report_info( - "HEALTHCHECK for %s interface" % interface_name, level = 0) - try: - interface._healthcheck() - except Exception as e: - target.report_blck( - "HEALTHCHECK for %s: exception" % interface_name, - dict(exception = e, trace = traceback.format_exc()), - alevel = 0) - - target.report_pass("HEALTHCHECK completed") - - -def healthcheck(args): - tcfl.tc.report_driver_c.add( # FIXME: hack console driver - tcfl.report_console.driver(0, None), - name = "console") - - with tcfl.msgid_c("cmdline"): - target = tcfl.tc.target_c.create_from_cmdline_args(args) - - allocid = None - try: - if args.allocid == None: - target.report_info("allocating") - allocid, _state, _group_allocated = \ - tcfl.target_ext_alloc._alloc_targets( - target.rtb, { "group": [ target.id ] }, - preempt = args.preempt, - queue = False, priority = args.priority, - reason = "healthcheck") - target.report_pass("allocated %s" % allocid) - else: - target.report_info("using existing allocation") - _healthcheck(target, args) - finally: - if allocid: - tcfl.target_ext_alloc._delete(target.rtb, allocid) - -def _cache_flush(_args): +def _cmdline_cache_flush(_args): cache_path = os.path.join(os.path.expanduser("~"), ".cache", "tcf") print(f"I: wiping {cache_path}") shutil.rmtree(cache_path, ignore_errors = True) -def _cookies(args): - if '/' in args.target: - # cache is by full id - rtb = ttb_client.rest_target_broker.rts_cache[args.target]['rtb'] - else: - for rt_fullid, rt in ttb_client.rest_target_broker.rts_cache.items(): - if rt['id'] == args.target: - rtb = rt['rtb'] - break - else: - raise ValueError("%s: unknown target" % args.target) - - if args.json: - json.dump(rtb.cookies, sys.stdout, indent = 4) - print() - elif args.cookiejar: - # Follow https://curl.se/docs/http-cookies.html - # Note we don't keep the TTL field, so we set it at zero - for cookie, value in rtb.cookies.items(): - print(f"{rtb.parsed_url.hostname}\tFALSE\t/\tTRUE\t0" - f"\t{cookie}\t{value}") - else: - commonl._dict_print_dotted(rtb.cookies, separator = "") - - if __name__ == "__main__": tcfl.tc.version = commonl.version_get(tcfl, "tcf") @@ -373,6 +196,11 @@ if __name__ == "__main__": action = "store", default = None, help = "Use this allocid to access targets") + arg_parser.add_argument( + "-A", + action = "store_const", dest = "allocid", const = "any", + help = "Use any existing allocation") + arg_parser.add_argument( "-C", action = "store", default = None, metavar = "DIR", dest = "chdir", @@ -448,40 +276,7 @@ if __name__ == "__main__": # Semi advanced commands - ap = arg_subparsers.add_parser( - "get", help = "Return target information straight from the " - "server formated as JSON (unlike 'list', which will add some " - "client fields)") - ap.add_argument( - "-p", "--projection", action = "append", - help = "List of fields to return (*? 
[CHARS] or [!CHARS] supported)" - " as per python's fnmatch module") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name") - ap.set_defaults(func = _target_get) - - - ap = arg_subparsers.add_parser("property-set", - help = "Set a target's property") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name or URL") - ap.add_argument("property", metavar = "PROPERTY", action = "store", - default = None, help = "Name of property to set") - ap.add_argument("value", metavar = "VALUE", action = "store", - nargs = "?", - default = None, help = "Value of property (none " - "to remove it; i:INTEGER, f:FLOAT b:false or b:true," - " otherwise it is considered a string)") - ap.set_defaults(func = _target_property_set) - - - ap = arg_subparsers.add_parser("property-get", - help = "Get a target's property") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name or URL") - ap.add_argument("property", metavar = "PROPERTY", action = "store", - default = None, help = "Name of property") - ap.set_defaults(func = _target_property_get) + tcfl.ui_cli_targets._cmdline_setup_advanced(arg_subparsers) tcfl.tc.argp_setup(arg_subparsers) @@ -494,75 +289,19 @@ if __name__ == "__main__": tcfl.tc.target_ext_alloc._cmdline_setup_intermediate(arg_subparsers) - ap = arg_subparsers.add_parser("healthcheck", - help = "Do a very basic health check") - ap.set_defaults(level = logging.ERROR) - ap.add_argument( - "-i", "--interface", metavar = "INTERFACE", - dest = "interfaces", action = "append", default = [], - help = "Names of interfaces to healtcheck (default all " - "exposed by the target)") - ap.add_argument( - "-p", "--priority", action = "store", type = int, default = 500, - help = "Priority (0 highest, 999 lowest)") - ap.add_argument( - "--preempt", action = "store_true", default = False, - help = "Enable preemption (disabled by default)") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name or URL") - ap.set_defaults(func = healthcheck) - import tcfl.ui_cli_certs tcfl.ui_cli_certs.cmdline_setup(arg_subparsers) # advanced commands - - ap = arg_subparsers.add_parser("cookies", - help = "Show logging cookies (to feed" - " into curl, etc) maybe only for one server") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = [], help = "Target name") - ap.add_argument("-c","--cookiejar", action = "store_true", default = False, - help = "Print in cookiejar format" - " (https://curl.se/docs/http-cookies.html)") - ap.add_argument("-j","--json", action = "store_true", default = False, - help = "Print in JSON format") - ap.set_defaults(func = _cookies) + tcfl.ui_cli_servers.cmdline_setup_advanced(arg_subparsers) import tcfl.ui_cli_users tcfl.ui_cli_users.cmdline_setup_advanced(arg_subparsers) - ap = arg_subparsers.add_parser("enable", - help = "Enable a disabled target") - ap.add_argument("target", metavar = "TARGET", action = "store", - nargs = "+", default = None, help = "Target's name or URL") - ap.set_defaults(func = _target_enable) - - ap = arg_subparsers.add_parser("disable", - help = "Disable an enabled target") - ap.add_argument("target", metavar = "TARGET", action = "store", - nargs = "+", default = None, help = "Target's name or URL") - ap.add_argument("-r", "--reason", metavar = "REASON", action = "store", - default = 'disabled by the administrator', - help = "Reason why targets are disabled") - 
ap.set_defaults(func = _target_disable) - - - ap = arg_subparsers.add_parser( - "patch", help = "Store target information in the server") - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name") - ap.add_argument("data", metavar = "KEY=JSON-VALUE", nargs = "*", - default = None, help = "Data items to store; if" - " none, specify a JSON dictionary over stdin") - ap.set_defaults(func = _target_patch) - - ap = arg_subparsers.add_parser("cache-flush", help = "wipe all caches") - ap.set_defaults(func = _cache_flush) - + ap.set_defaults(func = _cmdline_cache_flush) tcfl.tc.pos.cmdline_setup(arg_subparsers) @@ -648,7 +387,8 @@ if __name__ == "__main__": # to maintain FDs open and keep the environment. with tempfile.NamedTemporaryFile(suffix = '.mk', prefix = 'tcf-', delete = False) as tf: - logd("%s: creating makefile for jobserver run" % tf.name) + logging.debug("%s: creating makefile for jobserver run" + % tf.name) tf.write(("""\ tcf-jobserver-run: \t+@%s --make-jobserver=%s %s @@ -656,17 +396,18 @@ tcf-jobserver-run: join_args_for_make_shell_command(sys.argv[1:]))).encode('utf-8')) tf.flush() tf.seek(0) - logd("%s: makefile:\n%s" % (tf.name, tf.read())) - logi("%s: executing makefile jobserver that will re-run " - "this command" % tf.name) + logging.debug("%s: makefile:\n%s" % (tf.name, tf.read())) + logging.info("%s: executing makefile jobserver that will" + " re-run this command" % tf.name) os.execvp("make", [ "make", "-s", "-f", tf.name, "-j%s" % args.make_j, "tcf-jobserver-run" ]) elif args.make_jobserver == False: - logi("%s: not re-running under make-jobserver" - % (args.make_jobserver)) + logging.info("%s: not re-running under make-jobserver" + % (args.make_jobserver)) pass # No jobserver wanted else: # We running under the jobserver, remove the makefile - logd("%s: removing make-jobserver makefile" % (args.make_jobserver)) + logging.debug("%s: removing make-jobserver makefile" + % (args.make_jobserver)) # Wipe the makefile we used to run tcf/run under a make # jobserver, not needed anymore. os.unlink(args.make_jobserver) @@ -681,12 +422,16 @@ tcf-jobserver-run: config_files = args.config_files, state_dir = args.state_path, ignore_ssl = args.ignore_ssl) - logi("state path: %s" % tcfl.config.state_path) - logi("share path: %s" % tcfl.config.share_path) + logging.info("state path: %s" % tcfl.config.state_path) + logging.info("share path: %s" % tcfl.config.share_path) if 'func' in args: if args.as_admin: + # FIXME: This has to be replaced, since it is not + # reentrant; need to move it to the protocol so we can + # tell it "run this call with these roles enabled, those + # roles disabled". 
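Schematically, the --as-admin handling in the block that follows wraps the selected subcommand between a role gain and a role drop. A hypothetical condensation of that flow (run_as_admin() is not part of this patch, and placing the drop in a finally clause is an assumption of this sketch; the real code below interleaves its own logging and error handling):

import copy

import tcfl.ui_cli_users

def run_as_admin(args):
    # gain the admin role on the servers, run the selected subcommand,
    # then drop the role again
    _args = copy.copy(args)
    _args.username = "self"
    _args.role = "admin"
    tcfl.ui_cli_users._cmdline_role_gain(_args)
    try:
        return args.func(args)
    finally:
        tcfl.ui_cli_users._cmdline_role_drop(_args)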
import tcfl.ui_cli_users _args = copy.copy(args) _args.username = "self" @@ -694,11 +439,11 @@ tcf-jobserver-run: # this is quite dirty, but it'll do until we add this to # the protocol try: - logi("gaining admin role per --as-admin") + logging.info("gaining admin role per --as-admin") tcfl.ui_cli_users._cmdline_role_gain(_args) - logi("gained admin role per --as-admin") + logging.info("gained admin role per --as-admin") except Exception as e: - logx("can't get admin role per --as-admin") + logging.exception("can't get admin role per --as-admin") raise try: retval = args.func(args) @@ -722,10 +467,10 @@ tcf-jobserver-run: _args = copy.copy(args) _args.username = "self" _args.role = "admin" - logi("dropping admin role per --as-admin") + logging.info("dropping admin role per --as-admin") tcfl.ui_cli_users._cmdline_role_drop(_args) else: - logx("No command specified") + logging.exception("No command specified") retval = 1 # Hack the different return values we can get from the APIs to a @@ -750,7 +495,7 @@ tcf-jobserver-run: # just fail with exceptions retval = 0 else: - logw("Don't know how to interpret retval %s (%s) as exit code" - % (retval, type(retval))) + logging.warning("Don't know how to interpret retval %s (%s) as" + " exit code" % (retval, type(retval))) retval = 1 sys.exit(retval) diff --git a/tcfl/healthcheck.py b/tcfl/healthcheck.py new file mode 100644 index 00000000..b72b1d44 --- /dev/null +++ b/tcfl/healthcheck.py @@ -0,0 +1,78 @@ +#! /usr/bin/env python3 +# +# Copyright (c) 2017-23 Intel Corporation +# +# SPDX-License-Identifier: Apache-2.0 +# + +import argparse +import traceback + +import tcfl.tc + +def _healthcheck(target, cli_args): + + if cli_args.interfaces == []: + # no interface list give; scan the list of interfaces the + # target exposes, starting with "power" (always) + cli_args.interfaces.append("power") + + # list extensions/interfaces w/ healthcheck + for attr, value in target.__dict__.items(): + if isinstance(value, tcfl.tc.target_extension_c) \ + and hasattr(value, "_healthcheck") \ + and attr != "power": # we did this first + cli_args.interfaces.append(attr) + + + for interface_name in cli_args.interfaces: + interface = getattr(target, interface_name, None) + if interface == None: + target.report_blck("%s: non-existing interface" % interface_name) + continue + if not isinstance(interface, tcfl.tc.target_extension_c): + target.report_blck( + "%s: interface not a real interface (type %s)" + % (interface_name, type(interface))) + continue + + if interface == "power" and not hasattr(target, "power"): + target.report_info("WARNING: No power control interface") + + target.report_info( + "HEALTHCHECK for %s interface" % interface_name, level = 0) + try: + interface._healthcheck() + except Exception as e: + target.report_blck( + "HEALTHCHECK for %s: exception" % interface_name, + dict(exception = e, trace = traceback.format_exc()), + alevel = 0) + + target.report_pass("HEALTHCHECK completed") + + +def _target_healthcheck(target, cli_args: argparse.Namespace): + tcfl.tc.report_driver_c.add( # FIXME: hack console driver + tcfl.report_console.driver(0, None), + name = "console") + + # FIXME: this needs to be moved (when the orchestrator is improved + # done) to just run with tcf run + allocid = None + try: + if cli_args.allocid == None: + target.report_info("allocating") + allocid, _state, _group_allocated = \ + tcfl.target_ext_alloc._alloc_targets( + target.rtb, { "group": [ target.id ] }, + preempt = cli_args.preempt, + queue = False, priority = cli_args.priority, + 
reason = "healthcheck") + target.report_pass("allocated %s" % allocid) + else: + target.report_info("using existing allocation") + _healthcheck(target, cli_args) + finally: + if allocid: + tcfl.target_ext_alloc._delete(target.rtb, allocid) diff --git a/tcfl/servers.py b/tcfl/servers.py index 066b18a4..6dff8f8c 100644 --- a/tcfl/servers.py +++ b/tcfl/servers.py @@ -22,6 +22,7 @@ import logging import os +import commonl import tcfl.ttb_client # COMPAT: FIXME remove logger = logging.getLogger("tcfl.servers") @@ -68,6 +69,7 @@ def by_targetspec(targetspec: list = None, verbosity: int = 0): """ if targetspec: + import tcfl.targets # dependency loop otherwise # we are given a list of targets to look for their servers or # default to all, so pass it on to initialize the inventory # system so we can filter @@ -102,9 +104,9 @@ def _run_on_server(server_name, fn, *args, return None, e - def run_fn_on_each_server(servers: dict, fn: callable, *args, serialize: bool = False, traces: bool = False, + parallelization_factor: int = -4, **kwargs): """ Run a function on each server in parallel @@ -122,24 +124,22 @@ def run_fn_on_each_server(servers: dict, fn: callable, *args, :data:`tcfl.server_c.servers` for all servers or any other dict with whatever server names are chosen. - :param bool serialize: (optional, default *False*) if calls to - each server need to be run in a single thread or can be run in - parallel (default). + :param int parallelization_factor: (optional, default -4, run + four operations per processor) number of threads to use to + parallelize the operation; use *1* to serialize. :param bool traces: (optional, default *True*) if log messages for exceptions shall include stack traces. """ - if serialize: - threads = 1 - else: - threads = len(servers) - + processes = min( + len(servers), + commonl.processes_guess(parallelization_factor)) results = {} - if threads == 0: + if processes == 0: return results - with concurrent.futures.ProcessPoolExecutor(threads) as executor: + with concurrent.futures.ProcessPoolExecutor(processes) as executor: futures = { # for each server, queue a thread that will call # _fn, who will call fn taking care of exceptions @@ -164,6 +164,7 @@ def run_fn_on_each_server(servers: dict, fn: callable, *args, return results + def subsystem_setup(*args, **kwargs): """ Initialize the server management system in a synchronous way diff --git a/tcfl/target_ext_shell.py b/tcfl/target_ext_shell.py index 850b845b..e10331b3 100644 --- a/tcfl/target_ext_shell.py +++ b/tcfl/target_ext_shell.py @@ -179,7 +179,7 @@ class shell(tc.target_extension_c): """ def __init__(self, target): - if 'console' not in target.rt['interfaces']: + if 'console' not in target.rt.get('interfaces', {}): raise self.unneeded tc.target_extension_c.__init__(self, target) self.tls = threading.local() diff --git a/tcfl/ui_cli.py b/tcfl/ui_cli.py index e2e917ed..407b0ee6 100644 --- a/tcfl/ui_cli.py +++ b/tcfl/ui_cli.py @@ -67,9 +67,14 @@ def args_targetspec_add( help = "Consider also disabled targets") if targetspec_n != 1: ap.add_argument( - "--serialize", action = "store_true", default = False, + "--serialize", + action = "store_const", dest = "parellization_factor", default = 1, help = "Serialize (don't parallelize) the operation on" " multiple targets") + ap.add_argument( + "--parallelization-factor", + action = "store", type = int, default = -4, + help = "(advanced) parallelization factor") if isinstance(targetspec_n, bool): if targetspec_n: nargs = "+" @@ -116,7 +121,7 @@ def run_fn_on_each_targetspec( # 
COMPAT: removing list[str] so we work in python 3.8 iface: str = None, extensions_only: list = None, only_one: bool = False, - projections = None, + projections = None, targets_all = None, **kwargs): """Initialize the target discovery and run a function on each target that matches a specification @@ -180,7 +185,9 @@ def run_fn_on_each_targetspec( with tcfl.msgid_c("ui_cli"): - project = { 'id', 'disabled', 'type', 'interfaces.' + iface } + project = { 'id', 'disabled', 'type' } + if iface: + project.add('interfaces.' + iface) if projections: commonl.assert_list_of_strings(projections, "projetions", "field") @@ -189,16 +196,22 @@ def run_fn_on_each_targetspec( # Discover all the targets that match the specs in the command # line and pull the minimal inventories as specified per # arguments + if targets_all != None: + assert isinstance(targets_all, bool), \ + "targets_all: expected bool; got {type(targets_all)}" + else: + targets_all = cli_args.all tcfl.targets.setup_by_spec( cli_args.target, cli_args.verbosity - cli_args.quietosity, project = project, - targets_all = cli_args.all) + targets_all = targets_all) # FIXMEh: this should be grouped by servera, but since is not # that we are going to do it so much, (hence the meh) targetids = tcfl.targets.discovery_agent.rts_fullid_sorted if not targetids: - logger.error(f"No targets match the specification: {cli_args.target}") + logger.error(f"No targets match the specification (disabled?):" + f" {' '.join(cli_args.target)}") return 1 if only_one and len(targetids) > 1: logger.error( diff --git a/tcfl/ui_cli_servers.py b/tcfl/ui_cli_servers.py index df97ee7f..93960b45 100644 --- a/tcfl/ui_cli_servers.py +++ b/tcfl/ui_cli_servers.py @@ -42,6 +42,47 @@ def _logged_in_username(_server_name, server): return server.logged_in_username() + +def _cookies(_server_name, server, + _cli_args: argparse.Namespace): + return server.state_load() + +def _cmdline_cookies(cli_args: argparse.Namespace): + + tcfl.ui_cli.logger_verbosity_from_cli(logger, cli_args) + verbosity = cli_args.verbosity - cli_args.quietosity + servers = tcfl.servers.by_targetspec( + cli_args.target, verbosity = verbosity) + + r = tcfl.servers.run_fn_on_each_server( + servers, _cookies, cli_args, + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) + # r now is a dict keyed by server_name of tuples cookies, exception + if cli_args.json: + d = {} + for server_name, ( cookies, _e ) in r.items(): + d[server_name] = cookies + json.dump(d, sys.stdout, indent = 4) + print() + elif cli_args.cookiejar: + # Follow https://curl.se/docs/http-cookies.html + # Note we don't keep the TTL field, so we set it at zero + for server_name, ( cookies, _e ) in r.items(): + for cookie, value in cookies.items(): + print(f"{server_name}\tFALSE\t/\tTRUE\t0" + f"\t{cookie}\t{value}") + else: + d = {} + for server_name, ( cookies, _e ) in r.items(): + d[server_name] = cookies + if len(d) == 1: # print less info if there is only one + commonl._dict_print_dotted(d[server_name], separator = ".") + else: + commonl._dict_print_dotted(d, separator = ".") + + + def _cmdline_servers(cli_args: argparse.Namespace): import tcfl.servers import tcfl.targets @@ -60,7 +101,8 @@ def _cmdline_servers(cli_args: argparse.Namespace): r = tcfl.servers.run_fn_on_each_server( servers, _logged_in_username, - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) # r now is a dict keyed by server_name of tuples usernames, 
# exception for server_name, ( username, _e ) in r.items(): @@ -231,3 +273,21 @@ def cmdline_setup(arg_subparser): help = "Flush currently cached/known servers" " (might need to servers-discover after)") ap.set_defaults(func = _cmdline_servers_flush) + + + +def cmdline_setup_advanced(arg_subparser): + + ap = arg_subparser.add_parser( + "cookies", + help = "Show login cookies (to feed into curl, etc)") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap) + ap.add_argument( + "-c","--cookiejar", action = "store_true", default = False, + help = "Print in cookiejar format" + " (https://curl.se/docs/http-cookies.html)") + ap.add_argument( + "-j","--json", action = "store_true", default = False, + help = "Print in JSON format") + ap.set_defaults(func = _cmdline_cookies) diff --git a/tcfl/ui_cli_targets.py b/tcfl/ui_cli_targets.py index c9d506f7..61f2e587 100644 --- a/tcfl/ui_cli_targets.py +++ b/tcfl/ui_cli_targets.py @@ -20,17 +20,20 @@ """ +import argparse +import collections +import json import logging import math import os import sys import commonl +import tcfl.ui_cli logger = logging.getLogger("ui_cli_testcases") - def _cmdline_targets_init(args): # initialize reporting based on what the commandline wants @@ -51,6 +54,24 @@ def _cmdline_targets_init(args): +def _cmdline_target_get(cli_args: argparse.Namespace): + + def _target_get(target, _cli_args): + projections = cli_args.project + server = tcfl.server_c.servers[target.rt['server']] + rt = server.targets_get(target_id = target.id, + projections = cli_args.project) + # rt is a list of dicts keyed by fullid, we care only for the first + json.dump(rt[0][target.fullid], sys.stdout, indent = 4) + + return tcfl.ui_cli.run_fn_on_each_targetspec( + # note we scan ignoring --projections, since that we'll use + # later; we want to identify the target to get as soon as + # possible and then in _target_get() we do the stuff + _target_get, cli_args, only_one = True) + + + def _targets_list_v0_table(l): if not l: # avoid divide by zero errors @@ -196,6 +217,86 @@ def _cmdline_ls(cli_args): +def _target_patch(target, cli_args): + # set data + data = collections.OrderedDict() # respect user's order + for data_s in cli_args.data: + if not "=" in data_s: + raise AssertionError( + "data specification has to be in the format KEY=JSON-DATA;" + " got (%s) %s" % (type(data_s), data_s)) + k, v = data_s.split("=", 1) + data[k] = v + server = tcfl.server_c.servers[target.rt['server']] + if data: + server.send_request("PATCH", "targets/" + target.id, json = data) + else: + # JSON from stdin + server.send_request("PATCH", "targets/" + target.id, + json = json.load(sys.stdin)) + + +def _cmdline_target_patch(cli_args: argparse.Namespace): + + return tcfl.ui_cli.run_fn_on_each_targetspec( + _target_patch, cli_args, only_one = True) + + + +def _target_enable(target, _cli_args): + target.enable() + +def _cmdline_target_enable(cli_args: argparse.Namespace): + + # force seeing all targets, will ease confusion, since normally we + # want to enable disabled targets + return tcfl.ui_cli.run_fn_on_each_targetspec( + _target_enable, cli_args, targets_all = True) + + + +def _target_disable(target, cli_args): + target.disable(cli_args.reason) + +def _cmdline_target_disable(cli_args: argparse.Namespace): + + # force seeing all targets, will ease confusion, in case we run the + # command twice (trying to disable a disabled target shall just work) + return tcfl.ui_cli.run_fn_on_each_targetspec( + _target_disable, cli_args, targets_all = True) + + + +def 
_target_property_set(target, cli_args): + value = commonl.cmdline_str_to_value(cli_args.value) + target.property_set(cli_args.property, value) + +def _cmdline_target_property_set(cli_args: argparse.Namespace): + + return tcfl.ui_cli.run_fn_on_each_targetspec( + _target_property_set, cli_args, only_one = True) + + + +def _target_property_get(target, cli_args): + r = target.property_get(cli_args.property) + if r: # print nothing if None + print(r) + +def _cmdline_target_property_get(cli_args: argparse.Namespace): + + return tcfl.ui_cli.run_fn_on_each_targetspec( + _target_property_get, cli_args, only_one = True) + + + +def _cmdline_target_healthcheck(cli_args: argparse.Namespace): + import tcfl.healthcheck + return tcfl.ui_cli.run_fn_on_each_targetspec( + tcfl.healthcheck._target_healthcheck, cli_args) + + + def _cmdline_setup(arg_subparsers): import tcfl.ui_cli @@ -211,3 +312,98 @@ def _cmdline_setup(arg_subparsers): help = "consider only the given fields " "(default depends on verbosity") ap.set_defaults(func = _cmdline_ls) + + + +def _cmdline_setup_advanced(arg_subparsers): + + ap = arg_subparsers.add_parser( + "get", help = "Return target information straight from the " + "server formated as JSON (unlike 'ls', which will add some " + "client fields)") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap, targetspec_n = 1) + ap.add_argument( + "-p", "--project", "--projection", metavar = "FIELD", + action = "append", type = str, + help = "consider only the given fields " + "(default depends on verbosity") + ap.set_defaults(func = _cmdline_target_get) + + ap = arg_subparsers.add_parser( + "patch", + help = "Store multiple fields of data on the target's inventory" + " from JSON or KEY=VALUE (vs property-set just storing one)") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap, targetspec_n = 1) + ap.add_argument( + "data", metavar = "KEY=JSON-VALUE", nargs = "*", + default = None, help = "Data items to store; if" + " none, specify a JSON dictionary over stdin") + ap.set_defaults(func = _cmdline_target_patch) + + + ap = arg_subparsers.add_parser( + "enable", + help = "Enable disabled target/s") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap) + ap.set_defaults(func = _cmdline_target_enable) + + + ap = arg_subparsers.add_parser( + "disable", + help = "Disable enabled target/s") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap) + ap.add_argument( + "-r", "--reason", metavar = "REASON", action = "store", + default = 'disabled by the administrator', + help = "Reason why targets are disabled") + ap.set_defaults(func = _cmdline_target_disable) + + + ap = arg_subparsers.add_parser( + "property-set", + help = "Set a target's property") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap, targetspec_n = 1) + ap.add_argument( + "property", metavar = "PROPERTY", action = "store", + default = None, help = "Name of property to set") + ap.add_argument( + "value", metavar = "VALUE", action = "store", + nargs = "?", default = None, + help = "Value of property (none to remove it; i:INTEGER, f:FLOAT" + " b:false or b:true, otherwise it is considered a string)") + ap.set_defaults(func = _cmdline_target_property_set) + + + ap = arg_subparsers.add_parser( + "property-get", + help = "Get a target's property") + tcfl.ui_cli.args_verbosity_add(ap) + tcfl.ui_cli.args_targetspec_add(ap, targetspec_n = 1) + ap.add_argument( + "property", metavar = "PROPERTY", action = "store", default = None, + help = 
"Name of property to read") + ap.set_defaults(func = _cmdline_target_property_get) + + + ap = arg_subparsers.add_parser( + "healthcheck", + help = "Do a very basic health check") + tcfl.ui_cli.args_verbosity_add(ap) + ap.add_argument( + "-i", "--interface", metavar = "INTERFACE", + dest = "interfaces", action = "append", default = [], + help = "Names of interfaces to healtcheck (default all " + "exposed by the target)") + ap.add_argument( + "-p", "--priority", action = "store", type = int, default = 500, + help = "Priority for allocation (0 highest, 999 lowest)") + ap.add_argument( + "--preempt", action = "store_true", default = False, + help = "Enable allocation preemption (disabled by default)") + tcfl.ui_cli.args_targetspec_add(ap) + ap.set_defaults(func = _cmdline_target_healthcheck) diff --git a/tcfl/ui_cli_users.py b/tcfl/ui_cli_users.py index 4b31773b..76fa6ed1 100644 --- a/tcfl/ui_cli_users.py +++ b/tcfl/ui_cli_users.py @@ -42,13 +42,10 @@ -def _credentials_get(domain: str, aka: str, cli_args: argparse.Namespace): +def _credentials_get_global(cli_args: argparse.Namespace): # env general user_env = os.environ.get("TCF_USER", None) password_env = os.environ.get("TCF_PASSWORD", None) - # server specific - user_env_aka = os.environ.get("TCF_USER_" + aka, None) - password_env_aka = os.environ.get("TCF_PASSWORD_" + aka, None) # from commandline user_cmdline = cli_args.username @@ -57,11 +54,6 @@ def _credentials_get(domain: str, aka: str, cli_args: argparse.Namespace): # default to what came from environment user = user_env password = password_env - # override with server specific from envrionment - if user_env_aka: - user = user_env_aka - if password_env_aka: - password = password_env_aka # override with what came from the command line if user_cmdline: user = user_cmdline @@ -74,13 +66,66 @@ def _credentials_get(domain: str, aka: str, cli_args: argparse.Namespace): "Cannot obtain login name and" " -q was given (can't ask); " " please specify a login name or use environment" - " TCF_USER[_AKA]") + " TCF_USER[_]") if not sys.stdout.isatty(): raise RuntimeError( "Cannot obtain login name and" " terminal is not a TTY (can't ask); " " please specify a login name or use environment" - " TCF_USER[_AKA]") + " TCF_USER[_]") + user = input(f'Login for all servers [{getpass.getuser()}]' + ' (use *ask* for server-specific): ') + if user == "": # default to LOGIN name + user = getpass.getuser() + print("I: defaulting to login name '{user}'") + elif user == "ask": + user = None + + if user and not password: + if cli_args.quiet: + raise RuntimeError( + "Cannot obtain password and" + " -q was given (can't ask); " + " please specify a login name or use environment" + " TCF_PASSWORD[_]") + if not sys.stdout.isatty(): + raise RuntimeError( + "Cannot obtain password and" + " terminal is not a TTY (can't ask); " + " please specify a login name or use environment" + " TCF_PASSWORD[_]") + password = getpass.getpass(f"Password for {user} (on all servers): ") + return user, password + + + +def _credentials_get(domain: str, aka: str, user: str, password: str, + cli_args: argparse.Namespace): + # server specific + user_env_aka = os.environ.get("TCF_USER_" + aka, None) + password_env_aka = os.environ.get("TCF_PASSWORD_" + aka, None) + + # override with server specific from envrionment + if user_env_aka: + user = user_env_aka + if password_env_aka: + password = password_env_aka + # we don't override from the commandline, since we did it in + # _credentials_get_global() + + if not user: + if cli_args.quiet: + raise 
RuntimeError( + "Cannot obtain login name and" + " -q was given (can't ask); " + " please specify a login name or use environment" + " TCF_USER[_]") + if not sys.stdout.isatty(): + raise RuntimeError( + "Cannot obtain login name and" + " terminal is not a TTY (can't ask); " + " please specify a login name or use environment" + " TCF_USER[_]") user = input('Login for %s [%s]: ' \ % (domain, getpass.getuser())) if user == "": # default to LOGIN name @@ -124,14 +169,16 @@ def _cmdline_login(cli_args: argparse.Namespace): logged = False servers = tcfl.server_c.servers # we only ask on the terminal HERE! + user, password = _credentials_get_global(cli_args) credentials = {} for server_name, server in servers.items(): credentials[server.aka] = \ - _credentials_get(server.url, server.aka, cli_args) + _credentials_get(server.url, server.aka, user, password, cli_args) r = tcfl.servers.run_fn_on_each_server( servers, _login, credentials, - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) # r now is a dict keyed by server_name of tuples usernames, # exception logged_count = 0 @@ -170,7 +217,8 @@ def _cmdline_logout(cli_args: argparse.Namespace): r = tcfl.servers.run_fn_on_each_server( servers, _logout, cli_args, - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) # r now is a dict keyed by server_name of tuples usernames, # exception for server_name, ( _, e ) in r.items(): @@ -212,7 +260,8 @@ def _cmdline_role_gain(cli_args: argparse.Namespace): tcfl.servers.run_fn_on_each_server( servers, _user_role, cli_args, "gain", - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) def _cmdline_role_drop(cli_args: argparse.Namespace): @@ -223,7 +272,8 @@ def _cmdline_role_drop(cli_args: argparse.Namespace): tcfl.servers.run_fn_on_each_server( servers, _user_role, cli_args, "drop", - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) @@ -244,7 +294,8 @@ def _cmdline_user_list(cli_args: argparse.Namespace): result = tcfl.servers.run_fn_on_each_server( tcfl.server_c.servers, _user_list, cli_args, - serialize = cli_args.serialize, traces = cli_args.traces) + parallelization_factor = cli_args.parallelization_factor, + traces = cli_args.traces) # so now result is a dictionary of SERVER: ( DATA, EXCEPTION ), # where DATA is dictionaries of USERNAME: USERDATA @@ -353,9 +404,14 @@ def cmdline_setup(arg_subparser): "AKA is the short name of the server (defaults to the sole " "host name, without the domain). 
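For reference, the credential lookup refactored above now resolves in two stages; a pure-Python restatement of the resulting precedence (resolve_user() and the lab1/lab2 server AKAs are hypothetical, shown for illustration only; interactive prompting is elided):

import os

def resolve_user(aka, cmdline_user = None):
    # mirrors _credentials_get_global() + _credentials_get(): global
    # TCF_USER, then the command line, then the per-server
    # TCF_USER_<AKA> environment override wins
    user = os.environ.get("TCF_USER", None)
    if cmdline_user:
        user = cmdline_user
    return os.environ.get("TCF_USER_" + aka, user)

# e.g., with TCF_USER=jdoe and TCF_USER_lab2=jdoe-admin exported:
#   resolve_user("lab1")  -> "jdoe"
#   resolve_user("lab2")  -> "jdoe-admin"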
Find it with 'tcf servers'") ap.add_argument( - "--serialize", action = "store_true", default = False, + "--serialize", + action = "store_const", dest = "parellization_factor", default = 1, help = "Serialize (don't parallelize) the operation on" - " multiple servers") + " multiple targets") + ap.add_argument( + "--parallelization-factor", + action = "store", type = int, default = -4, + help = "(advanced) parallelization factor") ap.add_argument( "username", nargs = '?', metavar = "USERNAME", action = "store", default = None, @@ -388,9 +444,14 @@ def cmdline_setup_advanced(arg_subparser): help = "ID of user whose role is to be dropped" " (optional, defaults to yourself)") ap.add_argument( - "--serialize", action = "store_true", default = False, + "--serialize", + action = "store_const", dest = "parellization_factor", default = 1, help = "Serialize (don't parallelize) the operation on" - " multiple servers") + " multiple targets") + ap.add_argument( + "--parallelization-factor", + action = "store", type = int, default = -4, + help = "(advanced) parallelization factor") ap.add_argument( "role", action = "store", help = "Role to gain") @@ -406,9 +467,14 @@ def cmdline_setup_advanced(arg_subparser): help = "ID of user whose role is to be dropped" " (optional, defaults to yourself)") ap.add_argument( - "--serialize", action = "store_true", default = False, + "--serialize", + action = "store_const", dest = "parellization_factor", default = 1, help = "Serialize (don't parallelize) the operation on" - " multiple servers") + " multiple targets") + ap.add_argument( + "--parallelization-factor", + action = "store", type = int, default = -4, + help = "(advanced) parallelization factor") ap.add_argument( "role", action = "store", help = "Role to drop") @@ -421,9 +487,14 @@ def cmdline_setup_advanced(arg_subparser): "admin role privilege to list users others than your own)") tcfl.ui_cli.args_verbosity_add(ap) ap.add_argument( - "--serialize", action = "store_true", default = False, + "--serialize", + action = "store_const", dest = "parellization_factor", default = 1, help = "Serialize (don't parallelize) the operation on" " multiple targets") + ap.add_argument( + "--parallelization-factor", + action = "store", type = int, default = -4, + help = "(advanced) parallelization factor") ap.add_argument("userid", action = "store", default = None, nargs = "*", help = "Users to list (default all)") diff --git a/tcfl/util.py b/tcfl/util.py deleted file mode 100644 index 0dc48607..00000000 --- a/tcfl/util.py +++ /dev/null @@ -1,61 +0,0 @@ -#! /usr/bin/python3 -# -# Copyright (c) 2017 Intel Corporation -# -# SPDX-License-Identifier: Apache-2.0 -# - -import logging - -import commonl -from . 
import tcfl - - -def healthcheck_power(rtb, rt): - print("Powering off") - rtb.rest_tb_target_power_off(rt) - print("Powered off") - - print("Querying power status") - power = rtb.rest_tb_target_power_get(rt) - if power != 0: - msg = "Power should be 0, reported %d" % power - raise Exception(msg) - print("Power is reported correctly as %d" % power) - - print("Powering on") - rtb.rest_tb_target_power_on(rt) - print("Powered on") - - print("Querying power status") - power = rtb.rest_tb_target_power_get(rt) - if power == 0: - msg = "Power should be !0, reported %d" % power - raise Exception(msg) - print("Power is reported correctly as %d" % power) - - print("power test passed") - - -def healthcheck(args): - rtb, rt = tcfl.ttb_client._rest_target_find_by_id(args.target) - - print("Acquiring") - rtb.rest_tb_target_acquire(rt) - print("Acquired") - try: - healthcheck_power(rtb, rt) - finally: - print("Releasing") - rtb.rest_tb_target_release(rt) - print("Released") - print("%s: healthcheck completed" % rt['id']) - -def argp_setup(arg_subparsers): - ap = arg_subparsers.add_parser("healthcheck", - help = "List testcases") - commonl.cmdline_log_options(ap) - ap.set_defaults(level = logging.ERROR) - ap.add_argument("target", metavar = "TARGET", action = "store", - default = None, help = "Target's name or URL") - ap.set_defaults(func = healthcheck)
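The healthcheck helper deleted here is superseded by tcfl/healthcheck.py plus the "healthcheck" subcommand registered in tcfl/ui_cli_targets.py above, which follow the new per-target CLI pattern: a worker taking (target, cli_args) fanned out by tcfl.ui_cli.run_fn_on_each_targetspec(). A minimal sketch of that pattern with a hypothetical "example" subcommand (the names _target_example, _cmdline_target_example and "example" are not part of this patch):

import argparse

import tcfl.ui_cli

def _target_example(target, _cli_args):
    # called once per target matching the TARGETSPEC, possibly in
    # parallel across targets
    print(target.fullid)

def _cmdline_target_example(cli_args: argparse.Namespace):
    return tcfl.ui_cli.run_fn_on_each_targetspec(
        _target_example, cli_args)

def cmdline_setup(arg_subparsers):
    # wired from the main tcf script, like the other ui_cli_*.py modules
    ap = arg_subparsers.add_parser(
        "example", help = "hypothetical subcommand: print matching targets")
    tcfl.ui_cli.args_verbosity_add(ap)
    tcfl.ui_cli.args_targetspec_add(ap)
    ap.set_defaults(func = _cmdline_target_example)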