diff --git a/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_existing_vnet.json b/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_existing_vnet.json index 7eda0f98..ec5321ee 100644 --- a/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_existing_vnet.json +++ b/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_existing_vnet.json @@ -277,6 +277,9 @@ "skunames" : { "0.6.0": { + "skuname": "keysight-cyperf-agent-60" + }, + "0.5.0": { "skuname": "keysight-cyperf-agent-50" }, "0.4.0": { diff --git a/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_new_vnet.json b/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_new_vnet.json index bfa5ab1a..f3918817 100644 --- a/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_new_vnet.json +++ b/deployment/azure/azureresourcemanager/agents_only/cyperf_agents_only_new_vnet.json @@ -288,6 +288,9 @@ "skunames" : { "0.6.0": { + "skuname": "keysight-cyperf-agent-60" + }, + "0.5.0": { "skuname": "keysight-cyperf-agent-50" }, "0.4.0": { diff --git a/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_existing_vnet.json b/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_existing_vnet.json index 7c3c400c..b296544f 100644 --- a/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_existing_vnet.json +++ b/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_existing_vnet.json @@ -266,6 +266,10 @@ "skunames" : { "0.6.0": { + "skunameagent": "keysight-cyperf-agent-60", + "skunamecontroller": "keysight-cyperf-controller-60" + }, + "0.5.0": { "skunameagent": "keysight-cyperf-agent-50", "skunamecontroller": "keysight-cyperf-controller-50" }, diff --git 
a/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_new_vnet.json b/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_new_vnet.json index 8ce10892..436bc48a 100644 --- a/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_new_vnet.json +++ b/deployment/azure/azureresourcemanager/controller_and_agent_pair/cyperf_controller_and_agent_pair_new_vnet.json @@ -280,6 +280,10 @@ "skunames" : { "0.6.0": { + "skunameagent": "keysight-cyperf-agent-60", + "skunamecontroller": "keysight-cyperf-controller-60" + }, + "0.5.0": { "skunameagent": "keysight-cyperf-agent-50", "skunamecontroller": "keysight-cyperf-controller-50" }, diff --git a/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_existing_vnet.json b/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_existing_vnet.json index f6d3bbe2..2a69c550 100644 --- a/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_existing_vnet.json +++ b/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_existing_vnet.json @@ -267,6 +267,10 @@ "skunames" : { "0.6.0": { + "skunameagent": "keysight-cyperf-agent-60", + "skunamecontrollerproxy": "keysight-cyperf-controllerproxy-60" + }, + "0.5.0": { "skunameagent": "keysight-cyperf-agent-50", "skunamecontrollerproxy": "keysight-cyperf-controllerproxy-50" }, diff --git a/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_new_vnet.json b/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_new_vnet.json index 0a066a96..64af3b81 100644 --- 
a/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_new_vnet.json +++ b/deployment/azure/azureresourcemanager/controller_proxy_and_agent_pair/cyperf_controller_proxy_and_agent_pair_new_vnet.json @@ -279,6 +279,10 @@ "skunames" : { "0.6.0": { + "skunameagent": "keysight-cyperf-agent-60", + "skunamecontrollerproxy": "keysight-cyperf-controllerproxy-60" + }, + "0.5.0": { "skunameagent": "keysight-cyperf-agent-50", "skunamecontrollerproxy": "keysight-cyperf-controllerproxy-50" }, diff --git a/utils/RFC 6349/1_RTT.zip b/utils/RFC 6349/1_RTT.zip new file mode 100644 index 00000000..76f091e5 Binary files /dev/null and b/utils/RFC 6349/1_RTT.zip differ diff --git a/utils/RFC 6349/2_Bottleneck_bandwidth.zip b/utils/RFC 6349/2_Bottleneck_bandwidth.zip new file mode 100644 index 00000000..5e6edf4f Binary files /dev/null and b/utils/RFC 6349/2_Bottleneck_bandwidth.zip differ diff --git a/utils/RFC 6349/3_Upstream_tput.zip b/utils/RFC 6349/3_Upstream_tput.zip new file mode 100644 index 00000000..ff342870 Binary files /dev/null and b/utils/RFC 6349/3_Upstream_tput.zip differ diff --git a/utils/RFC 6349/4_Downstream_tput.zip b/utils/RFC 6349/4_Downstream_tput.zip new file mode 100644 index 00000000..ba68a276 Binary files /dev/null and b/utils/RFC 6349/4_Downstream_tput.zip differ diff --git a/utils/RFC 6349/5_Bidirectional_tput.zip b/utils/RFC 6349/5_Bidirectional_tput.zip new file mode 100644 index 00000000..069c0027 Binary files /dev/null and b/utils/RFC 6349/5_Bidirectional_tput.zip differ diff --git a/utils/RFC 6349/6_Bidirectional_tput_user_constraint.zip b/utils/RFC 6349/6_Bidirectional_tput_user_constraint.zip new file mode 100644 index 00000000..a6c68029 Binary files /dev/null and b/utils/RFC 6349/6_Bidirectional_tput_user_constraint.zip differ diff --git a/utils/RFC 6349/README.md b/utils/RFC 6349/README.md new file mode 100644 index 00000000..16d05be4 --- /dev/null +++ b/utils/RFC 6349/README.md @@ -0,0 +1,28 @@ 
+# Introduction +Welcome to the GitHub repository for CyPerf, a Keysight product. CyPerf is an agent-based network application and security test solution, that meticulously recreates realistic workloads across diverse physical and cloud environments to deliver unparalleled insights into the end-user quality of experience (QoE), security posture, and performance bottlenecks of distributed networks. + +A licensed CyPerf product is compatible with multiple environments. Choose from the following supported platforms for accessing ready-to-use deployment templates. + +# RFC 6349 Test + +The RFC 6349 “Framework for TCP Throughput Testing” provides a methodology for testing sustained TCP Layer performance. + In addition to finding the TCP throughput at the optimal buffer size, RFC 6349 presents metrics that can be used to better understand the results. + RFC 6349 testing is done in 3 steps: + 1) Identify the Path Maximum Transmission Unit (MTU) + 2) Identify the Baseline Round-Trip Time (RTT) and the Bottleneck Bandwidth (BB) + 3) Perform the TCP Connection Throughput Tests + +# Steps to execute RFC 6349 test script + + 1) Install the latest version of Python 3. + 2) Clone the CyPerf github repository. + 3) Modify “Cyperf/utils/RFC6349/test/params.yaml” such as IPAddress, "username", "password", "client_id", "ClientAgent", "ServerAgent", "ClientIP", "ClientSubnetMask", "ClientGatewayIP", "ServerIP", "ServerSubnetMask", "ServerGatewayIP" with desired values. + 4) Run "python setup.py setup" to install all the dependent Python libraries (all the necessary packages are updated in file requirements.txt). This will setup the environment needed to run the script. + 5) Navigate to RFC6349 folder and run the script: python -m pytest test/rfc_test.py --logstatus testlog.log + 6) Once execution is complete, view testlog.log logfile in the extracted directory for the results. 
+ 7) All the result cyperf reports will be avalable under folder start with "Result" followed by execution date, exmaple : Result2025_01_22_23_57_15 + + + + + diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth16384.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth16384.pdf new file mode 100644 index 00000000..74fa3a59 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth16384.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth32768.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth32768.pdf new file mode 100644 index 00000000..c1ffb545 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth32768.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth4096.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth4096.pdf new file mode 100644 index 00000000..290d85af Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth4096.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth49152.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth49152.pdf new file mode 100644 index 00000000..bf88ca68 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth49152.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth65536.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth65536.pdf new file mode 100644 index 00000000..95724ecf Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth65536.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth8192.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth8192.pdf new file mode 100644 index 00000000..f0bce2c7 Binary files /dev/null and b/utils/RFC 
6349/Result2025_01_22_23_57_15/2_Bottleneck_bandwidth8192.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput16384.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput16384.pdf new file mode 100644 index 00000000..c01a515d Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput16384.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput32768.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput32768.pdf new file mode 100644 index 00000000..c0d3310c Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput32768.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput4096.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput4096.pdf new file mode 100644 index 00000000..159f2c8c Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput4096.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput49152.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput49152.pdf new file mode 100644 index 00000000..c0581db8 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput49152.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput65536.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput65536.pdf new file mode 100644 index 00000000..af9e52aa Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput65536.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput8192.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput8192.pdf new file mode 100644 index 00000000..8ccfb7c4 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/3_Upstream_tput8192.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput16384.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput16384.pdf new file mode 
100644 index 00000000..866c8d9e Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput16384.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput32768.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput32768.pdf new file mode 100644 index 00000000..55764e36 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput32768.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput4096.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput4096.pdf new file mode 100644 index 00000000..220414fb Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput4096.pdf differ diff --git a/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput8192.pdf b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput8192.pdf new file mode 100644 index 00000000..d7a945a0 Binary files /dev/null and b/utils/RFC 6349/Result2025_01_22_23_57_15/4_Downstream_tput8192.pdf differ diff --git a/utils/RFC 6349/conftest.py b/utils/RFC 6349/conftest.py new file mode 100644 index 00000000..10093e71 --- /dev/null +++ b/utils/RFC 6349/conftest.py @@ -0,0 +1,30 @@ +import re, logging +import pytest +import json +import os + +def pytest_exception_interact(node, call, report): + # end all pytests on first exception that is encountered + pytest.exit(call.excinfo.traceback[0]) + +def pytest_addoption(parser): + # called before running tests to register command line options for pytest + parser.addoption("--logstatus", action="store", default=None) + parser.addoption("--paramlist", action="store", default=None) + parser.addoption("--log", action="store", default='final_result.log') + +@pytest.fixture(scope='session') +def logger(request): + log_file = request.config.getvalue("--logstatus") + if log_file is None: + raise Exception("logstatus is a mandatory command line") + logging + log = open(log_file, 'w+') + return log_file + 
+@pytest.fixture(scope='session') +def logger_report(request): + final_log = request.config.getvalue("--log") + logging + log1 = open(final_log, 'w+') + return final_log diff --git a/utils/RFC 6349/final_result.log b/utils/RFC 6349/final_result.log new file mode 100644 index 00000000..e69de29b diff --git a/utils/RFC 6349/requirements.txt b/utils/RFC 6349/requirements.txt new file mode 100644 index 00000000..ddc2607f --- /dev/null +++ b/utils/RFC 6349/requirements.txt @@ -0,0 +1,9 @@ +--prefer-binary +pytest +paramiko +urllib3 +requests +simplejson +pandas +tabulate +Pyyaml \ No newline at end of file diff --git a/utils/RFC 6349/setup.py b/utils/RFC 6349/setup.py new file mode 100644 index 00000000..f27f9b7e --- /dev/null +++ b/utils/RFC 6349/setup.py @@ -0,0 +1,79 @@ +from platform import platform +from pathlib import Path +import sys +import os +import subprocess + + +def setup(): + run( + [ + py() + " -m pip install -r requirements.txt", + ] + ) + + +def py(): + """ + Returns path to python executable to be used. + """ + BASE_DIR = BASE_DIR = Path(__file__).resolve().parent + py.path = os.path.join( + os.path.join(BASE_DIR.parent), + ".env", + "bin", + "python.exe" if "win" in sys.platform else "python" + ) + if not os.path.exists(py.path): + py.path = sys.executable + + # since some paths may contain spaces + py.path = '"' + py.path + '"' + print(py.path) + return py.path + + +def flush_output(fd, filename): + """ + Flush the log file and print to console + """ + if fd is None: + return + fd.flush() + fd.seek(0) + ret = fd.read() + print(ret) + fd.close() + os.remove(filename) + return ret + + +def run(commands, capture_output=False): + """ + Executes a list of commands in a native shell and raises exception upon + failure. 
+ """ + fd = None + logfile = "log.txt" + if capture_output: + fd = open(logfile, "w+") + try: + for cmd in commands: + print(">>>> " + cmd + " <<<<") + if sys.platform != "win32": + cmd = cmd.encode("utf-8", errors="ignore") + subprocess.check_call(cmd, shell=True, stdout=fd) + return flush_output(fd, logfile) + except Exception: + flush_output(fd, logfile) + sys.exit(1) + +def main(): + if len(sys.argv) >= 2: + globals()[sys.argv[1]](*sys.argv[2:]) + else: + print("usage: python do.py [args]") + + +if __name__ == "__main__": + main() \ No newline at end of file diff --git a/utils/RFC 6349/test/__pycache__/rfc_test.cpython-310-pytest-8.3.4.pyc b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-310-pytest-8.3.4.pyc new file mode 100644 index 00000000..dad7eff8 Binary files /dev/null and b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-310-pytest-8.3.4.pyc differ diff --git a/utils/RFC 6349/test/__pycache__/rfc_test.cpython-38-pytest-7.4.2.pyc b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-38-pytest-7.4.2.pyc new file mode 100644 index 00000000..63e3ae1f Binary files /dev/null and b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-38-pytest-7.4.2.pyc differ diff --git a/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-6.2.4.pyc b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-6.2.4.pyc new file mode 100644 index 00000000..ef3abc0e Binary files /dev/null and b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-6.2.4.pyc differ diff --git a/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.2.2.pyc b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.2.2.pyc new file mode 100644 index 00000000..1f4e7827 Binary files /dev/null and b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.2.2.pyc differ diff --git a/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.3.2.pyc b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.3.2.pyc new file mode 100644 index 00000000..113dd4c7 Binary 
files /dev/null and b/utils/RFC 6349/test/__pycache__/rfc_test.cpython-39-pytest-8.3.2.pyc differ diff --git a/utils/RFC 6349/test/params.yaml b/utils/RFC 6349/test/params.yaml new file mode 100644 index 00000000..8bb7ccf0 --- /dev/null +++ b/utils/RFC 6349/test/params.yaml @@ -0,0 +1,15 @@ +description: + #Paramaters for CyPerf RFC 6349 configs. +config: + "IPAddress" : "10.39.46.181" #CyPerf App server IP + "username" : "admin" + "password" : "CyPerf&Keysight#1" + "client_id" : "clt-wap" + "ClientAgent" : "10.39.47.53" #10.39.47.254 + "ServerAgent" : "10.39.47.97" + "ClientIP" : "10.0.0.30" + "ClientSubnetMask" : 16 + "ClientGatewayIP" : "0.0.0.0" + "ServerIP" : "10.0.0.40" + "ServerSubnetMask" : 16 + "ServerGatewayIP" : "0.0.0.0" \ No newline at end of file diff --git a/utils/RFC 6349/test/rfc_test.py b/utils/RFC 6349/test/rfc_test.py new file mode 100644 index 00000000..dc957c24 --- /dev/null +++ b/utils/RFC 6349/test/rfc_test.py @@ -0,0 +1,477 @@ +import re +import time, os, sys +from turtle import pd +# + +libpath = os.path.abspath(__file__+"/../../testsuite/rest_api_wrapper/lib") +sys.path.insert(0,libpath) +from REST_WRAPPER import create_new_config, create_traffic_profile, run_test, collect_stats +from util import * +import pytest +import paramiko +import yaml +import telnetlib +from paramiko.util import log_to_file +res = [] +result = True +stats_result = {} +cmtu = None +smtu = None +rtt = None +port_speed = None +msg = [] +#version 1.1 05-april-2023 + + + +def check_ipv4(logger, ip): + ipv4Regex = r'(\d{1,3}\.){3}\d{1,3}' + ipv4_match = re.search(ipv4Regex, ipaddr) + if ipv4_match: + return True + else: + return False +def ping_ssh(logger, payload_size, args, port): + ssh_cl = paramiko.client.SSHClient() + ssh_cl.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_cl.connect(hostname=args['host'], username="cyperf", password="cyperf", allow_agent=False, look_for_keys=False) + channel = None + execute = 1 + ipv4Regex = r'(\d{1,3}\.){3}\d{1,3}' + 
ipv4_match = re.search(ipv4Regex, str(args['target'])) + ping_data = "ping" + " " + str(args['target']) + " " + "-I " + str(port) + " -c 1 -s" + " " + str(payload_size) + " " + "-M do" + if ipv4_match: + ping_data = "ping" + " " + str(args['target']) + " " + "-I " + str(port) + " -c 1 -s" + " " + str(payload_size) + " " + "-M do" + else: + ping_data = "ping6" + " " + str(args['target']) + " " + "-I " + str(port) + " -c 1 -s" + " " + str(payload_size) + " " + "-M do" + stdin, stdout, stderr = ssh_cl.exec_command(ping_data, get_pty=True) + print("ping data") + #print(stdout.read()) + if "ttl" in str(stdout.read()): + print("True") + return True + else: + print("False") + return False + + + + + + +def ping_works(logger, payload_size, args, port_no): + # we capture the output to prevent ping + # from printing to terminal + tn = telnetlib.Telnet(args['host'], 8021) + tn.read_until(b"login: ") + tn.write(bytes(args['port'], 'ascii') + b"\r\n") + ipv4Regex = r'(\d{1,3}\.){3}\d{1,3}' + ipv4_match = re.search(ipv4Regex, str(args['target'])) + ping_data = "ping" + " " + str(args['target']) + " " + "-I ixint1 -c 1 -s" + " " + str(payload_size) + " " + "-M do" + if ipv4_match: + ping_data = "ping" + " " + str(args['target']) + " " + "-I ixint1 -c 1 -s" + " " + str(payload_size) + " " + "-M do" + else: + ping_data = "ping6" + " " + str(args['target']) + " " + "-I ixint1 -c 1 -s" + " " + str(payload_size) + " " + "-M do" + #sys.stdout.write('%s: ' % ping_data) + if b'#' in tn.read_until(b'#', timeout=5): + tn.write(bytes(ping_data, 'ascii') + b"\r\n") + if b'ttl' in tn.read_until(b'ttl', timeout=5): + #sys.stdout.write('%s: ' % "success") + #sys.stdout.write('%s: ' % tn.read_until(b'ttl', timeout=5)) + return True + else: + #sys.stdout.write('%s: '% tn.read_until(b'PING', timeout=5)) + return False + +def assign_ip(logger, host, ip, mask): + # + ssh_cl = paramiko.client.SSHClient() + ssh_cl.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_cl.connect(hostname=host, 
username="cyperf", password="cyperf", allow_agent=False, look_for_keys=False) + channel = None + execute = 1 + stdin, stdout, stderr = ssh_cl.exec_command('cyperfagent interface test show', get_pty=True) + for i in stdout: + if "Currently" in i: + interface = re.match(r"Currently configured Test Interface is\:\s+([a-z|A-Z|0-9]+)", i) + test_interface = interface.group(1) + command = "sudo ifconfig " + test_interface + " " + ip + "/" + str(mask) + stdin, stdout, stderr = ssh_cl.exec_command(command, get_pty=True) + stdin.write("cyperf" + '\n') + if stdout.channel.recv_exit_status() != 0: + print("Error while setting IP") + stdin.flush() + return (test_interface) + #set_logger(logger, level="INFO", message= "Successfull IP set:") + + #s.sendline ('cyperfagent interface test show') + +def get_port_speed(logger, host, interface, mtu): + + ssh_cl = paramiko.client.SSHClient() + ssh_cl.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_cl.connect(hostname=host, username="cyperf", password="cyperf", allow_agent=False, look_for_keys=False) + channel = None + execute = 1 + + data = "cat /sys/class/net/" + interface + "/speed" + stdin, stdout, stderr = ssh_cl.exec_command(data, get_pty=True) + speed = stdout.read() + port_speed = speed.decode().rstrip() + + +def assign_mtu(logger, host, interface, mtu): + + ssh_cl = paramiko.client.SSHClient() + ssh_cl.set_missing_host_key_policy(paramiko.AutoAddPolicy()) + ssh_cl.connect(hostname=host, username="cyperf", password="cyperf", allow_agent=False, look_for_keys=False) + channel = None + execute = 1 + + data = "cat /sys/class/net/" + interface + "/speed" + stdin, stdout, stderr = ssh_cl.exec_command(data, get_pty=True) + speed = stdout.read() + + port_speed = speed.decode().rstrip() + + data = "sudo ip link set " + str(interface) + " mtu 9000" + stdin, stdout, stderr = ssh_cl.exec_command(data, get_pty=True) + stdin.write("cyperf" + '\n') + if stdout.channel.recv_exit_status() != 0: + set_logger(logger, level="INFO", 
message= "Test Failed: while setting jumbo mtu = 9000") + pytest_assert(logger, result == True, "Test Failed while setting jumbo mtu = 9000") + print("Error while setting IP") + stdin.flush() + return port_speed + +def telnet(logger, host, ip, port): + lo = 0 # MTUs lower or equal do work + hi = 9000 # MTUs greater or equal don't work + #print('>>> PMTU to %s in range [%d, %d)' % (args.target, lo, hi)) + + + arg = {'host':host, 'target': ip, 'lo':0, 'hi': 9000} + while lo + 1 < hi: + mid = (lo + hi) // 2 + sys.stdout.write('%d: ' % mid) + sys.stdout.flush() + for i in range(2): + if ping_ssh(logger, mid, arg, port): + lo = mid + break + else: + sys.stdout.write('* ') + sys.stdout.flush() + time.sleep(0.2) + else: + hi = mid + print('') + + # header_size = 28 if args.ipv4 else 48 + header_size = 28 + lo = lo + 28 + print('>>> optimal MTU to %s: %d = %d' % ( + arg['target'], lo, lo + )) + return (lo) + + +def parse_yaml(): + config_params = {} + yaml_to_import = os.path.dirname(__file__) + "/params.yaml" + testinfo = yaml.load(open(yaml_to_import), Loader=yaml.FullLoader) + for key, values in testinfo.items(): + if key != "description": + config_params[key] = values + return config_params['config'] + +def report(logger, res, BB, rtt, cmtu, smtu,stats_result, logger_report): + result1 = parse_status(res) + set_logger(logger, level="INFO", message= "\nresult\n " + str(res) + "\n") + if "fail" in result1.keys(): + ix.disconnect() + set_logger(logger, level="INFO", message= str(res)) + set_logger(logger, level="INFO", message= "Test Failed: "+ str(result1['fail'])) + pytest_assert(logger, result == True, "Test Failed") + final_report(logger,logger_report, False) + pytest.fail("Test Failed: "+str(result1['fail'])) + elif "pass" in result1.keys(): + ix.disconnect() + set_logger(logger, level="INFO", message=str(stats_result)) + set_logger(logger, level="INFO", message="\nConsolidated Result\n****************\nPath MTU of Client: " + str(cmtu) + "\nPath Mtu of server: " + 
str(smtu) + "\nRTT : " + str(rtt) + " us" + "\nTheoretical Bottleneck Bandwidth: " + str(BB)) + consolidate_output(logger, stats_result) + final_report(logger,logger_report, True) + #consolidate_result(logger, stats_result) + set_logger(logger, level="INFO", message="Test Passed") + pytest_assert(logger, result == True, "Test Passed") + +def final_report(logger, logger_report, flag): + endtime = datetime.datetime.now().strftime("%Y%m%d-%H:%M:%S") + set_logger(logger_report, message="\nFinal Report\n*******************\nStart Time : " + str(starttime)+"\nEnd Time : " + str(endtime)) + txt = '' + for status in msg : + txt = txt + "\n" + status + set_logger(logger_report, level="INFO", message="\nConsolidated Result\n****************"+ txt) + if flag: + consolidate_output(logger_report, stats_result) + set_logger(logger_report, level="INFO", message= "Test Passed") + else: + set_logger(logger_report, level="INFO", message= "Test Failed") + +def update_cmd_args(ix, cmd_dict, cmd_arg, value): + for cmd in cmd_dict['cmd_list']: + if cmd_arg == 'destination': + res.append(ix.emulation_http(mode='modify_command', role='Client', network_name=cmd_dict['network_name'], + agent_name=cmd_dict['agent_name'], destination = value, command_name = cmd)) + +#cyperf methods + + +def wait_for_test_stop(): + test_is_running = True + while test_is_running: + status = conn.get_test_status() + if status['status'].lower() == "stopped": + test_is_running = False + return + + + + +def modify_ip_config(logger, ip_start, GwStart, NetMask, network_segment): + + conn.set_ip_range_automatic_ip(ip_auto=False, network_segment=1, ) + conn.set_ip_range_ip_start(ip_start=ip_start, network_segment=network_segment) + conn.set_ip_range_gateway(gateway = GwStart, network_segment=network_segment) + conn.set_ip_range_netmask(netmask = NetMask, network_segment=network_segment) + + +def get_rtt(logger, data): + try: + # + #conn.load_config(rtt_config) + # + modify_ip_config(logger, ip_start=data['ClientIP'], 
GwStart = data['ClientGatewayIP'], NetMask = data['ClientSubnetMask'], network_segment=1) + modify_ip_config(logger, ip_start=data['ServerIP'], GwStart = data['ServerGatewayIP'], NetMask = data['ServerSubnetMask'], network_segment=2) + server_intf = assign_ip(logger, data["ServerAgent"], data['ServerIP'], data['ServerSubnetMask'] ) + client_intf = assign_ip(logger, data["ClientAgent"], data['ClientIP'], data['ClientSubnetMask']) + global cmtu + global smtu + global port_speed + port_speed = assign_mtu(logger, data["ClientAgent"], client_intf , 9000) + port_speed = assign_mtu(logger, data["ServerAgent"], server_intf , 9000) + cmtu = telnet(logger, data["ClientAgent"], data['ServerIP'], client_intf) + smtu = telnet(logger, data["ServerAgent"], data['ClientIP'], server_intf) + set_logger(logger, level="INFO", message= "Test 1 completed.") + set_logger(logger, level="INFO", message= "Result - Path Mtu of Client is " + str(cmtu)) + set_logger(logger, level="INFO", message= "Result - Path Mtu of Server is " + str(smtu)) + conn.set_ip_range_mss(mss = (int(cmtu)-28), network_segment=1) + conn.set_ip_range_mss(mss = (int(smtu)-28), network_segment=2) + conn.assign_agents() + st = conn.start_test() + #wait_for_test_stop() + conn.wait_test_finished() + time.sleep(10) + x = conn.get_stats_values("client-latency") + # + rtt_stats = [] + for stat in x['snapshots']: + if stat['values'][0][1] != 'null': + rtt_stats.append(stat['values'][0][1]) + if rtt_stats: + rtt = str(average(rtt_stats)) + avg1 = rtt.split(".") + rtt = avg1[0]+"."+avg1[1][:2] + print("rtt : %s", rtt) + set_logger(logger, level="INFO", message= "Test 2 completed. 
") + set_logger(logger, level="INFO", message= "RTT is " + str(rtt) +" us") + return (rtt, port_speed) + else: + print("error in finding rtt") + except Exception as err: + raise Exception(err) + + +def get_theory_bottleneck(logger, port_speed): + + MTU = int(smtu) + MSS = MTU -40 + MEF = int(1038) + BB = (MSS/MEF) * int(port_speed) + BB = BB*1024 + BB = str(BB) + avg1 = BB.split(".") + BB = avg1[0]+"."+avg1[1][:2] + BB = str(BB) + " Mbps" + + set_logger(logger, level="INFO", message= "Theoretical Bottleneck Bandwidth is " + str(BB)) + return (BB) + + +def max_throughput(logger, logger_report, name, buffer_size, t_num): + band_tput = {} + for buffer in buffer_size: + print ("Running Test for %s with buffer size %s" % (name, buffer)) + set_logger(logger, level="INFO", message= "Running Test for" + str(name) + " with buffer size " + str(buffer)) + band_tput[buffer]= {} + # + conn.set_client_http_profile({"ConnectionsMaxTransactions":t_num}) + conn.set_server_http_profile({"ConnectionsMaxTransactions":t_num}) + conn.set_client_recieve_buffer_size_traffic_profile(buffer) + conn.set_client_transmit_buffer_size_traffic_profile(buffer) + conn.set_server_recieve_buffer_size_traffic_profile(buffer) + conn.set_server_transmit_buffer_size_traffic_profile(buffer) + conn.assign_agents() + conn.start_test() + test_duration = 60 + real_time_stats = [] + start_time = time.time() + + conn.wait_test_finished() + time.sleep(10) + x = conn.get_stats_values("client-throughput") + rtt_stats = [] + for stat in x['snapshots']: + if stat['values'][0][1] != 'null': + num = float(stat['values'][0][1])/1000 + num = num/1000 + rtt_stats.append(int(round(num))) + + if rtt_stats: + tput_average = round(average(rtt_stats)) + print("tput_average : %s mpbs", str(tput_average)) + band_tput[buffer][tput_average] = x['snapshots'] + + file_path = os.path.abspath(__file__+"/../../") + file_path = file_path + "\\" + result_path + "\\" + name + str(buffer) + ".pdf" + conn.get_pdf_report(file_path) + 
#conn.get_capture_files(file_path) + #print ("Completed Test for %s with buffer size %s, Report file %s/%s" % (name, buffer)) + set_logger(logger, level="INFO", message= "Completed Test for" + str(name) + " with buffer size " + str(buffer)) + # + max_stats = maxtput(band_tput) + return (max_stats) + + # while time.time()-start_time < test_duration: + # real_time_stats.append({}) + # for stat in conn.get_available_stats_name(): + # + # real_time_stats[-1][stat] = conn.get_stats_values(statName=stat) + # print(real_time_stats) + # print('Number of read in {} seconds is {}'.format(test_duration,len(real_time_stats))) + # rest.wait_test_finished() + # collect_stats("../test_results", "stats_during_runtime") + +def get_bottleneck(logger, data, logger_report, buffer_size, number, name, tcp= []): + try : + # + conn.load_config(test_name = name) + conn.set_traffic_profile_timeline(duration=120, objective_value = 100) + conn.set_ip_range_mss(mss = (int(cmtu)-28), network_segment=1) + conn.set_ip_range_mss(mss = (int(smtu)-28), network_segment=2) + modify_ip_config(logger, ip_start=data['ClientIP'], GwStart = data['ClientGatewayIP'], NetMask = data['ClientSubnetMask'], network_segment=1) + modify_ip_config(logger, ip_start=data['ServerIP'], GwStart = data['ServerGatewayIP'], NetMask = data['ServerSubnetMask'], network_segment=2) + if tcp: + for tcp_no in tcp: + print ("Running Test for TCP %s conn" % tcp) + set_logger(logger, level="INFO", message= "Running Test for " + str(tcp_no) + " TCP connections:") + tput = max_throughput(logger, logger_report, name,buffer_size, t_num=tcp_no) + buffer_size_new = list(tput.keys())[0] + avg = list(tput[buffer_size_new].keys())[0] + avg = int(avg)/1000 + key= name + "_" + "tcp" + "_" + str(tcp_no) + "_conn" + stats_result[key] = {buffer_size_new:avg} + set_logger(logger, level="INFO", message= "Max Throughput for: " + str(name) + "\nBuffer size: " + str(buffer_size_new) +"\nAchieve TPUT: "+str(avg) + " mpbs") + else: + tput = 
max_throughput(logger, logger_report, name,buffer_size, t_num=2) + buffer_size_new = list(tput.keys())[0] + avg = list(tput[buffer_size_new].keys())[0] + #avg = int(avg)/1000 + stats_result[name] = {buffer_size_new:avg} + set_logger(logger, level="INFO", message= "Max Throughput for: " + str(name) + "\nBuffer size: " + str(buffer_size_new) +"\nAchieve TPUT: "+str(avg) + " mpbs") + except Exception as err: + raise Exception(err) + +def test_script(logger, logger_report): + data = parse_yaml() + + + server = data['IPAddress'] + username = data['username'] + password = data['password'] + client_id = data['client_id'] + client_ip = data['ClientAgent'] + server_ip = data['ServerAgent'] + client_gateway_ip = data['ClientGatewayIP'] + server_gateway_ip = data['ServerGatewayIP'] + client_subnet_mask = data['ClientSubnetMask'] + server_subnet_mask = data['ServerSubnetMask'] + global conn + global result_path + set_logger(logger, level="INFO", message = "RUNNING RFC 6349 TEST WITH PARAMETERS") + set_logger(logger, level="INFO", message="CyPerf Server " + str(server) + " Client ID " + str(client_id)) + set_logger(logger, level="INFO", message="Client Agent " + str(client_ip) + " Server Agent " + str(server_ip)) + set_logger(logger, level="INFO", message="Client Ip " + str(data["ClientIP"]) + " Client gateway IP " + str(data["ClientGatewayIP"]) + "Client subnet mask " + str(data["ClientSubnetMask"]) ) + set_logger(logger, level="INFO", message="Server Ip " + str(data["ServerIP"]) + " Server gateway IP " + data['ServerGatewayIP'] + " Server Subnet Mask " + str(data['ServerSubnetMask'])) + + set_logger(logger, level="INFO", message="RFC 6349 based Throughput Testing \n ********************************** \nThe RFC 6349 “Framework for TCP Throughput Testing” provides a methodology for testing sustained TCP Layer performance. 
\n In addition to finding the TCP throughput at the optimal buffer size, RFC 6349 presents metrics that can be used to better understand the results.\n RFC 6349 testing is done in 3 steps:\n 1. Identify the Path Maximum Transmission Unit (MTU) \n 2. Identify the Baseline Round-Trip Time (RTT) and the Bottleneck Bandwidth (BB) \n 3. Perform the TCP Connection Throughput Tests \n ***********************************\n") + set_logger(logger, level="INFO", message="Test 1 - Determine the Path MTU between the client and the server\n -----------------------------------------------------------------\n") + rtt_name = "1_RTT" + rtt_config= os.path.abspath(__file__+"/../../" + rtt_name + ".zip") + + conn = create_new_config(server, username, password, client_id, rtt_config, rtt_name) + #rtt = "1_RTT" + bottleneck = "2_Bottleneck_bandwidth" + upstream = "3_Upstream_tput" + downstream = "4_Downstream_tput" + bidirectional = "5_Bidirectional_tput" + bidirectional_user = "6_Bidirectional_tput_user_constraint" + #rtt_config= os.path.abspath(__file__+"/../../" + rtt + ".zip") + bottleneck_config= os.path.abspath(__file__+"/../../" + bottleneck + ".zip") + upstream_config= os.path.abspath(__file__+"/../../" + upstream + ".zip") + downstream_config= os.path.abspath(__file__+"/../../" + downstream + ".zip") + bidirectional_config= os.path.abspath(__file__+"/../../" + bidirectional + ".zip") + bidirectional_user_config= os.path.abspath(__file__+"/../../" + bidirectional_user + ".zip") + conn.import_config(bottleneck_config) + conn.import_config(upstream_config) + conn.import_config(downstream_config) + # + conn.import_config(bidirectional_config) + time.sleep(5) + conn.import_config(bidirectional_user_config) + (rtt, speed) = get_rtt(logger, data) + + result_path = result_log() + set_logger(logger, level="INFO", message="Test 3 - Determine the Theoretical Bottleneck Bandwidth \n -----------------------------------------------------------------\n") + BB = 
get_theory_bottleneck(logger=logger, port_speed=speed) + set_logger(logger, level="INFO", message="Test 3 completed.\n Theoretical Bottleneck Bandwidth " + BB + "\n-----------------------------------------------------------------\n") + buffer_size = [4096, 8192, 16384, 32768, 49152, 65536] + set_logger(logger, level="INFO", message="Test 3 completed.\n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message="Test 4 - Determine the Bottleneck Bandwidth of the network \n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message= "Running Bottleneck test for rxf " + str(bottleneck)) + get_bottleneck(logger=logger, data = data, logger_report=logger_report, buffer_size= buffer_size, number=3, name=bottleneck) + set_logger(logger, level="INFO", message="Test 4 completed.\n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message="Test 5 - TCP Connection Throughput tests \n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message="Test 5.1 - Upstream TCP Throughput test \n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message= "Running Upstream test for rxf " + str(upstream)) + + get_bottleneck(logger=logger, data = data, logger_report=logger_report, buffer_size= buffer_size, number=3, name=upstream) + set_logger(logger, level="INFO", message="Test 5.1 completed.\n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message="Test 5.2 - Downstream TCP Throughput test \n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message= "Running Downstream test for rxf " + str(downstream)) + get_bottleneck(logger=logger, data = data, logger_report=logger_report, buffer_size= buffer_size, number=3, 
name=downstream) + set_logger(logger, level="INFO", message="Test 5.2 completed.\n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message="Test 5.3 - Bidirectional TCP Throughput test \n -----------------------------------------------------------------\n") + set_logger(logger, level="INFO", message= "Running Bidirectonal test for rxf " + str(bidirectional)) + get_bottleneck(logger=logger, data = data, logger_report=logger_report, buffer_size= buffer_size, number=3, name=bidirectional) + set_logger(logger, level="INFO", message="Test 5.3 completed.\n ----------------------------------------------------------------\n") + tcp_conn = [2, 4, 8, 16] + buffer_size = [4096, 16384, 49152] + set_logger(logger, level="INFO", message= "Test 5.4 - Multiple TCP Connection test ") + set_logger(logger, level="INFO", message= "Running Multi Connection test for rxf " + str(bidirectional_user)) + get_bottleneck(logger=logger, data = data, logger_report=logger_report, buffer_size= buffer_size, number=3, name=bidirectional_user, tcp=tcp_conn) + set_logger(logger, level="INFO", message="\nConsolidated Result\n****************\nPath MTU of Client: " + str(cmtu) + "\nPath Mtu of server: " + str(smtu) + "\n RTT : " + str(rtt) + " us" +"\nTheoretical Bottleneck Bandwidth: " + str(BB)) + + consolidate_result(logger, stats_result) + set_logger(logger, level="INFO", message="Test Passed") + pytest_assert(logger, result == True, "Test Passed") \ No newline at end of file diff --git a/utils/RFC 6349/testlogcyperf_pass_test.log b/utils/RFC 6349/testlogcyperf_pass_test.log new file mode 100644 index 00000000..e60f61ce --- /dev/null +++ b/utils/RFC 6349/testlogcyperf_pass_test.log @@ -0,0 +1,58 @@ +2025-01-23 11:58:44,348 INFO RUNNING RFC 6349 TEST WITH PARAMETERS + +2025-01-23 11:58:44,348 INFO CyPerf Server 10.39.46.181 Client ID clt-wap + +2025-01-23 11:58:44,353 INFO Client Agent 10.39.47.53 Server Agent 10.39.47.97 + +2025-01-23 
11:58:44,353 INFO Client Ip 10.0.0.30 Client gateway IP 0.0.0.0Client subnet mask 16 + +2025-01-23 11:58:44,356 INFO Server Ip 10.0.0.40 Server gateway IP 0.0.0.0 Server Subnet Mask 16 + +2025-01-23 11:58:44,358 INFO RFC 6349 based Throughput Testing + ********************************** +The RFC 6349 Framework for TCP Throughput Testing provides a methodology for testing sustained TCP Layer performance. + In addition to finding the TCP throughput at the optimal buffer size, RFC 6349 presents metrics that can be used to better understand the results. + RFC 6349 testing is done in 3 steps: + 1. Identify the Path Maximum Transmission Unit (MTU) + 2. Identify the Baseline Round-Trip Time (RTT) and the Bottleneck Bandwidth (BB) + 3. Perform the TCP Connection Throughput Tests + *********************************** + + +2025-01-23 11:58:44,360 INFO Test 1 - Determine the Path MTU between the client and the server + ----------------------------------------------------------------- + + +2025-01-23 11:59:26,814 INFO Test 1 completed. + +2025-01-23 11:59:26,814 INFO Result - Path Mtu of Client is 9000 + +2025-01-23 11:59:26,814 INFO Result - Path Mtu of Server is 9000 + +2025-01-23 12:02:07,997 INFO Test 2 completed. + +2025-01-23 12:02:07,997 INFO RTT is 138.60 us + +2025-01-23 12:02:08,012 INFO Test 3 - Determine the Theoretical Bottleneck Bandwidth + ----------------------------------------------------------------- + + +2025-01-23 12:02:08,014 INFO Theoretical Bottleneck Bandwidth is 88391522.15 Mbps + +2025-01-23 12:02:08,016 INFO Test 3 completed. + Theoretical Bottleneck Bandwidth 88391522.15 Mbps +----------------------------------------------------------------- + + +2025-01-23 12:02:08,016 INFO Test 3 completed. 
+ ----------------------------------------------------------------- + + +2025-01-23 12:02:08,019 INFO Test 4 - Determine the Bottleneck Bandwidth of the network + ----------------------------------------------------------------- + + +2025-01-23 12:02:08,022 INFO Running Bottleneck test for rxf 2_Bottleneck_bandwidth + +2025-01-23 12:02:10,983 INFO Running Test for2_Bottleneck_bandwidth with buffer size 4096 + diff --git a/utils/RFC 6349/testlogcyperf_pass_test1.log b/utils/RFC 6349/testlogcyperf_pass_test1.log new file mode 100644 index 00000000..5bfe0e2d --- /dev/null +++ b/utils/RFC 6349/testlogcyperf_pass_test1.log @@ -0,0 +1,25 @@ +2025-01-24 12:45:07,299 INFO RUNNING RFC 6349 TEST WITH PARAMETERS + +2025-01-24 12:45:07,299 INFO CyPerf Server 10.39.46.181 Client ID clt-wap + +2025-01-24 12:45:07,299 INFO Client Agent 10.39.47.53 Server Agent 10.39.47.97 + +2025-01-24 12:45:07,299 INFO Client Ip 10.0.0.30 Client gateway IP 0.0.0.0Client subnet mask 16 + +2025-01-24 12:45:07,299 INFO Server Ip 10.0.0.40 Server gateway IP 0.0.0.0 Server Subnet Mask 16 + +2025-01-24 12:45:07,299 INFO RFC 6349 based Throughput Testing + ********************************** +The RFC 6349 Framework for TCP Throughput Testing provides a methodology for testing sustained TCP Layer performance. + In addition to finding the TCP throughput at the optimal buffer size, RFC 6349 presents metrics that can be used to better understand the results. + RFC 6349 testing is done in 3 steps: + 1. Identify the Path Maximum Transmission Unit (MTU) + 2. Identify the Baseline Round-Trip Time (RTT) and the Bottleneck Bandwidth (BB) + 3. 
Perform the TCP Connection Throughput Tests + *********************************** + + +2025-01-24 12:45:07,299 INFO Test 1 - Determine the Path MTU between the client and the server + ----------------------------------------------------------------- + + diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/__pycache__/RESTasV3.cpython-39.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/__pycache__/RESTasV3.cpython-39.pyc new file mode 100644 index 00000000..be6c79cd Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/__pycache__/RESTasV3.cpython-39.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/LICENSE b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/LICENSE new file mode 100644 index 00000000..376e0ac1 --- /dev/null +++ b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/LICENSE @@ -0,0 +1,5 @@ +Copyright Keysight Technologies 2021. + +IMPORTANT: If the Software includes one or more computer programs bearing a Keysight copyright notice and in source code format (“Source Files”), such Source Files are subject to the terms and conditions of the Keysight Software End-User License Agreement (“EULA”) www.Keysight.com/find/sweula and these Supplemental Terms. BY USING THE SOURCE FILES, YOU AGREE TO BE BOUND BY THE TERMS AND CONDITIONS OF THE EULA INCLUDING THESE SUPPLEMENTAL TERMS. IF YOU DO NOT AGREE TO THESE TERMS AND CONDITIONS, DO NOT COPY OR DISTRIBUTE THE SOURCE FILES. +1. Additional Rights and Limitations. Keysight grants you a limited, non-exclusive license, without a right to sub-license, to copy and modify the Source Files solely for use with Keysight products, or systems that contain at least one Keysight product. You own any such modifications and Keysight retains all right, title and interest in the underlying Source Files. All rights not expressly granted are reserved by Keysight. +2. General. 
Capitalized terms used in these Supplemental Terms and not otherwise defined herein shall have the meanings assigned to them in the EULA. To the extent that any of these Supplemental Terms conflict with terms in the EULA, these Supplemental Terms control solely with respect to the Source Files. diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/REST_WRAPPER.py b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/REST_WRAPPER.py new file mode 100644 index 00000000..762ee0c4 --- /dev/null +++ b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/REST_WRAPPER.py @@ -0,0 +1,145 @@ +import os +import sys +import time +import datetime +#libpath = os.path.abspath(__file__+"/../../testsuite/rest_api_wrapper") +#sys.path.insert(0,libpath) +#sys.path.append("../") +#from ... import nib +from RESTasV3 import RESTasV3 +from Statistics import Statistics + +#mdw_address = sys.argv[1] + + + +def create_new_config(server, username, password, client_id, config_path=None, config_name=None): + """ + Creates an empty CyPerf config with network assigned (or Imports a custom config) + + config_path (str): path to the zip config file to be imported + """ + + global rest + rest = RESTasV3(ipAddress=server, username=username, password = password, client_id= client_id) + config = config_path if config_path else None + rest.setup(config, config_name) + return rest + + +def create_traffic_profile(apps, objective, objective_value, objective_unit, duration, ssl=None): + """ + Adds a traffic profile in the current test configuration + + apps (list): the application to be added to the traffic profile (i.e. HTTP App) + objective (str): the objective type of the traffic profile (i.e. Throughput, Simulated Users, CPS) + objective_value (int): the value of the configured objective + duration (int): the total time (sec.) 
for the objective to run + ssl (str): the SSL version + """ + + rest.add_traffic_profile() + for app in apps: + rest.add_application(app) + rest.set_primary_objective(objective) + rest.set_traffic_profile_timeline( + duration=duration, + objective_value=objective_value, + objective_unit=objective_unit + ) + if ssl: + rest.set_traffic_profile_client_tls(version=ssl) + rest.set_traffic_profile_server_tls(version=ssl) + + +def create_attack_profile(attacks, objective_value, max_concurrent_attacks, duration, iteration_count=0, ssl=None): + """ + Creates an attack profile in the current test configuration + + attacks (list): the attack to be added to the attack profile (i.e. eShop Attack on Chrome browser) + objective_value (int): the value of the attacks per seconds objective + max_concurrent_attacks (int): the maximum no. of concurrent attacks to run + duration (int): the total time (sec.) for the objective to run + iteration_count (int): the number of iterations the attack profile will execute + ssl (str): the SSL version + """ + + rest.add_attack_profile() + for attack in attacks: + rest.add_attack(attack) + rest.set_attack_profile_timeline( + duration=duration, + objective_value=objective_value, + max_concurrent_attacks=max_concurrent_attacks, + iteration_count=iteration_count + ) + if ssl: + rest.set_attack_profile_client_tls(version=ssl) + rest.set_attack_profile_server_tls(version=ssl) + + +def run_test(): + """ + Start the CyPerf test config and wait for it to finish + """ + + rest.start_test() + rest.wait_test_finished() + + +def delete_test(): + """ + Deletes the current CyPerf session + """ + + rest.delete_current_session() + + +def collect_stats(results_folder, test_name, perform_validation=True): + """ + Collects the test results resources as CSV files + + results_folder (str): path where to store the tests results + test_name (str): the name of the test + """ + + results_path = os.path.join(results_folder, test_name) + "_" + 
datetime.datetime.now().strftime("%Y%m%d-%H%M%S") + if not os.path.exists(results_path): + os.makedirs(results_path) + print("Saving CSV test resources to path {}".format(results_path)) + rest.get_all_stats(results_path) + + if perform_validation: + validate_stats(results_path) + + +def validate_stats(results_path): + """ + Validates the test results resources using a generic baseline validation + + results_path (str): path of the CSV tests results + """ + + config_type = rest.get_config_type() + stats = Statistics(results_path) + stats_failure_list = stats.validate_mdw_stats(config_type) + if len(stats_failure_list) > 0: + print("Following stats failed validation: {}".format(stats_failure_list)) + else: + print("All stats PASSED validation") + + +def wait_for_eula(timeout=600): + init_timeout = timeout + count = 2 + + while timeout > 0: + response = rest.get_automation_token() + if "KEYSIGHT SOFTWARE END USER LICENSE AGREEMENT" in response.text.upper(): + print("Keysight EULA has was prompted") + return True + else: + time.sleep(count) + timeout -= count + else: + raise Exception("CyPerf controller did not properly boot after timeout {}s".format(init_timeout)) \ No newline at end of file diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/RESTasV3.py b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/RESTasV3.py new file mode 100644 index 00000000..47825a0a --- /dev/null +++ b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/RESTasV3.py @@ -0,0 +1,1210 @@ +import os +import io +import sys +import glob +import time +import urllib3 +import requests +import simplejson as json +from zipfile import ZipFile +from datetime import datetime + +#sys.path.insert(0, os.path.join(os.path.dirname(__file__+"/.."))) + +#from resources.configuration import WAP_USERNAME, WAP_PASSWORD, WAP_CLIENT_ID + + +class RESTasV3: + + def __init__(self, ipAddress, username, password, client_id, verify=True): + + self.ipAddress = ipAddress + self.username = username + self.password = password 
+ self.client_id = client_id + self.verify = verify + self.session = requests.Session() + urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) + self.session.verify = False + self.host = 'https://{}'.format(ipAddress) + self.cookie = self.get_automation_token() + self.headers = {'authorization': self.cookie} + self.startTime = None + self.startTrafficTime = None + self.stopTrafficTime = None + self.stopTime = None + self.configID = None + self.sessionID = None + self.config = None + self.testDuration = 60 + + + + def __sendPost(self, url, payload, customHeaders=None, files=None, debug=True): + expectedResponse = [200, 201, 202] + print("POST at URL: {} with payload: {}".format(url, payload)) + payload = json.dumps(payload) if customHeaders is None else payload + response = self.session.post('{}{}'.format(self.host, url), + headers=customHeaders if customHeaders else self.headers, data=payload, + files=files, verify=False) + if debug: + print("POST response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.post('{}{}'.format(self.host, url), + headers=customHeaders if customHeaders else self.headers, data=payload, + files=files, verify=False) + print("POST response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + 'Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response + + def __sendGet(self, url, expectedResponse, customHeaders=None, debug=True): + print("GET at URL: {}".format(url)) + response = self.session.get('{}{}'.format(self.host, url), + headers=customHeaders if customHeaders else self.headers) + if debug: + print("GET response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.get('{}{}'.format(self.host, url), + headers=customHeaders if customHeaders else self.headers) + print("GET response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code != expectedResponse: + raise Exception( + 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response + + def __sendPut(self, url, payload, debug=True): + print("PUT at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.put('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + if debug: + print("PUT response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.put('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + print("PUT response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + 'Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response + + def __sendPatch(self, url, payload, debug=True): + print("PATCH at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.patch('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + if debug: + print("PATCH response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.patch('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + print("PATCH response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response + + def __sendDelete(self, url, headers=None, debug=True): + print("DELETE at URL: {}".format(url)) + expectedResponse = [200, 202, 204] + response = self.session.delete('%s%s' % (self.host, url), headers=headers) + if debug: + print("DELETE response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.delete('%s%s' % (self.host, url), headers=headers) + print("DELETE response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception( + 'Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response + + def get_automation_token(self): + apiPath = '/auth/realms/keysight/protocol/openid-connect/token' + headers = {"content-type": "application/x-www-form-urlencoded"} + payload = {"username": self.username, + "password": self.password, + "grant_type": "password", + "client_id": self.client_id} + + response = self.__sendPost(apiPath, payload, customHeaders=headers) + if self.verify: + if response.headers.get('content-type') == 'application/json': + response = response.json() + print('Access Token: {}'.format(response["access_token"])) + return response['access_token'] + else: + raise Exception('Fail to obtain authentication token') + + return response + + def refresh_access_token(self): + access_token = self.get_automation_token() + self.headers = {'authorization': access_token} + print('Authentication token refreshed!') + + def setup(self, config=None, config_name=None): + + if config: + self.configID = self.import_config(config) + else: + self.configID = config + self.load_config(test_name = config_name) + + #self.sessionID = self.open_config() + self.config = self.get_session_config() + + def get_session(self, session_id): + apiPath = '/api/v2/sessions/{}'.format(session_id) + response = self.__sendGet(apiPath, 200).json() + return response + + def delete_session(self, session_id): + """ + Delete a session by its id + :param session_id: The id got from getSessions + """ + apiPath = '/api/v2/sessions/{}'.format(session_id) + self.__sendDelete(apiPath, headers=self.headers) + + def delete_current_session(self): + """ + Delete the current session + return: None + """ + apiPath = '/api/v2/sessions/{}'.format(self.sessionID) + self.__sendDelete(apiPath, headers=self.headers) + + def get_all_sessions(self): + apiPath = '/api/v2/sessions' + response = self.__sendGet(apiPath, 200).json() + return response + + def delete_all_sessions(self): + """ + Delete all the current sessions 
opened on the application + :return: None + """ + print('Deleting all sessions...') + session_list = self.get_all_sessions() + for i in range(0, len(session_list)): + try: + self.delete_session(session_list[i]['id']) + except Exception as e: + print('{} could not be deleted because: {}'.format(session_list[i]['id'], e)) + pass + if len(self.get_all_sessions()) > 0: + raise Exception('Not all sessions could be deleted!') + else: + print('No sessions opened!') + + def get_test_details(self, session_id): + apiPath = '/api/v2/sessions/{}/test'.format(session_id) + response = self.__sendGet(apiPath, 200).json() + return response + + def set_license_server(self, licenseServerIP, retries=3, wait=30): + apiPath = '/api/v2/license-servers' + self.__sendPost(apiPath, payload={"hostName": licenseServerIP}) + + def get_license_servers(self): + apiPath = '/api/v2/license-servers' + return self.__sendGet(apiPath, 200).json() + + def wait_event_success(self, apiPath, timeout): + counter = 1 + while timeout > 0: + response = self.__sendGet(apiPath, 200).json() + if response['state'] == "SUCCESS": + return response + else: + timeout -= counter + time.sleep(counter) + + def activate_license(self, activation_code, quantity=1, timeout=60): + apiPath = '/api/v2/licensing/operations/activate' + response = self.__sendPost(apiPath, payload=[{"activationCode": activation_code, "quantity": quantity}]).json() + apiPath = '/api/v2/licensing/operations/activate/{}'.format(response["id"]) + if not self.wait_event_success(apiPath, timeout): + raise TimeoutError("Failed to activate license. 
Timeout reached = {} seconds".format(timeout)) + + def deactivate_license(self, activation_code, quantity=1, timeout=60): + apiPath = '/api/v2/licensing/operations/deactivate' + response = self.__sendPost(apiPath, payload=[{"activationCode": activation_code, "quantity": quantity}]).json() + apiPath = '/api/v2/licensing/operations/deactivate/{}'.format(response["id"]) + if "The Activation Code : \'{}\' is not installed.".format(activation_code) == \ + self.__sendGet(apiPath, 200).json()['message']: + print('License code {} is not installed'.format(activation_code)) + elif not self.wait_event_success(apiPath, timeout): + raise TimeoutError("Failed to deactivate license. Timeout reached = {} seconds".format(timeout)) + + def get_license_statistics(self, timeout=30): + apiPath = '/api/v2/licensing/operations/retrieve-counted-feature-stats' + response = self.__sendPost(apiPath, payload={}).json() + apiPath = '/api/v2/licensing/operations/retrieve-counted-feature-stats/{}'.format(response["id"]) + if not self.wait_event_success(apiPath, timeout): + raise TimeoutError("Failed to obtain license stats. Timeout reached = {} seconds".format(timeout)) + apiPath = '/api/v2/licensing/operations/retrieve-counted-feature-stats/{}/result'.format(response["id"]) + response = self.__sendGet(apiPath, 200).json() + return response + + def nats_update_route(self, nats_address, retries=3, wait=120): + apiPath = '/api/v2/brokers' + self.__sendPost(apiPath, payload={"host": nats_address}) + + def import_config(self, config): + apiPath = '/api/v2/configs' + if config.endswith('.json'): + config = json.loads(open(config, "r").read()) + response = self.__sendPost(apiPath, config) + elif config.endswith('.zip'): + zip_file_path = {"archive": (config, open(config, "rb"), "application/zip")} + response = self.__sendPost(apiPath, None, customHeaders=self.headers, files=zip_file_path) + else: + raise Exception("Config type not supported. 
Requires zip or json.") + if response: + print('Config successfully imported, config ID: {}'.format(response.json()[0]['id'])) + return response.json()[0]['id'] + else: + raise Exception('Failed to import test config') + + def export_config(self, export_path=None): + config_id = self.configID + apiPath = '/api/v2/configs/{}?include=all&resolveDependencies=true'.format(config_id) + customHeaders = self.headers + customHeaders['Accept'] = 'application/zip' + response = self.__sendGet(apiPath, 200, customHeaders=customHeaders) + + file_name = response.headers.get('content-disposition').split("=")[1].strip('"') + + if export_path: + file_name = os.path.join(export_path, file_name) + print("Export path/file: {}".format(file_name)) + with open(file_name, 'wb') as archive: + archive.write(response.content) + return file_name + + def export_config_by_name(self, export_path=None, config_name=None): + config_id = self.get_config_id(config_name) + apiPath = '/api/v2/configs/{}?include=all&resolveDependencies=true'.format(config_id) + customHeaders = self.headers + customHeaders['Accept'] = 'application/zip' + response = self.__sendGet(apiPath, 200, customHeaders=customHeaders) + + file_name = response.headers.get('content-disposition').split("=")[1].strip('"') + + if export_path: + file_name = os.path.join(export_path, file_name) + print("Export path/file: {}".format(file_name)) + with open(file_name, 'wb') as archive: + archive.write(response.content) + return file_name + + def open_config(self): + apiPath = '/api/v2/sessions/' + response = self.__sendPost(apiPath, payload={"configUrl": self.configID}) + if response: + print('Config successfully opened, session ID: {}'.format(response.json()[0]['id'])) + return response.json()[0]['id'] + + def get_session_config(self, sessionId=None): + sessionId = sessionId if sessionId else self.sessionID + apiPath = '/api/v2/sessions/{}/config?include=all'.format(sessionId) + return self.__sendGet(apiPath, 200, debug=False).json() + + def 
delete_config(self, config_id): + """ + Delete a config after you've specified its id + :param config_id: The id of the config + :return: None + """ + apiPath = '/api/v2/configs/{}'.format(config_id) + self.__sendDelete(apiPath, self.headers) + + def delete_config_by_name(self, config_name=None): + config_id = self.get_config_id(config_name) + apiPath = '/api/v2/configs/{}'.format(config_id) + self.__sendDelete(apiPath, self.headers) + + def add_network_segment(self): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment'.format(self.sessionID) + self.__sendPost(apiPath, {}) + + def wait_agents_connect(self, agents_nr=3, timeout=300): + response = [] + init_timeout = timeout + print('Waiting for agents to connect to the CyPerf controller...') + while timeout > 0: + response = self.get_agents() + if len(response) >= agents_nr: + print('There are {} agents connected to the CyPerf controller'.format(len(response))) + return True + else: + time.sleep(10) + timeout -= 10 + else: + raise Exception( + "Expected {} agents connected after {}s. 
Got only {}.".format(agents_nr, init_timeout, len(response))) + + def get_agents(self): + apiPath = '/api/v2/agents' + return self.__sendGet(apiPath, 200).json() + + def get_agents_ids(self, agentIPs=None, wait=None): + if wait: + self.wait_agents_connect() + agentsIDs = list() + response = self.get_agents() + print('Found {} agents'.format(len(response))) + if type(agentIPs) is str: agentIPs = [agentIPs] + for agentIP in agentIPs: + for agent in response: + if agent['IP'] in agentIP: + print("agent_IP: {}, agent_ID: {}".format(agent['IP'], agent['id'])) + agentsIDs.append(agent['id']) + break + return agentsIDs + + def get_agents_ips(self, wait=None): + if wait: + self.wait_agents_connect() + agentsIPs = list() + response = self.get_agents() + print('Found {} agents'.format(len(response))) + # fixme B2B only - ClientAgent is excluded in AWS scenario + for agent in response: + agentsIPs.append(agent['IP']) + print('Agents IP List: {}'.format(agentsIPs)) + return agentsIPs + + def assign_agents(self): + agents_ips = self.get_agents_ips() + self.assign_agents_by_ip(agents_ips=agents_ips[0], network_segment=1) + self.assign_agents_by_ip(agents_ips=agents_ips[1], network_segment=2) + + def assign_agents_by_ip(self, agents_ips, network_segment): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments'.format( + self.sessionID, network_segment) + payload = {"ByID": [], "ByTag": []} + agents_ids = self.get_agents_ids(agentIPs=agents_ips) + for agent_id in agents_ids: + payload["ByID"].append({"agentId": agent_id}) + self.__sendPatch(apiPath, payload) + + def assign_agents_by_tag(self, agents_tags, network_segment): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"ByID": [], "ByTag": [agents_tags]}) + + def set_traffic_capture(self, agents_ips, network_segment, is_enabled=True, 
capture_latest_packets=False, + max_capture_size=104857600): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/agentAssignments'.format( + self.sessionID, network_segment) + payload = {"ByID": []} + capture_settings = {"captureEnabled": is_enabled, "captureLatestPackets": capture_latest_packets, + "maxCaptureSize": max_capture_size} + agents_ids = self.get_agents_ids(agentIPs=agents_ips) + for agent_id in agents_ids: + payload["ByID"].append({"agentId": agent_id, "captureSettings": capture_settings}) + self.__sendPatch(apiPath, payload) + + def get_capture_files(self, captureLocation, exportTimeout=180): + self.get_result_ended() + test_id = self.get_test_id() + apiPath = '/api/v2/results/{}/operations/generate-results'.format(test_id) + response = self.__sendPost(apiPath, None).json() + apiPath = response['url'][len(self.host):] + response = self.wait_event_success(apiPath, timeout=exportTimeout) + if not response: + raise TimeoutError("Failed to download Captures. Timeout reached = {} seconds".format(exportTimeout)) + apiPath = response['resultUrl'] + response = self.__sendGet(apiPath, 200, debug=False) + zf = ZipFile(io.BytesIO(response.content), 'r') + zf.extractall(captureLocation) + for arh in glob.iglob(os.path.join(captureLocation, "*.zip")): + files = os.path.splitext(os.path.basename(arh))[0] + zf = ZipFile(arh) + zf.extractall(path=os.path.join(captureLocation, "pcaps", files)) + return response + + def get_pdf_report(self, pdfLocation, exportTimeout=180): + test_id = self.get_test_id() + apiPath = '/api/v2/results/{}/operations/generate-pdf'.format(test_id) + response = self.__sendPost(apiPath, None).json() + apiPath = response['url'][len(self.host):] + response = self.wait_event_success(apiPath, timeout=exportTimeout) + if not response: + raise TimeoutError("Failed to download PDF report. 
Timeout reached = {} seconds".format(exportTimeout)) + apiPath = response['resultUrl'] + with open(pdfLocation, "wb") as f: + response = self.__sendGet(apiPath, 200, debug=False) + if response.status_code == 200: + pdf_response_content = response.content + f.write(pdf_response_content) + return response + + def add_dut(self): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + return response + + def delete_dut(self, network_segment): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendDelete(apiPath, self.headers) + + def set_dut(self, active=True, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"active": active}) + + def set_dut_host(self, host, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"host": host}) + + def set_http_health_check(self, enabled=True, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Enabled": enabled}) + + def set_http_health_check_port(self, port, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Port": port}) + + def set_http_health_check_url(self, target_url, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath + '/Params/1', 
payload={"Value": target_url}) + + def set_http_health_check_payload(self, payload_file, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + if isinstance(payload_file, float): + self.__sendPatch(apiPath + '/Params/2', payload={"Value": payload_file}) + else: + self.set_custom_payload(apiPath + '/Params/2', payload_file) + + def set_http_health_check_version(self, http_version, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath + '/Params/3', payload={"Value": http_version}) + + def set_https_health_check(self, enabled=True, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Enabled": enabled}) + + def set_https_health_check_port(self, port, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Port": port}) + + def set_https_health_check_url(self, target_url, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath + '/Params/1', payload={"Value": target_url}) + + def set_https_health_check_payload(self, payload_file, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPSHealthCheck'.format( + self.sessionID, network_segment) + if isinstance(payload_file, float): + self.__sendPatch(apiPath + '/Params/2', payload={"Value": payload_file}) + else: + self.set_custom_payload(apiPath + '/Params/2', payload_file) + + def 
set_https_health_check_version(self, https_version, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/HTTPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath + '/Params/3', payload={"Value": https_version}) + + def set_tcp_health_check(self, enabled=True, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/TCPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Enabled": enabled}) + + def set_tcp_health_check_port(self, port, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/DUTNetworkSegment/{}/TCPHealthCheck'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Port": port}) + + def set_client_recieve_buffer_size_attack_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_client_transmit_buffer_size_attack_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_client_recieve_buffer_size_traffic_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_client_transmit_buffer_size_traffic_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_server_recieve_buffer_size_attack_profile(self, value): + 
apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_server_transmit_buffer_size_attack_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_server_recieve_buffer_size_traffic_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"RxBuffer": value}) + + def set_server_transmit_buffer_size_traffic_profile(self, value): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerTcpProfile'.format( + self.sessionID) + self.__sendPatch(apiPath, payload={"TxBuffer": value}) + + def set_ip_network_segment(self, active=True, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"active": active}) + + def set_network_tags(self, tags="Client", network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"networkTags": [tags]}) + + def set_application_client_network_tags(self, tags, app_nr): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/operations/modify-tags-recursively'.format( + self.sessionID, app_nr) + self.__sendPost(apiPath, payload={"SelectTags": True, "ClientNetworkTags": [tags]}) + + def remove_application_client_network_tags(self, tags, app_nr): + apiPath = 
'/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/operations/modify-tags-recursively'.format( + self.sessionID, app_nr) + self.__sendPost(apiPath, payload={"SelectTags": False, "ClientNetworkTags": [tags]}) + + def set_network_min_agents(self, min_agents=1, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"minAgents": min_agents}) + + def add_ip_range(self, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges'.format( + self.sessionID, network_segment) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]['id'] + + def delete_ip_range(self, ip_range=1, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendDelete(apiPath, self.headers) + + def set_ip_range_automatic_ip(self, ip_auto=True, network_segment=1, ip_range=1, ): + + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"IpAuto": ip_auto}) + + def set_ip_range_ip_start(self, ip_start, network_segment=1, ip_range=1, ): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"IpStart": ip_start}) + + def set_ip_range_ip_increment(self, ip_increment="0.0.0.1", network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"IpIncr": ip_increment}) + + def set_ip_range_ip_count(self, count=1, network_segment=1, ip_range=1): + apiPath = 
'/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"Count": count}) + + def set_ip_range_max_count_per_agent(self, max_count_per_agent=1, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"maxCountPerAgent": max_count_per_agent}) + + def set_ip_range_automatic_netmask(self, netmask_auto=True, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"NetMaskAuto": netmask_auto}) + + def set_ip_range_netmask(self, netmask=16, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"NetMask": netmask}) + + def set_ip_range_automatic_gateway(self, gateway_auto=True, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"GwAuto": gateway_auto}) + + def set_ip_range_gateway(self, gateway="10.0.0.1", network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"GwStart": gateway}) + + def set_ip_range_network_tags(self, tags, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, 
payload={"networkTags": [tags]}) + + def set_ip_range_mss(self, mss=1460, network_segment=1, ip_range=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/IPRanges/{}'.format( + self.sessionID, network_segment, ip_range) + self.__sendPatch(apiPath, payload={"Mss": mss}) + + def set_eth_range_mac_auto_false(self, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"MacAuto": False}) + + def set_eth_range_mac_start(self, mac_start, network_segment=1): + self.set_eth_range_mac_auto_false(network_segment) + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"MacStart": mac_start}) + + def set_eth_range_mac_increment(self, mac_increment, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"MacIncr": mac_increment}) + + def set_eth_range_one_mac_per_ip_false(self, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"OneMacPerIP": False}) + + def set_eth_range_max_mac_count(self, count, network_segment=1): + self.set_eth_range_one_mac_per_ip_false(network_segment) + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"Count": count}) + + def set_eth_range_max_mac_count_per_agent(self, max_count_per_agent, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/EthRange'.format( + self.sessionID, network_segment) + 
self.__sendPatch(apiPath, payload={"maxCountPerAgent": max_count_per_agent}) + + def set_dns_resolver(self, name_server, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/DNSResolver'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"nameServers": [{"name": name_server}]}) + + def set_dns_resolver_cache_timeout(self, timeout=0, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}/DNSResolver'.format( + self.sessionID, network_segment) + self.__sendPatch(apiPath, payload={"cacheTimeout": timeout}) + + def set_dut_connections(self, connections, network_segment=1): + apiPath = '/api/v2/sessions/{}/config/config/NetworkProfiles/1/IPNetworkSegment/{}'.format(self.sessionID, + network_segment) + self.__sendPatch(apiPath, payload={"DUTConnections": connections}) + + def set_profile_duration(self, profile_type, value): + apiPath = '/api/v2/sessions/{}/config/config/{}/1/ObjectivesAndTimeline/TimelineSegments/1'.format( + self.sessionID, profile_type) + self.__sendPatch(apiPath, payload={"Duration": value}) + + def get_iteration_count_info(self, ap_id=1): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1'.format( + self.sessionID, ap_id) + config_type = self.get_config_type() + if config_type["traffic"]: + print('Parameter not available in traffic profile') + if config_type["attack"]: + response = self.__sendGet(apiPath, 200).json() + return response["IterationCount"] + + def get_profile_duration(self, profile_type): + apiPath = '/api/v2/sessions/{}/config/config/{}/1/ObjectivesAndTimeline/TimelineSegments/1'.format( + self.sessionID, profile_type) + response = self.__sendGet(apiPath, 200).json() + return response["Duration"] + + def set_test_duration(self, value): + config_type = self.get_config_type() + if config_type["traffic"]: + 
self.set_profile_duration(profile_type='TrafficProfiles', value=int(value)) + if config_type["attack"]: + self.set_profile_duration(profile_type='AttackProfiles', value=int(value)) + self.testDuration = int(value) + + def read_test_duration(self): + TrafficProfilesDuration = 0 + AttackProfilesDuration = 0 + config_type = self.get_config_type() + if config_type["traffic"]: + TrafficProfilesDuration = self.get_profile_duration(profile_type='TrafficProfiles') + if config_type["attack"]: + AttackProfilesDuration = self.get_profile_duration(profile_type='AttackProfiles') + self.testDuration = max(TrafficProfilesDuration, AttackProfilesDuration) + + def send_modified_config(self): + apiPath = '/api/v2/sessions/{}/config'.format(self.sessionID) + self.__sendPut(apiPath, self.config) + + def get_config_type(self): + self.config = self.get_session_config() + config_type = {"traffic": False, + "traffic_profiles": [], + "tp_primary_obj": None, + "tp_secondary_obj": None, + "tp_ssl": False, + "attack": False, + "attack_profiles": [], + "att_obj": False, + "at_ssl": False, + "dut": False} + + if len(self.config['Config']['TrafficProfiles']) > 0: + config_type['traffic'] = True + tp_profiles = self.config['Config']['TrafficProfiles'][0] + for application in tp_profiles['Applications']: + if application['ProtocolID'] not in config_type['traffic_profiles']: + config_type['traffic_profiles'].append(application['ProtocolID']) + objectives = tp_profiles['ObjectivesAndTimeline'] + objective_dm = { + "type": objectives['PrimaryObjective']['Type'], + "unit": objectives['TimelineSegments'][0]['PrimaryObjectiveUnit'], + "value": objectives['TimelineSegments'][0]['PrimaryObjectiveValue'] + } + config_type['tp_primary_obj'] = objective_dm + if len(objectives['TimelineSegments'][0]['SecondaryObjectiveValues']) > 0: + objective_dm = { + "type": objectives['SecondaryObjectives'][0]['Type'], + "unit": objectives['TimelineSegments'][0]['SecondaryObjectiveValues'][0]['Unit'], + "value": 
objectives['TimelineSegments'][0]['SecondaryObjectiveValues'][0]['Value'] + } + config_type['tp_secondary_obj'] = objective_dm + if tp_profiles['TrafficSettings']['DefaultTransportProfile']['ClientTLSProfile']['version'] != None: + config_type['tp_ssl'] = True + + if len(self.config['Config']['AttackProfiles']) > 0: + config_type['attack'] = True + at_profiles = self.config['Config']['AttackProfiles'][0] + for attack in at_profiles['Attacks']: + if attack['ProtocolID'] not in config_type['attack_profiles']: + config_type['attack_profiles'].append(attack['ProtocolID']) + objective_dm = { + "attack_rate": at_profiles['ObjectivesAndTimeline']['TimelineSegments'][0]['AttackRate'], + "max_concurrent_attack": at_profiles['ObjectivesAndTimeline']['TimelineSegments'][0][ + 'MaxConcurrentAttack'] + } + config_type['att_obj'] = objective_dm + if at_profiles['TrafficSettings']['DefaultTransportProfile']['ClientTLSProfile']['version'] != None: + config_type['tp_ssl'] = True + + if self.config['Config']['NetworkProfiles'][0]['DUTNetworkSegment'][0]['active']: + config_type['dut'] = True + return config_type + + def start_test(self, initializationTimeout=60): + apiPath = '/api/v2/sessions/{}/test-run/operations/start'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + self.startTime = self.__getEpochTime() + startID = response['id'] + print('Start ID : {}'.format(startID)) + progressPath = '/api/v2/sessions/{}/test-run/operations/start/{}'.format(self.sessionID, startID) + self.__sendGet(progressPath, 200).json() + counter = 1 + iteration = 0 + while iteration < initializationTimeout: + response = self.__sendGet(progressPath, 200).json() + print('Test Progress: {}'.format(response['progress'])) + if response['state'] == 'SUCCESS': + self.startTrafficTime = self.__getEpochTime() + return self.startTrafficTime + if response['state'] == 'ERROR': + raise Exception('Error when starting the test! 
{}'.format(self.get_test_details(self.sessionID))) + iteration += counter + time.sleep(counter) + else: + raise Exception('ERROR! Test could not start in {} seconds, test state: {}'.format(initializationTimeout, + response['state'])) + + def stop_test(self): + apiPath = '/api/v2/sessions/{}/test-run/operations/stop'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + stopID = response['id'] + print('Stop ID : {}'.format(stopID)) + progressPath = '/api/v2/sessions/{}/test-run/operations/stop/{}'.format(self.sessionID, stopID) + self.__sendGet(progressPath, 200).json() + counter = 2 + iteration = 0 + while iteration < 60: + response = self.__sendGet(progressPath, 200).json() + print('Stop Test Progress: {}'.format(response['progress'])) + if response['state'] == 'SUCCESS': + break + if response['state'] == 'ERROR': + raise Exception('Error when stopping the test! {}'.format(self.get_test_details(self.sessionID))) + iteration += counter + time.sleep(counter) + + def get_test_status(self): + apiPath = '/api/v2/sessions/{}/test'.format(self.sessionID) + return self.__sendGet(apiPath, 200).json() + + def wait_test_finished(self, timeout=300): + print('Waiting for the test to finish...') + response = self.get_test_status() + actual_duration = 0 + counter = 1 + while actual_duration < self.testDuration + timeout: + response = self.get_test_status() + if response['status'] == 'STOPPING' and not self.stopTrafficTime: + self.stopTrafficTime = self.__getEpochTime() + if response['status'] == 'STOPPED': + if response['testElapsed'] >= response['testDuration']: + print('Test gracefully finished') + self.stopTime = self.__getEpochTime() + return self.stopTime + else: + raise Exception("Error! 
Test stopped before reaching the configured duration = {}; Elapsed = {}" + .format(response['testDuration'], response['testElapsed'])) + else: + print('Test duration = {}; Elapsed = {}'.format(response['testDuration'], response['testElapsed'])) + actual_duration += counter + time.sleep(counter) + else: + print( + "Test did not stop after timeout {}s. Test status= {}. Aborting...".format(timeout, response['status'])) + self.stop_test() + raise Exception("Error! Test was aborted after timeout {}s.".format(timeout)) + + @staticmethod + def __getEpochTime(): + pattern = "%d.%m.%Y %H:%M:%S" + timeH = datetime.now().strftime(pattern) + epoch = int(time.mktime(time.strptime(timeH, pattern))) + return epoch + + def get_test_id(self): + apiPath = '/api/v2/sessions/{}/test'.format(self.sessionID) + response = self.__sendGet(apiPath, 200).json() + return response['testId'] + + def get_available_stats_name(self): + apiPath = '/api/v2/results/{}/stats'.format(self.get_test_id()) + response = self.__sendGet(apiPath, 200).json() + available_stats = [] + for stat in response: + available_stats.append(stat['name']) + return available_stats + + def get_stats_values(self, statName): + print('Get the values for {}'.format(statName)) + apiPath = '/api/v2/results/{}/stats/{}'.format(self.get_test_id(), statName) + response = self.__sendGet(apiPath, 200).json() + return response + + def get_all_stats(self, csvLocation, exportTimeout=180): + test_id = self.get_test_id() + apiPath = '/api/v2/results/{}/operations/generate-csv'.format(test_id) + response = self.__sendPost(apiPath, None).json() + apiPath = response['url'][len(self.host):] + response = self.wait_event_success(apiPath, timeout=exportTimeout) + if not response: + raise TimeoutError("Failed to download CSVs. 
Timeout reached = {} seconds".format(exportTimeout)) + apiPath = response['resultUrl'] + response = self.__sendGet(apiPath, 200, debug=False) + zf = ZipFile(io.BytesIO(response.content), 'r') + zf.extractall(csvLocation) + return response + + def get_result_ended(self, timeout=5): + apiPath = '/api/v2/results/{}'.format(self.get_test_id()) + while timeout > 0: + print('Pending result availability...') + response = self.__sendGet(apiPath, 200).json() + result_end_time = response['endTime'] + result_availability = result_end_time > 0 + if result_availability: + print('Result may now be downloaded...') + return result_availability + else: + time.sleep(1) + timeout -= 1 + raise Exception('Result are not available for {}'.format(self.get_test_id())) + + def get_applications(self): + apiPath = '/api/v2/resources/apps?include=all' + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def get_attacks(self): + apiPath = '/api/v2/resources/attacks?include=all' + response = self.__sendGet(apiPath, 200, debug=False).json() + return response + + def get_strikes(self): + apiPath = '/api/v2/resources/strikes?include=all' + response = self.__sendGet(apiPath, 200).json() + return response + + def get_application_id(self, app_name): + app_list = self.get_applications() + print('Getting application {} ID...'.format(app_name)) + for app in app_list: + if app['Name'] == app_name: + print('Application ID = {}'.format(app['id'])) + return app['id'] + + def get_strike_id(self, strike_name): + strike_list = self.get_strikes() + for strike in strike_list: + if strike['Name'] == strike_name: + print('Strike ID = {}'.format(strike['id'])) + return strike['id'] + + def get_attack_id(self, attack_name): + attack_list = self.get_attacks() + for attack in attack_list: + if attack['Name'] == attack_name: + print('Attack ID = {}'.format(attack['id'])) + return attack['id'] + + def add_attack(self, attack_name, ap_id=1): + app_id = 
self.get_attack_id(attack_name=attack_name) + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks'.format(self.sessionID, ap_id) + response = self.__sendPost(apiPath, payload={"ExternalResourceURL": app_id}).json() + return response[-1]['id'] + + def add_strike_as_attack(self, strike_name, ap_id=1): + app_id = self.get_strike_id(strike_name=strike_name) + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks'.format(self.sessionID, ap_id) + response = self.__sendPost(apiPath, payload={"ProtocolID": app_id}).json() + return response[-1]['id'] + + def add_application(self, app_name, tp_id=1): + app_id = self.get_application_id(app_name=app_name) + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications'.format(self.sessionID, tp_id) + response = self.__sendPost(apiPath, payload={"ExternalResourceURL": app_id}).json() + return response[-1]['id'] + + def add_application_action(self, app_id, action_name, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions'.format( + self.sessionID, tp_id, app_id) + self.__sendPost(apiPath, payload={"Name": action_name}).json() + + def set_application_action_value(self, app_id, action_id, param_id, value, file_value=None, source=None, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions/{}/Params/{}'.format( + self.sessionID, tp_id, app_id, action_id, param_id) + payload = {"Value": value, "FileValue": file_value, "Source": source} + self.__sendPatch(apiPath, payload) + + def delete_application_action(self, app_id, action_id, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/Applications/{}/Tracks/1/Actions/{}'.format( + self.sessionID, tp_id, app_id, action_id) + self.__sendDelete(apiPath, self.headers) + + def add_attack_action(self, att_id, action_name, ap_id=1): + apiPath = 
'/api/v2/sessions/{}/config/config/AttackProfiles/{}/Attacks/{}/Tracks/1/Actions'.format( + self.sessionID, ap_id, att_id) + self.__sendPost(apiPath, payload={"Name": action_name}).json() + + def add_attack_profile(self): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]['id'] + + def add_traffic_profile(self): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={}).json() + return response[-1]['id'] + + def set_traffic_profile_timeline(self, duration, objective_value, objective_unit=None, pr_id=1): + + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1'.format( + self.sessionID, pr_id) + response = self.__sendGet(apiPath, 200).json() + payload = {"Duration": duration, "PrimaryObjectiveValue": objective_value} + self.__sendPatch(apiPath, payload) + + def set_primary_objective(self, objective, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective'.format( + self.sessionID, tp_id) + self.__sendPut(apiPath, payload={"Type": objective, "Unit": ""}) + + def add_primary_objective(self, objective, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/PrimaryObjective'.format( + self.sessionID, tp_id) + self.__sendPatch(apiPath, payload={"Type": objective, "Unit": ""}) + + def add_secondary_objective(self, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjectives'.format( + self.sessionID, tp_id) + self.__sendPost(apiPath, payload={}).json() + + def add_secondary_objective_value(self, objective, objective_value, objective_unit=None, tp_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/SecondaryObjectives/1'.format( + 
self.sessionID, tp_id) + self.__sendPatch(apiPath, payload={"Type": objective}) + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1/SecondaryObjectiveValues/1'.format( + self.sessionID, tp_id) + self.__sendPatch(apiPath, payload={"Value": objective_value}) + if objective_unit: + self.__sendPatch(apiPath, payload={"Unit": objective_unit}) + + def set_client_http_profile(self, http_profile): + + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ClientHTTPProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, http_profile) + + def set_server_http_profile(self, http_profile): + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/TrafficSettings/DefaultTransportProfile/ServerHTTPProfile".format( + self.sessionID + ) + self.__sendPatch(apiPath, http_profile) + + def set_traffic_profile_client_tls(self, version, pr_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/TrafficSettings/DefaultTransportProfile/ClientTLSProfile'.format( + self.sessionID, pr_id) + self.__sendPatch(apiPath, payload={"version": version}) + + def set_traffic_profile_server_tls(self, version, pr_id=1): + apiPath = '/api/v2/sessions/{}/config/config/TrafficProfiles/{}/TrafficSettings/DefaultTransportProfile/ServerTLSProfile'.format( + self.sessionID, pr_id) + self.__sendPatch(apiPath, payload={"version": version}) + + def set_attack_profile_timeline(self, duration, objective_value, max_concurrent_attacks=None, iteration_count=0, + ap_id=1): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/{}/ObjectivesAndTimeline/TimelineSegments/1'.format( + self.sessionID, ap_id) + payload = {"Duration": duration, "AttackRate": objective_value, "MaxConcurrentAttack": max_concurrent_attacks, + "IterationCount": iteration_count} + self.__sendPatch(apiPath, payload) + + def set_attack_profile_client_tls(self, version, pr_id=1): + apiPath = 
'/api/v2/sessions/{}/config/config/AttackProfiles/{}/TrafficSettings/DefaultTransportProfile/ClientTLSProfile'.format( + self.sessionID, pr_id) + self.__sendPatch(apiPath, payload={"version": version}) + + def set_attack_profile_server_tls(self, version, pr_id=1): + apiPath = '/api/v2/sessions/{}/config/config/AttackProfiles/{}/TrafficSettings/DefaultTransportProfile/ServerTLSProfile'.format( + self.sessionID, pr_id) + self.__sendPatch(apiPath, payload={"version": version}) + + def set_custom_payload(self, apiPath, fileName): + resp = self.__sendPatch(apiPath, payload={"Source": "PayloadProfile"}) + if resp.status_code != 204: + print("Error patching payload type: {}".format(resp.json())) + uploadUrl = "/api/v2/resources/payloads" + payload = self.get_resource(uploadUrl, name=os.path.basename(fileName)) + if not payload: + payloadFile = open(fileName, 'rb') + resp = self.__sendPost(uploadUrl, payload=None, customHeaders=self.headers, + files={'file': payloadFile}).json() + payload = {"FileValue": {"fileName": resp["fileName"], "resourceURL": resp["resourceURL"]}} + else: + payload = {"FileValue": {"fileName": payload["name"], "resourceURL": payload["links"][0]["href"]}} + self.__sendPatch(apiPath, payload=payload) + + def set_application_custom_payload(self, appName, actionName, paramName, fileName): + config = self.get_session_config()['Config'] + applicationsByName = {app['Name']: app for app in config['TrafficProfiles'][0]['Applications']} + httpApp = applicationsByName[appName] + actionsByName = {action['Name']: action for action in httpApp['Tracks'][0]['Actions']} + postAction = actionsByName[actionName] + actionParametersByName = {param['Name']: param for param in postAction['Params']} + bodyParam = actionParametersByName[paramName] + apiPath = "/api/v2/sessions/{}/config/config/TrafficProfiles/1/Applications/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, httpApp['id'], postAction['id'], bodyParam['id'] + ) + self.set_custom_payload(apiPath, 
fileName) + + def set_custom_playlist(self, apiPath, fileName, value=None): + resp = self.__sendPatch(apiPath, payload={"Source": "Playlist"}) + if resp.status_code != 204: + print("Error patching payload type: {}".format(resp.json())) + uploadUrl = "/api/v2/resources/playlists" + playlist = self.get_resource(uploadUrl, name=os.path.basename(fileName)) + if not playlist: + playlistFile = open(fileName, 'rb') + resp = self.__sendPost(uploadUrl, payload=None, customHeaders=self.headers, + files={'file': playlistFile}).json() + payload = {"FileValue": {"fileName": resp["fileName"], "resourceURL": resp["resourceURL"]}, "Value": value} + else: + payload = {"FileValue": {"fileName": playlist["name"], "resourceURL": playlist["links"][0]["href"]}, + "Value": value} + self.__sendPatch(apiPath, payload=payload) + + def set_attack_custom_playlist(self, attackName, actionName, paramName, fileName, value="Query"): + config = self.get_session_config()['Config'] + attacksByName = {app['Name']: app for app in config['AttackProfiles'][0]['Attacks']} + attack = attacksByName[attackName] + actionsByName = {action['Name']: action for action in attack['Tracks'][0]['Actions']} + postAction = actionsByName[actionName] + actionParametersByName = {param['Name']: param for param in postAction['Params']} + bodyParam = actionParametersByName[paramName] + apiPath = "/api/v2/sessions/{}/config/config/AttackProfiles/1/Attacks/{}/Tracks/1/Actions/{}/Params/{}".format( + self.sessionID, attack['id'], postAction['id'], bodyParam['id'] + ) + self.set_custom_playlist(apiPath, fileName, value) + + def get_all_configs(self): + apiPath = '/api/v2/configs' + response = self.__sendGet(apiPath, 200).json() + return response + + def get_config_id(self, test_name): + configs = self.get_all_configs() + for config in configs: + if config['displayName'] == test_name: + print('Config ID = {}'.format(config['id'])) + return config['id'] + + def get_resource(self, apiPath, name): + resp = self.__sendGet(apiPath, 
200).json() + for resource in resp: + if resource["name"] == name: + return resource + + def save_config(self, test_name, timeout=10): + apiPath = '/api/v2/sessions/{}/config/operations/save'.format(self.sessionID) + response = self.__sendPost(apiPath, payload={"Name": test_name}).json() + apiPath = '/api/v2/sessions/{}/config/operations/save/{}'.format(self.sessionID, response['id']) + if not self.wait_event_success(apiPath, timeout): + raise TimeoutError( + "Could not save copy for test= {}. Timeout reached = {} seconds".format(test_name, timeout)) + + def load_config(self, test_name): + configID = self.get_config_id(test_name=test_name) + apiPath = '/api/v2/sessions' + response = self.__sendPost(apiPath, payload={"configUrl": configID}).json() + if response: + print('Test= {} was loaded with ID= {}'.format(test_name, response[-1]['id'])) + self.sessionID = response[-1]['id'] + return + else: + raise Exception('Failed to load test= {}'.format(test_name)) + + def collect_diagnostics(self, timeout=600): + apiPath = '/api/v2/diagnostics/operations/export' + response = self.__sendPost(apiPath, payload={"componentList": [], "sessionId": self.sessionID}).json() + apiPath = '/api/v2/diagnostics/operations/export/{}'.format(response["id"]) + response = self.wait_event_success(apiPath, timeout) + + return response['id'] diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/Statistics.py b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/Statistics.py new file mode 100644 index 00000000..97924920 --- /dev/null +++ b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/Statistics.py @@ -0,0 +1,140 @@ +import json +import os + +import pandas as pd +from tabulate import tabulate + + +class JSONObject: + def __init__(self, dict): + vars(self).update(dict) + + +def json_to_class(path): + """ + Converts a json into a class + """ + json_file = open(path) + s = json_file.read() + return json.loads(s, object_hook=JSONObject) + + +class Statistics: + criteria_message = '' + + def 
__init__(self, csvs_path): + """ + Takes in the path to csv folder + """ + self.csvs_path = csvs_path + self.headers = ['Condition', 'Status'] + self.table = [] + self.stats_failures = [] + self.config_type = None + self.stats = {} + self.include_baseline_file = True + for csv in os.listdir(csvs_path): + if csv.endswith(".csv"): + self.stats[csv[:-4]] = self.make_dataframe(os.path.join(csvs_path, csv)) + + def make_dataframe(self, csv_file_path): + ''' + Creates a data frame from a csv found at that path + :csv_file_path + ''' + with open(csv_file_path, encoding='utf-8') as csvf: + try: + csv = pd.read_csv(csvf) + except pd.errors.EmptyDataError: + raise Exception("{} is empty".format(csv_file_path)) + except pd.errors.ParserError: + raise Exception("{} is corupt".format(csv_file_path)) + return csv + + @staticmethod + def last(df): + df = df[df['Timestamp epoch ms'] == max(df['Timestamp epoch ms'])] + return df + + def preform_validation(self, validation_entry): + stats = self.stats + last = Statistics.last + try: + validation_ok = eval(validation_entry.condition) + except : + raise Exception("This validation is not written correctly: {}".format(validation_entry.condition)) + if validation_ok: + self.table.append([validation_entry.description, 'Pass']) + else: + self.table.append([validation_entry.description, 'Fail']) + self.stats_failures.append(validation_entry.description) + + def validate_criteria_file(self, criteria_path): + """ + Preforms specific validation for a config and decides if the baseline validation needs to be added + criteria path: path to the json criteria + """ + validator = json_to_class(criteria_path) + self.include_baseline_file = validator.include_baseline + if self.config_type['dut']: + validator = validator.DUT + else: + validator = validator.B2B + for validation_entry in validator: + self.preform_validation(validation_entry) + + def validate_baseline_file(self, criteria_path): + """ + Checks what type of profiles are present in the test 
and preforms general validation + criteria path: path to the json criteria + config_type: A dictionary that flags the types of profiles present inside the test + """ + validator = json_to_class(criteria_path) + if self.config_type['dut']: + validator = validator.DUT + else: + validator = validator.B2B + if self.config_type['traffic'] or self.config_type['attack']: + for validation_entry in validator.general: + self.preform_validation(validation_entry) + else: + raise Exception('The config does not have an attack or traffic profile') + if self.config_type['traffic']: + for validation_entry in validator.traffic: + self.preform_validation(validation_entry) + if self.config_type['attack']: + for validation_entry in validator.attack: + self.preform_validation(validation_entry) + + def validate_mdw_stats(self, config_type, config_path=""): + """ + Using a the criteria json and the baseline json, validates the resources returned after the run. + config_type: A dictionary that flags the types of profiles present inside the test + config_name: same name as the test that ran. 
+ """ + + self.config_type = config_type + if os.path.exists(config_path): + config_name = os.path.basename(config_path) + print("Config: {}\n\n".format(config_name)) + criteria_path = os.path.join(config_path, 'validation.json') + print(criteria_path) + print('Running Validations for {}'.format(config_name)) + + if os.path.exists(criteria_path): + + try: + self.validate_criteria_file(criteria_path) + except AttributeError as e: + print('Criteria {} could not be applied due to: {}'.format(config_name, e)) + else: + self.include_baseline_file = True + if self.include_baseline_file: + print('Baseline validation applied') + criteria_path = os.path.join("../resources", "baseline_validation.json") + self.validate_baseline_file(criteria_path) + else: + print('Baseline validation skipped') + print(tabulate(self.table, self.headers, tablefmt="grid")) + return "; ".join(self.stats_failures) + diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-310.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-310.pyc new file mode 100644 index 00000000..23c7f528 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-310.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-39.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-39.pyc new file mode 100644 index 00000000..fdd58f08 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/REST_WRAPPER.cpython-39.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-310.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-310.pyc new file mode 100644 index 00000000..bee0cc42 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-310.pyc differ diff --git a/utils/RFC 
6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-39.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-39.pyc new file mode 100644 index 00000000..7529f336 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/RESTasV3.cpython-39.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-310.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-310.pyc new file mode 100644 index 00000000..f9736174 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-310.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-39.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-39.pyc new file mode 100644 index 00000000..37bf089f Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/Statistics.cpython-39.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-310.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-310.pyc new file mode 100644 index 00000000..8faf90ff Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-310.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-39.pyc b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-39.pyc new file mode 100644 index 00000000..1513db34 Binary files /dev/null and b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/__pycache__/util.cpython-39.pyc differ diff --git a/utils/RFC 6349/testsuite/rest_api_wrapper/lib/util.py b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/util.py new file mode 100644 index 00000000..a4da5dc8 --- /dev/null +++ b/utils/RFC 6349/testsuite/rest_api_wrapper/lib/util.py @@ -0,0 +1,166 @@ +#!/usr/bin/env python +import re +import sys 
+import time +import subprocess +import argparse +import socket +import telnetlib +#from paramiko.util import log_to_file +import logging +import pytest +import re, os +import datetime +import pandas as pd +from copy import deepcopy +#libpath = os.path.abspath(__file__+"/../../testsuite") +#sys.path.insert(0,libpath) +#from ixload import IxLoad +from tabulate import tabulate +#version 1.0 +def result_log(ret_name=0): + path = os.path.abspath(__file__+"/../../../../") + os.chdir(path) + datestring = datetime.datetime.now().strftime("%Y_%m_%d_%H_%M_%S") + name = "Result"+str(datestring) + os.mkdir(name) + if ret_name: + return name + return name + + +def parse_status(result): + #[{"status": 0, "log": err}, {"status": 1}] + for r in result: + if (r["status"]==0): + return ({"fail":r['log']}) + return ({"pass":"success"}) + +def pytest_assert(logger, condition, message = None): + __tracebackhide__ = True + if not condition: + set_logger(logger, level="ERROR", message=message) + pytest.fail(message) + else: + set_logger(logger, level="INFO", message="Test Passed") + +def consolidate_output(logger, input): + new_dict = {} + for key in input.keys(): + for k, v in input[key].items(): + new_dict[key] = (k, v) + headers = ["Type", "Buffer", "Average"] + #print(tabulate([(k,) + v for k, v in new_dict.items()], headers=headers)) + set_logger(logger, level="INFO", message="\n" + str(tabulate([(k,) + v for k, v in new_dict.items()], headers=headers))) + + +def consolidate_result(logger, input): + pd.set_option('display.max_rows', 500) + pd.set_option('display.max_columns', 500) + column_name = 'buffer_size' + ' |' + ' average (Mbps)' + input_dict = {"buffer_size" : [], "average": []} + for column, values in input.items(): + row_value = [] + for buffer_size, avg_value in values.items(): + input_dict["buffer_size"].append(str(buffer_size)) + input_dict["average"].append(str(avg_value)) + df = pd.DataFrame(input_dict, index = list(input.keys())) + print(df) + set_logger(logger, 
level="INFO", message= df) + + +def set_logger( + logger, + log_format='%(asctime)-8s %(levelname)-8s %(message)s\n', + log_name='', + level = 'INFO', + message = '', + ): + log_file = logger + log = logging.getLogger(log_name) + log_formatter = logging.Formatter(log_format) + if log_file: + file_handler_info = logging.FileHandler(log_file) + else: + file_handler_info = logging.FileHandler(log) + file_handler_info.setFormatter(log_formatter) + + log_level = getattr(logging, level) + log.addHandler(file_handler_info) + + log.setLevel(log_level) + log.log(log.getEffectiveLevel(), message) + + log.handlers = [] + + file_handler_info.flush() + +def logger_msg(logger, msg, level='INFO'): + set_logger(logger, level=level, message=msg) + + +def maxtput(tputDict): + max = 0 + tput = {} + for key, value in tputDict.items(): + for key2 in value.keys(): + if int(key2) > int(max): + max = key2 + tput = {key: {key2: value[key2]}} + return (tput) + +def average(lst): + sum_num = 0 + for t in lst: + sum_num = sum_num + int(t) + return sum_num / len(lst) + +# def ping_works(logger, payload_size, args): +# # we capture the output to prevent ping +# # from printing to terminal +# tn = telnetlib.Telnet(args['host'], 8021) +# tn.read_until(b"login: ") +# tn.write(bytes(args['port'], 'ascii') + b"\r\n") +# ping_data = "ping" + " " + str(args['target']) + " " + "-I ixint1 -c 1 -s" + " " + str(payload_size) + " " + "-M do" +# #sys.stdout.write('%s: ' % ping_data) +# if b'#' in tn.read_until(b'#', timeout=5): +# tn.write(bytes(ping_data, 'ascii') + b"\r\n") +# if b'ttl' in tn.read_until(b'ttl', timeout=5): +# sys.stdout.write('%s: ' % "success") +# sys.stdout.write('%s: ' % tn.read_until(b'ttl', timeout=5)) +# return True +# else: +# sys.stdout.write('%s: '% tn.read_until(b'PING', timeout=5)) +# return False + +# def telnet(logger, host, port, ip, obj): +# lo = 0 # MTUs lower or equal do work +# hi = 9000 # MTUs greater or equal don't work +# #print('>>> PMTU to %s in range [%d, %d)' % 
(args.target, lo, hi)) +# arg = {'host':host, 'port': port, 'target': ip, 'lo':0, 'hi': 9000} +# while lo + 1 < hi: +# mid = (lo + hi) // 2 + +# sys.stdout.write('%d: ' % mid) +# sys.stdout.flush() +# for i in range(2): +# if ping_works(logger, mid, arg): +# import pdb;pdb.set_trace() +# lo = mid +# break +# else: +# import pdb;pdb.set_trace() +# sys.stdout.write('* ') +# sys.stdout.flush() +# time.sleep(0.2) +# else: +# import pdb;pdb.set_trace() +# hi = mid +# print('') + +# # header_size = 28 if args.ipv4 else 48 +# header_size = 28 +# print('>>> optimal MTU to %s: %d = %d' % ( +# arg['target'], lo, lo +# )) +# return (lo) \ No newline at end of file diff --git a/utils/rest_api_wrapper/RESTasV3.py b/utils/rest_api_wrapper/RESTasV3.py index b7e66b09..1bef95da 100644 --- a/utils/rest_api_wrapper/RESTasV3.py +++ b/utils/rest_api_wrapper/RESTasV3.py @@ -64,48 +64,6 @@ def __sendPost(self, url, payload, customHeaders=None, files=None, debug=True): return response - - - def sendPost(self, url, payload, customHeaders=None, files=None, debug=True): - expectedResponse = [200, 201, 202, 204] - print("POST at URL: {} with payload: {}".format(url, payload)) - payload = json.dumps(payload) if customHeaders is None else payload - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) - if debug: - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) - if response.status_code == 401: - print('Token has expired, resending request') - self.refresh_access_token() - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) - if self.verify and response.status_code not in expectedResponse: - raise Exception( - 'Unexpected 
response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) - - return response - - def sendGet(self, url, expectedResponse, customHeaders=None, debug=True): - print("GET at URL: {}".format(url)) - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) - if debug: - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if response.status_code == 401: - print('Token has expired, resending request') - self.refresh_access_token() - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if self.verify and response.status_code != expectedResponse: - raise Exception( - 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) - - return response - def sendPost(self, url, payload, customHeaders=None, files=None, debug=True): expectedResponse = [200, 201, 202, 204] print("POST at URL: {} with payload: {}".format(url, payload)) @@ -127,48 +85,22 @@ def sendPost(self, url, payload, customHeaders=None, files=None, debug=True): 'Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) return response - - def sendGet(self, url, expectedResponse, customHeaders=None, debug=True): + + def __sendGet(self, url, expectedResponse, customHeaders=None, debug=True): print("GET at URL: {}".format(url)) - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) + response = self.session.get('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers) if debug: print("GET response message: {}, response code: {}".format(response.content, response.status_code)) if response.status_code == 401: print('Token has expired, resending request') self.refresh_access_token() - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) + response = self.session.get('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers) print("GET response message: {}, response code: {}".format(response.content, response.status_code)) if self.verify and response.status_code != expectedResponse: - raise Exception( - 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + raise Exception('Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) return response - - def sendPost(self, url, payload, customHeaders=None, files=None, debug=True): - expectedResponse = [200, 201, 202, 204] - print("POST at URL: {} with payload: {}".format(url, payload)) - payload = json.dumps(payload) if customHeaders is None else payload - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) - if debug: - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) - if response.status_code == 401: - print('Token has expired, resending request') - self.refresh_access_token() - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) - if self.verify and response.status_code not in expectedResponse: - raise Exception( - 'Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) - - return response - def sendGet(self, url, expectedResponse, customHeaders=None, debug=True): print("GET at URL: {}".format(url)) response = self.session.get('{}{}'.format(self.host, url), @@ -187,61 +119,23 @@ def sendGet(self, url, expectedResponse, customHeaders=None, debug=True): return response - def sendPost(self, url, payload, customHeaders=None, files=None, debug=True): - expectedResponse = [200, 201, 202, 204] - print("POST at URL: {} with payload: {}".format(url, payload)) - payload = json.dumps(payload) if customHeaders is None else payload - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) + def __sendPut(self, url, payload, customHeaders=None, debug=True): + print("PUT at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.put('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers, data=json.dumps(payload)) if debug: - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) + print("PUT response message: {}, response code: {}".format(response.content, response.status_code)) if response.status_code == 401: print('Token has expired, resending request') self.refresh_access_token() - response = self.session.post('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers, data=payload, - files=files, verify=False) - print("POST response message: {}, response code: {}".format(response.content, response.status_code)) + response = self.session.put('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers, data=json.dumps(payload)) + print("PUT response message: {}, response code: {}".format(response.content, response.status_code)) if self.verify and response.status_code not in 
expectedResponse: - raise Exception( - 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) - - return response - - def sendGet(self, url, expectedResponse, customHeaders=None, debug=True): - print("GET at URL: {}".format(url)) - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) - if debug: - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if response.status_code == 401: - print('Token has expired, resending request') - self.refresh_access_token() - response = self.session.get('{}{}'.format(self.host, url), - headers=customHeaders if customHeaders else self.headers) - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if self.verify and response.status_code != expectedResponse: - raise Exception( - 'Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) - - return response - def __sendGet(self, url, expectedResponse, customHeaders=None, debug=True): - print("GET at URL: {}".format(url)) - response = self.session.get('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers) - if debug: - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if response.status_code == 401: - print('Token has expired, resending request') - self.refresh_access_token() - response = self.session.get('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers) - print("GET response message: {}, response code: {}".format(response.content, response.status_code)) - if self.verify and response.status_code != expectedResponse: raise Exception('Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) return response - - def __sendPut(self, url, payload, customHeaders=None, debug=True): + + def sendPut(self, url, payload, customHeaders=None, debug=True): print("PUT at URL: {} with payload: {}".format(url, payload)) expectedResponse = [200, 204] response = self.session.put('{}{}'.format(self.host, url), headers=customHeaders if customHeaders else self.headers, data=json.dumps(payload)) @@ -272,6 +166,22 @@ def __sendPatch(self, url, payload, debug=True): raise Exception('Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) return response + + def sendPatch(self, url, payload, debug=True): + print("PATCH at URL: {} with payload: {}".format(url, payload)) + expectedResponse = [200, 204] + response = self.session.patch('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + if debug: + print("PATCH response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.patch('{}{}'.format(self.host, url), headers=self.headers, data=json.dumps(payload)) + print("PATCH response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception('Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response def __sendDelete(self, url, headers=None, debug=True): print("DELETE at URL: {}".format(url)) @@ -288,6 +198,22 @@ def __sendDelete(self, url, headers=None, debug=True): raise Exception('Unexpected response code. 
Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) return response + + def sendDelete(self, url, headers=None, debug=True): + print("DELETE at URL: {}".format(url)) + expectedResponse = [200, 202, 204] + response = self.session.delete('%s%s' % (self.host, url), headers=headers) + if debug: + print("DELETE response message: {}, response code: {}".format(response.content, response.status_code)) + if response.status_code == 401: + print('Token has expired, resending request') + self.refresh_access_token() + response = self.session.delete('%s%s' % (self.host, url), headers=headers) + print("DELETE response message: {}, response code: {}".format(response.content, response.status_code)) + if self.verify and response.status_code not in expectedResponse: + raise Exception('Unexpected response code. Actual: {} Expected: {}'.format(response.status_code, expectedResponse)) + + return response def get_automation_token(self): apiPath = '/auth/realms/keysight/protocol/openid-connect/token'